repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
RonenNess/Fileter
|
fileter/files_iterator.py
|
FilesIterator.match_filters
|
python
|
def match_filters(self, path):
    """
    Return True if the given path passes this iterator's filter set.

    All Required filters must match; the first Include filter that matches
    accepts the path immediately, and the first Exclude filter that matches
    rejects it immediately, so the order of filters is meaningful.

    :param path: path to check.
    :return: True if the path passes the filters, False otherwise.
    """
    required_ok = True
    for curr_filter, curr_type in self.__filters:
        if curr_type == self.FilterType.Required:
            # once one required filter failed there is no need to run the rest
            if required_ok and not curr_filter.match(path):
                required_ok = False
        elif curr_type == self.FilterType.Include:
            # first matching include filter accepts the file immediately
            if curr_filter.match(path):
                return True
        elif curr_type == self.FilterType.Exclude:
            # first matching exclude filter rejects the file immediately
            if curr_filter.match(path):
                return False
    # no include/exclude filter decided; fall back to the required filters
    return required_ok
|
Gets a filename and returns True if the file passes all filters and should be processed.
:param path: path to check.
:return: True if it passes the filters, False otherwise.
|
train
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/files_iterator.py#L284-L311
| null |
class FilesIterator(object):
    """
    Base class to iterate over file sources and perform pre-defined actions on them.

    This class can be used in two ways:
    1. as an iterator, if you want to iterate files and use them externally.
    2. as an object with a pre-defined processing function that iterates and
       processes files internally.

    Whether you use this as an iterator or as an object, all file paths are
    processed via the process_file() function.
    """

    class SourceTypes:
        # what a source should yield while scanning
        FilesOnly = {"ret_files": True, "ret_folders": False}
        FoldersOnly = {"ret_files": False, "ret_folders": True}
        FilesAndFolders = {"ret_files": True, "ret_folders": True}

    # default source type used by the add_* helpers
    DefaultSourceType = SourceTypes.FilesOnly

    class FilterType:
        # All required filters must match in order for a file to be processed.
        Required = 0
        # If a file matches at least one Include filter it is processed
        # immediately, even if it doesn't match all required filters. This
        # collides with Exclude; the first filter to match decides, so the
        # order of filters is meaningful.
        Include = 1
        # If a file matches at least one Exclude filter it is ignored
        # immediately, even if it matches all required filters (see above).
        Exclude = 2

    # default filter type used by the add_filter_* helpers
    DefaultFilterType = FilterType.Required

    def __init__(self):
        """
        Init the iterator with no sources and no filters.
        """
        self.__sources = []
        self.__filters = []

    def add_source(self, source):
        """
        Add a source to this iterator.
        :param source: files source, must be an object inheriting from sources.SourceAPI.
        :return: self, for chaining.
        """
        self.__sources.append(source)
        return self

    def add_file(self, filepath):
        """
        Add a single file source from path (string).
        :param filepath: file path as string. Can also be a list of files.
        """
        self.add_source(FileSource(filepath))
        return self

    def add_folder(self, path, depth=None, source_type=DefaultSourceType):
        """
        Add a folder source to scan recursively from path (string).
        :param path: folder path.
        :param depth: if provided, depth limit. 0 = first level only.
        :param source_type: what to return; files only, folders only, or both.
        """
        self.add_source(FolderSource(path, depth, **source_type))
        return self

    def add_pattern(self, pattern, root=".", depth=None, source_type=DefaultSourceType):
        """
        Add a recursive folder scan using linux-style patterns.
        :param pattern: pattern or list of patterns to match.
        :param root: root to start from (defaults to '.').
        :param depth: if provided, depth limit. 0 = first level only.
        :param source_type: what to return; files only, folders only, or both.
        """
        self.add_source(PatternSource(pattern, root, depth, **source_type))
        return self

    def add_filtered_folder(self, path, regex, depth=None, source_type=DefaultSourceType):
        """
        Add a folder source to scan recursively, with a regex filter on directories.
        :param path: folder path.
        :param regex: regex string to filter folders by.
        :param depth: if provided, depth limit. 0 = first level only.
        :param source_type: what to return; files only, folders only, or both.
        """
        self.add_source(FilteredFolderSource(path, regex, depth, **source_type))
        return self

    def add_filter(self, files_filter, filter_type=DefaultFilterType):
        """
        Add a files filter to this iterator.
        :param files_filter: filter to apply, must be an object inheriting from filters.FilterAPI.
        :param filter_type: filter behavior, see FilterType for details.
        """
        self.__filters.append((files_filter, filter_type))
        return self

    def add_filter_by_pattern(self, pattern, filter_type=DefaultFilterType):
        """
        Add a files filter by linux-style pattern to this iterator.
        :param pattern: linux-style files pattern (or list of patterns).
        :param filter_type: filter behavior, see FilterType for details.
        """
        self.add_filter(FilterPattern(pattern), filter_type)
        return self

    def add_filter_by_regex(self, regex_expression, filter_type=DefaultFilterType):
        """
        Add a files filter by regex to this iterator.
        :param regex_expression: regex string to apply.
        :param filter_type: filter behavior, see FilterType for details.
        """
        self.add_filter(FilterRegex(regex_expression), filter_type)
        return self

    def add_filter_by_extension(self, extensions, filter_type=DefaultFilterType):
        """
        Add a files filter by extensions to this iterator.
        :param extensions: single extension or list of extensions to filter by,
                           without the dot, e.g. ["py", "js", "cpp"].
        :param filter_type: filter behavior, see FilterType for details.
        """
        self.add_filter(FilterExtension(extensions), filter_type)
        return self

    def match_filters(self, path):
        """
        Get filename and return True if the file passes all filters and
        should be processed.
        (Restored here: next() depends on this method.)
        :param path: path to check.
        :return: True if it passes the filters, False otherwise.
        """
        # indicates whether all required filters matched so far
        all_required_match = True
        for filt, ftype in self.__filters:
            if all_required_match and ftype == self.FilterType.Required and not filt.match(path):
                all_required_match = False
            elif ftype == self.FilterType.Include and filt.match(path):
                return True
            elif ftype == self.FilterType.Exclude and filt.match(path):
                return False
        # no include/exclude filter matched; fall back to required filters
        return all_required_match

    def __iter__(self):
        """
        Return an iterator (the generator produced by next()).
        """
        return self.next()

    def get_all(self):
        """
        Return all files in this iterator as a list.
        """
        return [x for x in iter(self)]

    def process_all(self):
        """
        Iterate internally over all files and call process_file().
        Use this if you want the pre-defined processing function rather than
        external iteration.
        """
        for _ in self.next():
            pass

    def dry_run(self):
        """
        Iterate over all files and just print them.
        This does not modify anything; it only fetches files from all sources
        and applies filters on them.
        """
        for f in self.next(dryrun=True):
            # print() works on both Python 2 and 3 (the original 'print f'
            # is a SyntaxError on Python 3)
            print(f)

    def next(self, dryrun=False):
        """
        Generator over files in all sources; use this to iterate externally.
        :param dryrun: if True, only yields filenames without processing them.
        """
        # iteration-start hook
        self.on_start(dryrun)
        # current directory, used to fire the directory-enter hook
        curr_dir = ""
        for src in self.__sources:
            self.on_start_source(src, dryrun)
            for filename in src.next():
                # skip files that don't pass the filters
                if not self.match_filters(filename):
                    continue
                # fire the directory-enter hook when the directory changes
                new_curr_dir = os.path.dirname(filename)
                if new_curr_dir != curr_dir:
                    self.on_enter_dir(new_curr_dir, dryrun)
                    curr_dir = new_curr_dir
                # process the file; a None result omits it from the iteration
                curr = self.process_file(filename, dryrun)
                if curr is not None:
                    yield curr
            self.on_end_source(src, dryrun)
        # iteration-end hook; note the original ended with
        # 'raise StopIteration', which PEP 479 (Python 3.7+) turns into a
        # RuntimeError inside a generator — returning normally ends iteration.
        self.on_end(dryrun)

    def on_enter_dir(self, directory, dryrun):
        """
        Hook called when iteration changes directory.
        :param directory: the directory we are now in.
        :param dryrun: True when in dry-run mode (must not change files).
        """
        pass

    def on_start_source(self, source, dryrun):
        """
        Hook called when a new source starts being processed.
        :param source: the source we started processing.
        :param dryrun: True when in dry-run mode (must not change files).
        """
        pass

    def on_end_source(self, source, dryrun):
        """
        Hook called when we finish iterating a source.
        :param source: the source we finished processing.
        :param dryrun: True when in dry-run mode (must not change files).
        """
        pass

    def on_start(self, dryrun):
        """
        Hook called when an iteration starts (e.g. open an output file or log).
        :param dryrun: True when in dry-run mode (must not change files).
        """
        pass

    def on_end(self, dryrun):
        """
        Hook called when an iteration ends (e.g. close an output file or log).
        :param dryrun: True when in dry-run mode (must not change files).
        """
        pass

    def process_file(self, path, dryrun):
        """
        Called for every file processed. Subclasses may return None to skip
        files, or transform the name before it is yielded.
        :param path: current file path.
        :param dryrun: True when in dry-run mode (must not change files).
        :return: the filename, or None to omit it from the iteration loop.
        """
        return path
|
RonenNess/Fileter
|
fileter/sources/folder_source.py
|
FolderSource.next
|
python
|
def next(self):
    """
    Yield paths under the root folder (recursively), honoring the depth limit
    and the files/folders return flags.

    Fix: the original ended with 'raise StopIteration', which PEP 479
    (Python 3.7+) turns into a RuntimeError inside a generator; returning
    normally ends the iteration.
    """
    # depth of the starting root, used to enforce the depth limit
    base_depth = self.__root.count(os.path.sep)
    for root, sub_folders, files in os.walk(self.__root):
        # apply the folder filter
        # NOTE(review): 'continue' skips this folder's entries, but os.walk
        # still descends into its children — confirm whether whole-tree
        # pruning (clearing sub_folders) was intended.
        if not self.filter_folder(root):
            continue
        # skip anything deeper than the allowed depth
        if self.__depth_limit is not None:
            curr_depth = root.count(os.path.sep)
            if curr_depth - base_depth > self.__depth_limit:
                continue
        # yield the folder itself if requested
        if self.__ret_folders:
            yield root
        # yield contained files if requested
        if self.__ret_files:
            for f in files:
                yield os.path.join(root, f)
|
Return all files in folder.
|
train
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/sources/folder_source.py#L41-L71
|
[
"def filter_folder(self, folder):\n \"\"\"\n Optional filter to apply on folders. If return False will skip this whole folder tree.\n \"\"\"\n return True\n"
] |
class FolderSource(SourceAPI):
    """
    A source that scans a folder tree recursively.
    """

    def __init__(self, root, depth_limit=None, ret_files=True, ret_folders=False):
        """
        Create the folder source.

        :param root: root folder to scan.
        :param depth_limit: recursion depth limit; None (default) = unlimited,
                            0 = first level only.
        :param ret_files: when True (default), files are yielded while iterating.
        :param ret_folders: when True, folders are yielded while iterating.
        """
        self.__root = root
        self.__depth_limit = depth_limit
        self.__ret_files = ret_files
        self.__ret_folders = ret_folders

    def filter_folder(self, folder):
        """
        Hook for subclasses: return False to skip a folder while scanning.
        The base implementation accepts everything.
        """
        return True
|
RonenNess/Fileter
|
fileter/filters/extension_filter.py
|
FilterExtension.match
|
python
|
def match(self, filepath):
    """
    Return True if the file's extension is one of the accepted extensions.

    :param filepath: path to test.
    :return: True on match, False otherwise.
    """
    # a path without any dot has no extension to compare
    if "." not in filepath:
        return False
    # compare the lowercased text after the last dot
    extension = filepath.lower().rsplit(".", 1)[-1]
    return extension in self.__extensions
|
The function to check file.
Should return True if match, False otherwise.
|
train
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/filters/extension_filter.py#L24-L34
| null |
class FilterExtension(FilterAPI):
    """
    Filter files by their extension.
    """

    def __init__(self, extensions):
        """
        Create the extensions filter.

        :param extensions: one extension or a list of extensions to accept,
                           given without the leading dot, e.g. ["py", "js"].
        """
        # normalize a single extension into a one-element list
        if isinstance(extensions, (list, tuple)):
            self.__extensions = extensions
        else:
            self.__extensions = [extensions]
|
RonenNess/Fileter
|
fileter/filters/pattern_filter.py
|
FilterPattern.match
|
python
|
def match(self, filepath):
    """
    Return True if the path matches at least one of the filter's patterns.

    :param filepath: path to test.
    :return: True on match, False otherwise.
    """
    # fnmatch.fnmatch normalizes case on both sides, exactly like
    # fnmatch.filter does for a one-element list
    return any(fnmatch.fnmatch(filepath, pattern) for pattern in self.__pattern)
|
The function to check file.
Should return True if match, False otherwise.
|
train
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/filters/pattern_filter.py#L24-L32
| null |
class FilterPattern(FilterAPI):
    """
    Filter files by linux-style (fnmatch) patterns.
    """

    def __init__(self, pattern):
        """
        Create the pattern filter.

        :param pattern: a single pattern or a list of patterns to accept.
        """
        # normalize a single pattern into a one-element list
        if isinstance(pattern, (list, tuple)):
            self.__pattern = pattern
        else:
            self.__pattern = [pattern]
|
RonenNess/Fileter
|
fileter/iterators/remove_files.py
|
RemoveFiles.process_file
|
python
|
def process_file(self, path, dryrun):
    """
    Remove the given file (optionally after asking the user) and return its path.

    :param path: current file path.
    :param dryrun: if True, do not touch the filesystem, just return the path.
    :return: the path when the file was handled; None when the user declined
             (a None return omits the file from the iteration loop).
    """
    # in dry-run mode never touch the filesystem
    if dryrun:
        return path
    # raw_input() was renamed to input() in Python 3; resolve whichever exists
    try:
        ask = raw_input
    except NameError:
        ask = input
    # remove if forced, or if the user confirmed interactively
    if self.__force or ask("Remove file '%s'? [y/N]" % path).lower() == "y":
        os.remove(path)
        return path
|
Remove files and return filename.
|
train
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/iterators/remove_files.py#L27-L38
| null |
class RemoveFiles(files_iterator.FilesIterator):
    """
    An iterator that deletes every file it visits.
    """

    def __init__(self, force=False):
        """
        Create the remove-files iterator.

        :param force: when True, remove files without asking; otherwise prompt
                      for confirmation before every removal.
        """
        super(RemoveFiles, self).__init__()
        self.__force = force
|
RonenNess/Fileter
|
fileter/sources/files_pattern.py
|
PatternSource.match_pattern
|
python
|
def match_pattern(self, path):
    """
    Check whether the given path matches the source pattern(s).

    :param path: path to check.
    :return: True if any pattern matches, False otherwise.
    """
    # fnmatch.fnmatch normalizes case on both sides, exactly like
    # fnmatch.filter does for a one-element list
    return any(fnmatch.fnmatch(path, curr) for curr in self.__pattern)
|
Return if given path match the pattern(s).
:param path: path to check.
:return: True if match, False otherwise.
|
train
|
https://github.com/RonenNess/Fileter/blob/5372221b4049d5d46a9926573b91af17681c81f3/fileter/sources/files_pattern.py#L69-L79
| null |
class PatternSource(SourceAPI):
    """
    A recursive folder scanner that only yields paths matching fnmatch pattern(s).
    """

    def __init__(self, pattern, root='.', depth_limit=None, ret_files=True, ret_folders=False):
        """
        Init the pattern source with a root folder.

        :param pattern: fnmatch pattern(s) to match; a single string or a list
                        of strings.
        :param root: root folder to scan (defaults to '.').
        :param depth_limit: recursion depth limit; None (default) = unlimited,
                            0 = first level only.
        :param ret_files: if True (default), matching files are yielded.
        :param ret_folders: if True, matching folders are yielded.
        """
        # normalize a single pattern into a list
        self.__pattern = pattern if isinstance(pattern, (list, tuple)) else [pattern]
        self.__root = root
        self.__depth_limit = depth_limit
        self.__ret_files = ret_files
        self.__ret_folders = ret_folders

    def next(self):
        """
        Yield all matching files/folders under the root folder.
        """
        # depth of the starting root, used to enforce the depth limit
        base_depth = self.__root.count(os.path.sep)
        for root, sub_folders, files in os.walk(self.__root):
            # skip anything deeper than the allowed depth
            if self.__depth_limit is not None:
                curr_depth = root.count(os.path.sep)
                if curr_depth - base_depth > self.__depth_limit:
                    continue
            # yield the folder itself if requested and it matches
            if self.__ret_folders and self.match_pattern(root):
                yield root
            # yield matching files; fix: the original ignored the
            # ret_files flag (unlike the sibling FolderSource)
            if self.__ret_files:
                for f in files:
                    curr_file = os.path.join(root, f)
                    if self.match_pattern(curr_file):
                        yield curr_file
        # fix: the original ended with 'raise StopIteration', which PEP 479
        # (Python 3.7+) turns into a RuntimeError inside a generator;
        # returning normally ends the iteration.
|
jeroyang/txttk
|
txttk/report.py
|
Report.update
|
python
|
def update(self, report):
    """
    Add the items from the given report, re-boxed under this report's title.
    """
    for attr in ('tp', 'fp', 'fn'):
        getattr(self, attr).extend(pack_boxes(getattr(report, attr), self.title))
|
Add the items from the given report.
|
train
|
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/report.py#L117-L123
|
[
"def pack_boxes(list_of_content, tag):\n return [TagBox(content, tag) for content in list_of_content]\n"
] |
class Report:
    """
    Holds the results of an experiment and presents its precision, recall
    and F1 score.
    """

    def __init__(self, tp=None, fp=None, fn=None, title=None):
        """
        :param tp: the true positive items (defaults to empty).
        :param fp: the false positive items (defaults to empty).
        :param fn: the false negative items (defaults to empty).
        :param title: the title of this report.
        """
        # fix: use None sentinels instead of mutable default arguments
        # ([]), which are shared between calls
        self.tp = pack_boxes(tp if tp is not None else [], title)
        self.fp = pack_boxes(fp if fp is not None else [], title)
        self.fn = pack_boxes(fn if fn is not None else [], title)
        self.title = title

    def precision(self):
        """Return tp / (tp + fp), or 0.0 when there are no positives."""
        try:
            return float(len(self.tp)) / (len(self.tp) + len(self.fp))
        except ZeroDivisionError:
            return 0.0

    def recall(self):
        """Return tp / (tp + fn), or 0.0 when there are no gold items."""
        try:
            return float(len(self.tp)) / (len(self.tp) + len(self.fn))
        except ZeroDivisionError:
            return 0.0

    def f1(self):
        """Return the harmonic mean of precision and recall (0.0 when both are 0)."""
        r = self.recall()
        p = self.precision()
        try:
            return float(2 * r * p) / (r + p)
        except ZeroDivisionError:
            return 0.0

    def __repr__(self):
        syntax = 'Report<P{p:.3f} R{r:.3f} F{f:.3f} {t!r}>'
        return syntax.format(p=self.precision(), r=self.recall(),
                             f=self.f1(), t=self.title)

    def update(self, report):
        """
        Add the items from the given report, re-boxed under this report's
        title. (Restored here: from_reports depends on this method.)
        """
        self.tp.extend(pack_boxes(report.tp, self.title))
        self.fp.extend(pack_boxes(report.fp, self.title))
        self.fn.extend(pack_boxes(report.fn, self.title))

    @classmethod
    def from_reports(cls, reports, title):
        """Merge several reports (with distinct titles) into one titled report."""
        if len(reports) != len(set([rep.title for rep in reports])):
            # duplicate titles would make the merged report ambiguous
            raise KeyError('Cannot merge reports with same titles')
        metareport = cls([], [], [], title)
        for report in reports:
            metareport.update(report)
        return metareport

    def split(self):
        """
        Split this report into one report per tag.
        :raises AssertionError: when the stored items are not tag boxes.
        """
        tag2report = OrderedDict()
        try:
            for tagbox, _ in self.tp:
                tag2report.setdefault(tagbox.tag, Report()).tp.append(tagbox.content)
            for tagbox, _ in self.fp:
                tag2report.setdefault(tagbox.tag, Report()).fp.append(tagbox.content)
            for tagbox, _ in self.fn:
                tag2report.setdefault(tagbox.tag, Report()).fn.append(tagbox.content)
            for tag, report in tag2report.items():
                report.title = tag
        except AttributeError:
            raise AssertionError('The report cannot be split')
        return list(tag2report.values())

    @classmethod
    def from_scale(cls, gold_number, precision, recall, title):
        """
        Deprecated, kept for backward compatibility; prefer from_score.
        """
        tp_count = get_numerator(recall, gold_number)
        positive_count = get_denominator(precision, tp_count)
        fp_count = positive_count - tp_count
        fn_count = gold_number - tp_count
        return cls(['tp'] * tp_count,
                   ['fp'] * fp_count,
                   ['fn'] * fn_count,
                   title)

    @classmethod
    def from_score(cls, precision, recall, title, goldstandard_size=1000):
        """Build a synthetic report reproducing the given precision/recall."""
        tp_count = get_numerator(recall, goldstandard_size)
        positive_count = get_denominator(precision, tp_count)
        fp_count = positive_count - tp_count
        fn_count = goldstandard_size - tp_count
        return cls(['tp'] * tp_count,
                   ['fp'] * fp_count,
                   ['fn'] * fn_count,
                   title)

    def plot(self, split_report=False, **argkw):
        """Scatter-plot precision/recall (one point per sub-report when split_report)."""
        with prplot(**argkw) as ax:
            size = argkw.get('size', 6)
            fontsize = argkw.get('fontsize', 2 * size)
            ax.set_title(self.title)
            if split_report:
                reports = self.split()
                max_goldnum = max([len(report.tp) + len(report.fn) for report in reports])
                # fix: reuse the split computed above instead of splitting again
                for report in reports:
                    ax.scatter(report.precision(), report.recall(),
                               s=100.0 * (len(report.tp) + len(report.fn)) / max_goldnum * size,
                               zorder=10)
                    ax.annotate(report.title, (report.precision(), report.recall()),
                                fontsize=fontsize, zorder=11)
            else:
                ax.scatter(self.precision(), self.recall())

    def html_table(self, split_report=False):
        """Render the report (or its split sub-reports) as an HTML table."""
        # fix: "Ture Positive" typo in the emitted HTML header
        html_template = """<table>
<tr>
<th>Title</th>
<th>True Positive</th>
<th>False Positive</th>
<th>False Negative</th>
<th>Precision</th>
<th>Recall</th>
<th>F-measure</th>
</tr>
{}
</table>"""
        line_template = """<tr>
<th>{}</th>
<th>{}</th>
<th>{}</th>
<th>{}</th>
<th>{:.3f}</th>
<th>{:.3f}</th>
<th>{:.3f}</th>
</tr>"""
        reports = self.split() if split_report else [self]
        lines = [line_template.format(report.title,
                                      len(report.tp),
                                      len(report.fp),
                                      len(report.fn),
                                      report.precision(),
                                      report.recall(),
                                      report.f1())
                 for report in reports]
        return html_template.format('\n'.join(lines))
|
jeroyang/txttk
|
txttk/report.py
|
Report.from_scale
|
python
|
def from_scale(cls, gold_number, precision, recall, title):
    """
    Deprecated, kept for backward compatibility; prefer from_score.

    Build a synthetic report whose tp/fp/fn counts reproduce the given
    precision and recall over gold_number gold items.
    """
    tp_count = get_numerator(recall, gold_number)
    fp_count = get_denominator(precision, tp_count) - tp_count
    fn_count = gold_number - tp_count
    return cls(['tp'] * tp_count, ['fp'] * fp_count, ['fn'] * fn_count, title)
|
Deprecated, kept for backward compatibility;
use from_score instead.
|
train
|
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/report.py#L150-L163
|
[
"def get_numerator(ratio, max_denominator):\n fraction = Fraction.from_float(ratio).limit_denominator(max_denominator)\n return int(fraction.numerator * max_denominator / fraction.denominator)\n",
"def get_denominator(ratio, max_numerator):\n return get_numerator(1/ratio, max_numerator)\n"
] |
class Report:
    """
    Holds the results of an experiment and presents its precision, recall
    and F1 score.
    """

    def __init__(self, tp=None, fp=None, fn=None, title=None):
        """
        :param tp: the true positive items (defaults to empty).
        :param fp: the false positive items (defaults to empty).
        :param fn: the false negative items (defaults to empty).
        :param title: the title of this report.
        """
        # fix: use None sentinels instead of mutable default arguments
        # ([]), which are shared between calls
        self.tp = pack_boxes(tp if tp is not None else [], title)
        self.fp = pack_boxes(fp if fp is not None else [], title)
        self.fn = pack_boxes(fn if fn is not None else [], title)
        self.title = title

    def precision(self):
        """Return tp / (tp + fp), or 0.0 when there are no positives."""
        try:
            return float(len(self.tp)) / (len(self.tp) + len(self.fp))
        except ZeroDivisionError:
            return 0.0

    def recall(self):
        """Return tp / (tp + fn), or 0.0 when there are no gold items."""
        try:
            return float(len(self.tp)) / (len(self.tp) + len(self.fn))
        except ZeroDivisionError:
            return 0.0

    def f1(self):
        """Return the harmonic mean of precision and recall (0.0 when both are 0)."""
        r = self.recall()
        p = self.precision()
        try:
            return float(2 * r * p) / (r + p)
        except ZeroDivisionError:
            return 0.0

    def __repr__(self):
        syntax = 'Report<P{p:.3f} R{r:.3f} F{f:.3f} {t!r}>'
        return syntax.format(p=self.precision(), r=self.recall(),
                             f=self.f1(), t=self.title)

    def update(self, report):
        """
        Add the items from the given report, re-boxed under this report's title.
        """
        self.tp.extend(pack_boxes(report.tp, self.title))
        self.fp.extend(pack_boxes(report.fp, self.title))
        self.fn.extend(pack_boxes(report.fn, self.title))

    @classmethod
    def from_reports(cls, reports, title):
        """Merge several reports (with distinct titles) into one titled report."""
        if len(reports) != len(set([rep.title for rep in reports])):
            # duplicate titles would make the merged report ambiguous
            raise KeyError('Cannot merge reports with same titles')
        metareport = cls([], [], [], title)
        for report in reports:
            metareport.update(report)
        return metareport

    def split(self):
        """
        Split this report into one report per tag.
        :raises AssertionError: when the stored items are not tag boxes.
        """
        tag2report = OrderedDict()
        try:
            for tagbox, _ in self.tp:
                tag2report.setdefault(tagbox.tag, Report()).tp.append(tagbox.content)
            for tagbox, _ in self.fp:
                tag2report.setdefault(tagbox.tag, Report()).fp.append(tagbox.content)
            for tagbox, _ in self.fn:
                tag2report.setdefault(tagbox.tag, Report()).fn.append(tagbox.content)
            for tag, report in tag2report.items():
                report.title = tag
        except AttributeError:
            raise AssertionError('The report cannot be split')
        return list(tag2report.values())

    # fix: the original had a stray duplicated @classmethod decorator here
    # (left over from a removed method), which breaks calling from_score
    @classmethod
    def from_score(cls, precision, recall, title, goldstandard_size=1000):
        """Build a synthetic report reproducing the given precision/recall."""
        tp_count = get_numerator(recall, goldstandard_size)
        positive_count = get_denominator(precision, tp_count)
        fp_count = positive_count - tp_count
        fn_count = goldstandard_size - tp_count
        return cls(['tp'] * tp_count,
                   ['fp'] * fp_count,
                   ['fn'] * fn_count,
                   title)

    def plot(self, split_report=False, **argkw):
        """Scatter-plot precision/recall (one point per sub-report when split_report)."""
        with prplot(**argkw) as ax:
            size = argkw.get('size', 6)
            fontsize = argkw.get('fontsize', 2 * size)
            ax.set_title(self.title)
            if split_report:
                reports = self.split()
                max_goldnum = max([len(report.tp) + len(report.fn) for report in reports])
                # fix: reuse the split computed above instead of splitting again
                for report in reports:
                    ax.scatter(report.precision(), report.recall(),
                               s=100.0 * (len(report.tp) + len(report.fn)) / max_goldnum * size,
                               zorder=10)
                    ax.annotate(report.title, (report.precision(), report.recall()),
                                fontsize=fontsize, zorder=11)
            else:
                ax.scatter(self.precision(), self.recall())

    def html_table(self, split_report=False):
        """Render the report (or its split sub-reports) as an HTML table."""
        # fix: "Ture Positive" typo in the emitted HTML header
        html_template = """<table>
<tr>
<th>Title</th>
<th>True Positive</th>
<th>False Positive</th>
<th>False Negative</th>
<th>Precision</th>
<th>Recall</th>
<th>F-measure</th>
</tr>
{}
</table>"""
        line_template = """<tr>
<th>{}</th>
<th>{}</th>
<th>{}</th>
<th>{}</th>
<th>{:.3f}</th>
<th>{:.3f}</th>
<th>{:.3f}</th>
</tr>"""
        reports = self.split() if split_report else [self]
        lines = [line_template.format(report.title,
                                      len(report.tp),
                                      len(report.fp),
                                      len(report.fn),
                                      report.precision(),
                                      report.recall(),
                                      report.f1())
                 for report in reports]
        return html_template.format('\n'.join(lines))
|
jeroyang/txttk
|
txttk/retools.py
|
condense
|
python
|
def condense(ss_unescaped):
    """
    Given multiple strings, return a compressed regular expression matching
    exactly these strings.

    >>> condense(['she', 'he', 'her', 'hemoglobin'])
    'he(moglobin|r)?|she'
    """
    # estimated length of a grouped regex of the form "prefix(tails)?" or
    # "(heads)?suffix" built from the strings in 'longg' around 'short'
    def estimated_len(longg, short):
        return (3
                + len(short)
                + sum(map(len, longg))
                - len(longg)
                * (len(short) - 1)
                - 1)
    # length of simply joining the strings of 'longg' with '|'
    def stupid_len(longg):
        return sum(map(len, longg)) + len(longg)
    # escape the de-duplicated input strings and sort short-to-long
    ss = [re.escape(s) for s in set(ss_unescaped)]
    ss.sort(key=len)
    # map each string to the longer strings it prefixes ('p') or suffixes ('s')
    short2long = defaultdict(lambda: {'p': [], 's': []})
    for short, longg in combinations(ss, 2):
        if longg.startswith(short):
            short2long[short]['p'].append(longg)
        if longg.endswith(short):
            short2long[short]['s'].append(longg)
    # process longer shorts first
    short2long = sorted(list(short2long.items()),
                        key=lambda x: len(x[0]),
                        reverse=True)
    output = []
    # strings not yet covered by an emitted sub-regex
    objs = set(ss)
    for s, pre_sur in short2long:
        # NOTE: 'ss' is re-bound below to the suffix set, shadowing the
        # escaped-strings list above (the list is no longer used after this)
        pp = set(pre_sur['p']) & objs
        ss = set(pre_sur['s']) & objs
        # choose whichever grouping direction saves more characters
        if ((stupid_len(pp) - estimated_len(pp, s))
                < (stupid_len(ss) - estimated_len(ss, s))):
            # group as "(heads)?suffix"
            reg = (r'({heads})?{surfix}'
                   .format(surfix=s,
                           heads='|'.join(sorted([p[:-len(s)] for p in ss],
                                                 key=len,
                                                 reverse=True))))
            assert len(reg) == estimated_len(ss, s)
            output.append(reg)
            objs -= (ss | set([s]))
        elif ((stupid_len(pp) - estimated_len(pp, s))
                > (stupid_len(ss) - estimated_len(ss, s))):
            # group as "prefix(tails)?"
            reg = (r'{prefix}({tails})?'
                   .format(prefix=s,
                           tails='|'.join(sorted([p[len(s):] for p in pp],
                                                 key=len,
                                                 reverse=True))))
            assert len(reg) == estimated_len(pp, s)
            output.append(reg)
            objs -= (pp | set([s]))
    # anything not grouped is emitted verbatim
    for residual in objs:
        output.append(residual)
    # drop needless parens around single characters: "(x)?" -> "x?"
    return re.sub(r'\(([^)])\)\?', r'\1?', r'|'.join(output))
|
Given multiple strings, returns a compressed regular expression just
for these strings
>>> condense(['she', 'he', 'her', 'hemoglobin'])
'he(moglobin|r)?|she'
|
train
|
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/retools.py#L10-L73
|
[
"def estimated_len(longg, short):\n return (3\n + len(short)\n + sum(map(len, longg))\n - len(longg)\n * (len(short) - 1)\n - 1 )\n",
"def stupid_len(longg):\n return sum(map(len, longg)) + len(longg)\n"
] |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from collections import defaultdict, OrderedDict
from itertools import combinations
import re
def is_solid(regex):
    """
    Check whether the given regular expression is one solid unit, i.e. a
    single literal, character class or group (optionally quantified).

    >>> is_solid(r'a')
    True
    >>> is_solid(r'[ab]')
    True
    >>> is_solid(r'(a|b|c)?')
    True
    >>> is_solid(r'(a|b)(c)')
    False
    """
    # replace escape pairs and literal characters with '#', keeping only
    # the structural characters []()|?+*
    shape = re.sub(r'(\\.|[^\[\]\(\)\|\?\+\*])', '#', regex)
    # structural characters only
    skeleton = shape.replace('#', '')
    # a single character (or empty string) is trivially solid
    if len(shape) <= 1:
        return True
    # one character class or one paren group, optionally quantified
    solid_shapes = (r'^\[[^\]]*\][\*\+\?]?$', r'^\([^\(]*\)[\*\+\?]?$')
    if any(re.match(curr, shape) for curr in solid_shapes):
        return True
    # fall back to the skeleton check
    return bool(re.match(r'^\(\)#*?\)\)', skeleton))
def is_packed(regex):
    """
    Check whether the regex is solid and enclosed by one pair of parens.
    """
    if not is_solid(regex):
        return False
    return regex[0] == '('
def consolidate(regex):
    """
    Wrap the regex in a plain pair of parens unless it is already one
    solid unit.
    """
    return regex if is_solid(regex) else '({})'.format(regex)
def danger_unpack(regex):
    """
    Strip the outermost parens, including (?:...) and (?P<...>...) wrappers.

    >>> danger_unpack(r'(abc)')
    'abc'
    >>> danger_unpack(r'(?P<xyz>abc)')
    'abc'
    >>> danger_unpack(r'[abc]')
    '[abc]'
    """
    if not is_packed(regex):
        return regex
    return re.sub(r'^\((\?(:|P<.*?>))?(?P<content>.*?)\)$', r'\g<content>', regex)
def unpack(regex):
    """
    Strip the outermost parens, but keep named-group (?P<...>) wrappers.

    >>> unpack(r'(abc)')
    'abc'
    >>> unpack(r'(?P<xyz>abc)')
    '(?P<xyz>abc)'
    >>> unpack(r'[abc]')
    '[abc]'
    """
    if not is_packed(regex) or regex.startswith('(?P<'):
        return regex
    return re.sub(r'^\((\?:)?(?P<content>.*?)\)$', r'\g<content>', regex)
def parallel(regex_list, sort=False):
    """
    Join the given regexes with '|', unpacking each one first.
    When sort=True the regexes are sorted by length (longest first)
    before joining.

    >>> parallel([r'abc', r'def'])
    'abc|def'
    >>> parallel([r'abc', r'(d|ef)'])
    'abc|d|ef'
    """
    items = sorted(regex_list, key=len, reverse=True) if sort else regex_list
    return '|'.join(map(unpack, items))
def nocatch(regex):
    """
    Return the regex wrapped in a non-capturing (?:...) group, unless it is
    solid and not already packed (in which case no wrapper is needed).
    """
    if is_solid(regex) and not is_packed(regex):
        return regex
    return '(?:{})'.format(danger_unpack(regex))
def concat(regex_list):
    """
    Concatenate multiple regular expressions into one; each part that is not
    solid gets wrapped in a pair of parens.

    >>> concat([r'a|b', r'(c|d|e)'])
    '(a|b)(c|d|e)'
    """
    return r''.join(consolidate(curr) for curr in regex_list)
def nocatchall(regex):
    """
    Return a copy of the regex in which every group (capturing or named) is
    rewritten as a non-capturing group.
    """
    # match a real group-opening paren (not an escaped one), preserving any
    # run of escaped backslashes before it
    group_open = re.compile(r'(?<!\\)(?P<leading>(\\\\)*)\((\?(:|P<.*?>))?')
    return group_open.sub(r'\g<leading>(?:', regex)
def option(regex):
    """
    Append '?' to the regex, wrapping it first so the '?' binds to the whole.

    >>> option(r'[ab]')
    '[ab]?'
    >>> option('abc')
    '(abc)?'
    """
    return '{}?'.format(nocatch(regex))
|
jeroyang/txttk
|
txttk/retools.py
|
is_solid
|
python
|
def is_solid(regex):
    """
    Check whether the given regular expression is one solid unit, i.e. a
    single literal, character class or group (optionally quantified).
    """
    # replace escape pairs and literal characters with '#', keeping only
    # the structural characters []()|?+*
    shape = re.sub(r'(\\.|[^\[\]\(\)\|\?\+\*])', '#', regex)
    # structural characters only
    skeleton = shape.replace('#', '')
    # a single character (or empty string) is trivially solid
    if len(shape) <= 1:
        return True
    # one character class, optionally quantified
    if re.match(r'^\[[^\]]*\][\*\+\?]?$', shape):
        return True
    # one paren group, optionally quantified
    if re.match(r'^\([^\(]*\)[\*\+\?]?$', shape):
        return True
    # NOTE(review): this skeleton pattern looks odd (unbalanced parens);
    # kept as-is — verify intent against upstream.
    if re.match(r'^\(\)#*?\)\)', skeleton):
        return True
    else:
        return False
|
Check the given regular expression is solid.
>>> is_solid(r'a')
True
>>> is_solid(r'[ab]')
True
>>> is_solid(r'(a|b|c)')
True
>>> is_solid(r'(a|b|c)?')
True
>>> is_solid(r'(a|b)(c)')
False
>>> is_solid(r'(a|b)(c)?')
False
|
train
|
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/retools.py#L75-L104
| null |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from collections import defaultdict, OrderedDict
from itertools import combinations
import re
def condense(ss_unescaped):
    """
    Given multiple strings, returns a compressed regular expression just
    for these strings
    >>> condense(['she', 'he', 'her', 'hemoglobin'])
    'he(moglobin|r)?|she'
    """
    # estimated length of a grouped regex of the form "prefix(tails)?" or
    # "(heads)?suffix" built from the strings in 'longg' around 'short'
    def estimated_len(longg, short):
        return (3
                + len(short)
                + sum(map(len, longg))
                - len(longg)
                * (len(short) - 1)
                - 1)
    # length of simply joining the strings of 'longg' with '|'
    def stupid_len(longg):
        return sum(map(len, longg)) + len(longg)
    # escape the de-duplicated input strings and sort short-to-long
    ss = [re.escape(s) for s in set(ss_unescaped)]
    ss.sort(key=len)
    # map each string to the longer strings it prefixes ('p') or suffixes ('s')
    short2long = defaultdict(lambda: {'p': [], 's': []})
    for short, longg in combinations(ss, 2):
        if longg.startswith(short):
            short2long[short]['p'].append(longg)
        if longg.endswith(short):
            short2long[short]['s'].append(longg)
    # process longer shorts first
    short2long = sorted(list(short2long.items()),
                        key=lambda x: len(x[0]),
                        reverse=True)
    output = []
    # strings not yet covered by an emitted sub-regex
    objs = set(ss)
    for s, pre_sur in short2long:
        # NOTE: 'ss' is re-bound below to the suffix set, shadowing the
        # escaped-strings list above (the list is no longer used after this)
        pp = set(pre_sur['p']) & objs
        ss = set(pre_sur['s']) & objs
        # choose whichever grouping direction saves more characters
        if ((stupid_len(pp) - estimated_len(pp, s))
                < (stupid_len(ss) - estimated_len(ss, s))):
            # group as "(heads)?suffix"
            reg = (r'({heads})?{surfix}'
                   .format(surfix=s,
                           heads='|'.join(sorted([p[:-len(s)] for p in ss],
                                                 key=len,
                                                 reverse=True))))
            assert len(reg) == estimated_len(ss, s)
            output.append(reg)
            objs -= (ss | set([s]))
        elif ((stupid_len(pp) - estimated_len(pp, s))
                > (stupid_len(ss) - estimated_len(ss, s))):
            # group as "prefix(tails)?"
            reg = (r'{prefix}({tails})?'
                   .format(prefix=s,
                           tails='|'.join(sorted([p[len(s):] for p in pp],
                                                 key=len,
                                                 reverse=True))))
            assert len(reg) == estimated_len(pp, s)
            output.append(reg)
            objs -= (pp | set([s]))
    # anything not grouped is emitted verbatim
    for residual in objs:
        output.append(residual)
    # drop needless parens around single characters: "(x)?" -> "x?"
    return re.sub(r'\(([^)])\)\?', r'\1?', r'|'.join(output))
def is_packed(regex):
    """
    Tell whether *regex* is one solid unit whose first character is an
    opening parenthesis (i.e. the whole expression is wrapped in a
    single pair of parens).
    """
    if is_solid(regex):
        return regex[0] == '('
    return False
def consolidate(regex):
    """
    Wrap *regex* in a plain pair of parentheses unless it already
    forms a single solid unit, in which case it is returned unchanged.
    """
    return regex if is_solid(regex) else '({})'.format(regex)
def danger_unpack(regex):
    """
    Remove the outermost parens from a packed regex, including a
    no-catch ``(?:...)`` or a named ``(?P<name>...)`` wrapper; a regex
    that is not packed is returned unchanged.

    "danger" because, unlike unpack(), this also strips a named-group
    wrapper and thereby loses the group name.

    >>> danger_unpack(r'(abc)')
    'abc'
    >>> danger_unpack(r'(?:abc)')
    'abc'
    >>> danger_unpack(r'(?P<xyz>abc)')
    'abc'
    >>> danger_unpack(r'[abc]')
    '[abc]'
    """
    if is_packed(regex):
        return re.sub(r'^\((\?(:|P<.*?>))?(?P<content>.*?)\)$', r'\g<content>', regex)
    else:
        return regex
def unpack(regex):
    """
    Remove the outermost parens from a packed regex, but leave a named
    group ``(?P<name>...)`` untouched.

    >>> unpack(r'(abc)')
    'abc'
    >>> unpack(r'(?:abc)')
    'abc'
    >>> unpack(r'(?P<xyz>abc)')
    '(?P<xyz>abc)'
    >>> unpack(r'[abc]')
    '[abc]'
    """
    if not is_packed(regex) or regex.startswith('(?P<'):
        return regex
    return re.sub(r'^\((\?:)?(?P<content>.*?)\)$', r'\g<content>', regex)
def parallel(regex_list, sort=False):
    """
    Combine the given regexes into one alternation joined by ``|``;
    each piece is unpacked first so no redundant parens survive.
    With ``sort=True`` the regexes are ordered longest-first before
    joining.

    >>> parallel([r'abc', r'def'])
    'abc|def'
    >>> parallel([r'abc', r'(d|ef)'])
    'abc|d|ef'
    >>> parallel([r'abc', r'defg'], sort=True)
    'defg|abc'
    """
    candidates = regex_list
    if sort:
        candidates = sorted(candidates, key=len, reverse=True)
    unpacked = [unpack(candidate) for candidate in candidates]
    return '|'.join(unpacked)
def nocatch(regex):
    """
    Return *regex* wrapped in a no-catch group ``(?:...)``; a solid,
    unparenthesized unit is returned as-is, and an existing outermost
    paren pair (plain or named) is converted to the no-catch form.
    """
    if is_solid(regex) and not is_packed(regex):
        return regex
    return '(?:' + danger_unpack(regex) + ')'
def concat(regex_list):
    """
    Concatenate several regular expressions into one; every piece that
    is not already a solid unit is wrapped in parens first.

    >>> reg_1 = r'a|b'
    >>> reg_2 = r'(c|d|e)'
    >>> concat([reg_1, reg_2])
    '(a|b)(c|d|e)'
    """
    # The original doctest referenced an undefined name `reg2` and
    # showed the result unquoted, so it could never pass; fixed above.
    output_list = []
    for regex in regex_list:
        output_list.append(consolidate(regex))
    return r''.join(output_list)
def nocatchall(regex):
    """
    Rewrite every unescaped opening paren in *regex* — plain ``(``,
    no-catch ``(?:`` or named ``(?P<name>`` — into the no-catch form
    ``(?:``, leaving backslash-escaped parens alone.
    """
    paren_re = r'(?<!\\)(?P<leading>(\\\\)*)\((\?(:|P<.*?>))?'
    return re.sub(paren_re, r'\g<leading>(?:', regex)
def option(regex):
    """
    Make *regex* optional by appending ``?`` to its no-catch form.

    NOTE(review): nocatch() rewrites an already-parenthesized group
    into the ``(?:...)`` form, so the original doctests claiming
    ``'(abc)?'`` could never pass; the examples below show the actual
    return values.

    >>> option(r'[ab]')
    '[ab]?'
    >>> option(r'(abc)')
    '(?:abc)?'
    >>> option('abc')
    '(?:abc)?'
    """
    return nocatch(regex) + '?'
|
jeroyang/txttk
|
txttk/retools.py
|
danger_unpack
|
python
|
def danger_unpack(regex):
if is_packed(regex):
return re.sub(r'^\((\?(:|P<.*?>))?(?P<content>.*?)\)$', r'\g<content>', regex)
else:
return regex
|
Remove the outermost parens
>>> unpack(r'(abc)')
'abc'
>>> unpack(r'(?:abc)')
'abc'
>>> unpack(r'(?P<xyz>abc)')
'abc'
>>> unpack(r'[abc]')
'[abc]'
|
train
|
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/retools.py#L122-L139
|
[
"def is_packed(regex):\n \"\"\"\n Check if the regex is solid and packed into a pair of parens\n \"\"\"\n return is_solid(regex) and regex[0] == '('\n"
] |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from collections import defaultdict, OrderedDict
from itertools import combinations
import re
def condense(ss_unescaped):
"""
Given multiple strings, returns a compressed regular expression just
for these strings
>>> condense(['she', 'he', 'her', 'hemoglobin'])
'he(moglobin|r)?|she'
"""
def estimated_len(longg, short):
return (3
+ len(short)
+ sum(map(len, longg))
- len(longg)
* (len(short) - 1)
- 1 )
def stupid_len(longg):
return sum(map(len, longg)) + len(longg)
ss = [re.escape(s) for s in set(ss_unescaped)]
ss.sort(key=len)
short2long = defaultdict(lambda: {'p':[],'s':[]})
for short, longg in combinations(ss, 2):
if longg.startswith(short):
short2long[short]['p'].append(longg)
if longg.endswith(short):
short2long[short]['s'].append(longg)
short2long = sorted(list(short2long.items()),
key=lambda x: len(x[0]),
reverse=True)
output = []
objs = set(ss)
for s, pre_sur in short2long:
pp = set(pre_sur['p']) & objs
ss = set(pre_sur['s']) & objs
if ((stupid_len(pp) - estimated_len(pp, s))
< (stupid_len(ss) - estimated_len(ss, s))):
reg = (r'({heads})?{surfix}'
.format(surfix=s,
heads='|'.join(sorted([p[:-len(s)] for p in ss],
key=len,
reverse=True))))
assert len(reg) == estimated_len(ss, s)
output.append(reg)
objs -= (ss | set([s]))
elif ((stupid_len(pp) - estimated_len(pp, s))
> (stupid_len(ss) - estimated_len(ss, s))):
reg = (r'{prefix}({tails})?'
.format(prefix=s,
tails='|'.join(sorted([p[len(s):] for p in pp],
key=len,
reverse=True))))
assert len(reg) == estimated_len(pp, s)
output.append(reg)
objs -= (pp | set([s]))
for residual in objs:
output.append(residual)
return re.sub(r'\(([^)])\)\?', r'\1?', r'|'.join(output))
def is_solid(regex):
    """
    Return True when *regex* forms one indivisible unit: a single
    (possibly escaped) character, one character class, or one
    parenthesized group — each optionally followed by a quantifier.

    >>> is_solid(r'a')
    True
    >>> is_solid(r'[ab]')
    True
    >>> is_solid(r'(a|b|c)?')
    True
    >>> is_solid(r'(a|b)(c)')
    False
    """
    # Collapse every escaped or ordinary character into '#', keeping
    # only the structural metacharacters visible.
    shape = re.sub(r'(\\.|[^\[\]\(\)\|\?\+\*])', '#', regex)
    skeleton = shape.replace('#', '')
    if len(shape) <= 1:
        return True
    solid_shapes = (r'^\[[^\]]*\][\*\+\?]?$',
                    r'^\([^\(]*\)[\*\+\?]?$')
    for pattern in solid_shapes:
        if re.match(pattern, shape):
            return True
    if re.match(r'^\(\)#*?\)\)', skeleton):
        return True
    return False
def is_packed(regex):
"""
Check if the regex is solid and packed into a pair of parens
"""
return is_solid(regex) and regex[0] == '('
def consolidate(regex):
"""
Put on a pair of parens (with no catch tag) outside the regex,
if the regex is not yet consolidated
"""
if is_solid(regex):
return regex
else:
return '({})'.format(regex)
def unpack(regex):
"""
Remove the outermost parens, keep the (?P...) one
>>> unpack(r'(abc)')
'abc'
>>> unpack(r'(?:abc)')
'abc'
>>> unpack(r'(?P<xyz>abc)')
'(?P<xyz>abc)'
>>> unpack(r'[abc]')
'[abc]'
"""
if is_packed(regex) and not regex.startswith('(?P<'):
return re.sub(r'^\((\?:)?(?P<content>.*?)\)$', r'\g<content>', regex)
else:
return regex
def parallel(regex_list, sort=False):
"""
Join the given regexes using r'|'
if the sort=True, regexes will be sorted by lenth before processing
>>> parallel([r'abc', r'def'])
'abc|def'
>>> parallel([r'abc', r'd|ef'])
'abc|def'
>>> parallel([r'abc', r'(d|ef)'])
'abc|d|ef'
>>> parallel([r'abc', r'defg'])
'defg|abc'
"""
if sort:
regex_list = sorted(regex_list, key=len, reverse=True)
return '|'.join([unpack(regex) for regex in regex_list])
def nocatch(regex):
"""
Put on a pair of parens (with no catch tag) outside the regex,
if the regex is not yet packed;
modified the outmost parens by adding nocatch tag
"""
if is_solid(regex) and not is_packed(regex):
return regex
else:
return '(?:{})'.format(danger_unpack(regex))
def concat(regex_list):
"""
Concat multiple regular expression into one, if the given regular expression is not packed,
a pair of paren will be add.
>>> reg_1 = r'a|b'
>>> reg_2 = r'(c|d|e)'
>>> concat([reg_1, reg2])
(a|b)(c|d|e)
"""
output_list = []
for regex in regex_list:
output_list.append(consolidate(regex))
return r''.join(output_list)
def nocatchall(regex):
"""
Return a regex with all parens has a no catch tag
"""
return re.sub(r'(?<!\\)(?P<leading>(\\\\)*)\((\?(:|P<.*?>))?', r'\g<leading>(?:', regex)
def option(regex):
"""
return a regex has a option tag
>>> option(r'[ab]')
'[ab]?'
>>> option(r'(abc)')
'(abc)?'
>>> option('abc')
'(abc)?'
"""
return nocatch(regex) + '?'
|
jeroyang/txttk
|
txttk/retools.py
|
unpack
|
python
|
def unpack(regex):
if is_packed(regex) and not regex.startswith('(?P<'):
return re.sub(r'^\((\?:)?(?P<content>.*?)\)$', r'\g<content>', regex)
else:
return regex
|
Remove the outermost parens, keep the (?P...) one
>>> unpack(r'(abc)')
'abc'
>>> unpack(r'(?:abc)')
'abc'
>>> unpack(r'(?P<xyz>abc)')
'(?P<xyz>abc)'
>>> unpack(r'[abc]')
'[abc]'
|
train
|
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/retools.py#L141-L157
|
[
"def is_packed(regex):\n \"\"\"\n Check if the regex is solid and packed into a pair of parens\n \"\"\"\n return is_solid(regex) and regex[0] == '('\n"
] |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from collections import defaultdict, OrderedDict
from itertools import combinations
import re
def condense(ss_unescaped):
"""
Given multiple strings, returns a compressed regular expression just
for these strings
>>> condense(['she', 'he', 'her', 'hemoglobin'])
'he(moglobin|r)?|she'
"""
def estimated_len(longg, short):
return (3
+ len(short)
+ sum(map(len, longg))
- len(longg)
* (len(short) - 1)
- 1 )
def stupid_len(longg):
return sum(map(len, longg)) + len(longg)
ss = [re.escape(s) for s in set(ss_unescaped)]
ss.sort(key=len)
short2long = defaultdict(lambda: {'p':[],'s':[]})
for short, longg in combinations(ss, 2):
if longg.startswith(short):
short2long[short]['p'].append(longg)
if longg.endswith(short):
short2long[short]['s'].append(longg)
short2long = sorted(list(short2long.items()),
key=lambda x: len(x[0]),
reverse=True)
output = []
objs = set(ss)
for s, pre_sur in short2long:
pp = set(pre_sur['p']) & objs
ss = set(pre_sur['s']) & objs
if ((stupid_len(pp) - estimated_len(pp, s))
< (stupid_len(ss) - estimated_len(ss, s))):
reg = (r'({heads})?{surfix}'
.format(surfix=s,
heads='|'.join(sorted([p[:-len(s)] for p in ss],
key=len,
reverse=True))))
assert len(reg) == estimated_len(ss, s)
output.append(reg)
objs -= (ss | set([s]))
elif ((stupid_len(pp) - estimated_len(pp, s))
> (stupid_len(ss) - estimated_len(ss, s))):
reg = (r'{prefix}({tails})?'
.format(prefix=s,
tails='|'.join(sorted([p[len(s):] for p in pp],
key=len,
reverse=True))))
assert len(reg) == estimated_len(pp, s)
output.append(reg)
objs -= (pp | set([s]))
for residual in objs:
output.append(residual)
return re.sub(r'\(([^)])\)\?', r'\1?', r'|'.join(output))
def is_solid(regex):
"""
Check the given regular expression is solid.
>>> is_solid(r'a')
True
>>> is_solid(r'[ab]')
True
>>> is_solid(r'(a|b|c)')
True
>>> is_solid(r'(a|b|c)?')
True
>>> is_solid(r'(a|b)(c)')
False
>>> is_solid(r'(a|b)(c)?')
False
"""
shape = re.sub(r'(\\.|[^\[\]\(\)\|\?\+\*])', '#', regex)
skeleton = shape.replace('#', '')
if len(shape) <= 1:
return True
if re.match(r'^\[[^\]]*\][\*\+\?]?$', shape):
return True
if re.match(r'^\([^\(]*\)[\*\+\?]?$', shape):
return True
if re.match(r'^\(\)#*?\)\)', skeleton):
return True
else:
return False
def is_packed(regex):
"""
Check if the regex is solid and packed into a pair of parens
"""
return is_solid(regex) and regex[0] == '('
def consolidate(regex):
"""
Put on a pair of parens (with no catch tag) outside the regex,
if the regex is not yet consolidated
"""
if is_solid(regex):
return regex
else:
return '({})'.format(regex)
def danger_unpack(regex):
"""
Remove the outermost parens
>>> unpack(r'(abc)')
'abc'
>>> unpack(r'(?:abc)')
'abc'
>>> unpack(r'(?P<xyz>abc)')
'abc'
>>> unpack(r'[abc]')
'[abc]'
"""
if is_packed(regex):
return re.sub(r'^\((\?(:|P<.*?>))?(?P<content>.*?)\)$', r'\g<content>', regex)
else:
return regex
def parallel(regex_list, sort=False):
"""
Join the given regexes using r'|'
if the sort=True, regexes will be sorted by lenth before processing
>>> parallel([r'abc', r'def'])
'abc|def'
>>> parallel([r'abc', r'd|ef'])
'abc|def'
>>> parallel([r'abc', r'(d|ef)'])
'abc|d|ef'
>>> parallel([r'abc', r'defg'])
'defg|abc'
"""
if sort:
regex_list = sorted(regex_list, key=len, reverse=True)
return '|'.join([unpack(regex) for regex in regex_list])
def nocatch(regex):
"""
Put on a pair of parens (with no catch tag) outside the regex,
if the regex is not yet packed;
modified the outmost parens by adding nocatch tag
"""
if is_solid(regex) and not is_packed(regex):
return regex
else:
return '(?:{})'.format(danger_unpack(regex))
def concat(regex_list):
"""
Concat multiple regular expression into one, if the given regular expression is not packed,
a pair of paren will be add.
>>> reg_1 = r'a|b'
>>> reg_2 = r'(c|d|e)'
>>> concat([reg_1, reg2])
(a|b)(c|d|e)
"""
output_list = []
for regex in regex_list:
output_list.append(consolidate(regex))
return r''.join(output_list)
def nocatchall(regex):
"""
Return a regex with all parens has a no catch tag
"""
return re.sub(r'(?<!\\)(?P<leading>(\\\\)*)\((\?(:|P<.*?>))?', r'\g<leading>(?:', regex)
def option(regex):
"""
return a regex has a option tag
>>> option(r'[ab]')
'[ab]?'
>>> option(r'(abc)')
'(abc)?'
>>> option('abc')
'(abc)?'
"""
return nocatch(regex) + '?'
|
jeroyang/txttk
|
txttk/retools.py
|
parallel
|
python
|
def parallel(regex_list, sort=False):
if sort:
regex_list = sorted(regex_list, key=len, reverse=True)
return '|'.join([unpack(regex) for regex in regex_list])
|
Join the given regexes using r'|'
if the sort=True, regexes will be sorted by lenth before processing
>>> parallel([r'abc', r'def'])
'abc|def'
>>> parallel([r'abc', r'd|ef'])
'abc|def'
>>> parallel([r'abc', r'(d|ef)'])
'abc|d|ef'
>>> parallel([r'abc', r'defg'])
'defg|abc'
|
train
|
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/retools.py#L159-L175
| null |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from collections import defaultdict, OrderedDict
from itertools import combinations
import re
def condense(ss_unescaped):
"""
Given multiple strings, returns a compressed regular expression just
for these strings
>>> condense(['she', 'he', 'her', 'hemoglobin'])
'he(moglobin|r)?|she'
"""
def estimated_len(longg, short):
return (3
+ len(short)
+ sum(map(len, longg))
- len(longg)
* (len(short) - 1)
- 1 )
def stupid_len(longg):
return sum(map(len, longg)) + len(longg)
ss = [re.escape(s) for s in set(ss_unescaped)]
ss.sort(key=len)
short2long = defaultdict(lambda: {'p':[],'s':[]})
for short, longg in combinations(ss, 2):
if longg.startswith(short):
short2long[short]['p'].append(longg)
if longg.endswith(short):
short2long[short]['s'].append(longg)
short2long = sorted(list(short2long.items()),
key=lambda x: len(x[0]),
reverse=True)
output = []
objs = set(ss)
for s, pre_sur in short2long:
pp = set(pre_sur['p']) & objs
ss = set(pre_sur['s']) & objs
if ((stupid_len(pp) - estimated_len(pp, s))
< (stupid_len(ss) - estimated_len(ss, s))):
reg = (r'({heads})?{surfix}'
.format(surfix=s,
heads='|'.join(sorted([p[:-len(s)] for p in ss],
key=len,
reverse=True))))
assert len(reg) == estimated_len(ss, s)
output.append(reg)
objs -= (ss | set([s]))
elif ((stupid_len(pp) - estimated_len(pp, s))
> (stupid_len(ss) - estimated_len(ss, s))):
reg = (r'{prefix}({tails})?'
.format(prefix=s,
tails='|'.join(sorted([p[len(s):] for p in pp],
key=len,
reverse=True))))
assert len(reg) == estimated_len(pp, s)
output.append(reg)
objs -= (pp | set([s]))
for residual in objs:
output.append(residual)
return re.sub(r'\(([^)])\)\?', r'\1?', r'|'.join(output))
def is_solid(regex):
"""
Check the given regular expression is solid.
>>> is_solid(r'a')
True
>>> is_solid(r'[ab]')
True
>>> is_solid(r'(a|b|c)')
True
>>> is_solid(r'(a|b|c)?')
True
>>> is_solid(r'(a|b)(c)')
False
>>> is_solid(r'(a|b)(c)?')
False
"""
shape = re.sub(r'(\\.|[^\[\]\(\)\|\?\+\*])', '#', regex)
skeleton = shape.replace('#', '')
if len(shape) <= 1:
return True
if re.match(r'^\[[^\]]*\][\*\+\?]?$', shape):
return True
if re.match(r'^\([^\(]*\)[\*\+\?]?$', shape):
return True
if re.match(r'^\(\)#*?\)\)', skeleton):
return True
else:
return False
def is_packed(regex):
"""
Check if the regex is solid and packed into a pair of parens
"""
return is_solid(regex) and regex[0] == '('
def consolidate(regex):
"""
Put on a pair of parens (with no catch tag) outside the regex,
if the regex is not yet consolidated
"""
if is_solid(regex):
return regex
else:
return '({})'.format(regex)
def danger_unpack(regex):
"""
Remove the outermost parens
>>> unpack(r'(abc)')
'abc'
>>> unpack(r'(?:abc)')
'abc'
>>> unpack(r'(?P<xyz>abc)')
'abc'
>>> unpack(r'[abc]')
'[abc]'
"""
if is_packed(regex):
return re.sub(r'^\((\?(:|P<.*?>))?(?P<content>.*?)\)$', r'\g<content>', regex)
else:
return regex
def unpack(regex):
"""
Remove the outermost parens, keep the (?P...) one
>>> unpack(r'(abc)')
'abc'
>>> unpack(r'(?:abc)')
'abc'
>>> unpack(r'(?P<xyz>abc)')
'(?P<xyz>abc)'
>>> unpack(r'[abc]')
'[abc]'
"""
if is_packed(regex) and not regex.startswith('(?P<'):
return re.sub(r'^\((\?:)?(?P<content>.*?)\)$', r'\g<content>', regex)
else:
return regex
def parallel(regex_list, sort=False):
"""
Join the given regexes using r'|'
if the sort=True, regexes will be sorted by lenth before processing
>>> parallel([r'abc', r'def'])
'abc|def'
>>> parallel([r'abc', r'd|ef'])
'abc|def'
>>> parallel([r'abc', r'(d|ef)'])
'abc|d|ef'
>>> parallel([r'abc', r'defg'])
'defg|abc'
"""
if sort:
regex_list = sorted(regex_list, key=len, reverse=True)
return '|'.join([unpack(regex) for regex in regex_list])
def nocatch(regex):
"""
Put on a pair of parens (with no catch tag) outside the regex,
if the regex is not yet packed;
modified the outmost parens by adding nocatch tag
"""
if is_solid(regex) and not is_packed(regex):
return regex
else:
return '(?:{})'.format(danger_unpack(regex))
def concat(regex_list):
"""
Concat multiple regular expression into one, if the given regular expression is not packed,
a pair of paren will be add.
>>> reg_1 = r'a|b'
>>> reg_2 = r'(c|d|e)'
>>> concat([reg_1, reg2])
(a|b)(c|d|e)
"""
output_list = []
for regex in regex_list:
output_list.append(consolidate(regex))
return r''.join(output_list)
def nocatchall(regex):
"""
Return a regex with all parens has a no catch tag
"""
return re.sub(r'(?<!\\)(?P<leading>(\\\\)*)\((\?(:|P<.*?>))?', r'\g<leading>(?:', regex)
def option(regex):
"""
return a regex has a option tag
>>> option(r'[ab]')
'[ab]?'
>>> option(r'(abc)')
'(abc)?'
>>> option('abc')
'(abc)?'
"""
return nocatch(regex) + '?'
|
jeroyang/txttk
|
txttk/retools.py
|
nocatch
|
python
|
def nocatch(regex):
if is_solid(regex) and not is_packed(regex):
return regex
else:
return '(?:{})'.format(danger_unpack(regex))
|
Put on a pair of parens (with no catch tag) outside the regex,
if the regex is not yet packed;
modified the outmost parens by adding nocatch tag
|
train
|
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/retools.py#L177-L186
|
[
"def is_solid(regex):\n \"\"\"\n Check the given regular expression is solid.\n\n >>> is_solid(r'a')\n True\n >>> is_solid(r'[ab]')\n True\n >>> is_solid(r'(a|b|c)')\n True\n >>> is_solid(r'(a|b|c)?')\n True\n >>> is_solid(r'(a|b)(c)')\n False\n >>> is_solid(r'(a|b)(c)?')\n False\n \"\"\"\n\n shape = re.sub(r'(\\\\.|[^\\[\\]\\(\\)\\|\\?\\+\\*])', '#', regex)\n skeleton = shape.replace('#', '')\n if len(shape) <= 1:\n return True\n if re.match(r'^\\[[^\\]]*\\][\\*\\+\\?]?$', shape):\n return True\n if re.match(r'^\\([^\\(]*\\)[\\*\\+\\?]?$', shape):\n return True\n if re.match(r'^\\(\\)#*?\\)\\)', skeleton):\n return True\n else:\n return False\n",
"def is_packed(regex):\n \"\"\"\n Check if the regex is solid and packed into a pair of parens\n \"\"\"\n return is_solid(regex) and regex[0] == '('\n",
"def danger_unpack(regex):\n \"\"\"\n Remove the outermost parens\n\n >>> unpack(r'(abc)')\n 'abc'\n >>> unpack(r'(?:abc)')\n 'abc'\n >>> unpack(r'(?P<xyz>abc)')\n 'abc'\n >>> unpack(r'[abc]')\n '[abc]'\n \"\"\"\n\n if is_packed(regex):\n return re.sub(r'^\\((\\?(:|P<.*?>))?(?P<content>.*?)\\)$', r'\\g<content>', regex)\n else:\n return regex\n"
] |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from collections import defaultdict, OrderedDict
from itertools import combinations
import re
def condense(ss_unescaped):
"""
Given multiple strings, returns a compressed regular expression just
for these strings
>>> condense(['she', 'he', 'her', 'hemoglobin'])
'he(moglobin|r)?|she'
"""
def estimated_len(longg, short):
return (3
+ len(short)
+ sum(map(len, longg))
- len(longg)
* (len(short) - 1)
- 1 )
def stupid_len(longg):
return sum(map(len, longg)) + len(longg)
ss = [re.escape(s) for s in set(ss_unescaped)]
ss.sort(key=len)
short2long = defaultdict(lambda: {'p':[],'s':[]})
for short, longg in combinations(ss, 2):
if longg.startswith(short):
short2long[short]['p'].append(longg)
if longg.endswith(short):
short2long[short]['s'].append(longg)
short2long = sorted(list(short2long.items()),
key=lambda x: len(x[0]),
reverse=True)
output = []
objs = set(ss)
for s, pre_sur in short2long:
pp = set(pre_sur['p']) & objs
ss = set(pre_sur['s']) & objs
if ((stupid_len(pp) - estimated_len(pp, s))
< (stupid_len(ss) - estimated_len(ss, s))):
reg = (r'({heads})?{surfix}'
.format(surfix=s,
heads='|'.join(sorted([p[:-len(s)] for p in ss],
key=len,
reverse=True))))
assert len(reg) == estimated_len(ss, s)
output.append(reg)
objs -= (ss | set([s]))
elif ((stupid_len(pp) - estimated_len(pp, s))
> (stupid_len(ss) - estimated_len(ss, s))):
reg = (r'{prefix}({tails})?'
.format(prefix=s,
tails='|'.join(sorted([p[len(s):] for p in pp],
key=len,
reverse=True))))
assert len(reg) == estimated_len(pp, s)
output.append(reg)
objs -= (pp | set([s]))
for residual in objs:
output.append(residual)
return re.sub(r'\(([^)])\)\?', r'\1?', r'|'.join(output))
def is_solid(regex):
"""
Check the given regular expression is solid.
>>> is_solid(r'a')
True
>>> is_solid(r'[ab]')
True
>>> is_solid(r'(a|b|c)')
True
>>> is_solid(r'(a|b|c)?')
True
>>> is_solid(r'(a|b)(c)')
False
>>> is_solid(r'(a|b)(c)?')
False
"""
shape = re.sub(r'(\\.|[^\[\]\(\)\|\?\+\*])', '#', regex)
skeleton = shape.replace('#', '')
if len(shape) <= 1:
return True
if re.match(r'^\[[^\]]*\][\*\+\?]?$', shape):
return True
if re.match(r'^\([^\(]*\)[\*\+\?]?$', shape):
return True
if re.match(r'^\(\)#*?\)\)', skeleton):
return True
else:
return False
def is_packed(regex):
"""
Check if the regex is solid and packed into a pair of parens
"""
return is_solid(regex) and regex[0] == '('
def consolidate(regex):
"""
Put on a pair of parens (with no catch tag) outside the regex,
if the regex is not yet consolidated
"""
if is_solid(regex):
return regex
else:
return '({})'.format(regex)
def danger_unpack(regex):
"""
Remove the outermost parens
>>> unpack(r'(abc)')
'abc'
>>> unpack(r'(?:abc)')
'abc'
>>> unpack(r'(?P<xyz>abc)')
'abc'
>>> unpack(r'[abc]')
'[abc]'
"""
if is_packed(regex):
return re.sub(r'^\((\?(:|P<.*?>))?(?P<content>.*?)\)$', r'\g<content>', regex)
else:
return regex
def unpack(regex):
"""
Remove the outermost parens, keep the (?P...) one
>>> unpack(r'(abc)')
'abc'
>>> unpack(r'(?:abc)')
'abc'
>>> unpack(r'(?P<xyz>abc)')
'(?P<xyz>abc)'
>>> unpack(r'[abc]')
'[abc]'
"""
if is_packed(regex) and not regex.startswith('(?P<'):
return re.sub(r'^\((\?:)?(?P<content>.*?)\)$', r'\g<content>', regex)
else:
return regex
def parallel(regex_list, sort=False):
"""
Join the given regexes using r'|'
if the sort=True, regexes will be sorted by lenth before processing
>>> parallel([r'abc', r'def'])
'abc|def'
>>> parallel([r'abc', r'd|ef'])
'abc|def'
>>> parallel([r'abc', r'(d|ef)'])
'abc|d|ef'
>>> parallel([r'abc', r'defg'])
'defg|abc'
"""
if sort:
regex_list = sorted(regex_list, key=len, reverse=True)
return '|'.join([unpack(regex) for regex in regex_list])
def concat(regex_list):
"""
Concat multiple regular expression into one, if the given regular expression is not packed,
a pair of paren will be add.
>>> reg_1 = r'a|b'
>>> reg_2 = r'(c|d|e)'
>>> concat([reg_1, reg2])
(a|b)(c|d|e)
"""
output_list = []
for regex in regex_list:
output_list.append(consolidate(regex))
return r''.join(output_list)
def nocatchall(regex):
"""
Return a regex with all parens has a no catch tag
"""
return re.sub(r'(?<!\\)(?P<leading>(\\\\)*)\((\?(:|P<.*?>))?', r'\g<leading>(?:', regex)
def option(regex):
"""
return a regex has a option tag
>>> option(r'[ab]')
'[ab]?'
>>> option(r'(abc)')
'(abc)?'
>>> option('abc')
'(abc)?'
"""
return nocatch(regex) + '?'
|
jeroyang/txttk
|
txttk/retools.py
|
concat
|
python
|
def concat(regex_list):
output_list = []
for regex in regex_list:
output_list.append(consolidate(regex))
return r''.join(output_list)
|
Concat multiple regular expression into one, if the given regular expression is not packed,
a pair of paren will be add.
>>> reg_1 = r'a|b'
>>> reg_2 = r'(c|d|e)'
>>> concat([reg_1, reg2])
(a|b)(c|d|e)
|
train
|
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/retools.py#L188-L202
|
[
"def consolidate(regex):\n \"\"\"\n Put on a pair of parens (with no catch tag) outside the regex,\n if the regex is not yet consolidated\n \"\"\"\n if is_solid(regex):\n return regex\n else:\n return '({})'.format(regex)\n"
] |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from collections import defaultdict, OrderedDict
from itertools import combinations
import re
def condense(ss_unescaped):
"""
Given multiple strings, returns a compressed regular expression just
for these strings
>>> condense(['she', 'he', 'her', 'hemoglobin'])
'he(moglobin|r)?|she'
"""
def estimated_len(longg, short):
return (3
+ len(short)
+ sum(map(len, longg))
- len(longg)
* (len(short) - 1)
- 1 )
def stupid_len(longg):
return sum(map(len, longg)) + len(longg)
ss = [re.escape(s) for s in set(ss_unescaped)]
ss.sort(key=len)
short2long = defaultdict(lambda: {'p':[],'s':[]})
for short, longg in combinations(ss, 2):
if longg.startswith(short):
short2long[short]['p'].append(longg)
if longg.endswith(short):
short2long[short]['s'].append(longg)
short2long = sorted(list(short2long.items()),
key=lambda x: len(x[0]),
reverse=True)
output = []
objs = set(ss)
for s, pre_sur in short2long:
pp = set(pre_sur['p']) & objs
ss = set(pre_sur['s']) & objs
if ((stupid_len(pp) - estimated_len(pp, s))
< (stupid_len(ss) - estimated_len(ss, s))):
reg = (r'({heads})?{surfix}'
.format(surfix=s,
heads='|'.join(sorted([p[:-len(s)] for p in ss],
key=len,
reverse=True))))
assert len(reg) == estimated_len(ss, s)
output.append(reg)
objs -= (ss | set([s]))
elif ((stupid_len(pp) - estimated_len(pp, s))
> (stupid_len(ss) - estimated_len(ss, s))):
reg = (r'{prefix}({tails})?'
.format(prefix=s,
tails='|'.join(sorted([p[len(s):] for p in pp],
key=len,
reverse=True))))
assert len(reg) == estimated_len(pp, s)
output.append(reg)
objs -= (pp | set([s]))
for residual in objs:
output.append(residual)
return re.sub(r'\(([^)])\)\?', r'\1?', r'|'.join(output))
def is_solid(regex):
"""
Check the given regular expression is solid.
>>> is_solid(r'a')
True
>>> is_solid(r'[ab]')
True
>>> is_solid(r'(a|b|c)')
True
>>> is_solid(r'(a|b|c)?')
True
>>> is_solid(r'(a|b)(c)')
False
>>> is_solid(r'(a|b)(c)?')
False
"""
shape = re.sub(r'(\\.|[^\[\]\(\)\|\?\+\*])', '#', regex)
skeleton = shape.replace('#', '')
if len(shape) <= 1:
return True
if re.match(r'^\[[^\]]*\][\*\+\?]?$', shape):
return True
if re.match(r'^\([^\(]*\)[\*\+\?]?$', shape):
return True
if re.match(r'^\(\)#*?\)\)', skeleton):
return True
else:
return False
def is_packed(regex):
"""
Check if the regex is solid and packed into a pair of parens
"""
return is_solid(regex) and regex[0] == '('
def consolidate(regex):
"""
Put on a pair of parens (with no catch tag) outside the regex,
if the regex is not yet consolidated
"""
if is_solid(regex):
return regex
else:
return '({})'.format(regex)
def danger_unpack(regex):
"""
Remove the outermost parens
>>> unpack(r'(abc)')
'abc'
>>> unpack(r'(?:abc)')
'abc'
>>> unpack(r'(?P<xyz>abc)')
'abc'
>>> unpack(r'[abc]')
'[abc]'
"""
if is_packed(regex):
return re.sub(r'^\((\?(:|P<.*?>))?(?P<content>.*?)\)$', r'\g<content>', regex)
else:
return regex
def unpack(regex):
"""
Remove the outermost parens, keep the (?P...) one
>>> unpack(r'(abc)')
'abc'
>>> unpack(r'(?:abc)')
'abc'
>>> unpack(r'(?P<xyz>abc)')
'(?P<xyz>abc)'
>>> unpack(r'[abc]')
'[abc]'
"""
if is_packed(regex) and not regex.startswith('(?P<'):
return re.sub(r'^\((\?:)?(?P<content>.*?)\)$', r'\g<content>', regex)
else:
return regex
def parallel(regex_list, sort=False):
"""
Join the given regexes using r'|'
if the sort=True, regexes will be sorted by lenth before processing
>>> parallel([r'abc', r'def'])
'abc|def'
>>> parallel([r'abc', r'd|ef'])
'abc|def'
>>> parallel([r'abc', r'(d|ef)'])
'abc|d|ef'
>>> parallel([r'abc', r'defg'])
'defg|abc'
"""
if sort:
regex_list = sorted(regex_list, key=len, reverse=True)
return '|'.join([unpack(regex) for regex in regex_list])
def nocatch(regex):
"""
Put on a pair of parens (with no catch tag) outside the regex,
if the regex is not yet packed;
modified the outmost parens by adding nocatch tag
"""
if is_solid(regex) and not is_packed(regex):
return regex
else:
return '(?:{})'.format(danger_unpack(regex))
def nocatchall(regex):
"""
Return a regex with all parens has a no catch tag
"""
return re.sub(r'(?<!\\)(?P<leading>(\\\\)*)\((\?(:|P<.*?>))?', r'\g<leading>(?:', regex)
def option(regex):
"""
return a regex has a option tag
>>> option(r'[ab]')
'[ab]?'
>>> option(r'(abc)')
'(abc)?'
>>> option('abc')
'(abc)?'
"""
return nocatch(regex) + '?'
|
jeroyang/txttk
|
txttk/nlptools.py
|
sent_tokenize
|
python
|
def sent_tokenize(context):
    """
    Cut *context* into sentences without discarding any character, so
    ``''.join(sent_tokenize(context)) == context``.

    Spans that must never contain a sentence break (paired brackets,
    float numbers, and some abbreviations) are temporarily replaced by
    ``{}`` placeholders, breaks are inserted, then the spans are
    restored via ``str.format``.

    >>> sent_tokenize("I love you. Please don't leave.")
    ["I love you. ", "Please don't leave."]
    """
    # Spans that should be protected from sentence splitting.
    paired_symbols = [("(", ")"),
                      ("[", "]"),
                      ("{", "}")]
    paired_patterns = ["%s.*?%s" % (re.escape(lt), re.escape(rt)) for lt, rt in paired_symbols]
    # Raw strings: '\d' in a plain literal is an invalid escape
    # sequence (SyntaxWarning on modern Pythons).
    number_pattern = [r'\d+\.\d+']
    arr_pattern = [r'(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]|\svs\. |et al\.|Fig\. \d|approx\.|(?:Prof|Dr)\. (?:[A-Z]\.)?']
    # Pull the protected spans out and leave '{}' placeholders behind.
    escape_re = re.compile("|".join(paired_patterns + number_pattern + arr_pattern))
    escapes = escape_re.findall(context)
    escaped_stem = escape_re.sub('{}', context)
    # Escape literal braces so str.format() only sees the placeholders.
    escaped_escaped_stem = escaped_stem.replace('{','{{').replace('}', '}}')
    # Mark the sentence boundaries.
    sent_re = re.compile(r'([A-Z0-9]..+?(?:[.!?]\s+|[\n$]+))')
    linebreaks = sent_re.findall(escaped_escaped_stem)
    sent_stem = sent_re.sub(r'\1###linebreak###', escaped_escaped_stem)
    # Un-escape the placeholders and put the protected spans back.
    recovered_sent_stem = sent_stem.replace('{{}}', '{}')
    result = recovered_sent_stem.format(*escapes)
    # Original used `r is not ''` — identity comparison against a
    # literal is unreliable and warns; compare by value instead.
    return [r for r in result.split('###linebreak###') if r != '']
|
Cut the given context into sentences.
Avoid a linebreak in between paired symbols, float numbers, and some abbreviations.
Nothing will be discarded after sent_tokenize; simply ''.join(sents) will restore the original context.
Every whitespace, tab, and linebreak will be kept.
>>> context = "I love you. Please don't leave."
>>> sent_tokenize(context)
["I love you. ", "Please don't leave."]
|
train
|
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/nlptools.py#L12-L45
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import re
from itertools import chain, combinations, cycle, islice
from collections import namedtuple
def sent_count(context):
    """
    Return the number of sentences found in *context*.

    >>> sent_count("I love you. Please don't leave.")
    2
    """
    sentences = sent_tokenize(context)
    return len(sentences)
def clause_tokenize(sentence):
    """
    Split *sentence* into clauses after a comma (or before a
    parenthesized clause) when each side carries at least three words.

    >>> clause_tokenize('While I was walking home, this bird fell down in front of me.')
    ['While I was walking home,', ' this bird fell down in front of me.']
    """
    marker = '###clausebreak###'
    clause_re = re.compile(r'((?:\S+\s){2,}\S+,|(?:\S+\s){3,}(?=\((?:\S+\s){2,}\S+\)))')
    marked = clause_re.sub(r'\1' + marker, sentence)
    return [clause for clause in marked.split(marker) if clause != '']
def word_tokenize(sentence):
"""
A generator which yields tokens based on the given sentence without deleting anything.
>>> context = "I love you. Please don't leave."
>>> list(word_tokenize(context))
['I', ' ', 'love', ' ', 'you', '.', ' ', 'Please', ' ', 'don', "'", 't', ' ', 'leave', '.']
"""
date_pattern = r'\d\d(\d\d)?[\\-]\d\d[\\-]\d\d(\d\d)?'
number_pattern = r'[\+-]?(\d+\.\d+|\d{1,3},(\d{3},)*\d{3}|\d+)'
arr_pattern = r'(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]'
word_pattern = r'[\w]+'
non_space_pattern = r'[{}]|\w'.format(re.escape('!"#$%&()*,./:;<=>?@[\]^_-`{|}~'))
space_pattern = r'\s'
anything_pattern = r'.'
patterns = [date_pattern, number_pattern, arr_pattern, word_pattern, non_space_pattern, space_pattern, anything_pattern]
big_pattern = r'|'.join([('(' + pattern + ')') for pattern in patterns])
for match in re.finditer(big_pattern, sentence):
yield match.group(0)
def slim_stem(token):
"""
A very simple stemmer, for entity of GO stemming.
>>> token = 'interaction'
>>> slim_stem(token)
'interact'
"""
target_sulfixs = ['ic', 'tic', 'e', 'ive', 'ing', 'ical', 'nal', 'al', 'ism', 'ion', 'ation', 'ar', 'sis', 'us', 'ment']
for sulfix in sorted(target_sulfixs, key=len, reverse=True):
if token.endswith(sulfix):
token = token[0:-len(sulfix)]
break
if token.endswith('ll'):
token = token[:-1]
return token
def powerset(iterable):
"""
powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
"""
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
def ngram(n, iter_tokens):
"""
Return a generator of n-gram from an iterable
"""
z = len(iter_tokens)
return (iter_tokens[i:i+n] for i in range(z-n+1))
def power_ngram(iter_tokens):
"""
Generate unigram, bigram, trigram ... and the max-gram,
different from powerset(), this function will not generate skipped combinations such as (1,3)
"""
return chain.from_iterable(ngram(j, iter_tokens) for j in range(1, len(iter_tokens) + 1))
def count_start(tokenizer):
"""
A decorator which wrap the given tokenizer to yield (token, start).
Notice! the decorated tokenizer must take a int arguments stands for the start position of the input context/sentence
>>> tokenizer = lambda sentence: sentence.split(' ')
>>> tokenizer('The quick brown fox jumps over the lazy dog')
['The', 'quick', 'brown', 'fox', 'jumps', 'over', 'the',
'lazy', 'dog']
>>> tokenizer = count_start(tokenizer)
>>> tokenizer('The quick brown fox jumps over the lazy dog', 0)
('The', 0)
('quick', 4)
...
"""
def wrapper(context, base):
tokens = list(tokenizer(context))
flag = 0
for token in tokens:
start = context.index(token, flag)
flag = start + len(token)
yield (token, base + start)
return wrapper
|
jeroyang/txttk
|
txttk/nlptools.py
|
clause_tokenize
|
python
|
def clause_tokenize(sentence):
clause_re = re.compile(r'((?:\S+\s){2,}\S+,|(?:\S+\s){3,}(?=\((?:\S+\s){2,}\S+\)))')
clause_stem = clause_re.sub(r'\1###clausebreak###', sentence)
return [c for c in clause_stem.split('###clausebreak###') if c != '']
|
Split on comma or parenthesis, if there are more then three words for each clause
>>> context = 'While I was walking home, this bird fell down in front of me.'
>>> clause_tokenize(context)
['While I was walking home,', ' this bird fell down in front of me.']
|
train
|
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/nlptools.py#L57-L68
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import re
from itertools import chain, combinations, cycle, islice
from collections import namedtuple
def sent_tokenize(context):
"""
Cut the given context into sentences.
Avoid a linebreak in between paried symbols, float numbers, and some abbrs.
Nothing will be discard after sent_tokeinze, simply ''.join(sents) will get the original context.
Evey whitespace, tab, linebreak will be kept.
>>> context = "I love you. Please don't leave."
>>> sent_tokenize(context)
["I love you. ", "Please don't leave."]
"""
# Define the regular expression
paired_symbols = [("(", ")"),
("[", "]"),
("{", "}")]
paired_patterns = ["%s.*?%s" % (re.escape(lt), re.escape(rt)) for lt, rt in paired_symbols]
number_pattern = ['\d+\.\d+']
arr_pattern = ['(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]|\svs\. |et al\.|Fig\. \d|approx\.|(?:Prof|Dr)\. (?:[A-Z]\.)?']
# Find the string which matches the above pattern, and remove than from the context, to get a stem string
escape_re = re.compile("|".join(paired_patterns + number_pattern + arr_pattern))
escapes = escape_re.findall(context)
escaped_stem = escape_re.sub('{}', context)
escaped_escaped_stem = escaped_stem.replace('{','{{').replace('}', '}}')
# Find the linebreaks
sent_re = re.compile(r'([A-Z0-9]..+?(?:[.!?]\s+|[\n$]+))')
linebreaks = sent_re.findall(escaped_escaped_stem)
sent_stem = sent_re.sub(r'\1###linebreak###', escaped_escaped_stem)
recovered_sent_stem = sent_stem.replace('{{}}', '{}')
result = recovered_sent_stem.format(*escapes)
return [r for r in result.split('###linebreak###') if r is not '']
def sent_count(context):
"""
Return the sentence counts for given context
>>> context = "I love you. Please don't leave."
>>> sent_count(context)
2
"""
return len(sent_tokenize(context))
def word_tokenize(sentence):
"""
A generator which yields tokens based on the given sentence without deleting anything.
>>> context = "I love you. Please don't leave."
>>> list(word_tokenize(context))
['I', ' ', 'love', ' ', 'you', '.', ' ', 'Please', ' ', 'don', "'", 't', ' ', 'leave', '.']
"""
date_pattern = r'\d\d(\d\d)?[\\-]\d\d[\\-]\d\d(\d\d)?'
number_pattern = r'[\+-]?(\d+\.\d+|\d{1,3},(\d{3},)*\d{3}|\d+)'
arr_pattern = r'(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]'
word_pattern = r'[\w]+'
non_space_pattern = r'[{}]|\w'.format(re.escape('!"#$%&()*,./:;<=>?@[\]^_-`{|}~'))
space_pattern = r'\s'
anything_pattern = r'.'
patterns = [date_pattern, number_pattern, arr_pattern, word_pattern, non_space_pattern, space_pattern, anything_pattern]
big_pattern = r'|'.join([('(' + pattern + ')') for pattern in patterns])
for match in re.finditer(big_pattern, sentence):
yield match.group(0)
def slim_stem(token):
"""
A very simple stemmer, for entity of GO stemming.
>>> token = 'interaction'
>>> slim_stem(token)
'interact'
"""
target_sulfixs = ['ic', 'tic', 'e', 'ive', 'ing', 'ical', 'nal', 'al', 'ism', 'ion', 'ation', 'ar', 'sis', 'us', 'ment']
for sulfix in sorted(target_sulfixs, key=len, reverse=True):
if token.endswith(sulfix):
token = token[0:-len(sulfix)]
break
if token.endswith('ll'):
token = token[:-1]
return token
def powerset(iterable):
"""
powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
"""
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
def ngram(n, iter_tokens):
"""
Return a generator of n-gram from an iterable
"""
z = len(iter_tokens)
return (iter_tokens[i:i+n] for i in range(z-n+1))
def power_ngram(iter_tokens):
"""
Generate unigram, bigram, trigram ... and the max-gram,
different from powerset(), this function will not generate skipped combinations such as (1,3)
"""
return chain.from_iterable(ngram(j, iter_tokens) for j in range(1, len(iter_tokens) + 1))
def count_start(tokenizer):
"""
A decorator which wrap the given tokenizer to yield (token, start).
Notice! the decorated tokenizer must take a int arguments stands for the start position of the input context/sentence
>>> tokenizer = lambda sentence: sentence.split(' ')
>>> tokenizer('The quick brown fox jumps over the lazy dog')
['The', 'quick', 'brown', 'fox', 'jumps', 'over', 'the',
'lazy', 'dog']
>>> tokenizer = count_start(tokenizer)
>>> tokenizer('The quick brown fox jumps over the lazy dog', 0)
('The', 0)
('quick', 4)
...
"""
def wrapper(context, base):
tokens = list(tokenizer(context))
flag = 0
for token in tokens:
start = context.index(token, flag)
flag = start + len(token)
yield (token, base + start)
return wrapper
|
jeroyang/txttk
|
txttk/nlptools.py
|
word_tokenize
|
python
|
def word_tokenize(sentence):
date_pattern = r'\d\d(\d\d)?[\\-]\d\d[\\-]\d\d(\d\d)?'
number_pattern = r'[\+-]?(\d+\.\d+|\d{1,3},(\d{3},)*\d{3}|\d+)'
arr_pattern = r'(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]'
word_pattern = r'[\w]+'
non_space_pattern = r'[{}]|\w'.format(re.escape('!"#$%&()*,./:;<=>?@[\]^_-`{|}~'))
space_pattern = r'\s'
anything_pattern = r'.'
patterns = [date_pattern, number_pattern, arr_pattern, word_pattern, non_space_pattern, space_pattern, anything_pattern]
big_pattern = r'|'.join([('(' + pattern + ')') for pattern in patterns])
for match in re.finditer(big_pattern, sentence):
yield match.group(0)
|
A generator which yields tokens based on the given sentence without deleting anything.
>>> context = "I love you. Please don't leave."
>>> list(word_tokenize(context))
['I', ' ', 'love', ' ', 'you', '.', ' ', 'Please', ' ', 'don', "'", 't', ' ', 'leave', '.']
|
train
|
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/nlptools.py#L70-L89
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import re
from itertools import chain, combinations, cycle, islice
from collections import namedtuple
def sent_tokenize(context):
"""
Cut the given context into sentences.
Avoid a linebreak in between paried symbols, float numbers, and some abbrs.
Nothing will be discard after sent_tokeinze, simply ''.join(sents) will get the original context.
Evey whitespace, tab, linebreak will be kept.
>>> context = "I love you. Please don't leave."
>>> sent_tokenize(context)
["I love you. ", "Please don't leave."]
"""
# Define the regular expression
paired_symbols = [("(", ")"),
("[", "]"),
("{", "}")]
paired_patterns = ["%s.*?%s" % (re.escape(lt), re.escape(rt)) for lt, rt in paired_symbols]
number_pattern = ['\d+\.\d+']
arr_pattern = ['(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]|\svs\. |et al\.|Fig\. \d|approx\.|(?:Prof|Dr)\. (?:[A-Z]\.)?']
# Find the string which matches the above pattern, and remove than from the context, to get a stem string
escape_re = re.compile("|".join(paired_patterns + number_pattern + arr_pattern))
escapes = escape_re.findall(context)
escaped_stem = escape_re.sub('{}', context)
escaped_escaped_stem = escaped_stem.replace('{','{{').replace('}', '}}')
# Find the linebreaks
sent_re = re.compile(r'([A-Z0-9]..+?(?:[.!?]\s+|[\n$]+))')
linebreaks = sent_re.findall(escaped_escaped_stem)
sent_stem = sent_re.sub(r'\1###linebreak###', escaped_escaped_stem)
recovered_sent_stem = sent_stem.replace('{{}}', '{}')
result = recovered_sent_stem.format(*escapes)
return [r for r in result.split('###linebreak###') if r is not '']
def sent_count(context):
"""
Return the sentence counts for given context
>>> context = "I love you. Please don't leave."
>>> sent_count(context)
2
"""
return len(sent_tokenize(context))
def clause_tokenize(sentence):
"""
Split on comma or parenthesis, if there are more then three words for each clause
>>> context = 'While I was walking home, this bird fell down in front of me.'
>>> clause_tokenize(context)
['While I was walking home,', ' this bird fell down in front of me.']
"""
clause_re = re.compile(r'((?:\S+\s){2,}\S+,|(?:\S+\s){3,}(?=\((?:\S+\s){2,}\S+\)))')
clause_stem = clause_re.sub(r'\1###clausebreak###', sentence)
return [c for c in clause_stem.split('###clausebreak###') if c != '']
def slim_stem(token):
"""
A very simple stemmer, for entity of GO stemming.
>>> token = 'interaction'
>>> slim_stem(token)
'interact'
"""
target_sulfixs = ['ic', 'tic', 'e', 'ive', 'ing', 'ical', 'nal', 'al', 'ism', 'ion', 'ation', 'ar', 'sis', 'us', 'ment']
for sulfix in sorted(target_sulfixs, key=len, reverse=True):
if token.endswith(sulfix):
token = token[0:-len(sulfix)]
break
if token.endswith('ll'):
token = token[:-1]
return token
def powerset(iterable):
"""
powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
"""
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
def ngram(n, iter_tokens):
"""
Return a generator of n-gram from an iterable
"""
z = len(iter_tokens)
return (iter_tokens[i:i+n] for i in range(z-n+1))
def power_ngram(iter_tokens):
"""
Generate unigram, bigram, trigram ... and the max-gram,
different from powerset(), this function will not generate skipped combinations such as (1,3)
"""
return chain.from_iterable(ngram(j, iter_tokens) for j in range(1, len(iter_tokens) + 1))
def count_start(tokenizer):
"""
A decorator which wrap the given tokenizer to yield (token, start).
Notice! the decorated tokenizer must take a int arguments stands for the start position of the input context/sentence
>>> tokenizer = lambda sentence: sentence.split(' ')
>>> tokenizer('The quick brown fox jumps over the lazy dog')
['The', 'quick', 'brown', 'fox', 'jumps', 'over', 'the',
'lazy', 'dog']
>>> tokenizer = count_start(tokenizer)
>>> tokenizer('The quick brown fox jumps over the lazy dog', 0)
('The', 0)
('quick', 4)
...
"""
def wrapper(context, base):
tokens = list(tokenizer(context))
flag = 0
for token in tokens:
start = context.index(token, flag)
flag = start + len(token)
yield (token, base + start)
return wrapper
|
jeroyang/txttk
|
txttk/nlptools.py
|
slim_stem
|
python
|
def slim_stem(token):
target_sulfixs = ['ic', 'tic', 'e', 'ive', 'ing', 'ical', 'nal', 'al', 'ism', 'ion', 'ation', 'ar', 'sis', 'us', 'ment']
for sulfix in sorted(target_sulfixs, key=len, reverse=True):
if token.endswith(sulfix):
token = token[0:-len(sulfix)]
break
if token.endswith('ll'):
token = token[:-1]
return token
|
A very simple stemmer, for entity of GO stemming.
>>> token = 'interaction'
>>> slim_stem(token)
'interact'
|
train
|
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/nlptools.py#L91-L107
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import re
from itertools import chain, combinations, cycle, islice
from collections import namedtuple
def sent_tokenize(context):
"""
Cut the given context into sentences.
Avoid a linebreak in between paried symbols, float numbers, and some abbrs.
Nothing will be discard after sent_tokeinze, simply ''.join(sents) will get the original context.
Evey whitespace, tab, linebreak will be kept.
>>> context = "I love you. Please don't leave."
>>> sent_tokenize(context)
["I love you. ", "Please don't leave."]
"""
# Define the regular expression
paired_symbols = [("(", ")"),
("[", "]"),
("{", "}")]
paired_patterns = ["%s.*?%s" % (re.escape(lt), re.escape(rt)) for lt, rt in paired_symbols]
number_pattern = ['\d+\.\d+']
arr_pattern = ['(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]|\svs\. |et al\.|Fig\. \d|approx\.|(?:Prof|Dr)\. (?:[A-Z]\.)?']
# Find the string which matches the above pattern, and remove than from the context, to get a stem string
escape_re = re.compile("|".join(paired_patterns + number_pattern + arr_pattern))
escapes = escape_re.findall(context)
escaped_stem = escape_re.sub('{}', context)
escaped_escaped_stem = escaped_stem.replace('{','{{').replace('}', '}}')
# Find the linebreaks
sent_re = re.compile(r'([A-Z0-9]..+?(?:[.!?]\s+|[\n$]+))')
linebreaks = sent_re.findall(escaped_escaped_stem)
sent_stem = sent_re.sub(r'\1###linebreak###', escaped_escaped_stem)
recovered_sent_stem = sent_stem.replace('{{}}', '{}')
result = recovered_sent_stem.format(*escapes)
return [r for r in result.split('###linebreak###') if r is not '']
def sent_count(context):
"""
Return the sentence counts for given context
>>> context = "I love you. Please don't leave."
>>> sent_count(context)
2
"""
return len(sent_tokenize(context))
def clause_tokenize(sentence):
"""
Split on comma or parenthesis, if there are more then three words for each clause
>>> context = 'While I was walking home, this bird fell down in front of me.'
>>> clause_tokenize(context)
['While I was walking home,', ' this bird fell down in front of me.']
"""
clause_re = re.compile(r'((?:\S+\s){2,}\S+,|(?:\S+\s){3,}(?=\((?:\S+\s){2,}\S+\)))')
clause_stem = clause_re.sub(r'\1###clausebreak###', sentence)
return [c for c in clause_stem.split('###clausebreak###') if c != '']
def word_tokenize(sentence):
"""
A generator which yields tokens based on the given sentence without deleting anything.
>>> context = "I love you. Please don't leave."
>>> list(word_tokenize(context))
['I', ' ', 'love', ' ', 'you', '.', ' ', 'Please', ' ', 'don', "'", 't', ' ', 'leave', '.']
"""
date_pattern = r'\d\d(\d\d)?[\\-]\d\d[\\-]\d\d(\d\d)?'
number_pattern = r'[\+-]?(\d+\.\d+|\d{1,3},(\d{3},)*\d{3}|\d+)'
arr_pattern = r'(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]'
word_pattern = r'[\w]+'
non_space_pattern = r'[{}]|\w'.format(re.escape('!"#$%&()*,./:;<=>?@[\]^_-`{|}~'))
space_pattern = r'\s'
anything_pattern = r'.'
patterns = [date_pattern, number_pattern, arr_pattern, word_pattern, non_space_pattern, space_pattern, anything_pattern]
big_pattern = r'|'.join([('(' + pattern + ')') for pattern in patterns])
for match in re.finditer(big_pattern, sentence):
yield match.group(0)
def powerset(iterable):
"""
powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
"""
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
def ngram(n, iter_tokens):
"""
Return a generator of n-gram from an iterable
"""
z = len(iter_tokens)
return (iter_tokens[i:i+n] for i in range(z-n+1))
def power_ngram(iter_tokens):
"""
Generate unigram, bigram, trigram ... and the max-gram,
different from powerset(), this function will not generate skipped combinations such as (1,3)
"""
return chain.from_iterable(ngram(j, iter_tokens) for j in range(1, len(iter_tokens) + 1))
def count_start(tokenizer):
"""
A decorator which wrap the given tokenizer to yield (token, start).
Notice! the decorated tokenizer must take a int arguments stands for the start position of the input context/sentence
>>> tokenizer = lambda sentence: sentence.split(' ')
>>> tokenizer('The quick brown fox jumps over the lazy dog')
['The', 'quick', 'brown', 'fox', 'jumps', 'over', 'the',
'lazy', 'dog']
>>> tokenizer = count_start(tokenizer)
>>> tokenizer('The quick brown fox jumps over the lazy dog', 0)
('The', 0)
('quick', 4)
...
"""
def wrapper(context, base):
tokens = list(tokenizer(context))
flag = 0
for token in tokens:
start = context.index(token, flag)
flag = start + len(token)
yield (token, base + start)
return wrapper
|
jeroyang/txttk
|
txttk/nlptools.py
|
ngram
|
python
|
def ngram(n, iter_tokens):
z = len(iter_tokens)
return (iter_tokens[i:i+n] for i in range(z-n+1))
|
Return a generator of n-gram from an iterable
|
train
|
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/nlptools.py#L116-L121
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import re
from itertools import chain, combinations, cycle, islice
from collections import namedtuple
def sent_tokenize(context):
"""
Cut the given context into sentences.
Avoid a linebreak in between paried symbols, float numbers, and some abbrs.
Nothing will be discard after sent_tokeinze, simply ''.join(sents) will get the original context.
Evey whitespace, tab, linebreak will be kept.
>>> context = "I love you. Please don't leave."
>>> sent_tokenize(context)
["I love you. ", "Please don't leave."]
"""
# Define the regular expression
paired_symbols = [("(", ")"),
("[", "]"),
("{", "}")]
paired_patterns = ["%s.*?%s" % (re.escape(lt), re.escape(rt)) for lt, rt in paired_symbols]
number_pattern = ['\d+\.\d+']
arr_pattern = ['(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]|\svs\. |et al\.|Fig\. \d|approx\.|(?:Prof|Dr)\. (?:[A-Z]\.)?']
# Find the string which matches the above pattern, and remove than from the context, to get a stem string
escape_re = re.compile("|".join(paired_patterns + number_pattern + arr_pattern))
escapes = escape_re.findall(context)
escaped_stem = escape_re.sub('{}', context)
escaped_escaped_stem = escaped_stem.replace('{','{{').replace('}', '}}')
# Find the linebreaks
sent_re = re.compile(r'([A-Z0-9]..+?(?:[.!?]\s+|[\n$]+))')
linebreaks = sent_re.findall(escaped_escaped_stem)
sent_stem = sent_re.sub(r'\1###linebreak###', escaped_escaped_stem)
recovered_sent_stem = sent_stem.replace('{{}}', '{}')
result = recovered_sent_stem.format(*escapes)
return [r for r in result.split('###linebreak###') if r is not '']
def sent_count(context):
"""
Return the sentence counts for given context
>>> context = "I love you. Please don't leave."
>>> sent_count(context)
2
"""
return len(sent_tokenize(context))
def clause_tokenize(sentence):
"""
Split on comma or parenthesis, if there are more then three words for each clause
>>> context = 'While I was walking home, this bird fell down in front of me.'
>>> clause_tokenize(context)
['While I was walking home,', ' this bird fell down in front of me.']
"""
clause_re = re.compile(r'((?:\S+\s){2,}\S+,|(?:\S+\s){3,}(?=\((?:\S+\s){2,}\S+\)))')
clause_stem = clause_re.sub(r'\1###clausebreak###', sentence)
return [c for c in clause_stem.split('###clausebreak###') if c != '']
def word_tokenize(sentence):
"""
A generator which yields tokens based on the given sentence without deleting anything.
>>> context = "I love you. Please don't leave."
>>> list(word_tokenize(context))
['I', ' ', 'love', ' ', 'you', '.', ' ', 'Please', ' ', 'don', "'", 't', ' ', 'leave', '.']
"""
date_pattern = r'\d\d(\d\d)?[\\-]\d\d[\\-]\d\d(\d\d)?'
number_pattern = r'[\+-]?(\d+\.\d+|\d{1,3},(\d{3},)*\d{3}|\d+)'
arr_pattern = r'(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]'
word_pattern = r'[\w]+'
non_space_pattern = r'[{}]|\w'.format(re.escape('!"#$%&()*,./:;<=>?@[\]^_-`{|}~'))
space_pattern = r'\s'
anything_pattern = r'.'
patterns = [date_pattern, number_pattern, arr_pattern, word_pattern, non_space_pattern, space_pattern, anything_pattern]
big_pattern = r'|'.join([('(' + pattern + ')') for pattern in patterns])
for match in re.finditer(big_pattern, sentence):
yield match.group(0)
def slim_stem(token):
"""
A very simple stemmer, for entity of GO stemming.
>>> token = 'interaction'
>>> slim_stem(token)
'interact'
"""
target_sulfixs = ['ic', 'tic', 'e', 'ive', 'ing', 'ical', 'nal', 'al', 'ism', 'ion', 'ation', 'ar', 'sis', 'us', 'ment']
for sulfix in sorted(target_sulfixs, key=len, reverse=True):
if token.endswith(sulfix):
token = token[0:-len(sulfix)]
break
if token.endswith('ll'):
token = token[:-1]
return token
def powerset(iterable):
"""
powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
"""
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
def power_ngram(iter_tokens):
"""
Generate unigram, bigram, trigram ... and the max-gram,
different from powerset(), this function will not generate skipped combinations such as (1,3)
"""
return chain.from_iterable(ngram(j, iter_tokens) for j in range(1, len(iter_tokens) + 1))
def count_start(tokenizer):
"""
A decorator which wrap the given tokenizer to yield (token, start).
Notice! the decorated tokenizer must take a int arguments stands for the start position of the input context/sentence
>>> tokenizer = lambda sentence: sentence.split(' ')
>>> tokenizer('The quick brown fox jumps over the lazy dog')
['The', 'quick', 'brown', 'fox', 'jumps', 'over', 'the',
'lazy', 'dog']
>>> tokenizer = count_start(tokenizer)
>>> tokenizer('The quick brown fox jumps over the lazy dog', 0)
('The', 0)
('quick', 4)
...
"""
def wrapper(context, base):
tokens = list(tokenizer(context))
flag = 0
for token in tokens:
start = context.index(token, flag)
flag = start + len(token)
yield (token, base + start)
return wrapper
|
jeroyang/txttk
|
txttk/nlptools.py
|
power_ngram
|
python
|
def power_ngram(iter_tokens):
return chain.from_iterable(ngram(j, iter_tokens) for j in range(1, len(iter_tokens) + 1))
|
Generate unigram, bigram, trigram ... and the max-gram,
different from powerset(), this function will not generate skipped combinations such as (1,3)
|
train
|
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/nlptools.py#L123-L128
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import re
from itertools import chain, combinations, cycle, islice
from collections import namedtuple
def sent_tokenize(context):
"""
Cut the given context into sentences.
Avoid a linebreak in between paried symbols, float numbers, and some abbrs.
Nothing will be discard after sent_tokeinze, simply ''.join(sents) will get the original context.
Evey whitespace, tab, linebreak will be kept.
>>> context = "I love you. Please don't leave."
>>> sent_tokenize(context)
["I love you. ", "Please don't leave."]
"""
# Define the regular expression
paired_symbols = [("(", ")"),
("[", "]"),
("{", "}")]
paired_patterns = ["%s.*?%s" % (re.escape(lt), re.escape(rt)) for lt, rt in paired_symbols]
number_pattern = ['\d+\.\d+']
arr_pattern = ['(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]|\svs\. |et al\.|Fig\. \d|approx\.|(?:Prof|Dr)\. (?:[A-Z]\.)?']
# Find the string which matches the above pattern, and remove than from the context, to get a stem string
escape_re = re.compile("|".join(paired_patterns + number_pattern + arr_pattern))
escapes = escape_re.findall(context)
escaped_stem = escape_re.sub('{}', context)
escaped_escaped_stem = escaped_stem.replace('{','{{').replace('}', '}}')
# Find the linebreaks
sent_re = re.compile(r'([A-Z0-9]..+?(?:[.!?]\s+|[\n$]+))')
linebreaks = sent_re.findall(escaped_escaped_stem)
sent_stem = sent_re.sub(r'\1###linebreak###', escaped_escaped_stem)
recovered_sent_stem = sent_stem.replace('{{}}', '{}')
result = recovered_sent_stem.format(*escapes)
return [r for r in result.split('###linebreak###') if r is not '']
def sent_count(context):
"""
Return the sentence counts for given context
>>> context = "I love you. Please don't leave."
>>> sent_count(context)
2
"""
return len(sent_tokenize(context))
def clause_tokenize(sentence):
"""
Split on comma or parenthesis, if there are more then three words for each clause
>>> context = 'While I was walking home, this bird fell down in front of me.'
>>> clause_tokenize(context)
['While I was walking home,', ' this bird fell down in front of me.']
"""
clause_re = re.compile(r'((?:\S+\s){2,}\S+,|(?:\S+\s){3,}(?=\((?:\S+\s){2,}\S+\)))')
clause_stem = clause_re.sub(r'\1###clausebreak###', sentence)
return [c for c in clause_stem.split('###clausebreak###') if c != '']
def word_tokenize(sentence):
"""
A generator which yields tokens based on the given sentence without deleting anything.
>>> context = "I love you. Please don't leave."
>>> list(word_tokenize(context))
['I', ' ', 'love', ' ', 'you', '.', ' ', 'Please', ' ', 'don', "'", 't', ' ', 'leave', '.']
"""
date_pattern = r'\d\d(\d\d)?[\\-]\d\d[\\-]\d\d(\d\d)?'
number_pattern = r'[\+-]?(\d+\.\d+|\d{1,3},(\d{3},)*\d{3}|\d+)'
arr_pattern = r'(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]'
word_pattern = r'[\w]+'
non_space_pattern = r'[{}]|\w'.format(re.escape('!"#$%&()*,./:;<=>?@[\]^_-`{|}~'))
space_pattern = r'\s'
anything_pattern = r'.'
patterns = [date_pattern, number_pattern, arr_pattern, word_pattern, non_space_pattern, space_pattern, anything_pattern]
big_pattern = r'|'.join([('(' + pattern + ')') for pattern in patterns])
for match in re.finditer(big_pattern, sentence):
yield match.group(0)
def slim_stem(token):
"""
A very simple stemmer, for entity of GO stemming.
>>> token = 'interaction'
>>> slim_stem(token)
'interact'
"""
target_sulfixs = ['ic', 'tic', 'e', 'ive', 'ing', 'ical', 'nal', 'al', 'ism', 'ion', 'ation', 'ar', 'sis', 'us', 'ment']
for sulfix in sorted(target_sulfixs, key=len, reverse=True):
if token.endswith(sulfix):
token = token[0:-len(sulfix)]
break
if token.endswith('ll'):
token = token[:-1]
return token
def powerset(iterable):
"""
powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
"""
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in range(len(s)+1))
def ngram(n, iter_tokens):
"""
Return a generator of n-gram from an iterable
"""
z = len(iter_tokens)
return (iter_tokens[i:i+n] for i in range(z-n+1))
def count_start(tokenizer):
"""
A decorator which wrap the given tokenizer to yield (token, start).
Notice! the decorated tokenizer must take a int arguments stands for the start position of the input context/sentence
>>> tokenizer = lambda sentence: sentence.split(' ')
>>> tokenizer('The quick brown fox jumps over the lazy dog')
['The', 'quick', 'brown', 'fox', 'jumps', 'over', 'the',
'lazy', 'dog']
>>> tokenizer = count_start(tokenizer)
>>> tokenizer('The quick brown fox jumps over the lazy dog', 0)
('The', 0)
('quick', 4)
...
"""
def wrapper(context, base):
tokens = list(tokenizer(context))
flag = 0
for token in tokens:
start = context.index(token, flag)
flag = start + len(token)
yield (token, base + start)
return wrapper
|
jeroyang/txttk
|
txttk/nlptools.py
|
count_start
|
python
|
def count_start(tokenizer):
def wrapper(context, base):
tokens = list(tokenizer(context))
flag = 0
for token in tokens:
start = context.index(token, flag)
flag = start + len(token)
yield (token, base + start)
return wrapper
|
A decorator which wrap the given tokenizer to yield (token, start).
Notice! the decorated tokenizer must take a int arguments stands for the start position of the input context/sentence
>>> tokenizer = lambda sentence: sentence.split(' ')
>>> tokenizer('The quick brown fox jumps over the lazy dog')
['The', 'quick', 'brown', 'fox', 'jumps', 'over', 'the',
'lazy', 'dog']
>>> tokenizer = count_start(tokenizer)
>>> tokenizer('The quick brown fox jumps over the lazy dog', 0)
('The', 0)
('quick', 4)
...
|
train
|
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/nlptools.py#L130-L154
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import re
from itertools import chain, combinations, cycle, islice
from collections import namedtuple
def sent_tokenize(context):
"""
Cut the given context into sentences.
Avoid a linebreak in between paried symbols, float numbers, and some abbrs.
Nothing will be discard after sent_tokeinze, simply ''.join(sents) will get the original context.
Evey whitespace, tab, linebreak will be kept.
>>> context = "I love you. Please don't leave."
>>> sent_tokenize(context)
["I love you. ", "Please don't leave."]
"""
# Define the regular expression
paired_symbols = [("(", ")"),
("[", "]"),
("{", "}")]
paired_patterns = ["%s.*?%s" % (re.escape(lt), re.escape(rt)) for lt, rt in paired_symbols]
number_pattern = ['\d+\.\d+']
arr_pattern = ['(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]|\svs\. |et al\.|Fig\. \d|approx\.|(?:Prof|Dr)\. (?:[A-Z]\.)?']
# Find the string which matches the above pattern, and remove than from the context, to get a stem string
escape_re = re.compile("|".join(paired_patterns + number_pattern + arr_pattern))
escapes = escape_re.findall(context)
escaped_stem = escape_re.sub('{}', context)
escaped_escaped_stem = escaped_stem.replace('{','{{').replace('}', '}}')
# Find the linebreaks
sent_re = re.compile(r'([A-Z0-9]..+?(?:[.!?]\s+|[\n$]+))')
linebreaks = sent_re.findall(escaped_escaped_stem)
sent_stem = sent_re.sub(r'\1###linebreak###', escaped_escaped_stem)
recovered_sent_stem = sent_stem.replace('{{}}', '{}')
result = recovered_sent_stem.format(*escapes)
return [r for r in result.split('###linebreak###') if r is not '']
def sent_count(context):
"""
Return the sentence counts for given context
>>> context = "I love you. Please don't leave."
>>> sent_count(context)
2
"""
return len(sent_tokenize(context))
def clause_tokenize(sentence):
    """
    Split a sentence on a comma or before a parenthesis, but only when each
    resulting clause keeps at least three words.
    >>> context = 'While I was walking home, this bird fell down in front of me.'
    >>> clause_tokenize(context)
    ['While I was walking home,', ' this bird fell down in front of me.']
    """
    splitter = re.compile(r'((?:\S+\s){2,}\S+,|(?:\S+\s){3,}(?=\((?:\S+\s){2,}\S+\)))')
    marked = splitter.sub(r'\1###clausebreak###', sentence)
    pieces = marked.split('###clausebreak###')
    return [piece for piece in pieces if piece != '']
def word_tokenize(sentence):
    """
    Yield every token of *sentence* without discarding anything, so that
    ''.join(word_tokenize(s)) reproduces s exactly.
    >>> context = "I love you. Please don't leave."
    >>> list(word_tokenize(context))
    ['I', ' ', 'love', ' ', 'you', '.', ' ', 'Please', ' ', 'don', "'", 't', ' ', 'leave', '.']
    """
    # Alternatives are tried left to right; the bare '.' is the catch-all.
    patterns = [
        r'\d\d(\d\d)?[\\-]\d\d[\\-]\d\d(\d\d)?',                         # dates
        r'[\+-]?(\d+\.\d+|\d{1,3},(\d{3},)*\d{3}|\d+)',                  # numbers
        r'(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]',          # abbreviations
        r'[\w]+',                                                        # words
        r'[{}]|\w'.format(re.escape('!"#$%&()*,./:;<=>?@[\]^_-`{|}~')),  # punctuation
        r'\s',                                                           # whitespace
        r'.',                                                            # anything else
    ]
    combined = re.compile('|'.join('(%s)' % pattern for pattern in patterns))
    for match in combined.finditer(sentence):
        yield match.group(0)
def slim_stem(token):
    """
    A very simple suffix-stripping stemmer (for GO entity stemming):
    remove the single longest matching suffix, then collapse a trailing 'll'.
    >>> token = 'interaction'
    >>> slim_stem(token)
    'interact'
    """
    # Longest suffixes are tried first; the sort is stable, so ties keep
    # their original relative order.
    suffixes = sorted(
        ['ic', 'tic', 'e', 'ive', 'ing', 'ical', 'nal', 'al', 'ism',
         'ion', 'ation', 'ar', 'sis', 'us', 'ment'],
        key=len, reverse=True)
    for suffix in suffixes:
        if token.endswith(suffix):
            token = token[:-len(suffix)]
            break
    return token[:-1] if token.endswith('ll') else token
def powerset(iterable):
    """
    powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
    """
    pool = list(iterable)
    subsets_by_size = (combinations(pool, size) for size in range(len(pool) + 1))
    return chain.from_iterable(subsets_by_size)
def ngram(n, iter_tokens):
    """
    Return a generator of n-grams (consecutive length-n slices) from a
    sequence; the argument must support len() and slicing.
    """
    total = len(iter_tokens)
    return (iter_tokens[start:start + n] for start in range(total - n + 1))
def power_ngram(iter_tokens):
    """
    Generate the unigrams, bigrams, trigrams ... up to the max-gram.
    Unlike powerset(), no skipped combinations such as (1,3) are produced.
    """
    total = len(iter_tokens)
    grams_by_size = (ngram(size, iter_tokens) for size in range(1, total + 1))
    return chain.from_iterable(grams_by_size)
|
jeroyang/txttk
|
txttk/feature.py
|
lexical
|
python
|
def lexical(token):
    """
    Return an OrderedDict of lexical features for *token*: its lowercase
    form plus the first and last four characters of that lowercase form.
    """
    folded = token.lower()
    return OrderedDict([
        ('lowercase', folded),
        ('first4', folded[:4]),
        ('last4', folded[-4:]),
    ])
|
Extract lexical features from given token
There are 3 kinds of lexical features, take 'Hello' as an example:
1. lowercase: 'hello'
2. first4: 'hell'
3. last4: 'ello'
|
train
|
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/feature.py#L12-L28
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from collections import OrderedDict
import re
import string
def _char_shape(char):
if char in string.ascii_uppercase:
return 'A'
if char in string.ascii_lowercase:
return 'a'
if char in string.digits:
return '0'
else:
return char
def _shape(token):
    """Return the shape string of *token*, one shape class per character."""
    return ''.join(map(_char_shape, token))
def _contains_a_letter(token):
regex = r'[A-Za-z]'
if re.search(regex, token):
return True
else:
return False
def _contains_a_capital(token):
regex = r'[A-Z]'
if re.search(regex, token):
return True
else:
return False
def _begins_with_capital(token):
    """Return True if the first character of *token* is an ASCII capital."""
    return token[0] in string.ascii_uppercase
def _all_capital(token):
regex = r'^[A-Z]+$'
if re.match(regex, token):
return True
else:
return False
def _contains_a_digit(token):
regex = r'\d'
if re.search(regex, token):
return True
else:
return False
def _all_digit(token):
regex = r'^\d+$'
if re.match(regex, token):
return True
else:
return False
def _contains_a_punctuation(token):
return len(set(string.punctuation) & set(token)) > 0
def _consists_letters_n_digits(token):
    """Return True if the token's shape holds both letters and digits and
    nothing else (case ignored)."""
    lowered_shape = _shape(token).lower()
    return set(lowered_shape) == set('a0')
def _consists_digits_n_punctuations(token):
    """Return True for tokens of length >= 2 whose shape contains only
    digit and punctuation classes."""
    lowered_shape = _shape(token).lower()
    allowed = set(string.punctuation + '0')
    return set(lowered_shape) <= allowed and len(lowered_shape) >= 2
def orthographic(token):
    """
    Extract 11 orthographic features from a given token.

    For 'Windows10' the features are: shape 'Aaaaaaa00', length 9,
    contains_a_letter True, contains_a_capital True, begins_with_capital
    True, all_capital False, contains_a_digit True, all_digit False,
    contains_a_punctuation False, consists_letters_n_digits True,
    consists_digits_n_punctuations False.
    """
    feature_pairs = (
        ('shape', _shape(token)),
        ('length', len(token)),
        ('contains_a_letter', _contains_a_letter(token)),
        ('contains_a_capital', _contains_a_capital(token)),
        ('begins_with_capital', _begins_with_capital(token)),
        ('all_capital', _all_capital(token)),
        ('contains_a_digit', _contains_a_digit(token)),
        ('all_digit', _all_digit(token)),
        ('contains_a_punctuation', _contains_a_punctuation(token)),
        ('consists_letters_n_digits', _consists_letters_n_digits(token)),
        ('consists_digits_n_punctuations', _consists_digits_n_punctuations(token)),
    )
    return OrderedDict(feature_pairs)
|
jeroyang/txttk
|
txttk/feature.py
|
orthographic
|
python
|
def orthographic(token):
return OrderedDict([
('shape', _shape(token)),
('length', len(token)),
('contains_a_letter', _contains_a_letter(token)),
('contains_a_capital', _contains_a_capital(token)),
('begins_with_capital', _begins_with_capital(token)),
('all_capital', _all_capital(token)),
('contains_a_digit', _contains_a_digit(token)),
('all_digit', _all_digit(token)),
('contains_a_punctuation', _contains_a_punctuation(token)),
('consists_letters_n_digits', _consists_letters_n_digits(token)),
('consists_digits_n_punctuations', _consists_digits_n_punctuations(token)),
])
|
Extract orthographic features from a given token
There are 11 kinds of orthographic features, take 'Windows10' as an example:
1. shape: 'Aaaaaaa00'
2. length: 9
3. contains_a_letter: True
4. contains_a_capital: True
5. begins_with_capital: True
6. all_capital: False
7. contains_a_digit: True
8. all_digit: False
9. contains_a_punctuation: False
10. consists_letters_n_digits: True
11. consists_digits_n_punctuations: False
|
train
|
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/feature.py#L93-L124
|
[
"def _shape(token):\n return ''.join([_char_shape(char) for char in token])\n",
"def _contains_a_letter(token):\n regex = r'[A-Za-z]'\n if re.search(regex, token):\n return True\n else:\n return False\n",
"def _contains_a_capital(token):\n regex = r'[A-Z]'\n if re.search(regex, token):\n return True\n else:\n return False\n",
"def _begins_with_capital(token):\n return _char_shape(token[0]) == 'A'\n",
"def _all_capital(token):\n regex = r'^[A-Z]+$'\n if re.match(regex, token):\n return True\n else:\n return False\n",
"def _contains_a_digit(token):\n regex = r'\\d'\n if re.search(regex, token):\n return True\n else:\n return False\n",
"def _all_digit(token):\n regex = r'^\\d+$'\n if re.match(regex, token):\n return True\n else:\n return False\n",
"def _contains_a_punctuation(token):\n return len(set(string.punctuation) & set(token)) > 0\n",
"def _consists_letters_n_digits(token):\n shape = _shape(token)\n return set(shape.lower()) == set('a0')\n",
"def _consists_digits_n_punctuations(token):\n shape = _shape(token)\n lower_shape = shape.lower()\n return set(lower_shape) <= set(string.punctuation+'0') and len(lower_shape) >= 2\n"
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
from collections import OrderedDict
import re
import string
def lexical(token):
"""
Extract lexical features from given token
There are 3 kinds of lexical features, take 'Hello' as an example:
1. lowercase: 'hello'
2. first4: 'hell'
3. last4: 'ello'
"""
lowercase = token.lower()
first4 = lowercase[:4]
last4 = lowercase[-4:]
return OrderedDict([
('lowercase', lowercase),
('first4', first4),
('last4', last4)
])
def _char_shape(char):
if char in string.ascii_uppercase:
return 'A'
if char in string.ascii_lowercase:
return 'a'
if char in string.digits:
return '0'
else:
return char
def _shape(token):
return ''.join([_char_shape(char) for char in token])
def _contains_a_letter(token):
regex = r'[A-Za-z]'
if re.search(regex, token):
return True
else:
return False
def _contains_a_capital(token):
regex = r'[A-Z]'
if re.search(regex, token):
return True
else:
return False
def _begins_with_capital(token):
return _char_shape(token[0]) == 'A'
def _all_capital(token):
regex = r'^[A-Z]+$'
if re.match(regex, token):
return True
else:
return False
def _contains_a_digit(token):
regex = r'\d'
if re.search(regex, token):
return True
else:
return False
def _all_digit(token):
regex = r'^\d+$'
if re.match(regex, token):
return True
else:
return False
def _contains_a_punctuation(token):
return len(set(string.punctuation) & set(token)) > 0
def _consists_letters_n_digits(token):
shape = _shape(token)
return set(shape.lower()) == set('a0')
def _consists_digits_n_punctuations(token):
shape = _shape(token)
lower_shape = shape.lower()
return set(lower_shape) <= set(string.punctuation+'0') and len(lower_shape) >= 2
def orthographic(token):
"""
Extract orthographic features from a given token
There are 11 kinds of orthographic features, take 'Windows10' as an example:
1. shape: 'Aaaaaaa00'
2. length: 9
3. contains_a_letter: True
4. contains_a_capital: True
5. begins_with_capital: True
6. all_capital: False
7. contains_a_digit: True
8. all_digit: False
9. contains_a_punctuation: False
10. consists_letters_n_digits: True
11. consists_digits_n_punctuations: False
"""
return OrderedDict([
('shape', _shape(token)),
('length', len(token)),
('contains_a_letter', _contains_a_letter(token)),
('contains_a_capital', _contains_a_capital(token)),
('begins_with_capital', _begins_with_capital(token)),
('all_capital', _all_capital(token)),
('contains_a_digit', _contains_a_digit(token)),
('all_digit', _all_digit(token)),
('contains_a_punctuation', _contains_a_punctuation(token)),
('consists_letters_n_digits', _consists_letters_n_digits(token)),
('consists_digits_n_punctuations', _consists_digits_n_punctuations(token)),
])
|
delfick/gitmit
|
gitmit/mit.py
|
GitTimes.relpath_for
|
python
|
def relpath_for(self, path):
if self.parent_dir in (".", ""):
return path
if path == self.parent_dir:
return ""
dirname = os.path.dirname(path) or "."
basename = os.path.basename(path)
cached = self.relpath_cache.get(dirname, empty)
if cached is empty:
cached = self.relpath_cache[dirname] = os.path.relpath(dirname, self.parent_dir)
return os.path.join(cached, basename)
|
Find the path of the given file relative to the parent_dir
|
train
|
https://github.com/delfick/gitmit/blob/ae0aef14a06b25ad2811f8f47cc97e68a0910eae/gitmit/mit.py#L57-L72
| null |
class GitTimes(object):
    """
    Responsible for determining what files we want commit times for and then
    finding those commit times.
    The order is something like:
    * Git root under ``root_folder``
    * Files under ``parent_dir`` relative to the git root
    * Only including those files in ``timestamps_for`` relative to parent_dir
    * Exclude files in ``exclude`` relative to parent_dir
    * Re-include any files in ``include`` relative to parent_dir
    Where ``timestamps_for``, ``include`` and ``exclude`` are lists of glob
    patterns.
    ``with_cache`` determines whether to write a cache of the found commit
    times under the .git folder and use them instead of trying to find the
    commit times each time.
    The cache is invalidated when the parent_dir and files to find change.
    """
    def __init__(self, root_folder, parent_dir, timestamps_for=None, include=None, exclude=None, silent=False, with_cache=True, debug=False):
        """Record the options; no filesystem or git work happens here."""
        self.debug = debug
        self.silent = silent
        self.include = include
        self.exclude = exclude
        self.with_cache = with_cache
        self.parent_dir = parent_dir
        self.root_folder = root_folder
        self.timestamps_for = timestamps_for
        # Per-directory memo, presumably used by relpath_for (not shown in
        # this snippet) -- TODO confirm against the full source.
        self.relpath_cache = {}
    def find(self):
        """
        Find all the files we want to find commit times for, and any extra files
        under symlinks.
        Then find the commit times for those files and return a dictionary of
        {relative_path: commit_time_as_epoch}
        """
        # NOTE(review): mtimes is never used in this method; kept as-is.
        mtimes = {}
        git = Repo(self.root_folder)
        all_files = git.all_files()
        use_files = set(self.find_files_for_use(all_files))
        # the git index won't find the files under a symlink :(
        # And we include files under a symlink as separate copies of the files
        # So we still want to generate modified times for those files
        extras = set(self.extra_symlinked_files(use_files))
        # Combine use_files and extras
        use_files.update(extras)
        # Tell the user something
        if not self.silent:
            log.info("Finding modified times for %s/%s git controlled files in %s", len(use_files), len(all_files), self.root_folder)
        # Finally get the dates from git!
        return self.commit_times_for(git, use_files)
    def commit_times_for(self, git, use_files):
        """
        Return commit times for the use_files specified.
        We will use a cache of commit times if self.with_cache is Truthy.
        Finally, we yield (relpath: epoch) pairs where path is relative
        to self.parent_dir and epoch is the commit time in UTC for that path.
        """
        # Use real_relpath if it exists (SymlinkdPath) and default to just the path
        # This is because we _want_ to compare the commits to the _real paths_
        # As git only cares about the symlink itself, rather than files under it
        # We also want to make sure that the symlink targets are included in use_files
        # If they've been excluded by the filters
        use_files_paths = set([getattr(p, "real_relpath", p.path) for p in use_files if p.relpath])
        # Find us the first commit to consider
        first_commit = str(git.first_commit)
        # Try and get our cached commit times
        # If we get a commit then it means we have a match for this parent/sorted_relpaths
        commit_times = {}
        cached_commit, cached_commit_times = None, {}
        if self.with_cache:
            sorted_relpaths = sorted([p.relpath for p in use_files])
            cached_commit, cached_commit_times = get_cached_commit_times(self.root_folder, self.parent_dir, sorted_relpaths)
        # The cache only counts when it was written against the same first commit
        if cached_commit == first_commit:
            commit_times = cached_commit_times
        # If we couldn't find cached commit times, we have to do some work
        if not commit_times:
            for commit_id, commit_time, different_paths in git.file_commit_times(use_files_paths, debug=self.debug):
                for path in different_paths:
                    commit_times[path] = commit_time
            if self.with_cache:
                set_cached_commit_times(self.root_folder, self.parent_dir, first_commit, commit_times, sorted_relpaths)
        # Finally, yield the (relpath, commit_time) for all the files we care about.
        for key in use_files:
            if key.relpath:
                path = getattr(key, "real_relpath", key.path)
                relpath = getattr(key, "real_relpath", key.relpath)
                if path in commit_times:
                    yield key.relpath, commit_times[path]
                else:
                    log.warning("Couldn't find commit time for {0}".format(relpath))
    def extra_symlinked_files(self, potential_symlinks):
        """
        Find any symlinkd folders and yield SymlinkdPath objects for each file
        that is found under the symlink.
        """
        for key in list(potential_symlinks):
            location = os.path.join(self.root_folder, key.path)
            real_location = os.path.realpath(location)
            if os.path.islink(location) and os.path.isdir(real_location):
                # followlinks=True so nested symlinked directories are walked too
                for root, dirs, files in os.walk(real_location, followlinks=True):
                    for name in files:
                        # So this is joining the name of the symlink
                        # With the name of the file, relative to the real location of the symlink
                        full_path = os.path.join(root, name)
                        rel_location = os.path.relpath(full_path, real_location)
                        symlinkd_path = os.path.join(key.path, rel_location)
                        # We then get that relative to the parent dir
                        dir_part = os.path.relpath(root, real_location)
                        symlinkd_relpath = os.path.normpath(os.path.join(key.relpath, dir_part, name))
                        # And we need the original file location so we can get a commit time for the symlinkd path
                        real_path = os.path.realpath(full_path)
                        real_root_folder = os.path.realpath(self.root_folder)
                        real_relpath = os.path.relpath(real_path, real_root_folder)
                        # So that's path relative to root_folder, path relative to parent_folder
                        # and path relative to root for the target
                        yield SymlinkdPath(symlinkd_path, symlinkd_relpath, real_relpath)
    def find_files_for_use(self, all_files):
        """
        Given a list of all the files to consider, only yield Path objects
        for those we care about, given our filters
        """
        for path in all_files:
            # Find the path relative to the parent dir
            # (relpath_for is not defined in this snippet -- presumably
            # provided elsewhere on the class; verify against the full source)
            relpath = self.relpath_for(path)
            # Don't care about the ./
            if relpath.startswith("./"):
                relpath = relpath[2:]
            # Only care about paths that aren't filtered
            if not self.is_filtered(relpath):
                yield Path(path, relpath)
    def is_filtered(self, relpath):
        """Say whether this relpath is filtered out (True means: exclude it)."""
        # Only include files under the parent_dir
        if relpath.startswith("../"):
            return True
        # Ignore files that we don't want timestamps from
        if self.timestamps_for is not None and type(self.timestamps_for) is list:
            match = False
            for line in self.timestamps_for:
                if fnmatch.fnmatch(relpath, line):
                    match = True
                    break
            if not match:
                return True
        # Matched is true by default if
        # * Have exclude
        # * No exclude and no include
        matched = self.exclude or not any([self.exclude, self.include])
        # Anything not matching exclude gets included
        if self.exclude:
            for line in self.exclude:
                if fnmatch.fnmatch(relpath, line):
                    matched = False
        # Anything matching include gets included
        if self.include:
            for line in self.include:
                if fnmatch.fnmatch(relpath, line):
                    matched = True
                    break
        return not matched
|
delfick/gitmit
|
gitmit/mit.py
|
GitTimes.find
|
python
|
def find(self):
mtimes = {}
git = Repo(self.root_folder)
all_files = git.all_files()
use_files = set(self.find_files_for_use(all_files))
# the git index won't find the files under a symlink :(
# And we include files under a symlink as seperate copies of the files
# So we still want to generate modified times for those files
extras = set(self.extra_symlinked_files(use_files))
# Combine use_files and extras
use_files.update(extras)
# Tell the user something
if not self.silent:
log.info("Finding modified times for %s/%s git controlled files in %s", len(use_files), len(all_files), self.root_folder)
# Finally get the dates from git!
return self.commit_times_for(git, use_files)
|
Find all the files we want to find commit times for, and any extra files
under symlinks.
Then find the commit times for those files and return a dictionary of
{relative_path: commit_time_as_epoch}
|
train
|
https://github.com/delfick/gitmit/blob/ae0aef14a06b25ad2811f8f47cc97e68a0910eae/gitmit/mit.py#L74-L101
|
[
"def commit_times_for(self, git, use_files):\n \"\"\"\n Return commit times for the use_files specified.\n\n We will use a cache of commit times if self.with_cache is Truthy.\n\n Finally, we yield (relpath: epoch) pairs where path is relative\n to self.parent_dir and epoch is the commit time in UTC for that path.\n \"\"\"\n # Use real_relpath if it exists (SymlinkdPath) and default to just the path\n # This is because we _want_ to compare the commits to the _real paths_\n # As git only cares about the symlink itself, rather than files under it\n # We also want to make sure that the symlink targets are included in use_files\n # If they've been excluded by the filters\n use_files_paths = set([getattr(p, \"real_relpath\", p.path) for p in use_files if p.relpath])\n\n # Find us the first commit to consider\n first_commit = str(git.first_commit)\n\n # Try and get our cached commit times\n # If we get a commit then it means we have a match for this parent/sorted_relpaths\n commit_times = {}\n cached_commit, cached_commit_times = None, {}\n if self.with_cache:\n sorted_relpaths = sorted([p.relpath for p in use_files])\n cached_commit, cached_commit_times = get_cached_commit_times(self.root_folder, self.parent_dir, sorted_relpaths)\n\n if cached_commit == first_commit:\n commit_times = cached_commit_times\n\n # If we couldn't find cached commit times, we have to do some work\n if not commit_times:\n for commit_id, commit_time, different_paths in git.file_commit_times(use_files_paths, debug=self.debug):\n for path in different_paths:\n commit_times[path] = commit_time\n\n if self.with_cache:\n set_cached_commit_times(self.root_folder, self.parent_dir, first_commit, commit_times, sorted_relpaths)\n\n # Finally, yield the (relpath, commit_time) for all the files we care about.\n for key in use_files:\n if key.relpath:\n path = getattr(key, \"real_relpath\", key.path)\n relpath = getattr(key, \"real_relpath\", key.relpath)\n if path in commit_times:\n yield key.relpath, 
commit_times[path]\n else:\n log.warning(\"Couldn't find commit time for {0}\".format(relpath))\n",
"def extra_symlinked_files(self, potential_symlinks):\n \"\"\"\n Find any symlinkd folders and yield SymlinkdPath objects for each file\n that is found under the symlink.\n \"\"\"\n for key in list(potential_symlinks):\n location = os.path.join(self.root_folder, key.path)\n real_location = os.path.realpath(location)\n\n if os.path.islink(location) and os.path.isdir(real_location):\n for root, dirs, files in os.walk(real_location, followlinks=True):\n for name in files:\n # So this is joining the name of the symlink\n # With the name of the file, relative to the real location of the symlink\n full_path = os.path.join(root, name)\n rel_location = os.path.relpath(full_path, real_location)\n symlinkd_path = os.path.join(key.path, rel_location)\n\n # We then get that relative to the parent dir\n dir_part = os.path.relpath(root, real_location)\n symlinkd_relpath = os.path.normpath(os.path.join(key.relpath, dir_part, name))\n\n # And we need the original file location so we can get a commit time for the symlinkd path\n real_path = os.path.realpath(full_path)\n real_root_folder = os.path.realpath(self.root_folder)\n real_relpath = os.path.relpath(real_path, real_root_folder)\n\n # So that's path relative to root_folder, path relative to parent_folder\n # and path relative to root for the target\n yield SymlinkdPath(symlinkd_path, symlinkd_relpath, real_relpath)\n",
"def find_files_for_use(self, all_files):\n \"\"\"\n Given a list of all the files to consider, only yield Path objects\n for those we care about, given our filters\n \"\"\"\n for path in all_files:\n # Find the path relative to the parent dir\n relpath = self.relpath_for(path)\n\n # Don't care about the ./\n if relpath.startswith(\"./\"):\n relpath = relpath[2:]\n\n # Only care about paths that aren't filtered\n if not self.is_filtered(relpath):\n yield Path(path, relpath)\n",
"def all_files(self):\n \"\"\"Return a set of all the files under git control\"\"\"\n return set([entry.decode() for entry, _ in self.git.open_index().items()])\n"
] |
class GitTimes(object):
"""
Responsible for determining what files we want commit times for and then
finding those commit times.
The order is something like:
* Git root under ``root_folder``
* Files under ``parent_dir`` relative to the git root
* Only including those files in ``timestamps_for`` relative to parent_dir
* Exclude files in ``exclude`` relative to parent_dir
* Re-include any files in ``include`` relative to parent_dir
Where ``timestamps_for``, ``include`` and ``exclude`` are lists of glob
patterns.
``with_cache`` determines whether to write a cache of the found commit
times under the .git folder and use them instead of trying to find the
commit times each time.
The cache is invalidated when the parent_dir and files to find change.
"""
def __init__(self, root_folder, parent_dir, timestamps_for=None, include=None, exclude=None, silent=False, with_cache=True, debug=False):
self.debug = debug
self.silent = silent
self.include = include
self.exclude = exclude
self.with_cache = with_cache
self.parent_dir = parent_dir
self.root_folder = root_folder
self.timestamps_for = timestamps_for
self.relpath_cache = {}
def relpath_for(self, path):
"""Find the relative path from here from the parent_dir"""
if self.parent_dir in (".", ""):
return path
if path == self.parent_dir:
return ""
dirname = os.path.dirname(path) or "."
basename = os.path.basename(path)
cached = self.relpath_cache.get(dirname, empty)
if cached is empty:
cached = self.relpath_cache[dirname] = os.path.relpath(dirname, self.parent_dir)
return os.path.join(cached, basename)
def commit_times_for(self, git, use_files):
"""
Return commit times for the use_files specified.
We will use a cache of commit times if self.with_cache is Truthy.
Finally, we yield (relpath: epoch) pairs where path is relative
to self.parent_dir and epoch is the commit time in UTC for that path.
"""
# Use real_relpath if it exists (SymlinkdPath) and default to just the path
# This is because we _want_ to compare the commits to the _real paths_
# As git only cares about the symlink itself, rather than files under it
# We also want to make sure that the symlink targets are included in use_files
# If they've been excluded by the filters
use_files_paths = set([getattr(p, "real_relpath", p.path) for p in use_files if p.relpath])
# Find us the first commit to consider
first_commit = str(git.first_commit)
# Try and get our cached commit times
# If we get a commit then it means we have a match for this parent/sorted_relpaths
commit_times = {}
cached_commit, cached_commit_times = None, {}
if self.with_cache:
sorted_relpaths = sorted([p.relpath for p in use_files])
cached_commit, cached_commit_times = get_cached_commit_times(self.root_folder, self.parent_dir, sorted_relpaths)
if cached_commit == first_commit:
commit_times = cached_commit_times
# If we couldn't find cached commit times, we have to do some work
if not commit_times:
for commit_id, commit_time, different_paths in git.file_commit_times(use_files_paths, debug=self.debug):
for path in different_paths:
commit_times[path] = commit_time
if self.with_cache:
set_cached_commit_times(self.root_folder, self.parent_dir, first_commit, commit_times, sorted_relpaths)
# Finally, yield the (relpath, commit_time) for all the files we care about.
for key in use_files:
if key.relpath:
path = getattr(key, "real_relpath", key.path)
relpath = getattr(key, "real_relpath", key.relpath)
if path in commit_times:
yield key.relpath, commit_times[path]
else:
log.warning("Couldn't find commit time for {0}".format(relpath))
def extra_symlinked_files(self, potential_symlinks):
"""
Find any symlinkd folders and yield SymlinkdPath objects for each file
that is found under the symlink.
"""
for key in list(potential_symlinks):
location = os.path.join(self.root_folder, key.path)
real_location = os.path.realpath(location)
if os.path.islink(location) and os.path.isdir(real_location):
for root, dirs, files in os.walk(real_location, followlinks=True):
for name in files:
# So this is joining the name of the symlink
# With the name of the file, relative to the real location of the symlink
full_path = os.path.join(root, name)
rel_location = os.path.relpath(full_path, real_location)
symlinkd_path = os.path.join(key.path, rel_location)
# We then get that relative to the parent dir
dir_part = os.path.relpath(root, real_location)
symlinkd_relpath = os.path.normpath(os.path.join(key.relpath, dir_part, name))
# And we need the original file location so we can get a commit time for the symlinkd path
real_path = os.path.realpath(full_path)
real_root_folder = os.path.realpath(self.root_folder)
real_relpath = os.path.relpath(real_path, real_root_folder)
# So that's path relative to root_folder, path relative to parent_folder
# and path relative to root for the target
yield SymlinkdPath(symlinkd_path, symlinkd_relpath, real_relpath)
def find_files_for_use(self, all_files):
"""
Given a list of all the files to consider, only yield Path objects
for those we care about, given our filters
"""
for path in all_files:
# Find the path relative to the parent dir
relpath = self.relpath_for(path)
# Don't care about the ./
if relpath.startswith("./"):
relpath = relpath[2:]
# Only care about paths that aren't filtered
if not self.is_filtered(relpath):
yield Path(path, relpath)
def is_filtered(self, relpath):
"""Say whether this relpath is filtered out"""
# Only include files under the parent_dir
if relpath.startswith("../"):
return True
# Ignore files that we don't want timestamps from
if self.timestamps_for is not None and type(self.timestamps_for) is list:
match = False
for line in self.timestamps_for:
if fnmatch.fnmatch(relpath, line):
match = True
break
if not match:
return True
# Matched is true by default if
# * Have exclude
# * No exclude and no include
matched = self.exclude or not any([self.exclude, self.include])
# Anything not matching exclude gets included
if self.exclude:
for line in self.exclude:
if fnmatch.fnmatch(relpath, line):
matched = False
# Anything matching include gets included
if self.include:
for line in self.include:
if fnmatch.fnmatch(relpath, line):
matched = True
break
return not matched
|
delfick/gitmit
|
gitmit/mit.py
|
GitTimes.commit_times_for
|
python
|
def commit_times_for(self, git, use_files):
# Use real_relpath if it exists (SymlinkdPath) and default to just the path
# This is because we _want_ to compare the commits to the _real paths_
# As git only cares about the symlink itself, rather than files under it
# We also want to make sure that the symlink targets are included in use_files
# If they've been excluded by the filters
use_files_paths = set([getattr(p, "real_relpath", p.path) for p in use_files if p.relpath])
# Find us the first commit to consider
first_commit = str(git.first_commit)
# Try and get our cached commit times
# If we get a commit then it means we have a match for this parent/sorted_relpaths
commit_times = {}
cached_commit, cached_commit_times = None, {}
if self.with_cache:
sorted_relpaths = sorted([p.relpath for p in use_files])
cached_commit, cached_commit_times = get_cached_commit_times(self.root_folder, self.parent_dir, sorted_relpaths)
if cached_commit == first_commit:
commit_times = cached_commit_times
# If we couldn't find cached commit times, we have to do some work
if not commit_times:
for commit_id, commit_time, different_paths in git.file_commit_times(use_files_paths, debug=self.debug):
for path in different_paths:
commit_times[path] = commit_time
if self.with_cache:
set_cached_commit_times(self.root_folder, self.parent_dir, first_commit, commit_times, sorted_relpaths)
# Finally, yield the (relpath, commit_time) for all the files we care about.
for key in use_files:
if key.relpath:
path = getattr(key, "real_relpath", key.path)
relpath = getattr(key, "real_relpath", key.relpath)
if path in commit_times:
yield key.relpath, commit_times[path]
else:
log.warning("Couldn't find commit time for {0}".format(relpath))
|
Return commit times for the use_files specified.
We will use a cache of commit times if self.with_cache is Truthy.
Finally, we yield (relpath: epoch) pairs where path is relative
to self.parent_dir and epoch is the commit time in UTC for that path.
|
train
|
https://github.com/delfick/gitmit/blob/ae0aef14a06b25ad2811f8f47cc97e68a0910eae/gitmit/mit.py#L103-L150
|
[
"def get_cached_commit_times(root_folder, parent_dir, sorted_relpaths):\n \"\"\"\n Get the cached commit times for the combination of this parent_dir and relpaths\n\n Return the commit assigned to this combination and the actual times!\n \"\"\"\n result = get_all_cached_commit_times(root_folder)\n\n for item in result:\n if sorted(item.get(\"sorted_relpaths\", [])) == sorted_relpaths and item.get(\"parent_dir\") == parent_dir:\n return item.get(\"commit\"), item.get(\"commit_times\")\n\n return None, {}\n",
"def set_cached_commit_times(root_folder, parent_dir, first_commit, commit_times, sorted_relpaths):\n \"\"\"\n Set the cached commit times in a json file at cache_location(root_folder)\n\n We first get what is currently in the cache and either modify the existing\n entry for this combo of parent_dir and sorted_relpaths.\n\n Or add to the entries.\n\n We then ensure there's less than 5 entries to keep the cache from growing\n too large (arbitrary number is arbitrary).\n\n Finally, we write the cache or issue a warning if we can't.\n \"\"\"\n current = get_all_cached_commit_times(root_folder)\n location = cache_location(root_folder)\n\n found = False\n for item in current:\n if sorted(item.get(\"sorted_relpaths\", [])) == sorted_relpaths and item.get(\"parent_dir\") == parent_dir:\n item[\"commit_times\"] = commit_times\n item[\"commit\"] = str(first_commit)\n found = True\n break\n\n if not found:\n current.append({\"commit\": str(first_commit), \"parent_dir\": parent_dir, \"commit_times\": commit_times, \"sorted_relpaths\": sorted_relpaths})\n\n # Make sure it doesn't grow too big....\n # Arbitrary number is arbitrary\n while len(current) > 5:\n current.pop(0)\n\n try:\n log.info(\"Writing gitmit cached commit_times\\tlocation=%s\", location)\n with open(location, \"w\") as fle:\n json.dump(current, fle)\n except (TypeError, ValueError, IOError) as error:\n log.warning(\"Failed to dump gitmit mtime cache\\tlocation=%s\\terror=%s\", location, error)\n"
] |
class GitTimes(object):
"""
Responsible for determining what files we want commit times for and then
finding those commit times.
The order is something like:
* Git root under ``root_folder``
* Files under ``parent_dir`` relative to the git root
* Only including those files in ``timestamps_for`` relative to parent_dir
* Exclude files in ``exclude`` relative to parent_dir
* Re-include any files in ``include`` relative to parent_dir
Where ``timestamps_for``, ``include`` and ``exclude`` are lists of glob
patterns.
``with_cache`` determines whether to write a cache of the found commit
times under the .git folder and use them instead of trying to find the
commit times each time.
The cache is invalidated when the parent_dir and files to find change.
"""
def __init__(self, root_folder, parent_dir, timestamps_for=None, include=None, exclude=None, silent=False, with_cache=True, debug=False):
self.debug = debug
self.silent = silent
self.include = include
self.exclude = exclude
self.with_cache = with_cache
self.parent_dir = parent_dir
self.root_folder = root_folder
self.timestamps_for = timestamps_for
self.relpath_cache = {}
def relpath_for(self, path):
"""Find the relative path from here from the parent_dir"""
if self.parent_dir in (".", ""):
return path
if path == self.parent_dir:
return ""
dirname = os.path.dirname(path) or "."
basename = os.path.basename(path)
cached = self.relpath_cache.get(dirname, empty)
if cached is empty:
cached = self.relpath_cache[dirname] = os.path.relpath(dirname, self.parent_dir)
return os.path.join(cached, basename)
def find(self):
"""
Find all the files we want to find commit times for, and any extra files
under symlinks.
Then find the commit times for those files and return a dictionary of
{relative_path: commit_time_as_epoch}
"""
mtimes = {}
git = Repo(self.root_folder)
all_files = git.all_files()
use_files = set(self.find_files_for_use(all_files))
# the git index won't find the files under a symlink :(
# And we include files under a symlink as seperate copies of the files
# So we still want to generate modified times for those files
extras = set(self.extra_symlinked_files(use_files))
# Combine use_files and extras
use_files.update(extras)
# Tell the user something
if not self.silent:
log.info("Finding modified times for %s/%s git controlled files in %s", len(use_files), len(all_files), self.root_folder)
# Finally get the dates from git!
return self.commit_times_for(git, use_files)
def extra_symlinked_files(self, potential_symlinks):
"""
Find any symlinkd folders and yield SymlinkdPath objects for each file
that is found under the symlink.
"""
for key in list(potential_symlinks):
location = os.path.join(self.root_folder, key.path)
real_location = os.path.realpath(location)
if os.path.islink(location) and os.path.isdir(real_location):
for root, dirs, files in os.walk(real_location, followlinks=True):
for name in files:
# So this is joining the name of the symlink
# With the name of the file, relative to the real location of the symlink
full_path = os.path.join(root, name)
rel_location = os.path.relpath(full_path, real_location)
symlinkd_path = os.path.join(key.path, rel_location)
# We then get that relative to the parent dir
dir_part = os.path.relpath(root, real_location)
symlinkd_relpath = os.path.normpath(os.path.join(key.relpath, dir_part, name))
# And we need the original file location so we can get a commit time for the symlinkd path
real_path = os.path.realpath(full_path)
real_root_folder = os.path.realpath(self.root_folder)
real_relpath = os.path.relpath(real_path, real_root_folder)
# So that's path relative to root_folder, path relative to parent_folder
# and path relative to root for the target
yield SymlinkdPath(symlinkd_path, symlinkd_relpath, real_relpath)
def find_files_for_use(self, all_files):
"""
Given a list of all the files to consider, only yield Path objects
for those we care about, given our filters
"""
for path in all_files:
# Find the path relative to the parent dir
relpath = self.relpath_for(path)
# Don't care about the ./
if relpath.startswith("./"):
relpath = relpath[2:]
# Only care about paths that aren't filtered
if not self.is_filtered(relpath):
yield Path(path, relpath)
def is_filtered(self, relpath):
"""Say whether this relpath is filtered out"""
# Only include files under the parent_dir
if relpath.startswith("../"):
return True
# Ignore files that we don't want timestamps from
if self.timestamps_for is not None and type(self.timestamps_for) is list:
match = False
for line in self.timestamps_for:
if fnmatch.fnmatch(relpath, line):
match = True
break
if not match:
return True
# Matched is true by default if
# * Have exclude
# * No exclude and no include
matched = self.exclude or not any([self.exclude, self.include])
# Anything not matching exclude gets included
if self.exclude:
for line in self.exclude:
if fnmatch.fnmatch(relpath, line):
matched = False
# Anything matching include gets included
if self.include:
for line in self.include:
if fnmatch.fnmatch(relpath, line):
matched = True
break
return not matched
|
delfick/gitmit
|
gitmit/mit.py
|
GitTimes.extra_symlinked_files
|
python
|
def extra_symlinked_files(self, potential_symlinks):
for key in list(potential_symlinks):
location = os.path.join(self.root_folder, key.path)
real_location = os.path.realpath(location)
if os.path.islink(location) and os.path.isdir(real_location):
for root, dirs, files in os.walk(real_location, followlinks=True):
for name in files:
# So this is joining the name of the symlink
# With the name of the file, relative to the real location of the symlink
full_path = os.path.join(root, name)
rel_location = os.path.relpath(full_path, real_location)
symlinkd_path = os.path.join(key.path, rel_location)
# We then get that relative to the parent dir
dir_part = os.path.relpath(root, real_location)
symlinkd_relpath = os.path.normpath(os.path.join(key.relpath, dir_part, name))
# And we need the original file location so we can get a commit time for the symlinkd path
real_path = os.path.realpath(full_path)
real_root_folder = os.path.realpath(self.root_folder)
real_relpath = os.path.relpath(real_path, real_root_folder)
# So that's path relative to root_folder, path relative to parent_folder
# and path relative to root for the target
yield SymlinkdPath(symlinkd_path, symlinkd_relpath, real_relpath)
|
Find any symlinkd folders and yield SymlinkdPath objects for each file
that is found under the symlink.
|
train
|
https://github.com/delfick/gitmit/blob/ae0aef14a06b25ad2811f8f47cc97e68a0910eae/gitmit/mit.py#L152-L181
| null |
class GitTimes(object):
"""
Responsible for determining what files we want commit times for and then
finding those commit times.
The order is something like:
* Git root under ``root_folder``
* Files under ``parent_dir`` relative to the git root
* Only including those files in ``timestamps_for`` relative to parent_dir
* Exclude files in ``exclude`` relative to parent_dir
* Re-include any files in ``include`` relative to parent_dir
Where ``timestamps_for``, ``include`` and ``exclude`` are lists of glob
patterns.
``with_cache`` determines whether to write a cache of the found commit
times under the .git folder and use them instead of trying to find the
commit times each time.
The cache is invalidated when the parent_dir and files to find change.
"""
def __init__(self, root_folder, parent_dir, timestamps_for=None, include=None, exclude=None, silent=False, with_cache=True, debug=False):
self.debug = debug
self.silent = silent
self.include = include
self.exclude = exclude
self.with_cache = with_cache
self.parent_dir = parent_dir
self.root_folder = root_folder
self.timestamps_for = timestamps_for
self.relpath_cache = {}
def relpath_for(self, path):
"""Find the relative path from here from the parent_dir"""
if self.parent_dir in (".", ""):
return path
if path == self.parent_dir:
return ""
dirname = os.path.dirname(path) or "."
basename = os.path.basename(path)
cached = self.relpath_cache.get(dirname, empty)
if cached is empty:
cached = self.relpath_cache[dirname] = os.path.relpath(dirname, self.parent_dir)
return os.path.join(cached, basename)
def find(self):
"""
Find all the files we want to find commit times for, and any extra files
under symlinks.
Then find the commit times for those files and return a dictionary of
{relative_path: commit_time_as_epoch}
"""
mtimes = {}
git = Repo(self.root_folder)
all_files = git.all_files()
use_files = set(self.find_files_for_use(all_files))
# the git index won't find the files under a symlink :(
# And we include files under a symlink as seperate copies of the files
# So we still want to generate modified times for those files
extras = set(self.extra_symlinked_files(use_files))
# Combine use_files and extras
use_files.update(extras)
# Tell the user something
if not self.silent:
log.info("Finding modified times for %s/%s git controlled files in %s", len(use_files), len(all_files), self.root_folder)
# Finally get the dates from git!
return self.commit_times_for(git, use_files)
def commit_times_for(self, git, use_files):
"""
Return commit times for the use_files specified.
We will use a cache of commit times if self.with_cache is Truthy.
Finally, we yield (relpath: epoch) pairs where path is relative
to self.parent_dir and epoch is the commit time in UTC for that path.
"""
# Use real_relpath if it exists (SymlinkdPath) and default to just the path
# This is because we _want_ to compare the commits to the _real paths_
# As git only cares about the symlink itself, rather than files under it
# We also want to make sure that the symlink targets are included in use_files
# If they've been excluded by the filters
use_files_paths = set([getattr(p, "real_relpath", p.path) for p in use_files if p.relpath])
# Find us the first commit to consider
first_commit = str(git.first_commit)
# Try and get our cached commit times
# If we get a commit then it means we have a match for this parent/sorted_relpaths
commit_times = {}
cached_commit, cached_commit_times = None, {}
if self.with_cache:
sorted_relpaths = sorted([p.relpath for p in use_files])
cached_commit, cached_commit_times = get_cached_commit_times(self.root_folder, self.parent_dir, sorted_relpaths)
if cached_commit == first_commit:
commit_times = cached_commit_times
# If we couldn't find cached commit times, we have to do some work
if not commit_times:
for commit_id, commit_time, different_paths in git.file_commit_times(use_files_paths, debug=self.debug):
for path in different_paths:
commit_times[path] = commit_time
if self.with_cache:
set_cached_commit_times(self.root_folder, self.parent_dir, first_commit, commit_times, sorted_relpaths)
# Finally, yield the (relpath, commit_time) for all the files we care about.
for key in use_files:
if key.relpath:
path = getattr(key, "real_relpath", key.path)
relpath = getattr(key, "real_relpath", key.relpath)
if path in commit_times:
yield key.relpath, commit_times[path]
else:
log.warning("Couldn't find commit time for {0}".format(relpath))
def find_files_for_use(self, all_files):
"""
Given a list of all the files to consider, only yield Path objects
for those we care about, given our filters
"""
for path in all_files:
# Find the path relative to the parent dir
relpath = self.relpath_for(path)
# Don't care about the ./
if relpath.startswith("./"):
relpath = relpath[2:]
# Only care about paths that aren't filtered
if not self.is_filtered(relpath):
yield Path(path, relpath)
def is_filtered(self, relpath):
"""Say whether this relpath is filtered out"""
# Only include files under the parent_dir
if relpath.startswith("../"):
return True
# Ignore files that we don't want timestamps from
if self.timestamps_for is not None and type(self.timestamps_for) is list:
match = False
for line in self.timestamps_for:
if fnmatch.fnmatch(relpath, line):
match = True
break
if not match:
return True
# Matched is true by default if
# * Have exclude
# * No exclude and no include
matched = self.exclude or not any([self.exclude, self.include])
# Anything not matching exclude gets included
if self.exclude:
for line in self.exclude:
if fnmatch.fnmatch(relpath, line):
matched = False
# Anything matching include gets included
if self.include:
for line in self.include:
if fnmatch.fnmatch(relpath, line):
matched = True
break
return not matched
|
delfick/gitmit
|
gitmit/mit.py
|
GitTimes.find_files_for_use
|
python
|
def find_files_for_use(self, all_files):
for path in all_files:
# Find the path relative to the parent dir
relpath = self.relpath_for(path)
# Don't care about the ./
if relpath.startswith("./"):
relpath = relpath[2:]
# Only care about paths that aren't filtered
if not self.is_filtered(relpath):
yield Path(path, relpath)
|
Given a list of all the files to consider, only yield Path objects
for those we care about, given our filters
|
train
|
https://github.com/delfick/gitmit/blob/ae0aef14a06b25ad2811f8f47cc97e68a0910eae/gitmit/mit.py#L183-L198
|
[
"def relpath_for(self, path):\n \"\"\"Find the relative path from here from the parent_dir\"\"\"\n if self.parent_dir in (\".\", \"\"):\n return path\n\n if path == self.parent_dir:\n return \"\"\n\n dirname = os.path.dirname(path) or \".\"\n basename = os.path.basename(path)\n\n cached = self.relpath_cache.get(dirname, empty)\n if cached is empty:\n cached = self.relpath_cache[dirname] = os.path.relpath(dirname, self.parent_dir)\n\n return os.path.join(cached, basename)\n",
"def is_filtered(self, relpath):\n \"\"\"Say whether this relpath is filtered out\"\"\"\n # Only include files under the parent_dir\n if relpath.startswith(\"../\"):\n return True\n\n # Ignore files that we don't want timestamps from\n if self.timestamps_for is not None and type(self.timestamps_for) is list:\n match = False\n for line in self.timestamps_for:\n if fnmatch.fnmatch(relpath, line):\n match = True\n break\n if not match:\n return True\n\n # Matched is true by default if\n # * Have exclude\n # * No exclude and no include\n matched = self.exclude or not any([self.exclude, self.include])\n\n # Anything not matching exclude gets included\n if self.exclude:\n for line in self.exclude:\n if fnmatch.fnmatch(relpath, line):\n matched = False\n\n # Anything matching include gets included\n if self.include:\n for line in self.include:\n if fnmatch.fnmatch(relpath, line):\n matched = True\n break\n\n return not matched\n"
] |
class GitTimes(object):
"""
Responsible for determining what files we want commit times for and then
finding those commit times.
The order is something like:
* Git root under ``root_folder``
* Files under ``parent_dir`` relative to the git root
* Only including those files in ``timestamps_for`` relative to parent_dir
* Exclude files in ``exclude`` relative to parent_dir
* Re-include any files in ``include`` relative to parent_dir
Where ``timestamps_for``, ``include`` and ``exclude`` are lists of glob
patterns.
``with_cache`` determines whether to write a cache of the found commit
times under the .git folder and use them instead of trying to find the
commit times each time.
The cache is invalidated when the parent_dir and files to find change.
"""
def __init__(self, root_folder, parent_dir, timestamps_for=None, include=None, exclude=None, silent=False, with_cache=True, debug=False):
self.debug = debug
self.silent = silent
self.include = include
self.exclude = exclude
self.with_cache = with_cache
self.parent_dir = parent_dir
self.root_folder = root_folder
self.timestamps_for = timestamps_for
self.relpath_cache = {}
def relpath_for(self, path):
"""Find the relative path from here from the parent_dir"""
if self.parent_dir in (".", ""):
return path
if path == self.parent_dir:
return ""
dirname = os.path.dirname(path) or "."
basename = os.path.basename(path)
cached = self.relpath_cache.get(dirname, empty)
if cached is empty:
cached = self.relpath_cache[dirname] = os.path.relpath(dirname, self.parent_dir)
return os.path.join(cached, basename)
def find(self):
"""
Find all the files we want to find commit times for, and any extra files
under symlinks.
Then find the commit times for those files and return a dictionary of
{relative_path: commit_time_as_epoch}
"""
mtimes = {}
git = Repo(self.root_folder)
all_files = git.all_files()
use_files = set(self.find_files_for_use(all_files))
# the git index won't find the files under a symlink :(
# And we include files under a symlink as seperate copies of the files
# So we still want to generate modified times for those files
extras = set(self.extra_symlinked_files(use_files))
# Combine use_files and extras
use_files.update(extras)
# Tell the user something
if not self.silent:
log.info("Finding modified times for %s/%s git controlled files in %s", len(use_files), len(all_files), self.root_folder)
# Finally get the dates from git!
return self.commit_times_for(git, use_files)
def commit_times_for(self, git, use_files):
"""
Return commit times for the use_files specified.
We will use a cache of commit times if self.with_cache is Truthy.
Finally, we yield (relpath: epoch) pairs where path is relative
to self.parent_dir and epoch is the commit time in UTC for that path.
"""
# Use real_relpath if it exists (SymlinkdPath) and default to just the path
# This is because we _want_ to compare the commits to the _real paths_
# As git only cares about the symlink itself, rather than files under it
# We also want to make sure that the symlink targets are included in use_files
# If they've been excluded by the filters
use_files_paths = set([getattr(p, "real_relpath", p.path) for p in use_files if p.relpath])
# Find us the first commit to consider
first_commit = str(git.first_commit)
# Try and get our cached commit times
# If we get a commit then it means we have a match for this parent/sorted_relpaths
commit_times = {}
cached_commit, cached_commit_times = None, {}
if self.with_cache:
sorted_relpaths = sorted([p.relpath for p in use_files])
cached_commit, cached_commit_times = get_cached_commit_times(self.root_folder, self.parent_dir, sorted_relpaths)
if cached_commit == first_commit:
commit_times = cached_commit_times
# If we couldn't find cached commit times, we have to do some work
if not commit_times:
for commit_id, commit_time, different_paths in git.file_commit_times(use_files_paths, debug=self.debug):
for path in different_paths:
commit_times[path] = commit_time
if self.with_cache:
set_cached_commit_times(self.root_folder, self.parent_dir, first_commit, commit_times, sorted_relpaths)
# Finally, yield the (relpath, commit_time) for all the files we care about.
for key in use_files:
if key.relpath:
path = getattr(key, "real_relpath", key.path)
relpath = getattr(key, "real_relpath", key.relpath)
if path in commit_times:
yield key.relpath, commit_times[path]
else:
log.warning("Couldn't find commit time for {0}".format(relpath))
def extra_symlinked_files(self, potential_symlinks):
"""
Find any symlinkd folders and yield SymlinkdPath objects for each file
that is found under the symlink.
"""
for key in list(potential_symlinks):
location = os.path.join(self.root_folder, key.path)
real_location = os.path.realpath(location)
if os.path.islink(location) and os.path.isdir(real_location):
for root, dirs, files in os.walk(real_location, followlinks=True):
for name in files:
# So this is joining the name of the symlink
# With the name of the file, relative to the real location of the symlink
full_path = os.path.join(root, name)
rel_location = os.path.relpath(full_path, real_location)
symlinkd_path = os.path.join(key.path, rel_location)
# We then get that relative to the parent dir
dir_part = os.path.relpath(root, real_location)
symlinkd_relpath = os.path.normpath(os.path.join(key.relpath, dir_part, name))
# And we need the original file location so we can get a commit time for the symlinkd path
real_path = os.path.realpath(full_path)
real_root_folder = os.path.realpath(self.root_folder)
real_relpath = os.path.relpath(real_path, real_root_folder)
# So that's path relative to root_folder, path relative to parent_folder
# and path relative to root for the target
yield SymlinkdPath(symlinkd_path, symlinkd_relpath, real_relpath)
def is_filtered(self, relpath):
"""Say whether this relpath is filtered out"""
# Only include files under the parent_dir
if relpath.startswith("../"):
return True
# Ignore files that we don't want timestamps from
if self.timestamps_for is not None and type(self.timestamps_for) is list:
match = False
for line in self.timestamps_for:
if fnmatch.fnmatch(relpath, line):
match = True
break
if not match:
return True
# Matched is true by default if
# * Have exclude
# * No exclude and no include
matched = self.exclude or not any([self.exclude, self.include])
# Anything not matching exclude gets included
if self.exclude:
for line in self.exclude:
if fnmatch.fnmatch(relpath, line):
matched = False
# Anything matching include gets included
if self.include:
for line in self.include:
if fnmatch.fnmatch(relpath, line):
matched = True
break
return not matched
|
delfick/gitmit
|
gitmit/mit.py
|
GitTimes.is_filtered
|
python
|
def is_filtered(self, relpath):
# Only include files under the parent_dir
if relpath.startswith("../"):
return True
# Ignore files that we don't want timestamps from
if self.timestamps_for is not None and type(self.timestamps_for) is list:
match = False
for line in self.timestamps_for:
if fnmatch.fnmatch(relpath, line):
match = True
break
if not match:
return True
# Matched is true by default if
# * Have exclude
# * No exclude and no include
matched = self.exclude or not any([self.exclude, self.include])
# Anything not matching exclude gets included
if self.exclude:
for line in self.exclude:
if fnmatch.fnmatch(relpath, line):
matched = False
# Anything matching include gets included
if self.include:
for line in self.include:
if fnmatch.fnmatch(relpath, line):
matched = True
break
return not matched
|
Say whether this relpath is filtered out
|
train
|
https://github.com/delfick/gitmit/blob/ae0aef14a06b25ad2811f8f47cc97e68a0910eae/gitmit/mit.py#L200-L234
| null |
class GitTimes(object):
"""
Responsible for determining what files we want commit times for and then
finding those commit times.
The order is something like:
* Git root under ``root_folder``
* Files under ``parent_dir`` relative to the git root
* Only including those files in ``timestamps_for`` relative to parent_dir
* Exclude files in ``exclude`` relative to parent_dir
* Re-include any files in ``include`` relative to parent_dir
Where ``timestamps_for``, ``include`` and ``exclude`` are lists of glob
patterns.
``with_cache`` determines whether to write a cache of the found commit
times under the .git folder and use them instead of trying to find the
commit times each time.
The cache is invalidated when the parent_dir and files to find change.
"""
def __init__(self, root_folder, parent_dir, timestamps_for=None, include=None, exclude=None, silent=False, with_cache=True, debug=False):
self.debug = debug
self.silent = silent
self.include = include
self.exclude = exclude
self.with_cache = with_cache
self.parent_dir = parent_dir
self.root_folder = root_folder
self.timestamps_for = timestamps_for
self.relpath_cache = {}
def relpath_for(self, path):
"""Find the relative path from here from the parent_dir"""
if self.parent_dir in (".", ""):
return path
if path == self.parent_dir:
return ""
dirname = os.path.dirname(path) or "."
basename = os.path.basename(path)
cached = self.relpath_cache.get(dirname, empty)
if cached is empty:
cached = self.relpath_cache[dirname] = os.path.relpath(dirname, self.parent_dir)
return os.path.join(cached, basename)
def find(self):
"""
Find all the files we want to find commit times for, and any extra files
under symlinks.
Then find the commit times for those files and return a dictionary of
{relative_path: commit_time_as_epoch}
"""
mtimes = {}
git = Repo(self.root_folder)
all_files = git.all_files()
use_files = set(self.find_files_for_use(all_files))
# the git index won't find the files under a symlink :(
# And we include files under a symlink as seperate copies of the files
# So we still want to generate modified times for those files
extras = set(self.extra_symlinked_files(use_files))
# Combine use_files and extras
use_files.update(extras)
# Tell the user something
if not self.silent:
log.info("Finding modified times for %s/%s git controlled files in %s", len(use_files), len(all_files), self.root_folder)
# Finally get the dates from git!
return self.commit_times_for(git, use_files)
def commit_times_for(self, git, use_files):
"""
Return commit times for the use_files specified.
We will use a cache of commit times if self.with_cache is Truthy.
Finally, we yield (relpath: epoch) pairs where path is relative
to self.parent_dir and epoch is the commit time in UTC for that path.
"""
# Use real_relpath if it exists (SymlinkdPath) and default to just the path
# This is because we _want_ to compare the commits to the _real paths_
# As git only cares about the symlink itself, rather than files under it
# We also want to make sure that the symlink targets are included in use_files
# If they've been excluded by the filters
use_files_paths = set([getattr(p, "real_relpath", p.path) for p in use_files if p.relpath])
# Find us the first commit to consider
first_commit = str(git.first_commit)
# Try and get our cached commit times
# If we get a commit then it means we have a match for this parent/sorted_relpaths
commit_times = {}
cached_commit, cached_commit_times = None, {}
if self.with_cache:
sorted_relpaths = sorted([p.relpath for p in use_files])
cached_commit, cached_commit_times = get_cached_commit_times(self.root_folder, self.parent_dir, sorted_relpaths)
if cached_commit == first_commit:
commit_times = cached_commit_times
# If we couldn't find cached commit times, we have to do some work
if not commit_times:
for commit_id, commit_time, different_paths in git.file_commit_times(use_files_paths, debug=self.debug):
for path in different_paths:
commit_times[path] = commit_time
if self.with_cache:
set_cached_commit_times(self.root_folder, self.parent_dir, first_commit, commit_times, sorted_relpaths)
# Finally, yield the (relpath, commit_time) for all the files we care about.
for key in use_files:
if key.relpath:
path = getattr(key, "real_relpath", key.path)
relpath = getattr(key, "real_relpath", key.relpath)
if path in commit_times:
yield key.relpath, commit_times[path]
else:
log.warning("Couldn't find commit time for {0}".format(relpath))
def extra_symlinked_files(self, potential_symlinks):
"""
Find any symlinkd folders and yield SymlinkdPath objects for each file
that is found under the symlink.
"""
for key in list(potential_symlinks):
location = os.path.join(self.root_folder, key.path)
real_location = os.path.realpath(location)
if os.path.islink(location) and os.path.isdir(real_location):
for root, dirs, files in os.walk(real_location, followlinks=True):
for name in files:
# So this is joining the name of the symlink
# With the name of the file, relative to the real location of the symlink
full_path = os.path.join(root, name)
rel_location = os.path.relpath(full_path, real_location)
symlinkd_path = os.path.join(key.path, rel_location)
# We then get that relative to the parent dir
dir_part = os.path.relpath(root, real_location)
symlinkd_relpath = os.path.normpath(os.path.join(key.relpath, dir_part, name))
# And we need the original file location so we can get a commit time for the symlinkd path
real_path = os.path.realpath(full_path)
real_root_folder = os.path.realpath(self.root_folder)
real_relpath = os.path.relpath(real_path, real_root_folder)
# So that's path relative to root_folder, path relative to parent_folder
# and path relative to root for the target
yield SymlinkdPath(symlinkd_path, symlinkd_relpath, real_relpath)
def find_files_for_use(self, all_files):
"""
Given a list of all the files to consider, only yield Path objects
for those we care about, given our filters
"""
for path in all_files:
# Find the path relative to the parent dir
relpath = self.relpath_for(path)
# Don't care about the ./
if relpath.startswith("./"):
relpath = relpath[2:]
# Only care about paths that aren't filtered
if not self.is_filtered(relpath):
yield Path(path, relpath)
|
delfick/gitmit
|
gitmit/repo.py
|
Repo.all_files
|
python
|
def all_files(self):
return set([entry.decode() for entry, _ in self.git.open_index().items()])
|
Return a set of all the files under git control
|
train
|
https://github.com/delfick/gitmit/blob/ae0aef14a06b25ad2811f8f47cc97e68a0910eae/gitmit/repo.py#L45-L47
| null |
class Repo(object):
"""
Wrapper around a libgit Repository that knows:
* How to get all the files in the repository
* How to get the oid of HEAD
* How to get the commit times of the files we want commit times for
It's written with speed in mind, given the constraints of making
performant code in python!
"""
def __init__(self, root_folder):
self.git = Repository(root_folder)
@property
def first_commit(self):
"""Return the oid of HEAD"""
return self.git.head().decode()
def file_commit_times(self, use_files_paths, debug=False):
"""
Traverse the commits in the repository, starting from HEAD until we have
found the commit times for all the files we care about.
Yield each file once, only when it is found to be changed in some commit.
If self.debug is true, also output log.debug for the speed we are going
through commits (output commits/second every 1000 commits and every
100000 commits)
"""
prefixes = PrefixTree()
prefixes.fill(use_files_paths)
for entry in self.git.get_walker():
# Commit time taking into account the timezone
commit_time = entry.commit.commit_time - entry.commit.commit_timezone
# Get us the two different tree structures between parents and current
cf_and_pf, changes = self.tree_structures_for(()
, entry.commit.tree
, [self.git.get_object(oid).tree for oid in entry.commit.parents]
, prefixes
)
# Deep dive into any differences
difference = []
if changes:
cfs_and_pfs = [(cf_and_pf, changes)]
while cfs_and_pfs:
nxt, changes = cfs_and_pfs.pop(0)
for thing, changes, is_path in self.differences_between(nxt[0], nxt[1], changes, prefixes):
if is_path:
found = prefixes.remove(thing[:-1], thing[-1])
if found:
difference.append('/'.join(thing))
else:
cfs_and_pfs.append((thing, changes))
# Only yield if there was a difference
if difference:
yield entry.commit.sha().hexdigest(), commit_time, difference
# If nothing remains, then break!
if not prefixes:
break
def entries_in_tree_oid(self, prefix, tree_oid):
"""Find the tree at this oid and return entries prefixed with ``prefix``"""
try:
tree = self.git.get_object(tree_oid)
except KeyError:
log.warning("Couldn't find object {0}".format(tree_oid))
return empty
else:
return frozenset(self.entries_in_tree(prefix, tree))
def entries_in_tree(self, prefix, tree):
"""
Traverse the entries in this tree and yield (prefix, is_tree, oid)
Where prefix is a tuple of the given prefix and the name of the entry.
"""
for entry in tree.items():
if prefix:
new_prefix = prefix + (entry.path.decode(), )
else:
new_prefix = (entry.path.decode(), )
yield (new_prefix, stat.S_ISDIR(entry.mode), entry.sha)
def tree_structures_for(self, prefix, current_oid, parent_oids, prefixes):
"""
Return the entries for this commit, the entries of the parent commits,
and the difference between the two (current_files - parent_files)
"""
if prefix and prefixes and prefix not in prefixes:
return empty, empty
parent_files = set()
for oid in parent_oids:
parent_files.update(self.entries_in_tree_oid(prefix, oid))
current_files = self.entries_in_tree_oid(prefix, current_oid)
return (current_files, parent_files), (current_files - parent_files)
def differences_between(self, current_files, parent_files, changes, prefixes):
"""
yield (thing, changes, is_path)
If is_path is true, changes is None and thing is the path as a tuple.
If is_path is false, thing is the current_files and parent_files for
that changed treeentry and changes is the difference between current_files
and parent_files.
The code here is written to squeeze as much performance as possible out
of this operation.
"""
parent_oid = None
if any(is_tree for _, is_tree, _ in changes):
if len(changes) == 1:
wanted_path = list(changes)[0][0]
parent_oid = frozenset([oid for path, is_tree, oid in parent_files if path == wanted_path and is_tree])
else:
parent_values = defaultdict(set)
parent_changes = parent_files - current_files
for path, is_tree, oid in parent_changes:
if is_tree:
parent_values[path].add(oid)
for path, is_tree, oid in changes:
if is_tree and path not in prefixes:
continue
if not is_tree:
yield path, None, True
else:
parent_oids = parent_oid if parent_oid is not None else parent_values.get(path, empty)
cf_and_pf, changes = self.tree_structures_for(path, oid, parent_oids, prefixes)
if changes:
yield cf_and_pf, changes, False
|
delfick/gitmit
|
gitmit/repo.py
|
Repo.file_commit_times
|
python
|
def file_commit_times(self, use_files_paths, debug=False):
prefixes = PrefixTree()
prefixes.fill(use_files_paths)
for entry in self.git.get_walker():
# Commit time taking into account the timezone
commit_time = entry.commit.commit_time - entry.commit.commit_timezone
# Get us the two different tree structures between parents and current
cf_and_pf, changes = self.tree_structures_for(()
, entry.commit.tree
, [self.git.get_object(oid).tree for oid in entry.commit.parents]
, prefixes
)
# Deep dive into any differences
difference = []
if changes:
cfs_and_pfs = [(cf_and_pf, changes)]
while cfs_and_pfs:
nxt, changes = cfs_and_pfs.pop(0)
for thing, changes, is_path in self.differences_between(nxt[0], nxt[1], changes, prefixes):
if is_path:
found = prefixes.remove(thing[:-1], thing[-1])
if found:
difference.append('/'.join(thing))
else:
cfs_and_pfs.append((thing, changes))
# Only yield if there was a difference
if difference:
yield entry.commit.sha().hexdigest(), commit_time, difference
# If nothing remains, then break!
if not prefixes:
break
|
Traverse the commits in the repository, starting from HEAD until we have
found the commit times for all the files we care about.
Yield each file once, only when it is found to be changed in some commit.
If self.debug is true, also output log.debug for the speed we are going
through commits (output commits/second every 1000 commits and every
100000 commits)
|
train
|
https://github.com/delfick/gitmit/blob/ae0aef14a06b25ad2811f8f47cc97e68a0910eae/gitmit/repo.py#L54-L99
|
[
"def fill(self, paths):\n \"\"\"\n Initialise the tree.\n\n paths is a list of strings where each string is the relative path to some\n file.\n \"\"\"\n for path in paths:\n tree = self.tree\n parts = tuple(path.split('/'))\n dir_parts = parts[:-1]\n built = ()\n for part in dir_parts:\n self.cache[built] = tree\n built += (part, )\n parent = tree\n tree = parent.folders.get(part, empty)\n if tree is empty:\n tree = parent.folders[part] = TreeItem(name=built, folders={}, files=set(), parent=parent)\n\n self.cache[dir_parts] = tree\n tree.files.add(parts[-1])\n",
"def remove(self, prefix, name):\n \"\"\"\n Remove a path from the tree\n\n prefix is a tuple of the parts in the dirpath\n\n name is a string representing the name of the file itself.\n\n Any empty folders from the point of the file backwards to the root of\n the tree is removed.\n \"\"\"\n tree = self.cache.get(prefix, empty)\n if tree is empty:\n return False\n\n if name not in tree.files:\n return False\n\n tree.files.remove(name)\n self.remove_folder(tree, list(prefix))\n\n return True\n",
"def tree_structures_for(self, prefix, current_oid, parent_oids, prefixes):\n \"\"\"\n Return the entries for this commit, the entries of the parent commits,\n and the difference between the two (current_files - parent_files)\n \"\"\"\n if prefix and prefixes and prefix not in prefixes:\n return empty, empty\n\n parent_files = set()\n for oid in parent_oids:\n parent_files.update(self.entries_in_tree_oid(prefix, oid))\n\n current_files = self.entries_in_tree_oid(prefix, current_oid)\n return (current_files, parent_files), (current_files - parent_files)\n",
"def differences_between(self, current_files, parent_files, changes, prefixes):\n \"\"\"\n yield (thing, changes, is_path)\n\n If is_path is true, changes is None and thing is the path as a tuple.\n\n If is_path is false, thing is the current_files and parent_files for\n that changed treeentry and changes is the difference between current_files\n and parent_files.\n\n The code here is written to squeeze as much performance as possible out\n of this operation.\n \"\"\"\n parent_oid = None\n\n if any(is_tree for _, is_tree, _ in changes):\n if len(changes) == 1:\n wanted_path = list(changes)[0][0]\n parent_oid = frozenset([oid for path, is_tree, oid in parent_files if path == wanted_path and is_tree])\n else:\n parent_values = defaultdict(set)\n parent_changes = parent_files - current_files\n for path, is_tree, oid in parent_changes:\n if is_tree:\n parent_values[path].add(oid)\n\n for path, is_tree, oid in changes:\n if is_tree and path not in prefixes:\n continue\n\n if not is_tree:\n yield path, None, True\n else:\n parent_oids = parent_oid if parent_oid is not None else parent_values.get(path, empty)\n cf_and_pf, changes = self.tree_structures_for(path, oid, parent_oids, prefixes)\n if changes:\n yield cf_and_pf, changes, False\n"
] |
class Repo(object):
"""
Wrapper around a libgit Repository that knows:
* How to get all the files in the repository
* How to get the oid of HEAD
* How to get the commit times of the files we want commit times for
It's written with speed in mind, given the constraints of making
performant code in python!
"""
def __init__(self, root_folder):
self.git = Repository(root_folder)
def all_files(self):
"""Return a set of all the files under git control"""
return set([entry.decode() for entry, _ in self.git.open_index().items()])
@property
def first_commit(self):
"""Return the oid of HEAD"""
return self.git.head().decode()
def entries_in_tree_oid(self, prefix, tree_oid):
"""Find the tree at this oid and return entries prefixed with ``prefix``"""
try:
tree = self.git.get_object(tree_oid)
except KeyError:
log.warning("Couldn't find object {0}".format(tree_oid))
return empty
else:
return frozenset(self.entries_in_tree(prefix, tree))
def entries_in_tree(self, prefix, tree):
"""
Traverse the entries in this tree and yield (prefix, is_tree, oid)
Where prefix is a tuple of the given prefix and the name of the entry.
"""
for entry in tree.items():
if prefix:
new_prefix = prefix + (entry.path.decode(), )
else:
new_prefix = (entry.path.decode(), )
yield (new_prefix, stat.S_ISDIR(entry.mode), entry.sha)
def tree_structures_for(self, prefix, current_oid, parent_oids, prefixes):
"""
Return the entries for this commit, the entries of the parent commits,
and the difference between the two (current_files - parent_files)
"""
if prefix and prefixes and prefix not in prefixes:
return empty, empty
parent_files = set()
for oid in parent_oids:
parent_files.update(self.entries_in_tree_oid(prefix, oid))
current_files = self.entries_in_tree_oid(prefix, current_oid)
return (current_files, parent_files), (current_files - parent_files)
def differences_between(self, current_files, parent_files, changes, prefixes):
"""
yield (thing, changes, is_path)
If is_path is true, changes is None and thing is the path as a tuple.
If is_path is false, thing is the current_files and parent_files for
that changed treeentry and changes is the difference between current_files
and parent_files.
The code here is written to squeeze as much performance as possible out
of this operation.
"""
parent_oid = None
if any(is_tree for _, is_tree, _ in changes):
if len(changes) == 1:
wanted_path = list(changes)[0][0]
parent_oid = frozenset([oid for path, is_tree, oid in parent_files if path == wanted_path and is_tree])
else:
parent_values = defaultdict(set)
parent_changes = parent_files - current_files
for path, is_tree, oid in parent_changes:
if is_tree:
parent_values[path].add(oid)
for path, is_tree, oid in changes:
if is_tree and path not in prefixes:
continue
if not is_tree:
yield path, None, True
else:
parent_oids = parent_oid if parent_oid is not None else parent_values.get(path, empty)
cf_and_pf, changes = self.tree_structures_for(path, oid, parent_oids, prefixes)
if changes:
yield cf_and_pf, changes, False
|
delfick/gitmit
|
gitmit/repo.py
|
Repo.entries_in_tree_oid
|
python
|
def entries_in_tree_oid(self, prefix, tree_oid):
try:
tree = self.git.get_object(tree_oid)
except KeyError:
log.warning("Couldn't find object {0}".format(tree_oid))
return empty
else:
return frozenset(self.entries_in_tree(prefix, tree))
|
Find the tree at this oid and return entries prefixed with ``prefix``
|
train
|
https://github.com/delfick/gitmit/blob/ae0aef14a06b25ad2811f8f47cc97e68a0910eae/gitmit/repo.py#L101-L109
|
[
"def entries_in_tree(self, prefix, tree):\n \"\"\"\n Traverse the entries in this tree and yield (prefix, is_tree, oid)\n\n Where prefix is a tuple of the given prefix and the name of the entry.\n \"\"\"\n for entry in tree.items():\n if prefix:\n new_prefix = prefix + (entry.path.decode(), )\n else:\n new_prefix = (entry.path.decode(), )\n\n yield (new_prefix, stat.S_ISDIR(entry.mode), entry.sha)\n"
] |
class Repo(object):
"""
Wrapper around a libgit Repository that knows:
* How to get all the files in the repository
* How to get the oid of HEAD
* How to get the commit times of the files we want commit times for
It's written with speed in mind, given the constraints of making
performant code in python!
"""
def __init__(self, root_folder):
self.git = Repository(root_folder)
def all_files(self):
"""Return a set of all the files under git control"""
return set([entry.decode() for entry, _ in self.git.open_index().items()])
@property
def first_commit(self):
"""Return the oid of HEAD"""
return self.git.head().decode()
def file_commit_times(self, use_files_paths, debug=False):
"""
Traverse the commits in the repository, starting from HEAD until we have
found the commit times for all the files we care about.
Yield each file once, only when it is found to be changed in some commit.
If self.debug is true, also output log.debug for the speed we are going
through commits (output commits/second every 1000 commits and every
100000 commits)
"""
prefixes = PrefixTree()
prefixes.fill(use_files_paths)
for entry in self.git.get_walker():
# Commit time taking into account the timezone
commit_time = entry.commit.commit_time - entry.commit.commit_timezone
# Get us the two different tree structures between parents and current
cf_and_pf, changes = self.tree_structures_for(()
, entry.commit.tree
, [self.git.get_object(oid).tree for oid in entry.commit.parents]
, prefixes
)
# Deep dive into any differences
difference = []
if changes:
cfs_and_pfs = [(cf_and_pf, changes)]
while cfs_and_pfs:
nxt, changes = cfs_and_pfs.pop(0)
for thing, changes, is_path in self.differences_between(nxt[0], nxt[1], changes, prefixes):
if is_path:
found = prefixes.remove(thing[:-1], thing[-1])
if found:
difference.append('/'.join(thing))
else:
cfs_and_pfs.append((thing, changes))
# Only yield if there was a difference
if difference:
yield entry.commit.sha().hexdigest(), commit_time, difference
# If nothing remains, then break!
if not prefixes:
break
def entries_in_tree(self, prefix, tree):
"""
Traverse the entries in this tree and yield (prefix, is_tree, oid)
Where prefix is a tuple of the given prefix and the name of the entry.
"""
for entry in tree.items():
if prefix:
new_prefix = prefix + (entry.path.decode(), )
else:
new_prefix = (entry.path.decode(), )
yield (new_prefix, stat.S_ISDIR(entry.mode), entry.sha)
def tree_structures_for(self, prefix, current_oid, parent_oids, prefixes):
"""
Return the entries for this commit, the entries of the parent commits,
and the difference between the two (current_files - parent_files)
"""
if prefix and prefixes and prefix not in prefixes:
return empty, empty
parent_files = set()
for oid in parent_oids:
parent_files.update(self.entries_in_tree_oid(prefix, oid))
current_files = self.entries_in_tree_oid(prefix, current_oid)
return (current_files, parent_files), (current_files - parent_files)
def differences_between(self, current_files, parent_files, changes, prefixes):
"""
yield (thing, changes, is_path)
If is_path is true, changes is None and thing is the path as a tuple.
If is_path is false, thing is the current_files and parent_files for
that changed treeentry and changes is the difference between current_files
and parent_files.
The code here is written to squeeze as much performance as possible out
of this operation.
"""
parent_oid = None
if any(is_tree for _, is_tree, _ in changes):
if len(changes) == 1:
wanted_path = list(changes)[0][0]
parent_oid = frozenset([oid for path, is_tree, oid in parent_files if path == wanted_path and is_tree])
else:
parent_values = defaultdict(set)
parent_changes = parent_files - current_files
for path, is_tree, oid in parent_changes:
if is_tree:
parent_values[path].add(oid)
for path, is_tree, oid in changes:
if is_tree and path not in prefixes:
continue
if not is_tree:
yield path, None, True
else:
parent_oids = parent_oid if parent_oid is not None else parent_values.get(path, empty)
cf_and_pf, changes = self.tree_structures_for(path, oid, parent_oids, prefixes)
if changes:
yield cf_and_pf, changes, False
|
delfick/gitmit
|
gitmit/repo.py
|
Repo.entries_in_tree
|
python
|
def entries_in_tree(self, prefix, tree):
for entry in tree.items():
if prefix:
new_prefix = prefix + (entry.path.decode(), )
else:
new_prefix = (entry.path.decode(), )
yield (new_prefix, stat.S_ISDIR(entry.mode), entry.sha)
|
Traverse the entries in this tree and yield (prefix, is_tree, oid)
Where prefix is a tuple of the given prefix and the name of the entry.
|
train
|
https://github.com/delfick/gitmit/blob/ae0aef14a06b25ad2811f8f47cc97e68a0910eae/gitmit/repo.py#L111-L123
| null |
class Repo(object):
"""
Wrapper around a libgit Repository that knows:
* How to get all the files in the repository
* How to get the oid of HEAD
* How to get the commit times of the files we want commit times for
It's written with speed in mind, given the constraints of making
performant code in python!
"""
def __init__(self, root_folder):
self.git = Repository(root_folder)
def all_files(self):
"""Return a set of all the files under git control"""
return set([entry.decode() for entry, _ in self.git.open_index().items()])
@property
def first_commit(self):
"""Return the oid of HEAD"""
return self.git.head().decode()
def file_commit_times(self, use_files_paths, debug=False):
"""
Traverse the commits in the repository, starting from HEAD until we have
found the commit times for all the files we care about.
Yield each file once, only when it is found to be changed in some commit.
If self.debug is true, also output log.debug for the speed we are going
through commits (output commits/second every 1000 commits and every
100000 commits)
"""
prefixes = PrefixTree()
prefixes.fill(use_files_paths)
for entry in self.git.get_walker():
# Commit time taking into account the timezone
commit_time = entry.commit.commit_time - entry.commit.commit_timezone
# Get us the two different tree structures between parents and current
cf_and_pf, changes = self.tree_structures_for(()
, entry.commit.tree
, [self.git.get_object(oid).tree for oid in entry.commit.parents]
, prefixes
)
# Deep dive into any differences
difference = []
if changes:
cfs_and_pfs = [(cf_and_pf, changes)]
while cfs_and_pfs:
nxt, changes = cfs_and_pfs.pop(0)
for thing, changes, is_path in self.differences_between(nxt[0], nxt[1], changes, prefixes):
if is_path:
found = prefixes.remove(thing[:-1], thing[-1])
if found:
difference.append('/'.join(thing))
else:
cfs_and_pfs.append((thing, changes))
# Only yield if there was a difference
if difference:
yield entry.commit.sha().hexdigest(), commit_time, difference
# If nothing remains, then break!
if not prefixes:
break
def entries_in_tree_oid(self, prefix, tree_oid):
"""Find the tree at this oid and return entries prefixed with ``prefix``"""
try:
tree = self.git.get_object(tree_oid)
except KeyError:
log.warning("Couldn't find object {0}".format(tree_oid))
return empty
else:
return frozenset(self.entries_in_tree(prefix, tree))
def tree_structures_for(self, prefix, current_oid, parent_oids, prefixes):
"""
Return the entries for this commit, the entries of the parent commits,
and the difference between the two (current_files - parent_files)
"""
if prefix and prefixes and prefix not in prefixes:
return empty, empty
parent_files = set()
for oid in parent_oids:
parent_files.update(self.entries_in_tree_oid(prefix, oid))
current_files = self.entries_in_tree_oid(prefix, current_oid)
return (current_files, parent_files), (current_files - parent_files)
def differences_between(self, current_files, parent_files, changes, prefixes):
"""
yield (thing, changes, is_path)
If is_path is true, changes is None and thing is the path as a tuple.
If is_path is false, thing is the current_files and parent_files for
that changed treeentry and changes is the difference between current_files
and parent_files.
The code here is written to squeeze as much performance as possible out
of this operation.
"""
parent_oid = None
if any(is_tree for _, is_tree, _ in changes):
if len(changes) == 1:
wanted_path = list(changes)[0][0]
parent_oid = frozenset([oid for path, is_tree, oid in parent_files if path == wanted_path and is_tree])
else:
parent_values = defaultdict(set)
parent_changes = parent_files - current_files
for path, is_tree, oid in parent_changes:
if is_tree:
parent_values[path].add(oid)
for path, is_tree, oid in changes:
if is_tree and path not in prefixes:
continue
if not is_tree:
yield path, None, True
else:
parent_oids = parent_oid if parent_oid is not None else parent_values.get(path, empty)
cf_and_pf, changes = self.tree_structures_for(path, oid, parent_oids, prefixes)
if changes:
yield cf_and_pf, changes, False
|
delfick/gitmit
|
gitmit/repo.py
|
Repo.tree_structures_for
|
python
|
def tree_structures_for(self, prefix, current_oid, parent_oids, prefixes):
if prefix and prefixes and prefix not in prefixes:
return empty, empty
parent_files = set()
for oid in parent_oids:
parent_files.update(self.entries_in_tree_oid(prefix, oid))
current_files = self.entries_in_tree_oid(prefix, current_oid)
return (current_files, parent_files), (current_files - parent_files)
|
Return the entries for this commit, the entries of the parent commits,
and the difference between the two (current_files - parent_files)
|
train
|
https://github.com/delfick/gitmit/blob/ae0aef14a06b25ad2811f8f47cc97e68a0910eae/gitmit/repo.py#L125-L138
|
[
"def entries_in_tree_oid(self, prefix, tree_oid):\n \"\"\"Find the tree at this oid and return entries prefixed with ``prefix``\"\"\"\n try:\n tree = self.git.get_object(tree_oid)\n except KeyError:\n log.warning(\"Couldn't find object {0}\".format(tree_oid))\n return empty\n else:\n return frozenset(self.entries_in_tree(prefix, tree))\n"
] |
class Repo(object):
"""
Wrapper around a libgit Repository that knows:
* How to get all the files in the repository
* How to get the oid of HEAD
* How to get the commit times of the files we want commit times for
It's written with speed in mind, given the constraints of making
performant code in python!
"""
def __init__(self, root_folder):
self.git = Repository(root_folder)
def all_files(self):
"""Return a set of all the files under git control"""
return set([entry.decode() for entry, _ in self.git.open_index().items()])
@property
def first_commit(self):
"""Return the oid of HEAD"""
return self.git.head().decode()
def file_commit_times(self, use_files_paths, debug=False):
"""
Traverse the commits in the repository, starting from HEAD until we have
found the commit times for all the files we care about.
Yield each file once, only when it is found to be changed in some commit.
If self.debug is true, also output log.debug for the speed we are going
through commits (output commits/second every 1000 commits and every
100000 commits)
"""
prefixes = PrefixTree()
prefixes.fill(use_files_paths)
for entry in self.git.get_walker():
# Commit time taking into account the timezone
commit_time = entry.commit.commit_time - entry.commit.commit_timezone
# Get us the two different tree structures between parents and current
cf_and_pf, changes = self.tree_structures_for(()
, entry.commit.tree
, [self.git.get_object(oid).tree for oid in entry.commit.parents]
, prefixes
)
# Deep dive into any differences
difference = []
if changes:
cfs_and_pfs = [(cf_and_pf, changes)]
while cfs_and_pfs:
nxt, changes = cfs_and_pfs.pop(0)
for thing, changes, is_path in self.differences_between(nxt[0], nxt[1], changes, prefixes):
if is_path:
found = prefixes.remove(thing[:-1], thing[-1])
if found:
difference.append('/'.join(thing))
else:
cfs_and_pfs.append((thing, changes))
# Only yield if there was a difference
if difference:
yield entry.commit.sha().hexdigest(), commit_time, difference
# If nothing remains, then break!
if not prefixes:
break
def entries_in_tree_oid(self, prefix, tree_oid):
"""Find the tree at this oid and return entries prefixed with ``prefix``"""
try:
tree = self.git.get_object(tree_oid)
except KeyError:
log.warning("Couldn't find object {0}".format(tree_oid))
return empty
else:
return frozenset(self.entries_in_tree(prefix, tree))
def entries_in_tree(self, prefix, tree):
"""
Traverse the entries in this tree and yield (prefix, is_tree, oid)
Where prefix is a tuple of the given prefix and the name of the entry.
"""
for entry in tree.items():
if prefix:
new_prefix = prefix + (entry.path.decode(), )
else:
new_prefix = (entry.path.decode(), )
yield (new_prefix, stat.S_ISDIR(entry.mode), entry.sha)
def differences_between(self, current_files, parent_files, changes, prefixes):
"""
yield (thing, changes, is_path)
If is_path is true, changes is None and thing is the path as a tuple.
If is_path is false, thing is the current_files and parent_files for
that changed treeentry and changes is the difference between current_files
and parent_files.
The code here is written to squeeze as much performance as possible out
of this operation.
"""
parent_oid = None
if any(is_tree for _, is_tree, _ in changes):
if len(changes) == 1:
wanted_path = list(changes)[0][0]
parent_oid = frozenset([oid for path, is_tree, oid in parent_files if path == wanted_path and is_tree])
else:
parent_values = defaultdict(set)
parent_changes = parent_files - current_files
for path, is_tree, oid in parent_changes:
if is_tree:
parent_values[path].add(oid)
for path, is_tree, oid in changes:
if is_tree and path not in prefixes:
continue
if not is_tree:
yield path, None, True
else:
parent_oids = parent_oid if parent_oid is not None else parent_values.get(path, empty)
cf_and_pf, changes = self.tree_structures_for(path, oid, parent_oids, prefixes)
if changes:
yield cf_and_pf, changes, False
|
delfick/gitmit
|
gitmit/repo.py
|
Repo.differences_between
|
python
|
def differences_between(self, current_files, parent_files, changes, prefixes):
parent_oid = None
if any(is_tree for _, is_tree, _ in changes):
if len(changes) == 1:
wanted_path = list(changes)[0][0]
parent_oid = frozenset([oid for path, is_tree, oid in parent_files if path == wanted_path and is_tree])
else:
parent_values = defaultdict(set)
parent_changes = parent_files - current_files
for path, is_tree, oid in parent_changes:
if is_tree:
parent_values[path].add(oid)
for path, is_tree, oid in changes:
if is_tree and path not in prefixes:
continue
if not is_tree:
yield path, None, True
else:
parent_oids = parent_oid if parent_oid is not None else parent_values.get(path, empty)
cf_and_pf, changes = self.tree_structures_for(path, oid, parent_oids, prefixes)
if changes:
yield cf_and_pf, changes, False
|
yield (thing, changes, is_path)
If is_path is true, changes is None and thing is the path as a tuple.
If is_path is false, thing is the current_files and parent_files for
that changed treeentry and changes is the difference between current_files
and parent_files.
The code here is written to squeeze as much performance as possible out
of this operation.
|
train
|
https://github.com/delfick/gitmit/blob/ae0aef14a06b25ad2811f8f47cc97e68a0910eae/gitmit/repo.py#L140-L176
| null |
class Repo(object):
"""
Wrapper around a libgit Repository that knows:
* How to get all the files in the repository
* How to get the oid of HEAD
* How to get the commit times of the files we want commit times for
It's written with speed in mind, given the constraints of making
performant code in python!
"""
def __init__(self, root_folder):
self.git = Repository(root_folder)
def all_files(self):
"""Return a set of all the files under git control"""
return set([entry.decode() for entry, _ in self.git.open_index().items()])
@property
def first_commit(self):
"""Return the oid of HEAD"""
return self.git.head().decode()
def file_commit_times(self, use_files_paths, debug=False):
"""
Traverse the commits in the repository, starting from HEAD until we have
found the commit times for all the files we care about.
Yield each file once, only when it is found to be changed in some commit.
If self.debug is true, also output log.debug for the speed we are going
through commits (output commits/second every 1000 commits and every
100000 commits)
"""
prefixes = PrefixTree()
prefixes.fill(use_files_paths)
for entry in self.git.get_walker():
# Commit time taking into account the timezone
commit_time = entry.commit.commit_time - entry.commit.commit_timezone
# Get us the two different tree structures between parents and current
cf_and_pf, changes = self.tree_structures_for(()
, entry.commit.tree
, [self.git.get_object(oid).tree for oid in entry.commit.parents]
, prefixes
)
# Deep dive into any differences
difference = []
if changes:
cfs_and_pfs = [(cf_and_pf, changes)]
while cfs_and_pfs:
nxt, changes = cfs_and_pfs.pop(0)
for thing, changes, is_path in self.differences_between(nxt[0], nxt[1], changes, prefixes):
if is_path:
found = prefixes.remove(thing[:-1], thing[-1])
if found:
difference.append('/'.join(thing))
else:
cfs_and_pfs.append((thing, changes))
# Only yield if there was a difference
if difference:
yield entry.commit.sha().hexdigest(), commit_time, difference
# If nothing remains, then break!
if not prefixes:
break
def entries_in_tree_oid(self, prefix, tree_oid):
"""Find the tree at this oid and return entries prefixed with ``prefix``"""
try:
tree = self.git.get_object(tree_oid)
except KeyError:
log.warning("Couldn't find object {0}".format(tree_oid))
return empty
else:
return frozenset(self.entries_in_tree(prefix, tree))
def entries_in_tree(self, prefix, tree):
"""
Traverse the entries in this tree and yield (prefix, is_tree, oid)
Where prefix is a tuple of the given prefix and the name of the entry.
"""
for entry in tree.items():
if prefix:
new_prefix = prefix + (entry.path.decode(), )
else:
new_prefix = (entry.path.decode(), )
yield (new_prefix, stat.S_ISDIR(entry.mode), entry.sha)
def tree_structures_for(self, prefix, current_oid, parent_oids, prefixes):
"""
Return the entries for this commit, the entries of the parent commits,
and the difference between the two (current_files - parent_files)
"""
if prefix and prefixes and prefix not in prefixes:
return empty, empty
parent_files = set()
for oid in parent_oids:
parent_files.update(self.entries_in_tree_oid(prefix, oid))
current_files = self.entries_in_tree_oid(prefix, current_oid)
return (current_files, parent_files), (current_files - parent_files)
|
delfick/gitmit
|
gitmit/cache.py
|
get_all_cached_commit_times
|
python
|
def get_all_cached_commit_times(root_folder):
result = []
location = cache_location(root_folder)
if os.path.exists(location):
try:
result = json.load(open(location))
except (TypeError, ValueError) as error:
log.warning("Failed to open gitmit cached commit_times\tlocation=%s\terror=%s", location, error)
else:
if type(result) is not list or not all(type(item) is dict for item in result):
log.warning("Gitmit cached commit_times needs to be a list of dictionaries\tlocation=%s\tgot=%s", location, type(result))
result = []
return result
|
Find the gitmit cached commit_times and return them if they are the right shape.
This means the file is a list of dictionaries.
If they aren't, issue a warning and return an empty list, it is just a cache
after all!
|
train
|
https://github.com/delfick/gitmit/blob/ae0aef14a06b25ad2811f8f47cc97e68a0910eae/gitmit/cache.py#L20-L42
|
[
"def cache_location(root_folder):\n \"\"\"\n Return us the location to the commit times cache\n\n This is <root_folder>/.git/gitmit_cached_commit_times.json\n \"\"\"\n return os.path.join(root_folder, \".git\", \"gitmit_cached_commit_times.json\")\n"
] |
"""
This holds the functionality to write and read a cache of the modified times
for a repository.
"""
import logging
import json
import os
log = logging.getLogger("gitmit.cache")
def cache_location(root_folder):
"""
Return us the location to the commit times cache
This is <root_folder>/.git/gitmit_cached_commit_times.json
"""
return os.path.join(root_folder, ".git", "gitmit_cached_commit_times.json")
def get_cached_commit_times(root_folder, parent_dir, sorted_relpaths):
"""
Get the cached commit times for the combination of this parent_dir and relpaths
Return the commit assigned to this combination and the actual times!
"""
result = get_all_cached_commit_times(root_folder)
for item in result:
if sorted(item.get("sorted_relpaths", [])) == sorted_relpaths and item.get("parent_dir") == parent_dir:
return item.get("commit"), item.get("commit_times")
return None, {}
def set_cached_commit_times(root_folder, parent_dir, first_commit, commit_times, sorted_relpaths):
"""
Set the cached commit times in a json file at cache_location(root_folder)
We first get what is currently in the cache and either modify the existing
entry for this combo of parent_dir and sorted_relpaths.
Or add to the entries.
We then ensure there's less than 5 entries to keep the cache from growing
too large (arbitrary number is arbitrary).
Finally, we write the cache or issue a warning if we can't.
"""
current = get_all_cached_commit_times(root_folder)
location = cache_location(root_folder)
found = False
for item in current:
if sorted(item.get("sorted_relpaths", [])) == sorted_relpaths and item.get("parent_dir") == parent_dir:
item["commit_times"] = commit_times
item["commit"] = str(first_commit)
found = True
break
if not found:
current.append({"commit": str(first_commit), "parent_dir": parent_dir, "commit_times": commit_times, "sorted_relpaths": sorted_relpaths})
# Make sure it doesn't grow too big....
# Arbitrary number is arbitrary
while len(current) > 5:
current.pop(0)
try:
log.info("Writing gitmit cached commit_times\tlocation=%s", location)
with open(location, "w") as fle:
json.dump(current, fle)
except (TypeError, ValueError, IOError) as error:
log.warning("Failed to dump gitmit mtime cache\tlocation=%s\terror=%s", location, error)
|
delfick/gitmit
|
gitmit/cache.py
|
get_cached_commit_times
|
python
|
def get_cached_commit_times(root_folder, parent_dir, sorted_relpaths):
result = get_all_cached_commit_times(root_folder)
for item in result:
if sorted(item.get("sorted_relpaths", [])) == sorted_relpaths and item.get("parent_dir") == parent_dir:
return item.get("commit"), item.get("commit_times")
return None, {}
|
Get the cached commit times for the combination of this parent_dir and relpaths
Return the commit assigned to this combination and the actual times!
|
train
|
https://github.com/delfick/gitmit/blob/ae0aef14a06b25ad2811f8f47cc97e68a0910eae/gitmit/cache.py#L44-L56
|
[
"def get_all_cached_commit_times(root_folder):\n \"\"\"\n Find the gitmit cached commit_times and return them if they are the right shape.\n\n This means the file is a list of dictionaries.\n\n If they aren't, issue a warning and return an empty list, it is just a cache\n after all!\n \"\"\"\n result = []\n location = cache_location(root_folder)\n\n if os.path.exists(location):\n try:\n result = json.load(open(location))\n except (TypeError, ValueError) as error:\n log.warning(\"Failed to open gitmit cached commit_times\\tlocation=%s\\terror=%s\", location, error)\n else:\n if type(result) is not list or not all(type(item) is dict for item in result):\n log.warning(\"Gitmit cached commit_times needs to be a list of dictionaries\\tlocation=%s\\tgot=%s\", location, type(result))\n result = []\n\n return result\n"
] |
"""
This holds the functionality to write and read a cache of the modified times
for a repository.
"""
import logging
import json
import os
log = logging.getLogger("gitmit.cache")
def cache_location(root_folder):
"""
Return us the location to the commit times cache
This is <root_folder>/.git/gitmit_cached_commit_times.json
"""
return os.path.join(root_folder, ".git", "gitmit_cached_commit_times.json")
def get_all_cached_commit_times(root_folder):
"""
Find the gitmit cached commit_times and return them if they are the right shape.
This means the file is a list of dictionaries.
If they aren't, issue a warning and return an empty list, it is just a cache
after all!
"""
result = []
location = cache_location(root_folder)
if os.path.exists(location):
try:
result = json.load(open(location))
except (TypeError, ValueError) as error:
log.warning("Failed to open gitmit cached commit_times\tlocation=%s\terror=%s", location, error)
else:
if type(result) is not list or not all(type(item) is dict for item in result):
log.warning("Gitmit cached commit_times needs to be a list of dictionaries\tlocation=%s\tgot=%s", location, type(result))
result = []
return result
def set_cached_commit_times(root_folder, parent_dir, first_commit, commit_times, sorted_relpaths):
"""
Set the cached commit times in a json file at cache_location(root_folder)
We first get what is currently in the cache and either modify the existing
entry for this combo of parent_dir and sorted_relpaths.
Or add to the entries.
We then ensure there's less than 5 entries to keep the cache from growing
too large (arbitrary number is arbitrary).
Finally, we write the cache or issue a warning if we can't.
"""
current = get_all_cached_commit_times(root_folder)
location = cache_location(root_folder)
found = False
for item in current:
if sorted(item.get("sorted_relpaths", [])) == sorted_relpaths and item.get("parent_dir") == parent_dir:
item["commit_times"] = commit_times
item["commit"] = str(first_commit)
found = True
break
if not found:
current.append({"commit": str(first_commit), "parent_dir": parent_dir, "commit_times": commit_times, "sorted_relpaths": sorted_relpaths})
# Make sure it doesn't grow too big....
# Arbitrary number is arbitrary
while len(current) > 5:
current.pop(0)
try:
log.info("Writing gitmit cached commit_times\tlocation=%s", location)
with open(location, "w") as fle:
json.dump(current, fle)
except (TypeError, ValueError, IOError) as error:
log.warning("Failed to dump gitmit mtime cache\tlocation=%s\terror=%s", location, error)
|
delfick/gitmit
|
gitmit/cache.py
|
set_cached_commit_times
|
python
|
def set_cached_commit_times(root_folder, parent_dir, first_commit, commit_times, sorted_relpaths):
current = get_all_cached_commit_times(root_folder)
location = cache_location(root_folder)
found = False
for item in current:
if sorted(item.get("sorted_relpaths", [])) == sorted_relpaths and item.get("parent_dir") == parent_dir:
item["commit_times"] = commit_times
item["commit"] = str(first_commit)
found = True
break
if not found:
current.append({"commit": str(first_commit), "parent_dir": parent_dir, "commit_times": commit_times, "sorted_relpaths": sorted_relpaths})
# Make sure it doesn't grow too big....
# Arbitrary number is arbitrary
while len(current) > 5:
current.pop(0)
try:
log.info("Writing gitmit cached commit_times\tlocation=%s", location)
with open(location, "w") as fle:
json.dump(current, fle)
except (TypeError, ValueError, IOError) as error:
log.warning("Failed to dump gitmit mtime cache\tlocation=%s\terror=%s", location, error)
|
Set the cached commit times in a json file at cache_location(root_folder)
We first get what is currently in the cache and either modify the existing
entry for this combo of parent_dir and sorted_relpaths.
Or add to the entries.
We then ensure there's less than 5 entries to keep the cache from growing
too large (arbitrary number is arbitrary).
Finally, we write the cache or issue a warning if we can't.
|
train
|
https://github.com/delfick/gitmit/blob/ae0aef14a06b25ad2811f8f47cc97e68a0910eae/gitmit/cache.py#L58-L96
|
[
"def cache_location(root_folder):\n \"\"\"\n Return us the location to the commit times cache\n\n This is <root_folder>/.git/gitmit_cached_commit_times.json\n \"\"\"\n return os.path.join(root_folder, \".git\", \"gitmit_cached_commit_times.json\")\n",
"def get_all_cached_commit_times(root_folder):\n \"\"\"\n Find the gitmit cached commit_times and return them if they are the right shape.\n\n This means the file is a list of dictionaries.\n\n If they aren't, issue a warning and return an empty list, it is just a cache\n after all!\n \"\"\"\n result = []\n location = cache_location(root_folder)\n\n if os.path.exists(location):\n try:\n result = json.load(open(location))\n except (TypeError, ValueError) as error:\n log.warning(\"Failed to open gitmit cached commit_times\\tlocation=%s\\terror=%s\", location, error)\n else:\n if type(result) is not list or not all(type(item) is dict for item in result):\n log.warning(\"Gitmit cached commit_times needs to be a list of dictionaries\\tlocation=%s\\tgot=%s\", location, type(result))\n result = []\n\n return result\n"
] |
"""
This holds the functionality to write and read a cache of the modified times
for a repository.
"""
import logging
import json
import os
log = logging.getLogger("gitmit.cache")
def cache_location(root_folder):
"""
Return us the location to the commit times cache
This is <root_folder>/.git/gitmit_cached_commit_times.json
"""
return os.path.join(root_folder, ".git", "gitmit_cached_commit_times.json")
def get_all_cached_commit_times(root_folder):
"""
Find the gitmit cached commit_times and return them if they are the right shape.
This means the file is a list of dictionaries.
If they aren't, issue a warning and return an empty list, it is just a cache
after all!
"""
result = []
location = cache_location(root_folder)
if os.path.exists(location):
try:
result = json.load(open(location))
except (TypeError, ValueError) as error:
log.warning("Failed to open gitmit cached commit_times\tlocation=%s\terror=%s", location, error)
else:
if type(result) is not list or not all(type(item) is dict for item in result):
log.warning("Gitmit cached commit_times needs to be a list of dictionaries\tlocation=%s\tgot=%s", location, type(result))
result = []
return result
def get_cached_commit_times(root_folder, parent_dir, sorted_relpaths):
"""
Get the cached commit times for the combination of this parent_dir and relpaths
Return the commit assigned to this combination and the actual times!
"""
result = get_all_cached_commit_times(root_folder)
for item in result:
if sorted(item.get("sorted_relpaths", [])) == sorted_relpaths and item.get("parent_dir") == parent_dir:
return item.get("commit"), item.get("commit_times")
return None, {}
|
delfick/gitmit
|
gitmit/prefix_tree.py
|
PrefixTree.fill
|
python
|
def fill(self, paths):
for path in paths:
tree = self.tree
parts = tuple(path.split('/'))
dir_parts = parts[:-1]
built = ()
for part in dir_parts:
self.cache[built] = tree
built += (part, )
parent = tree
tree = parent.folders.get(part, empty)
if tree is empty:
tree = parent.folders[part] = TreeItem(name=built, folders={}, files=set(), parent=parent)
self.cache[dir_parts] = tree
tree.files.add(parts[-1])
|
Initialise the tree.
paths is a list of strings where each string is the relative path to some
file.
|
train
|
https://github.com/delfick/gitmit/blob/ae0aef14a06b25ad2811f8f47cc97e68a0910eae/gitmit/prefix_tree.py#L59-L80
| null |
class PrefixTree(object):
"""
Holds a linked list like structure for traversal and a cache of
("path", "to", "folder") to the tree representing that folder.
Each Tree is an instance of TreeItem, as initialised after calling
PrefixTree#fill.
The idea is you fill the tree once and then remove files one at a time until
all the files are gone.
"""
def __init__(self):
self.tree = TreeItem(name=(), folders={}, files=set(), parent=None)
self.cache = {}
def __bool__(self):
"""Check the cache to see if there are any folders left with contents"""
return bool(self.cache)
__nonzero__ = __bool__
def __contains__(self, prefix):
"""
Determine if we have this prefix in the tree where prefix is a tuple of
the parts in the path.
"""
return prefix in self.cache
def remove(self, prefix, name):
"""
Remove a path from the tree
prefix is a tuple of the parts in the dirpath
name is a string representing the name of the file itself.
Any empty folders from the point of the file backwards to the root of
the tree is removed.
"""
tree = self.cache.get(prefix, empty)
if tree is empty:
return False
if name not in tree.files:
return False
tree.files.remove(name)
self.remove_folder(tree, list(prefix))
return True
def remove_folder(self, tree, prefix):
"""
Used to remove any empty folders
If this folder is empty then it is removed. If the parent is empty as a
result, then the parent is also removed, and so on.
"""
while True:
child = tree
tree = tree.parent
if not child.folders and not child.files:
del self.cache[tuple(prefix)]
if tree:
del tree.folders[prefix.pop()]
if not tree or tree.folders or tree.files:
break
|
delfick/gitmit
|
gitmit/prefix_tree.py
|
PrefixTree.remove
|
python
|
def remove(self, prefix, name):
tree = self.cache.get(prefix, empty)
if tree is empty:
return False
if name not in tree.files:
return False
tree.files.remove(name)
self.remove_folder(tree, list(prefix))
return True
|
Remove a path from the tree
prefix is a tuple of the parts in the dirpath
name is a string representing the name of the file itself.
Any empty folders from the point of the file backwards to the root of
the tree is removed.
|
train
|
https://github.com/delfick/gitmit/blob/ae0aef14a06b25ad2811f8f47cc97e68a0910eae/gitmit/prefix_tree.py#L82-L103
|
[
"def remove_folder(self, tree, prefix):\n \"\"\"\n Used to remove any empty folders\n\n If this folder is empty then it is removed. If the parent is empty as a\n result, then the parent is also removed, and so on.\n \"\"\"\n while True:\n child = tree\n tree = tree.parent\n\n if not child.folders and not child.files:\n del self.cache[tuple(prefix)]\n if tree:\n del tree.folders[prefix.pop()]\n\n if not tree or tree.folders or tree.files:\n break\n"
] |
class PrefixTree(object):
"""
Holds a linked list like structure for traversal and a cache of
("path", "to", "folder") to the tree representing that folder.
Each Tree is an instance of TreeItem, as initialised after calling
PrefixTree#fill.
The idea is you fill the tree once and then remove files one at a time until
all the files are gone.
"""
def __init__(self):
self.tree = TreeItem(name=(), folders={}, files=set(), parent=None)
self.cache = {}
def __bool__(self):
"""Check the cache to see if there are any folders left with contents"""
return bool(self.cache)
__nonzero__ = __bool__
def __contains__(self, prefix):
"""
Determine if we have this prefix in the tree where prefix is a tuple of
the parts in the path.
"""
return prefix in self.cache
def fill(self, paths):
"""
Initialise the tree.
paths is a list of strings where each string is the relative path to some
file.
"""
for path in paths:
tree = self.tree
parts = tuple(path.split('/'))
dir_parts = parts[:-1]
built = ()
for part in dir_parts:
self.cache[built] = tree
built += (part, )
parent = tree
tree = parent.folders.get(part, empty)
if tree is empty:
tree = parent.folders[part] = TreeItem(name=built, folders={}, files=set(), parent=parent)
self.cache[dir_parts] = tree
tree.files.add(parts[-1])
def remove_folder(self, tree, prefix):
"""
Used to remove any empty folders
If this folder is empty then it is removed. If the parent is empty as a
result, then the parent is also removed, and so on.
"""
while True:
child = tree
tree = tree.parent
if not child.folders and not child.files:
del self.cache[tuple(prefix)]
if tree:
del tree.folders[prefix.pop()]
if not tree or tree.folders or tree.files:
break
|
delfick/gitmit
|
gitmit/prefix_tree.py
|
PrefixTree.remove_folder
|
python
|
def remove_folder(self, tree, prefix):
while True:
child = tree
tree = tree.parent
if not child.folders and not child.files:
del self.cache[tuple(prefix)]
if tree:
del tree.folders[prefix.pop()]
if not tree or tree.folders or tree.files:
break
|
Used to remove any empty folders
If this folder is empty then it is removed. If the parent is empty as a
result, then the parent is also removed, and so on.
|
train
|
https://github.com/delfick/gitmit/blob/ae0aef14a06b25ad2811f8f47cc97e68a0910eae/gitmit/prefix_tree.py#L105-L122
| null |
class PrefixTree(object):
"""
Holds a linked list like structure for traversal and a cache of
("path", "to", "folder") to the tree representing that folder.
Each Tree is an instance of TreeItem, as initialised after calling
PrefixTree#fill.
The idea is you fill the tree once and then remove files one at a time until
all the files are gone.
"""
def __init__(self):
self.tree = TreeItem(name=(), folders={}, files=set(), parent=None)
self.cache = {}
def __bool__(self):
"""Check the cache to see if there are any folders left with contents"""
return bool(self.cache)
__nonzero__ = __bool__
def __contains__(self, prefix):
"""
Determine if we have this prefix in the tree where prefix is a tuple of
the parts in the path.
"""
return prefix in self.cache
def fill(self, paths):
"""
Initialise the tree.
paths is a list of strings where each string is the relative path to some
file.
"""
for path in paths:
tree = self.tree
parts = tuple(path.split('/'))
dir_parts = parts[:-1]
built = ()
for part in dir_parts:
self.cache[built] = tree
built += (part, )
parent = tree
tree = parent.folders.get(part, empty)
if tree is empty:
tree = parent.folders[part] = TreeItem(name=built, folders={}, files=set(), parent=parent)
self.cache[dir_parts] = tree
tree.files.add(parts[-1])
def remove(self, prefix, name):
"""
Remove a path from the tree
prefix is a tuple of the parts in the dirpath
name is a string representing the name of the file itself.
Any empty folders from the point of the file backwards to the root of
the tree is removed.
"""
tree = self.cache.get(prefix, empty)
if tree is empty:
return False
if name not in tree.files:
return False
tree.files.remove(name)
self.remove_folder(tree, list(prefix))
return True
|
emencia/emencia-django-forum
|
forum/models.py
|
Category.get_last_thread
|
python
|
def get_last_thread(self):
cache_key = '_get_last_thread_cache'
if not hasattr(self, cache_key):
item = None
res = self.thread_set.filter(visible=True).order_by('-modified')[0:1]
if len(res)>0:
item = res[0]
setattr(self, cache_key, item)
return getattr(self, cache_key)
|
Return the last modified thread
|
train
|
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/models.py#L31-L42
| null |
class Category(models.Model):
"""
Category
"""
created = models.DateTimeField(_("created"), auto_now_add=True)
slug = models.SlugField(_('slug'), unique=True, max_length=50)
order = models.SmallIntegerField(_('order'))
title = models.CharField(_("title"), blank=False, max_length=255, unique=True)
description = models.TextField(_("description"), blank=True)
visible = models.BooleanField(_('visible'), default=True, help_text=_("Unvisible category won't be visible nor its threads."))
def __unicode__(self):
return self.title
@models.permalink
def get_absolute_url(self):
return ('forum:category-details', [self.slug])
class Meta:
verbose_name = _("Category")
verbose_name_plural = _("Categories")
permissions = (
('moderate_category', 'Moderate category'),
)
|
emencia/emencia-django-forum
|
forum/models.py
|
Thread.get_first_post
|
python
|
def get_first_post(self):
cache_key = '_get_starter_cache'
if not hasattr(self, cache_key):
item = None
res = self.post_set.all().order_by('created')[0:1]
if len(res)>0:
item = res[0]
setattr(self, cache_key, item)
return getattr(self, cache_key)
|
Retourne le premier post en date, celui créé lors de la création du fil
|
train
|
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/models.py#L73-L84
| null |
class Thread(models.Model):
"""
Thread
"""
created = models.DateTimeField(_("created"), editable=False, null=True, blank=True)
modified = models.DateTimeField(_("modified"), editable=False, null=True, blank=True, help_text=_("This only filled when a message is added."))
author = models.ForeignKey(User, verbose_name=_("author"), blank=False)
category = models.ForeignKey(Category, verbose_name=_("category"))
subject = models.CharField(_("subject"), max_length=150)
closed = models.BooleanField(_("closed"), default=False)
sticky = models.BooleanField(_("sticky"), default=False, help_text=_("Sticky thread will be on top of thread list."))
announce = models.BooleanField(_("announce"), default=False, help_text=_("Announce thread can be displayed out of the forum"))
visible = models.BooleanField(_('visible'), default=True, help_text=_("Unvisible threads won't be visible nor its messages."))
def __unicode__(self):
return self.subject
@models.permalink
def get_absolute_url(self):
return ('forum:thread-details', [self.category.slug, self.id])
def get_last_post(self):
"""
Retourne le dernier post en date
"""
cache_key = '_get_last_poster_cache'
if not hasattr(self, cache_key):
item = None
res = self.post_set.all().order_by('-created')[0:1]
if len(res)>0:
item = res[0]
setattr(self, cache_key, item)
return getattr(self, cache_key)
def save(self, *args, **kwargs):
"""
Fill 'created' and 'modified' attributes on first create
"""
if self.created is None:
self.created = tz_now()
if self.modified is None:
self.modified = self.created
super(Thread, self).save(*args, **kwargs)
class Meta:
verbose_name = _("Thread")
verbose_name_plural = _("Threads")
permissions = (
('moderate_thread', 'Moderate thread'),
)
|
emencia/emencia-django-forum
|
forum/models.py
|
Thread.save
|
python
|
def save(self, *args, **kwargs):
if self.created is None:
self.created = tz_now()
if self.modified is None:
self.modified = self.created
super(Thread, self).save(*args, **kwargs)
|
Fill 'created' and 'modified' attributes on first create
|
train
|
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/models.py#L99-L109
| null |
class Thread(models.Model):
"""
Thread
"""
created = models.DateTimeField(_("created"), editable=False, null=True, blank=True)
modified = models.DateTimeField(_("modified"), editable=False, null=True, blank=True, help_text=_("This only filled when a message is added."))
author = models.ForeignKey(User, verbose_name=_("author"), blank=False)
category = models.ForeignKey(Category, verbose_name=_("category"))
subject = models.CharField(_("subject"), max_length=150)
closed = models.BooleanField(_("closed"), default=False)
sticky = models.BooleanField(_("sticky"), default=False, help_text=_("Sticky thread will be on top of thread list."))
announce = models.BooleanField(_("announce"), default=False, help_text=_("Announce thread can be displayed out of the forum"))
visible = models.BooleanField(_('visible'), default=True, help_text=_("Unvisible threads won't be visible nor its messages."))
def __unicode__(self):
return self.subject
@models.permalink
def get_absolute_url(self):
return ('forum:thread-details', [self.category.slug, self.id])
def get_first_post(self):
"""
Retourne le premier post en date, celui créé lors de la création du fil
"""
cache_key = '_get_starter_cache'
if not hasattr(self, cache_key):
item = None
res = self.post_set.all().order_by('created')[0:1]
if len(res)>0:
item = res[0]
setattr(self, cache_key, item)
return getattr(self, cache_key)
def get_last_post(self):
"""
Retourne le dernier post en date
"""
cache_key = '_get_last_poster_cache'
if not hasattr(self, cache_key):
item = None
res = self.post_set.all().order_by('-created')[0:1]
if len(res)>0:
item = res[0]
setattr(self, cache_key, item)
return getattr(self, cache_key)
def save(self, *args, **kwargs):
"""
Fill 'created' and 'modified' attributes on first create
"""
if self.created is None:
self.created = tz_now()
if self.modified is None:
self.modified = self.created
super(Thread, self).save(*args, **kwargs)
class Meta:
verbose_name = _("Thread")
verbose_name_plural = _("Threads")
permissions = (
('moderate_thread', 'Moderate thread'),
)
|
emencia/emencia-django-forum
|
forum/models.py
|
Post.get_paginated_urlargs
|
python
|
def get_paginated_urlargs(self):
position = self.get_paginated_position()
if not position:
return '#forum-post-{0}'.format(self.id)
return '?page={0}#forum-post-{1}'.format(position, self.id)
|
Return url arguments to retrieve the Post in a paginated list
|
train
|
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/models.py#L150-L159
|
[
"def get_paginated_position(self):\n \"\"\"\n Return the Post position in the paginated list\n \"\"\"\n # If Post list is not paginated\n if not settings.FORUM_THREAD_DETAIL_PAGINATE:\n return 0\n\n count = Post.objects.filter(thread=self.thread_id, created__lt=self.created).count() + 1\n\n return int(math.ceil(count / float(settings.FORUM_THREAD_DETAIL_PAGINATE)))\n"
] |
class Post(models.Model):
"""
Thread message
"""
author = models.ForeignKey(User, verbose_name=_("author"), blank=False)
thread = models.ForeignKey(Thread, verbose_name=_("thread"))
created = models.DateTimeField(_("created"), editable=False, blank=True, null=True)
modified = models.DateTimeField(_("modified"), editable=False, blank=True, null=True)
text = models.TextField(_('message'))
def __unicode__(self):
return _("{0}: message #{1}").format(self.thread.subject, self.id)
def get_absolute_url(self):
return u"{0}{1}".format(self.thread.get_absolute_url(), self.get_paginated_urlargs())
def get_paginated_urlargs(self):
"""
Return url arguments to retrieve the Post in a paginated list
"""
position = self.get_paginated_position()
if not position:
return '#forum-post-{0}'.format(self.id)
return '?page={0}#forum-post-{1}'.format(position, self.id)
def get_paginated_position(self):
"""
Return the Post position in the paginated list
"""
# If Post list is not paginated
if not settings.FORUM_THREAD_DETAIL_PAGINATE:
return 0
count = Post.objects.filter(thread=self.thread_id, created__lt=self.created).count() + 1
return int(math.ceil(count / float(settings.FORUM_THREAD_DETAIL_PAGINATE)))
def save(self, *args, **kwargs):
"""
Fill 'created' and 'modified' attributes on first create and allways update
the thread's 'modified' attribute
"""
edited = not(self.created is None)
if self.created is None:
self.created = tz_now()
# Update de la date de modif. du message
if self.modified is None:
self.modified = self.created
else:
self.modified = tz_now()
super(Post, self).save(*args, **kwargs)
# Update de la date de modif. du thread lors de la création du message
if not edited:
self.thread.modified = self.created
self.thread.save()
class Meta:
verbose_name = _("Message")
verbose_name_plural = _("Messages")
|
emencia/emencia-django-forum
|
forum/models.py
|
Post.get_paginated_position
|
python
|
def get_paginated_position(self):
# If Post list is not paginated
if not settings.FORUM_THREAD_DETAIL_PAGINATE:
return 0
count = Post.objects.filter(thread=self.thread_id, created__lt=self.created).count() + 1
return int(math.ceil(count / float(settings.FORUM_THREAD_DETAIL_PAGINATE)))
|
Return the Post position in the paginated list
|
train
|
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/models.py#L161-L171
| null |
class Post(models.Model):
"""
Thread message
"""
author = models.ForeignKey(User, verbose_name=_("author"), blank=False)
thread = models.ForeignKey(Thread, verbose_name=_("thread"))
created = models.DateTimeField(_("created"), editable=False, blank=True, null=True)
modified = models.DateTimeField(_("modified"), editable=False, blank=True, null=True)
text = models.TextField(_('message'))
def __unicode__(self):
return _("{0}: message #{1}").format(self.thread.subject, self.id)
def get_absolute_url(self):
return u"{0}{1}".format(self.thread.get_absolute_url(), self.get_paginated_urlargs())
def get_paginated_urlargs(self):
"""
Return url arguments to retrieve the Post in a paginated list
"""
position = self.get_paginated_position()
if not position:
return '#forum-post-{0}'.format(self.id)
return '?page={0}#forum-post-{1}'.format(position, self.id)
def get_paginated_position(self):
"""
Return the Post position in the paginated list
"""
# If Post list is not paginated
if not settings.FORUM_THREAD_DETAIL_PAGINATE:
return 0
count = Post.objects.filter(thread=self.thread_id, created__lt=self.created).count() + 1
return int(math.ceil(count / float(settings.FORUM_THREAD_DETAIL_PAGINATE)))
def save(self, *args, **kwargs):
"""
Fill 'created' and 'modified' attributes on first create and allways update
the thread's 'modified' attribute
"""
edited = not(self.created is None)
if self.created is None:
self.created = tz_now()
# Update de la date de modif. du message
if self.modified is None:
self.modified = self.created
else:
self.modified = tz_now()
super(Post, self).save(*args, **kwargs)
# Update de la date de modif. du thread lors de la création du message
if not edited:
self.thread.modified = self.created
self.thread.save()
class Meta:
verbose_name = _("Message")
verbose_name_plural = _("Messages")
|
emencia/emencia-django-forum
|
forum/models.py
|
Post.save
|
python
|
def save(self, *args, **kwargs):
edited = not(self.created is None)
if self.created is None:
self.created = tz_now()
# Update de la date de modif. du message
if self.modified is None:
self.modified = self.created
else:
self.modified = tz_now()
super(Post, self).save(*args, **kwargs)
# Update de la date de modif. du thread lors de la création du message
if not edited:
self.thread.modified = self.created
self.thread.save()
|
Fill 'created' and 'modified' attributes on first create and allways update
the thread's 'modified' attribute
|
train
|
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/models.py#L173-L194
| null |
class Post(models.Model):
"""
Thread message
"""
author = models.ForeignKey(User, verbose_name=_("author"), blank=False)
thread = models.ForeignKey(Thread, verbose_name=_("thread"))
created = models.DateTimeField(_("created"), editable=False, blank=True, null=True)
modified = models.DateTimeField(_("modified"), editable=False, blank=True, null=True)
text = models.TextField(_('message'))
def __unicode__(self):
return _("{0}: message #{1}").format(self.thread.subject, self.id)
def get_absolute_url(self):
return u"{0}{1}".format(self.thread.get_absolute_url(), self.get_paginated_urlargs())
def get_paginated_urlargs(self):
"""
Return url arguments to retrieve the Post in a paginated list
"""
position = self.get_paginated_position()
if not position:
return '#forum-post-{0}'.format(self.id)
return '?page={0}#forum-post-{1}'.format(position, self.id)
def get_paginated_position(self):
"""
Return the Post position in the paginated list
"""
# If Post list is not paginated
if not settings.FORUM_THREAD_DETAIL_PAGINATE:
return 0
count = Post.objects.filter(thread=self.thread_id, created__lt=self.created).count() + 1
return int(math.ceil(count / float(settings.FORUM_THREAD_DETAIL_PAGINATE)))
def save(self, *args, **kwargs):
"""
Fill 'created' and 'modified' attributes on first create and allways update
the thread's 'modified' attribute
"""
edited = not(self.created is None)
if self.created is None:
self.created = tz_now()
# Update de la date de modif. du message
if self.modified is None:
self.modified = self.created
else:
self.modified = tz_now()
super(Post, self).save(*args, **kwargs)
# Update de la date de modif. du thread lors de la création du message
if not edited:
self.thread.modified = self.created
self.thread.save()
class Meta:
verbose_name = _("Message")
verbose_name_plural = _("Messages")
|
emencia/emencia-django-forum
|
forum/forms/crispies.py
|
category_helper
|
python
|
def category_helper(form_tag=True):
helper = FormHelper()
helper.form_action = '.'
helper.attrs = {'data_abide': ''}
helper.form_tag = form_tag
helper.layout = Layout(
Row(
Column(
'title',
css_class='small-12'
),
),
Row(
Column(
'slug',
css_class='small-12 medium-10'
),
Column(
'order',
css_class='small-12 medium-2'
),
),
Row(
Column(
'description',
css_class='small-12'
),
),
Row(
Column(
'visible',
css_class='small-12'
),
),
ButtonHolderPanel(
Submit('submit', _('Submit')),
css_class='text-right',
),
)
return helper
|
Category's form layout helper
|
train
|
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/forms/crispies.py#L9-L53
| null |
"""
Crispy forms layouts
"""
from django.utils.translation import ugettext_lazy as _
from crispy_forms.helper import FormHelper
from crispy_forms_foundation.layout import Layout, Row, Column, ButtonHolderPanel, Submit
def category_helper(form_tag=True):
"""
Category's form layout helper
"""
helper = FormHelper()
helper.form_action = '.'
helper.attrs = {'data_abide': ''}
helper.form_tag = form_tag
helper.layout = Layout(
Row(
Column(
'title',
css_class='small-12'
),
),
Row(
Column(
'slug',
css_class='small-12 medium-10'
),
Column(
'order',
css_class='small-12 medium-2'
),
),
Row(
Column(
'description',
css_class='small-12'
),
),
Row(
Column(
'visible',
css_class='small-12'
),
),
ButtonHolderPanel(
Submit('submit', _('Submit')),
css_class='text-right',
),
)
return helper
def thread_helper(form_tag=True, edit_mode=False, for_moderator=False):
"""
Thread's form layout helper
"""
helper = FormHelper()
helper.form_action = '.'
helper.attrs = {'data_abide': ''}
helper.form_tag = form_tag
fieldsets = [
Row(
Column(
'subject',
css_class='small-12'
),
),
]
# Category field only in edit form
if edit_mode:
fieldsets.append(
Row(
Column(
'category',
css_class='small-12'
),
),
)
if for_moderator:
fieldsets.append(
Row(
Column(
'sticky',
css_class='small-12 medium-4'
),
Column(
'announce',
css_class='small-12 medium-4'
),
Column(
'closed',
css_class='small-12 medium-4'
),
),
)
# First message is not in edit form
if not edit_mode:
fieldsets.append(
Row(
Column(
'text',
css_class='small-12'
),
),
)
if for_moderator:
fieldsets.append(
Row(
Column(
'visible',
css_class='small-12'
),
),
)
# Threadwatch option is not in edit form
if not edit_mode:
fieldsets.append(
Row(
Column(
'threadwatch',
css_class='small-12'
),
),
)
fieldsets = fieldsets+[
ButtonHolderPanel(
Submit('submit', _('Submit')),
css_class='text-right',
),
]
helper.layout = Layout(*fieldsets)
return helper
def post_helper(form_tag=True, edit_mode=False):
"""
Post's form layout helper
"""
helper = FormHelper()
helper.form_action = '.'
helper.attrs = {'data_abide': ''}
helper.form_tag = form_tag
fieldsets = [
Row(
Column(
'text',
css_class='small-12'
),
),
]
# Threadwatch option is not in edit form
if not edit_mode:
fieldsets.append(
Row(
Column(
'threadwatch',
css_class='small-12'
),
),
)
fieldsets = fieldsets+[
ButtonHolderPanel(
Submit('submit', _('Submit')),
css_class='text-right',
),
]
helper.layout = Layout(*fieldsets)
return helper
def post_edit_helper(form_tag=True):
return post_helper(form_tag=form_tag, edit_mode=True)
def post_delete_helper(form_tag=True):
"""
Message's delete form layout helper
"""
helper = FormHelper()
helper.form_action = '.'
helper.attrs = {'data_abide': ''}
helper.form_tag = form_tag
helper.layout = Layout(
ButtonHolderPanel(
Row(
Column(
'confirm',
css_class='small-12 medium-8'
),
Column(
Submit('submit', _('Submit')),
css_class='small-12 medium-4 text-right'
),
),
),
)
return helper
|
emencia/emencia-django-forum
|
forum/forms/crispies.py
|
thread_helper
|
python
|
def thread_helper(form_tag=True, edit_mode=False, for_moderator=False):
    """
    Thread's form layout helper.

    :param form_tag: forwarded to ``FormHelper.form_tag``.
    :param edit_mode: when True the category field is included and the
        first message ('text') and threadwatch rows are omitted.
    :param for_moderator: when True the moderator-only fields
        (sticky/announce/closed and visible) are included.
    :return: a configured ``FormHelper`` with the assembled ``Layout``.
    """
    helper = FormHelper()
    helper.form_action = '.'
    helper.attrs = {'data_abide': ''}
    helper.form_tag = form_tag
    fieldsets = [
        Row(
            Column(
                'subject',
                css_class='small-12'
            ),
        ),
    ]
    # Category field only in edit form
    if edit_mode:
        fieldsets.append(
            Row(
                Column(
                    'category',
                    css_class='small-12'
                ),
            ),
        )
    # Moderator-only thread flags, shown as three side-by-side columns
    if for_moderator:
        fieldsets.append(
            Row(
                Column(
                    'sticky',
                    css_class='small-12 medium-4'
                ),
                Column(
                    'announce',
                    css_class='small-12 medium-4'
                ),
                Column(
                    'closed',
                    css_class='small-12 medium-4'
                ),
            ),
        )
    # First message is not in edit form
    if not edit_mode:
        fieldsets.append(
            Row(
                Column(
                    'text',
                    css_class='small-12'
                ),
            ),
        )
    # Moderator-only visibility toggle
    if for_moderator:
        fieldsets.append(
            Row(
                Column(
                    'visible',
                    css_class='small-12'
                ),
            ),
        )
    # Threadwatch option is not in edit form
    if not edit_mode:
        fieldsets.append(
            Row(
                Column(
                    'threadwatch',
                    css_class='small-12'
                ),
            ),
        )
    fieldsets = fieldsets+[
        ButtonHolderPanel(
            Submit('submit', _('Submit')),
            css_class='text-right',
        ),
    ]
    helper.layout = Layout(*fieldsets)
    return helper
|
Thread's form layout helper
|
train
|
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/forms/crispies.py#L57-L145
| null |
"""
Crispy forms layouts
"""
from django.utils.translation import ugettext_lazy as _
from crispy_forms.helper import FormHelper
from crispy_forms_foundation.layout import Layout, Row, Column, ButtonHolderPanel, Submit
def category_helper(form_tag=True):
    """
    Category's form layout helper.

    :param form_tag: forwarded to ``FormHelper.form_tag``.
    :return: a configured ``FormHelper`` with the assembled ``Layout``.
    """
    helper = FormHelper()
    helper.form_action = '.'
    helper.attrs = {'data_abide': ''}
    helper.form_tag = form_tag
    helper.layout = Layout(
        Row(Column('title', css_class='small-12')),
        Row(
            Column('slug', css_class='small-12 medium-10'),
            Column('order', css_class='small-12 medium-2'),
        ),
        Row(Column('description', css_class='small-12')),
        Row(Column('visible', css_class='small-12')),
        ButtonHolderPanel(
            Submit('submit', _('Submit')),
            css_class='text-right',
        ),
    )
    return helper
def thread_helper(form_tag=True, edit_mode=False, for_moderator=False):
"""
Thread's form layout helper
"""
helper = FormHelper()
helper.form_action = '.'
helper.attrs = {'data_abide': ''}
helper.form_tag = form_tag
fieldsets = [
Row(
Column(
'subject',
css_class='small-12'
),
),
]
# Category field only in edit form
if edit_mode:
fieldsets.append(
Row(
Column(
'category',
css_class='small-12'
),
),
)
if for_moderator:
fieldsets.append(
Row(
Column(
'sticky',
css_class='small-12 medium-4'
),
Column(
'announce',
css_class='small-12 medium-4'
),
Column(
'closed',
css_class='small-12 medium-4'
),
),
)
# First message is not in edit form
if not edit_mode:
fieldsets.append(
Row(
Column(
'text',
css_class='small-12'
),
),
)
if for_moderator:
fieldsets.append(
Row(
Column(
'visible',
css_class='small-12'
),
),
)
# Threadwatch option is not in edit form
if not edit_mode:
fieldsets.append(
Row(
Column(
'threadwatch',
css_class='small-12'
),
),
)
fieldsets = fieldsets+[
ButtonHolderPanel(
Submit('submit', _('Submit')),
css_class='text-right',
),
]
helper.layout = Layout(*fieldsets)
return helper
def post_helper(form_tag=True, edit_mode=False):
"""
Post's form layout helper
"""
helper = FormHelper()
helper.form_action = '.'
helper.attrs = {'data_abide': ''}
helper.form_tag = form_tag
fieldsets = [
Row(
Column(
'text',
css_class='small-12'
),
),
]
# Threadwatch option is not in edit form
if not edit_mode:
fieldsets.append(
Row(
Column(
'threadwatch',
css_class='small-12'
),
),
)
fieldsets = fieldsets+[
ButtonHolderPanel(
Submit('submit', _('Submit')),
css_class='text-right',
),
]
helper.layout = Layout(*fieldsets)
return helper
def post_edit_helper(form_tag=True):
return post_helper(form_tag=form_tag, edit_mode=True)
def post_delete_helper(form_tag=True):
"""
Message's delete form layout helper
"""
helper = FormHelper()
helper.form_action = '.'
helper.attrs = {'data_abide': ''}
helper.form_tag = form_tag
helper.layout = Layout(
ButtonHolderPanel(
Row(
Column(
'confirm',
css_class='small-12 medium-8'
),
Column(
Submit('submit', _('Submit')),
css_class='small-12 medium-4 text-right'
),
),
),
)
return helper
|
emencia/emencia-django-forum
|
forum/forms/crispies.py
|
post_helper
|
python
|
def post_helper(form_tag=True, edit_mode=False):
helper = FormHelper()
helper.form_action = '.'
helper.attrs = {'data_abide': ''}
helper.form_tag = form_tag
fieldsets = [
Row(
Column(
'text',
css_class='small-12'
),
),
]
# Threadwatch option is not in edit form
if not edit_mode:
fieldsets.append(
Row(
Column(
'threadwatch',
css_class='small-12'
),
),
)
fieldsets = fieldsets+[
ButtonHolderPanel(
Submit('submit', _('Submit')),
css_class='text-right',
),
]
helper.layout = Layout(*fieldsets)
return helper
|
Post's form layout helper
|
train
|
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/forms/crispies.py#L148-L186
| null |
"""
Crispy forms layouts
"""
from django.utils.translation import ugettext_lazy as _
from crispy_forms.helper import FormHelper
from crispy_forms_foundation.layout import Layout, Row, Column, ButtonHolderPanel, Submit
def category_helper(form_tag=True):
"""
Category's form layout helper
"""
helper = FormHelper()
helper.form_action = '.'
helper.attrs = {'data_abide': ''}
helper.form_tag = form_tag
helper.layout = Layout(
Row(
Column(
'title',
css_class='small-12'
),
),
Row(
Column(
'slug',
css_class='small-12 medium-10'
),
Column(
'order',
css_class='small-12 medium-2'
),
),
Row(
Column(
'description',
css_class='small-12'
),
),
Row(
Column(
'visible',
css_class='small-12'
),
),
ButtonHolderPanel(
Submit('submit', _('Submit')),
css_class='text-right',
),
)
return helper
def thread_helper(form_tag=True, edit_mode=False, for_moderator=False):
"""
Thread's form layout helper
"""
helper = FormHelper()
helper.form_action = '.'
helper.attrs = {'data_abide': ''}
helper.form_tag = form_tag
fieldsets = [
Row(
Column(
'subject',
css_class='small-12'
),
),
]
# Category field only in edit form
if edit_mode:
fieldsets.append(
Row(
Column(
'category',
css_class='small-12'
),
),
)
if for_moderator:
fieldsets.append(
Row(
Column(
'sticky',
css_class='small-12 medium-4'
),
Column(
'announce',
css_class='small-12 medium-4'
),
Column(
'closed',
css_class='small-12 medium-4'
),
),
)
# First message is not in edit form
if not edit_mode:
fieldsets.append(
Row(
Column(
'text',
css_class='small-12'
),
),
)
if for_moderator:
fieldsets.append(
Row(
Column(
'visible',
css_class='small-12'
),
),
)
# Threadwatch option is not in edit form
if not edit_mode:
fieldsets.append(
Row(
Column(
'threadwatch',
css_class='small-12'
),
),
)
fieldsets = fieldsets+[
ButtonHolderPanel(
Submit('submit', _('Submit')),
css_class='text-right',
),
]
helper.layout = Layout(*fieldsets)
return helper
def post_helper(form_tag=True, edit_mode=False):
"""
Post's form layout helper
"""
helper = FormHelper()
helper.form_action = '.'
helper.attrs = {'data_abide': ''}
helper.form_tag = form_tag
fieldsets = [
Row(
Column(
'text',
css_class='small-12'
),
),
]
# Threadwatch option is not in edit form
if not edit_mode:
fieldsets.append(
Row(
Column(
'threadwatch',
css_class='small-12'
),
),
)
fieldsets = fieldsets+[
ButtonHolderPanel(
Submit('submit', _('Submit')),
css_class='text-right',
),
]
helper.layout = Layout(*fieldsets)
return helper
def post_edit_helper(form_tag=True):
return post_helper(form_tag=form_tag, edit_mode=True)
def post_delete_helper(form_tag=True):
"""
Message's delete form layout helper
"""
helper = FormHelper()
helper.form_action = '.'
helper.attrs = {'data_abide': ''}
helper.form_tag = form_tag
helper.layout = Layout(
ButtonHolderPanel(
Row(
Column(
'confirm',
css_class='small-12 medium-8'
),
Column(
Submit('submit', _('Submit')),
css_class='small-12 medium-4 text-right'
),
),
),
)
return helper
|
emencia/emencia-django-forum
|
forum/forms/crispies.py
|
post_delete_helper
|
python
|
def post_delete_helper(form_tag=True):
helper = FormHelper()
helper.form_action = '.'
helper.attrs = {'data_abide': ''}
helper.form_tag = form_tag
helper.layout = Layout(
ButtonHolderPanel(
Row(
Column(
'confirm',
css_class='small-12 medium-8'
),
Column(
Submit('submit', _('Submit')),
css_class='small-12 medium-4 text-right'
),
),
),
)
return helper
|
Message's delete form layout helper
|
train
|
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/forms/crispies.py#L191-L215
| null |
"""
Crispy forms layouts
"""
from django.utils.translation import ugettext_lazy as _
from crispy_forms.helper import FormHelper
from crispy_forms_foundation.layout import Layout, Row, Column, ButtonHolderPanel, Submit
def category_helper(form_tag=True):
"""
Category's form layout helper
"""
helper = FormHelper()
helper.form_action = '.'
helper.attrs = {'data_abide': ''}
helper.form_tag = form_tag
helper.layout = Layout(
Row(
Column(
'title',
css_class='small-12'
),
),
Row(
Column(
'slug',
css_class='small-12 medium-10'
),
Column(
'order',
css_class='small-12 medium-2'
),
),
Row(
Column(
'description',
css_class='small-12'
),
),
Row(
Column(
'visible',
css_class='small-12'
),
),
ButtonHolderPanel(
Submit('submit', _('Submit')),
css_class='text-right',
),
)
return helper
def thread_helper(form_tag=True, edit_mode=False, for_moderator=False):
"""
Thread's form layout helper
"""
helper = FormHelper()
helper.form_action = '.'
helper.attrs = {'data_abide': ''}
helper.form_tag = form_tag
fieldsets = [
Row(
Column(
'subject',
css_class='small-12'
),
),
]
# Category field only in edit form
if edit_mode:
fieldsets.append(
Row(
Column(
'category',
css_class='small-12'
),
),
)
if for_moderator:
fieldsets.append(
Row(
Column(
'sticky',
css_class='small-12 medium-4'
),
Column(
'announce',
css_class='small-12 medium-4'
),
Column(
'closed',
css_class='small-12 medium-4'
),
),
)
# First message is not in edit form
if not edit_mode:
fieldsets.append(
Row(
Column(
'text',
css_class='small-12'
),
),
)
if for_moderator:
fieldsets.append(
Row(
Column(
'visible',
css_class='small-12'
),
),
)
# Threadwatch option is not in edit form
if not edit_mode:
fieldsets.append(
Row(
Column(
'threadwatch',
css_class='small-12'
),
),
)
fieldsets = fieldsets+[
ButtonHolderPanel(
Submit('submit', _('Submit')),
css_class='text-right',
),
]
helper.layout = Layout(*fieldsets)
return helper
def post_helper(form_tag=True, edit_mode=False):
"""
Post's form layout helper
"""
helper = FormHelper()
helper.form_action = '.'
helper.attrs = {'data_abide': ''}
helper.form_tag = form_tag
fieldsets = [
Row(
Column(
'text',
css_class='small-12'
),
),
]
# Threadwatch option is not in edit form
if not edit_mode:
fieldsets.append(
Row(
Column(
'threadwatch',
css_class='small-12'
),
),
)
fieldsets = fieldsets+[
ButtonHolderPanel(
Submit('submit', _('Submit')),
css_class='text-right',
),
]
helper.layout = Layout(*fieldsets)
return helper
def post_edit_helper(form_tag=True):
return post_helper(form_tag=form_tag, edit_mode=True)
def post_delete_helper(form_tag=True):
"""
Message's delete form layout helper
"""
helper = FormHelper()
helper.form_action = '.'
helper.attrs = {'data_abide': ''}
helper.form_tag = form_tag
helper.layout = Layout(
ButtonHolderPanel(
Row(
Column(
'confirm',
css_class='small-12 medium-8'
),
Column(
Submit('submit', _('Submit')),
css_class='small-12 medium-4 text-right'
),
),
),
)
return helper
|
emencia/emencia-django-forum
|
forum/forms/thread.py
|
ThreadCreateForm.clean_text
|
python
|
def clean_text(self):
text = self.cleaned_data.get("text")
validation_helper = safe_import_module(settings.FORUM_TEXT_VALIDATOR_HELPER_PATH)
if validation_helper is not None:
return validation_helper(self, text)
else:
return text
|
Text content validation
|
train
|
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/forms/thread.py#L43-L52
|
[
"def safe_import_module(path, default=None):\n \"\"\"\n Try to import the specified module from the given Python path\n\n @path is a string containing a Python path to the wanted module, @default is \n an object to return if import fails, it can be None, a callable or whatever you need.\n\n Return a object or None\n \"\"\"\n if path is None:\n return default\n\n dot = path.rindex('.')\n module_name = path[:dot]\n class_name = path[dot + 1:]\n try:\n _class = getattr(import_module(module_name), class_name)\n return _class\n except (ImportError, AttributeError):\n warnings.warn('%s cannot be imported' % path, RuntimeWarning)\n return default\n"
] |
class ThreadCreateForm(CrispyFormMixin, forms.ModelForm):
"""
Thread's create form
"""
crispy_form_helper_path = 'forum.forms.crispies.thread_helper'
crispy_form_helper_kwargs = {}
text = forms.CharField(label=_('Message'), required=True, widget=forms.Textarea(attrs={'cols':'50'}))
threadwatch = forms.BooleanField(label=_("Watch this thread"), initial=settings.FORUM_DEFAULT_THREADWATCH_CHECKBOX, required=False, help_text=_("You will receive an email notification for each new post in this thread. You can disable it in the thread detail if needed."))
def __init__(self, *args, **kwargs):
self.author = kwargs.pop("user", None)
self.form_for_moderator = kwargs.pop("for_moderator", False)
# Hide some managers only fields from the crispy layout
self.crispy_form_helper_kwargs['for_moderator'] = self.form_for_moderator
super(ThreadCreateForm, self).__init__(*args, **kwargs)
super(forms.ModelForm, self).__init__(*args, **kwargs)
# Set the form field for Post.text
field_helper = safe_import_module(settings.FORUM_TEXT_FIELD_HELPER_PATH)
if field_helper is not None:
self.fields['text'] = field_helper(self, **{'label':_('message'), 'required':True})
# Remove some managers only fields from the form
if not self.form_for_moderator:
for k in ('closed','sticky','announce','visible'):
del self.fields[k]
def save(self):
# Crée le nouveau fil
thread_instance = self.category_instance.thread_set.create(
author=self.author,
subject=self.cleaned_data["subject"],
closed=self.cleaned_data.get("closed", False),
sticky=self.cleaned_data.get("sticky", False),
announce=self.cleaned_data.get("announce", False),
visible=self.cleaned_data.get("visible", True),
)
# Injecte son premier message
post_instance = thread_instance.post_set.create(
author=self.author,
text=self.cleaned_data["text"],
)
if self.cleaned_data.get("threadwatch", False):
threadwatch_instance = thread_instance.threadwatch_set.create(owner=self.author)
return thread_instance
class Meta:
model = Thread
exclude = ('category', 'author')
|
emencia/emencia-django-forum
|
forum/markup.py
|
clean_restructuredtext
|
python
|
def clean_restructuredtext(form_instance, content):
    """
    RST syntax validation for a form text field.

    Raises ``ValidationError`` listing the parser errors when *content*
    contains invalid reStructuredText; otherwise returns *content* unchanged.
    Empty content is accepted as-is.
    """
    if content:
        errors = SourceReporter(content)
        if errors:
            raise ValidationError(map(map_parsing_errors, errors))
    return content
|
RST syntax validation
|
train
|
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/markup.py#L20-L28
| null |
"""
Some markup utilities for RST and DjangoCodeMirror usage
"""
from django.forms import ValidationError
from rstview.parser import SourceReporter, map_parsing_errors
from djangocodemirror.fields import DjangoCodeMirrorField
def get_text_field(form_instance, **kwargs):
"""
Return a DjangoCodeMirrorField field
"""
kwargs.update({
'config_name': 'forum'
})
return DjangoCodeMirrorField(**kwargs)
|
emencia/emencia-django-forum
|
forum/views/post.py
|
PostEditView.get_object
|
python
|
def get_object(self, *args, **kwargs):
self.category_instance = get_object_or_404(Category, slug=self.kwargs['category_slug'])
return get_object_or_404(Post, thread__id=self.kwargs['thread_id'], thread__category=self.category_instance, pk=self.kwargs['post_id'])
|
Should memoize the object to avoid multiple query if get_object is used many times in the view
|
train
|
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/views/post.py#L42-L47
| null |
class PostEditView(LoginRequiredMixin, ModeratorCheckMixin, generic.UpdateView):
"""
Message edit view
Restricted to message owner and moderators
"""
model = Post
form_class = PostEditForm
template_name = 'forum/post/form.html'
context_object_name = "post_instance"
def check_permissions(self, request):
self.object = self.get_object()
# Owner can edit its posts if FORUM_OWNER_MESSAGE_CAN_EDIT setting is True
if settings.FORUM_OWNER_MESSAGE_CAN_EDIT and self.object.author == request.user:
return False
return self.check_moderator_permissions(request)
def get_context_data(self, **kwargs):
context = super(PostEditView, self).get_context_data(**kwargs)
context.update({
'FORUM_TEXT_FIELD_JS_TEMPLATE': settings.FORUM_TEXT_FIELD_JS_TEMPLATE,
'FORUM_TEXT_MARKUP_RENDER_TEMPLATE': settings.FORUM_TEXT_MARKUP_RENDER_TEMPLATE,
'category_instance': self.category_instance,
'thread_instance': self.object.thread,
})
return context
def get_success_url(self):
return self.object.get_absolute_url()
def get(self, *args, **kwargs):
self.check_permissions(self.request)
return super(PostEditView, self).get(*args, **kwargs)
def post(self, *args, **kwargs):
self.check_permissions(self.request)
return super(PostEditView, self).post(*args, **kwargs)
|
emencia/emencia-django-forum
|
forum/forms/category.py
|
CategoryForm.clean_description
|
python
|
def clean_description(self):
description = self.cleaned_data.get("description")
validation_helper = safe_import_module(settings.FORUM_TEXT_VALIDATOR_HELPER_PATH)
if validation_helper is not None:
return validation_helper(self, description)
else:
return description
|
Text content validation
|
train
|
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/forms/category.py#L29-L38
|
[
"def safe_import_module(path, default=None):\n \"\"\"\n Try to import the specified module from the given Python path\n\n @path is a string containing a Python path to the wanted module, @default is \n an object to return if import fails, it can be None, a callable or whatever you need.\n\n Return a object or None\n \"\"\"\n if path is None:\n return default\n\n dot = path.rindex('.')\n module_name = path[:dot]\n class_name = path[dot + 1:]\n try:\n _class = getattr(import_module(module_name), class_name)\n return _class\n except (ImportError, AttributeError):\n warnings.warn('%s cannot be imported' % path, RuntimeWarning)\n return default\n"
] |
class CategoryForm(CrispyFormMixin, forms.ModelForm):
"""
Category form
"""
crispy_form_helper_path = 'forum.forms.crispies.category_helper'
def __init__(self, *args, **kwargs):
super(CategoryForm, self).__init__(*args, **kwargs)
super(forms.ModelForm, self).__init__(*args, **kwargs)
# Set the form field for Category.description
field_helper = safe_import_module(settings.FORUM_TEXT_FIELD_HELPER_PATH)
if field_helper is not None:
self.fields['description'] = field_helper(self, **{'label':_('description'), 'required':True})
class Meta:
model = Category
|
emencia/emencia-django-forum
|
forum/admin.py
|
ThreadAdmin.save_model
|
python
|
def save_model(self, request, obj, form, change):
instance = form.save(commit=False)
if not(instance.created):
instance.author = request.user
instance.save()
form.save_m2m()
return instance
|
Surclasse la méthode de sauvegarde de l'admin du modèle pour y
rajouter automatiquement l'auteur qui créé l'objet
|
train
|
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/admin.py#L9-L20
| null |
class ThreadAdmin(admin.ModelAdmin):
|
emencia/emencia-django-forum
|
forum/mixins.py
|
ModeratorCheckMixin.check_moderator_permissions
|
python
|
def check_moderator_permissions(self, request):
has_perms = self.has_moderator_permissions(request)
# Return a forbidden response if no permission has been finded
if not has_perms:
raise PermissionDenied
return False
|
Check if the user has a global or per-object permission (on the category
instance and on the thread instance); return a 403 response if no
permission has been found.
If a permission has been found, return False, so that the dispatcher
then returns the "normal" response from the view.
|
train
|
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/mixins.py#L18-L33
|
[
"def has_moderator_permissions(self, request):\n \"\"\"\n Find if user have global or per object permission firstly on category instance, \n if not then on thread instance\n \"\"\"\n return any(request.user.has_perm(perm) for perm in self.permission_required)\n"
] |
class ModeratorCheckMixin(object):
"""
Mixin to include checking for moderator permission on category or thread
"""
permission_required = ['forum.moderate_category', 'forum.moderate_thread']
def check_moderator_permissions(self, request):
"""
Check if user have global or per object permission (on category
instance and on thread instance), finally return a 403 response if no
permissions has been finded.
If a permission has been finded, return False, then the dispatcher
should so return the "normal" response from the view.
"""
has_perms = self.has_moderator_permissions(request)
# Return a forbidden response if no permission has been finded
if not has_perms:
raise PermissionDenied
return False
def has_moderator_permissions(self, request):
"""
Find if user have global or per object permission firstly on category instance,
if not then on thread instance
"""
return any(request.user.has_perm(perm) for perm in self.permission_required)
|
emencia/emencia-django-forum
|
forum/mixins.py
|
ModeratorCheckMixin.has_moderator_permissions
|
python
|
def has_moderator_permissions(self, request):
return any(request.user.has_perm(perm) for perm in self.permission_required)
|
Determine whether the user has a global or per-object permission, first on the
category instance, then on the thread instance
|
train
|
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/mixins.py#L35-L40
| null |
class ModeratorCheckMixin(object):
"""
Mixin to include checking for moderator permission on category or thread
"""
permission_required = ['forum.moderate_category', 'forum.moderate_thread']
def check_moderator_permissions(self, request):
"""
Check if user have global or per object permission (on category
instance and on thread instance), finally return a 403 response if no
permissions has been finded.
If a permission has been finded, return False, then the dispatcher
should so return the "normal" response from the view.
"""
has_perms = self.has_moderator_permissions(request)
# Return a forbidden response if no permission has been finded
if not has_perms:
raise PermissionDenied
return False
|
emencia/emencia-django-forum
|
forum/utils/imports.py
|
safe_import_module
|
python
|
def safe_import_module(path, default=None):
    """
    Try to import the specified object from the given Python path.

    @path is a dotted Python path string ("package.module.attribute"),
    @default is the object to return when the import fails; it can be None,
    a callable or whatever you need.

    Return the imported object, or ``default`` on failure.
    """
    if path is None:
        return default
    # BUG FIX: the original used path.rindex('.'), which raises ValueError
    # (instead of warning and returning default) when the path has no dot.
    if '.' not in path:
        warnings.warn('%s cannot be imported' % path, RuntimeWarning)
        return default
    module_name, class_name = path.rsplit('.', 1)
    try:
        return getattr(import_module(module_name), class_name)
    except (ImportError, AttributeError):
        warnings.warn('%s cannot be imported' % path, RuntimeWarning)
        return default
|
Try to import the specified module from the given Python path
@path is a string containing a Python path to the wanted module, @default is
an object to return if import fails, it can be None, a callable or whatever you need.
Return a object or None
|
train
|
https://github.com/emencia/emencia-django-forum/blob/cda74ed7e5822675c340ee5ec71548d981bccd3b/forum/utils/imports.py#L6-L26
| null |
# -*- coding: utf-8 -*-
import warnings
from django.utils.importlib import import_module
def safe_import_module(path, default=None):
"""
Try to import the specified module from the given Python path
@path is a string containing a Python path to the wanted module, @default is
an object to return if import fails, it can be None, a callable or whatever you need.
Return a object or None
"""
if path is None:
return default
dot = path.rindex('.')
module_name = path[:dot]
class_name = path[dot + 1:]
try:
_class = getattr(import_module(module_name), class_name)
return _class
except (ImportError, AttributeError):
warnings.warn('%s cannot be imported' % path, RuntimeWarning)
return default
|
foxx/python-helpful
|
helpful.py
|
unique_iter
|
python
|
def unique_iter(seq):
    """
    Return a list of the unique items of *seq*, keeping first-seen order.

    See http://www.peterbe.com/plog/uniqifiers-benchmark
    Originally f8 written by Dave Kirby
    """
    seen = set()
    result = []
    for item in seq:
        if item in seen:
            continue
        seen.add(item)
        result.append(item)
    return result
|
See http://www.peterbe.com/plog/uniqifiers-benchmark
Originally f8 written by Dave Kirby
|
train
|
https://github.com/foxx/python-helpful/blob/e31ad9bdf45051d8b9a0d1808d214e2293c3bbed/helpful.py#L55-L61
| null |
import six
import os
import shutil
import warnings
import tempfile
import importlib
import pkgutil
import inspect
import string
import random
import sys
import itertools
import datetime
from decimal import Decimal
from collections import OrderedDict, Iterable
if six.PY2: # pragma: nocover
text_type = unicode
string_types = (str, unicode)
else: # pragma: nocover
string_types = (str, )
text_type = str
NoneType = type(None)
###########################################################
# Mixins
###########################################################
class ClassDictMixin():
    """
    Dict which can be accessed via class attributes
    Thanks http://www.goodcode.io/blog/python-dict-object/
    """
    def __getattr__(self, name):
        # EAFP: a missing key becomes the conventional AttributeError
        try:
            return self[name]
        except KeyError:
            raise AttributeError("No such attribute: " + name)

    def __setattr__(self, name, value):
        # Attributes are stored as dict items, never in __dict__
        self[name] = value

    def __delattr__(self, name):
        try:
            del self[name]
        except KeyError:
            raise AttributeError("No such attribute: " + name)

    def copy(self):
        # Rebuild an instance of the concrete class from our items
        # XXX: needs UT
        return self.__class__(**self)
def flatteniter(iter_lst):
    """
    Flatten one level of nesting into a list.

    >>> flatteniter([[1,2,3], [4,5,6]])
    [1, 2, 3, 4, 5, 6]
    """
    return list(itertools.chain.from_iterable(iter_lst))
class HashableDictMixin(object):
    def __hash__(self):
        """
        This /should/ allow object to be hashable, for use in a set
        XXX: Needs UT
        Thanks Raymond @ http://stackoverflow.com/a/16162138/1267398
        """
        # NOTE(review): requires every key AND value to be hashable; two
        # dicts that swap a key/value pair can collide — acceptable for a
        # hash, equality still disambiguates.
        return hash((frozenset(self), frozenset(self.values())))
###########################################################
# Hashable dict
###########################################################
class ClassDict(ClassDictMixin, dict):
"""
>>> d = ClassDict(hello="world")
>>> d.hello
'world'
>>> d.get('hello')
'world'
>>> d.hello = 'wtf'
>>> d.hello
'wtf'
>>> d['hello']
'wtf'
>>> d.world
Traceback (most recent call last):
AttributeError:
>>> del d.hello
>>> del d.world
Traceback (most recent call last):
AttributeError:
>>> d.hello = 1
>>> b = d.copy()
>>> b.hello = 2
>>> b.hello == d.hello
False
"""
class HashableDict(HashableDictMixin, dict):
"""
>>> hash(HashableDict(a=1, b=2)) is not None
True
"""
class HashableOrderedDict(HashableDictMixin, OrderedDict):
"""
>>> hash(HashableOrderedDict(a=1, b=2)) is not None
True
"""
def ensure_class(obj):
    """
    Ensure object is a class

    >>> ensure_class(object)
    >>> ensure_class(object())
    Traceback (most recent call last):
    TypeError:
    >>> ensure_class(1)
    Traceback (most recent call last):
    TypeError:
    """
    if inspect.isclass(obj):
        return
    raise TypeError("Expected class, got {}".format(obj))
def iter_ensure_class(iterable):
    """
    Ensure every item in iterable is a class

    >>> iter_ensure_class([object, object])
    >>> iter_ensure_class([object, object()])
    Traceback (most recent call last):
    TypeError:
    """
    ensure_instance(iterable, Iterable)
    for item in iterable:
        ensure_class(item)
def ensure_subclass(value, types):
    """
    Ensure value is a subclass of types

    >>> class Hello(object): pass
    >>> ensure_subclass(Hello, Hello)
    >>> ensure_subclass(object, Hello)
    Traceback (most recent call last):
    TypeError:
    """
    ensure_class(value)
    if issubclass(value, types):
        return
    raise TypeError(
        "expected subclass of {}, not {}".format(
            types, value))
def ensure_instance(value, types):
    """
    Ensure value is an instance of a certain type

    >>> ensure_instance(1, str)
    Traceback (most recent call last):
    TypeError:
    >>> ensure_instance(1, int)
    >>> ensure_instance(1, (int, str))

    :attr types: Type or tuple of types (as accepted by ``isinstance``)
    """
    if isinstance(value, types):
        return
    raise TypeError(
        "expected instance of {}, got {}".format(
            types, value))
def iter_ensure_instance(iterable, types):
    """
    Iterate over object and check each item type

    >>> iter_ensure_instance([1,2,3], [str])
    Traceback (most recent call last):
    TypeError:
    >>> iter_ensure_instance([1,2,3], int)
    >>> iter_ensure_instance(1, int)
    Traceback (most recent call last):
    TypeError:
    """
    ensure_instance(iterable, Iterable)
    for element in iterable:
        ensure_instance(element, types)
def touch(path, times=None):
    """
    Implements unix utility `touch`

    Creates the file at *path* if it does not exist, then updates its
    access and modification times.

    :attr path: File path
    :attr times: See `os.utime()` for args
        https://docs.python.org/3.4/library/os.html#os.utime
    """
    # Append mode creates the file without truncating existing content
    with open(path, 'a'):
        os.utime(path, times)
def import_recursive(path):
    """
    Recursively import all modules and packages

    Thanks http://stackoverflow.com/a/25562415/1267398

    :attr path: Dotted path to package/module
    :returns: dict mapping dotted module names to imported module objects
    """
    results = {}
    obj = importlib.import_module(path)
    results[path] = obj
    # BUG FIX: only packages carry __path__. The old fallback passed the
    # parent *directory string* of a plain module to pkgutil.walk_packages,
    # which iterates its argument — so the string was walked char-by-char.
    # A plain module has no submodules; stop at the module itself.
    pkg_path = getattr(obj, '__path__', None)
    if pkg_path is None:
        return results
    for loader, name, is_pkg in pkgutil.walk_packages(pkg_path):
        full_name = obj.__name__ + '.' + name
        results[full_name] = importlib.import_module(full_name)
        if is_pkg:
            results.update(import_recursive(full_name))
    return results
def extend_instance(instance, *bases, **kwargs):
    """
    Apply subclass (mixin) to a class object or its instance

    By default, the mixin is placed at the start of bases
    to ensure its called first as per MRO. If you wish to
    have it injected last, which is useful for monkeypatching,
    then you can specify 'last=True'. See here:
    http://stackoverflow.com/a/10018792/1267398

    :attr instance: Target object (a class instance, not a class)
    :attr bases: List of new bases to subclass with
    :attr last: Inject new bases after existing bases
    :type last: bool

    >>> class A(object): pass
    >>> class B(object): pass
    >>> b = B()
    >>> isinstance(b, A)
    False
    >>> extend_instance(b, A)
    >>> isinstance(b, A)
    True
    """
    inject_last = kwargs.get('last', False)
    mixins = tuple(bases)
    for mixin in mixins:
        assert inspect.isclass(mixin), "bases must be classes"
    assert not inspect.isclass(instance)
    current_cls = instance.__class__
    if inject_last:
        new_bases = (current_cls,) + mixins
    else:
        new_bases = mixins + (current_cls,)
    # Swap the instance's class for a freshly built subclass with same name
    instance.__class__ = type(current_cls.__name__, new_bases, {})
def add_bases(cls, *bases):
    """
    Add bases to class (mutates ``cls.__bases__`` in place)

    >>> class Base(object): pass
    >>> class A(Base): pass
    >>> class B(Base): pass
    >>> issubclass(A, B)
    False
    >>> add_bases(A, B)
    >>> issubclass(A, B)
    True
    """
    assert inspect.isclass(cls), "Expected class object"
    for extra in bases:
        assert inspect.isclass(extra), "Expected class object for bases"
    # New bases are prepended so they take MRO precedence
    cls.__bases__ = bases + cls.__bases__
def subclass(cls, *bases, **kwargs):
    """
    Add bases to class (late subclassing)

    Annoyingly we cannot yet modify __bases__ of an existing
    class, instead we must create another subclass, see here;
    http://bugs.python.org/issue672115

    >>> class A(object): pass
    >>> class B(object): pass
    >>> D = subclass(B, A)
    >>> issubclass(D, A)
    True
    >>> issubclass(D, B)
    True
    """
    inject_last = kwargs.get('last', False)
    extra = tuple(bases)
    for base in extra:
        assert inspect.isclass(base), "bases must be classes"
    ordered = (cls,) + extra if inject_last else extra + (cls,)
    # Same class name, new bases — callers see a drop-in subclass
    return type(cls.__name__, ordered, {})
def import_from_path(path):
    """
    Imports a package, module or attribute from path

    Thanks http://stackoverflow.com/a/14050282/1267398

    >>> import_from_path('os.path')
    <module 'posixpath' ...
    >>> import_from_path('os.path.basename')
    <function basename at ...
    >>> import_from_path('getrektcunt')
    Traceback (most recent call last):
    ImportError:
    """
    try:
        # Fast path: the whole dotted path is itself a module/package
        return importlib.import_module(path)
    except ImportError:
        if '.' not in path:
            raise
        # Fall back to "module path + attribute name"
        module_name, attr_name = path.rsplit('.', 1)
        if not does_module_exist(module_name):
            raise ImportError("No object found at '{}'".format(path))
        module = importlib.import_module(module_name)
        if not hasattr(module, attr_name):
            raise ImportError("No object found at '{}'".format(path))
        return getattr(module, attr_name)
def does_module_exist(path):
"""
Check if Python module exists at path
>>> does_module_exist('os.path')
True
>>> does_module_exist('dummy.app')
False
"""
try:
importlib.import_module(path)
return True
except ImportError:
return False
def sort_dict_by_key(obj):
"""
Sort dict by its keys
>>> sort_dict_by_key(dict(c=1, b=2, a=3, d=4))
OrderedDict([('a', 3), ('b', 2), ('c', 1), ('d', 4)])
"""
sort_func = lambda x: x[0]
return OrderedDict(sorted(obj.items(), key=sort_func))
def generate_random_token(length=32):
"""
Generate random secure token
>>> len(generate_random_token())
32
>>> len(generate_random_token(6))
6
"""
chars = (string.ascii_lowercase + string.ascii_uppercase + string.digits)
return ''.join(random.choice(chars) for _ in range(length))
def default(*args, **kwargs):
"""
Return first argument which is "truthy"
>>> default(None, None, 1)
1
>>> default(None, None, 123)
123
>>> print(default(None, None))
None
"""
default = kwargs.get('default', None)
for arg in args:
if arg:
return arg
return default
def urljoin(*args):
"""
Joins given arguments into a url, removing duplicate slashes
Thanks http://stackoverflow.com/a/11326230/1267398
>>> urljoin('/lol', '///lol', '/lol//')
'/lol/lol/lol'
"""
value = "/".join(map(lambda x: str(x).strip('/'), args))
return "/{}".format(value)
def is_hex(value):
"""
Check if value is hex
>>> is_hex('abab')
True
>>> is_hex('gg')
False
"""
try:
int(value, 16)
except ValueError:
return False
else:
return True
def is_int(value):
"""
Check if value is an int
:type value: int, str, bytes, float, Decimal
>>> is_int(123), is_int('123'), is_int(Decimal('10'))
(True, True, True)
>>> is_int(1.1), is_int('1.1'), is_int(Decimal('10.1'))
(False, False, False)
>>> is_int(object)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(value, (int, str, bytes, float, Decimal))
if isinstance(value, int):
return True
elif isinstance(value, float):
return False
elif isinstance(value, Decimal):
return str(value).isdigit()
elif isinstance(value, (str, bytes)):
return value.isdigit()
raise ValueError() # pragma: nocover
def padded_split(value, sep, maxsplit=None, pad=None):
"""
Modified split() to include padding
See http://code.activestate.com/lists/python-ideas/3366/
:attr value: see str.split()
:attr sep: see str.split()
:attr maxsplit: see str.split()
:attr pad: Value to use for padding maxsplit
>>> padded_split('text/html', ';', 1)
['text/html', None]
>>> padded_split('text/html;q=1', ';', 1)
['text/html', 'q=1']
>>> padded_split('text/html;a=1;b=2', ';', 1)
['text/html', 'a=1;b=2']
>>> padded_split('text/html', ';', 1, True)
['text/html', True]
>>> padded_split('text/html;a=1;b=2', ';', 2)
['text/html', 'a=1', 'b=2']
>>> padded_split('text/html;a=1', ';', 2)
['text/html', 'a=1', None]
"""
result = value.split(sep, maxsplit)
if maxsplit is not None:
result.extend(
[pad] * (1+maxsplit-len(result)))
return result
def coerce_to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
"""
Coerce value to bytes
>>> a = coerce_to_bytes('hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(b'hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(None)
>>> assert a is None
>>> coerce_to_bytes(object())
Traceback (most recent call last):
...
TypeError: Cannot coerce to bytes
"""
PY2 = sys.version_info[0] == 2
if PY2: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
else: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
def get_exception():
"""
Workaround for the missing "as" keyword in py3k.
XXX: needs UT
"""
return sys.exc_info()[1]
def makelist(data):
"""
Thanks bottle
XXX: needs UT
"""
if isinstance(data, (list, set, tuple)):
return list(data)
elif data:
return [data]
else:
return []
def random_date_between(start_date, end_date):
"""Return random date between start/end"""
assert isinstance(start_date, datetime.date)
delta_secs = int((end_date - start_date).total_seconds())
delta = datetime.timedelta(seconds=random.randint(0, delta_secs))
return (start_date + delta)
def datetime_to_epoch(dt):
return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
class Tempfile(object):
"""
Tempfile wrapper with cleanup support
XXX: Needs UT
"""
def __init__(self):
self.paths = []
def mkstemp(self, *args, **kwargs):
path = tempfile.mkstemp(*args, **kwargs)
self.paths.append(path)
return path
def mkdtemp(self, *args, **kwargs):
path = tempfile.mkdtemp(*args, **kwargs)
self.paths.append(path)
return path
def cleanup(self):
"""Remove any created temp paths"""
for path in self.paths:
if isinstance(path, tuple):
os.close(path[0])
os.unlink(path[1])
else:
shutil.rmtree(path)
self.paths = []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.cleanup()
|
foxx/python-helpful
|
helpful.py
|
ensure_subclass
|
python
|
def ensure_subclass(value, types):
ensure_class(value)
if not issubclass(value, types):
raise TypeError(
"expected subclass of {}, not {}".format(
types, value))
|
Ensure value is a subclass of types
>>> class Hello(object): pass
>>> ensure_subclass(Hello, Hello)
>>> ensure_subclass(object, Hello)
Traceback (most recent call last):
TypeError:
|
train
|
https://github.com/foxx/python-helpful/blob/e31ad9bdf45051d8b9a0d1808d214e2293c3bbed/helpful.py#L157-L171
|
[
"def ensure_class(obj):\n \"\"\"\n Ensure object is a class\n\n >>> ensure_class(object)\n >>> ensure_class(object())\n Traceback (most recent call last):\n TypeError:\n >>> ensure_class(1)\n Traceback (most recent call last):\n TypeError:\n \"\"\"\n if not inspect.isclass(obj):\n raise TypeError(\"Expected class, got {}\".format(obj))\n"
] |
import six
import os
import shutil
import warnings
import tempfile
import importlib
import pkgutil
import inspect
import string
import random
import sys
import itertools
import datetime
from decimal import Decimal
from collections import OrderedDict, Iterable
if six.PY2: # pragma: nocover
text_type = unicode
string_types = (str, unicode)
else: # pragma: nocover
string_types = (str, )
text_type = str
NoneType = type(None)
###########################################################
# Mixins
###########################################################
class ClassDictMixin():
"""
Dict which can be accessed via class attributes
Thanks http://www.goodcode.io/blog/python-dict-object/
"""
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if name in self:
del self[name]
else:
raise AttributeError("No such attribute: " + name)
def copy(self):
# XXX: needs UT
return self.__class__(**self)
def unique_iter(seq):
"""
See http://www.peterbe.com/plog/uniqifiers-benchmark
Originally f8 written by Dave Kirby
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def flatteniter(iter_lst):
"""
>>> flatteniter([[1,2,3], [4,5,6]])
[1, 2, 3, 4, 5, 6]
"""
return list(itertools.chain(*iter_lst))
class HashableDictMixin(object):
def __hash__(self):
"""
This /should/ allow object to be hashable, for use in a set
XXX: Needs UT
Thanks Raymond @ http://stackoverflow.com/a/16162138/1267398
"""
return hash((frozenset(self), frozenset(self.values())))
###########################################################
# Hashable dict
###########################################################
class ClassDict(ClassDictMixin, dict):
"""
>>> d = ClassDict(hello="world")
>>> d.hello
'world'
>>> d.get('hello')
'world'
>>> d.hello = 'wtf'
>>> d.hello
'wtf'
>>> d['hello']
'wtf'
>>> d.world
Traceback (most recent call last):
AttributeError:
>>> del d.hello
>>> del d.world
Traceback (most recent call last):
AttributeError:
>>> d.hello = 1
>>> b = d.copy()
>>> b.hello = 2
>>> b.hello == d.hello
False
"""
class HashableDict(HashableDictMixin, dict):
"""
>>> hash(HashableDict(a=1, b=2)) is not None
True
"""
class HashableOrderedDict(HashableDictMixin, OrderedDict):
"""
>>> hash(HashableOrderedDict(a=1, b=2)) is not None
True
"""
def ensure_class(obj):
"""
Ensure object is a class
>>> ensure_class(object)
>>> ensure_class(object())
Traceback (most recent call last):
TypeError:
>>> ensure_class(1)
Traceback (most recent call last):
TypeError:
"""
if not inspect.isclass(obj):
raise TypeError("Expected class, got {}".format(obj))
def iter_ensure_class(iterable):
"""
Ensure every item in iterable is a class
>>> iter_ensure_class([object, object])
>>> iter_ensure_class([object, object()])
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_class(item) for item in iterable ]
def ensure_instance(value, types):
"""
Ensure value is an instance of a certain type
>>> ensure_instance(1, [str])
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, str)
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, int)
>>> ensure_instance(1, (int, str))
:attr types: Type of list of types
"""
if not isinstance(value, types):
raise TypeError(
"expected instance of {}, got {}".format(
types, value))
def iter_ensure_instance(iterable, types):
"""
Iterate over object and check each item type
>>> iter_ensure_instance([1,2,3], [str])
Traceback (most recent call last):
TypeError:
>>> iter_ensure_instance([1,2,3], int)
>>> iter_ensure_instance(1, int)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_instance(item, types) for item in iterable ]
def touch(path, times=None):
"""
Implements unix utility `touch`
XXX: Needs UT
:attr fname: File path
:attr times: See `os.utime()` for args
https://docs.python.org/3.4/library/os.html#os.utime
"""
with open(path, 'a'):
os.utime(path, times)
def import_recursive(path):
"""
Recursively import all modules and packages
Thanks http://stackoverflow.com/a/25562415/1267398
XXX: Needs UT
:attr path: Path to package/module
"""
results = {}
obj = importlib.import_module(path)
results[path] = obj
path = getattr(obj, '__path__', os.path.dirname(obj.__file__))
for loader, name, is_pkg in pkgutil.walk_packages(path):
full_name = obj.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if is_pkg:
results.update(import_recursive(full_name))
return results
def extend_instance(instance, *bases, **kwargs):
"""
Apply subclass (mixin) to a class object or its instance
By default, the mixin is placed at the start of bases
to ensure its called first as per MRO. If you wish to
have it injected last, which is useful for monkeypatching,
then you can specify 'last=True'. See here:
http://stackoverflow.com/a/10018792/1267398
:attr cls: Target object
:type cls: Class instance
:attr bases: List of new bases to subclass with
:attr last: Inject new bases after existing bases
:type last: bool
>>> class A(object): pass
>>> class B(object): pass
>>> a = A()
>>> b = B()
>>> isinstance(b, A)
False
>>> extend_instance(b, A)
>>> isinstance(b, A)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
assert not inspect.isclass(instance)
base_cls = instance.__class__
base_cls_name = instance.__class__.__name__
new_bases = (base_cls,)+bases if last else bases+(base_cls,)
new_cls = type(base_cls_name, tuple(new_bases), {})
setattr(instance, '__class__', new_cls)
def add_bases(cls, *bases):
"""
Add bases to class
>>> class Base(object): pass
>>> class A(Base): pass
>>> class B(Base): pass
>>> issubclass(A, B)
False
>>> add_bases(A, B)
>>> issubclass(A, B)
True
"""
assert inspect.isclass(cls), "Expected class object"
for mixin in bases:
assert inspect.isclass(mixin), "Expected class object for bases"
new_bases = (bases + cls.__bases__)
cls.__bases__ = new_bases
def subclass(cls, *bases, **kwargs):
"""
Add bases to class (late subclassing)
Annoyingly we cannot yet modify __bases__ of an existing
class, instead we must create another subclass, see here;
http://bugs.python.org/issue672115
>>> class A(object): pass
>>> class B(object): pass
>>> class C(object): pass
>>> issubclass(B, A)
False
>>> D = subclass(B, A)
>>> issubclass(D, A)
True
>>> issubclass(D, B)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
new_bases = (cls,)+bases if last else bases+(cls,)
new_cls = type(cls.__name__, tuple(new_bases), {})
return new_cls
def import_from_path(path):
"""
Imports a package, module or attribute from path
Thanks http://stackoverflow.com/a/14050282/1267398
>>> import_from_path('os.path')
<module 'posixpath' ...
>>> import_from_path('os.path.basename')
<function basename at ...
>>> import_from_path('os')
<module 'os' from ...
>>> import_from_path('getrektcunt')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc.dummylol')
Traceback (most recent call last):
ImportError:
"""
try:
return importlib.import_module(path)
except ImportError:
if '.' not in path:
raise
module_name, attr_name = path.rsplit('.', 1)
if not does_module_exist(module_name):
raise ImportError("No object found at '{}'".format(path))
mod = importlib.import_module(module_name)
if not hasattr(mod, attr_name):
raise ImportError("No object found at '{}'".format(path))
return getattr(mod, attr_name)
def does_module_exist(path):
"""
Check if Python module exists at path
>>> does_module_exist('os.path')
True
>>> does_module_exist('dummy.app')
False
"""
try:
importlib.import_module(path)
return True
except ImportError:
return False
def sort_dict_by_key(obj):
"""
Sort dict by its keys
>>> sort_dict_by_key(dict(c=1, b=2, a=3, d=4))
OrderedDict([('a', 3), ('b', 2), ('c', 1), ('d', 4)])
"""
sort_func = lambda x: x[0]
return OrderedDict(sorted(obj.items(), key=sort_func))
def generate_random_token(length=32):
"""
Generate random secure token
>>> len(generate_random_token())
32
>>> len(generate_random_token(6))
6
"""
chars = (string.ascii_lowercase + string.ascii_uppercase + string.digits)
return ''.join(random.choice(chars) for _ in range(length))
def default(*args, **kwargs):
"""
Return first argument which is "truthy"
>>> default(None, None, 1)
1
>>> default(None, None, 123)
123
>>> print(default(None, None))
None
"""
default = kwargs.get('default', None)
for arg in args:
if arg:
return arg
return default
def urljoin(*args):
"""
Joins given arguments into a url, removing duplicate slashes
Thanks http://stackoverflow.com/a/11326230/1267398
>>> urljoin('/lol', '///lol', '/lol//')
'/lol/lol/lol'
"""
value = "/".join(map(lambda x: str(x).strip('/'), args))
return "/{}".format(value)
def is_hex(value):
"""
Check if value is hex
>>> is_hex('abab')
True
>>> is_hex('gg')
False
"""
try:
int(value, 16)
except ValueError:
return False
else:
return True
def is_int(value):
"""
Check if value is an int
:type value: int, str, bytes, float, Decimal
>>> is_int(123), is_int('123'), is_int(Decimal('10'))
(True, True, True)
>>> is_int(1.1), is_int('1.1'), is_int(Decimal('10.1'))
(False, False, False)
>>> is_int(object)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(value, (int, str, bytes, float, Decimal))
if isinstance(value, int):
return True
elif isinstance(value, float):
return False
elif isinstance(value, Decimal):
return str(value).isdigit()
elif isinstance(value, (str, bytes)):
return value.isdigit()
raise ValueError() # pragma: nocover
def padded_split(value, sep, maxsplit=None, pad=None):
"""
Modified split() to include padding
See http://code.activestate.com/lists/python-ideas/3366/
:attr value: see str.split()
:attr sep: see str.split()
:attr maxsplit: see str.split()
:attr pad: Value to use for padding maxsplit
>>> padded_split('text/html', ';', 1)
['text/html', None]
>>> padded_split('text/html;q=1', ';', 1)
['text/html', 'q=1']
>>> padded_split('text/html;a=1;b=2', ';', 1)
['text/html', 'a=1;b=2']
>>> padded_split('text/html', ';', 1, True)
['text/html', True]
>>> padded_split('text/html;a=1;b=2', ';', 2)
['text/html', 'a=1', 'b=2']
>>> padded_split('text/html;a=1', ';', 2)
['text/html', 'a=1', None]
"""
result = value.split(sep, maxsplit)
if maxsplit is not None:
result.extend(
[pad] * (1+maxsplit-len(result)))
return result
def coerce_to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
"""
Coerce value to bytes
>>> a = coerce_to_bytes('hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(b'hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(None)
>>> assert a is None
>>> coerce_to_bytes(object())
Traceback (most recent call last):
...
TypeError: Cannot coerce to bytes
"""
PY2 = sys.version_info[0] == 2
if PY2: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
else: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
def get_exception():
"""
Workaround for the missing "as" keyword in py3k.
XXX: needs UT
"""
return sys.exc_info()[1]
def makelist(data):
"""
Thanks bottle
XXX: needs UT
"""
if isinstance(data, (list, set, tuple)):
return list(data)
elif data:
return [data]
else:
return []
def random_date_between(start_date, end_date):
"""Return random date between start/end"""
assert isinstance(start_date, datetime.date)
delta_secs = int((end_date - start_date).total_seconds())
delta = datetime.timedelta(seconds=random.randint(0, delta_secs))
return (start_date + delta)
def datetime_to_epoch(dt):
return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
class Tempfile(object):
"""
Tempfile wrapper with cleanup support
XXX: Needs UT
"""
def __init__(self):
self.paths = []
def mkstemp(self, *args, **kwargs):
path = tempfile.mkstemp(*args, **kwargs)
self.paths.append(path)
return path
def mkdtemp(self, *args, **kwargs):
path = tempfile.mkdtemp(*args, **kwargs)
self.paths.append(path)
return path
def cleanup(self):
"""Remove any created temp paths"""
for path in self.paths:
if isinstance(path, tuple):
os.close(path[0])
os.unlink(path[1])
else:
shutil.rmtree(path)
self.paths = []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.cleanup()
|
foxx/python-helpful
|
helpful.py
|
ensure_instance
|
python
|
def ensure_instance(value, types):
if not isinstance(value, types):
raise TypeError(
"expected instance of {}, got {}".format(
types, value))
|
Ensure value is an instance of a certain type
>>> ensure_instance(1, [str])
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, str)
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, int)
>>> ensure_instance(1, (int, str))
:attr types: Type of list of types
|
train
|
https://github.com/foxx/python-helpful/blob/e31ad9bdf45051d8b9a0d1808d214e2293c3bbed/helpful.py#L173-L193
| null |
import six
import os
import shutil
import warnings
import tempfile
import importlib
import pkgutil
import inspect
import string
import random
import sys
import itertools
import datetime
from decimal import Decimal
from collections import OrderedDict, Iterable
if six.PY2: # pragma: nocover
text_type = unicode
string_types = (str, unicode)
else: # pragma: nocover
string_types = (str, )
text_type = str
NoneType = type(None)
###########################################################
# Mixins
###########################################################
class ClassDictMixin():
"""
Dict which can be accessed via class attributes
Thanks http://www.goodcode.io/blog/python-dict-object/
"""
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if name in self:
del self[name]
else:
raise AttributeError("No such attribute: " + name)
def copy(self):
# XXX: needs UT
return self.__class__(**self)
def unique_iter(seq):
"""
See http://www.peterbe.com/plog/uniqifiers-benchmark
Originally f8 written by Dave Kirby
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def flatteniter(iter_lst):
"""
>>> flatteniter([[1,2,3], [4,5,6]])
[1, 2, 3, 4, 5, 6]
"""
return list(itertools.chain(*iter_lst))
class HashableDictMixin(object):
def __hash__(self):
"""
This /should/ allow object to be hashable, for use in a set
XXX: Needs UT
Thanks Raymond @ http://stackoverflow.com/a/16162138/1267398
"""
return hash((frozenset(self), frozenset(self.values())))
###########################################################
# Hashable dict
###########################################################
class ClassDict(ClassDictMixin, dict):
"""
>>> d = ClassDict(hello="world")
>>> d.hello
'world'
>>> d.get('hello')
'world'
>>> d.hello = 'wtf'
>>> d.hello
'wtf'
>>> d['hello']
'wtf'
>>> d.world
Traceback (most recent call last):
AttributeError:
>>> del d.hello
>>> del d.world
Traceback (most recent call last):
AttributeError:
>>> d.hello = 1
>>> b = d.copy()
>>> b.hello = 2
>>> b.hello == d.hello
False
"""
class HashableDict(HashableDictMixin, dict):
"""
>>> hash(HashableDict(a=1, b=2)) is not None
True
"""
class HashableOrderedDict(HashableDictMixin, OrderedDict):
"""
>>> hash(HashableOrderedDict(a=1, b=2)) is not None
True
"""
def ensure_class(obj):
"""
Ensure object is a class
>>> ensure_class(object)
>>> ensure_class(object())
Traceback (most recent call last):
TypeError:
>>> ensure_class(1)
Traceback (most recent call last):
TypeError:
"""
if not inspect.isclass(obj):
raise TypeError("Expected class, got {}".format(obj))
def iter_ensure_class(iterable):
"""
Ensure every item in iterable is a class
>>> iter_ensure_class([object, object])
>>> iter_ensure_class([object, object()])
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_class(item) for item in iterable ]
def ensure_subclass(value, types):
"""
Ensure value is a subclass of types
>>> class Hello(object): pass
>>> ensure_subclass(Hello, Hello)
>>> ensure_subclass(object, Hello)
Traceback (most recent call last):
TypeError:
"""
ensure_class(value)
if not issubclass(value, types):
raise TypeError(
"expected subclass of {}, not {}".format(
types, value))
def iter_ensure_instance(iterable, types):
"""
Iterate over object and check each item type
>>> iter_ensure_instance([1,2,3], [str])
Traceback (most recent call last):
TypeError:
>>> iter_ensure_instance([1,2,3], int)
>>> iter_ensure_instance(1, int)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_instance(item, types) for item in iterable ]
def touch(path, times=None):
"""
Implements unix utility `touch`
XXX: Needs UT
:attr fname: File path
:attr times: See `os.utime()` for args
https://docs.python.org/3.4/library/os.html#os.utime
"""
with open(path, 'a'):
os.utime(path, times)
def import_recursive(path):
"""
Recursively import all modules and packages
Thanks http://stackoverflow.com/a/25562415/1267398
XXX: Needs UT
:attr path: Path to package/module
"""
results = {}
obj = importlib.import_module(path)
results[path] = obj
path = getattr(obj, '__path__', os.path.dirname(obj.__file__))
for loader, name, is_pkg in pkgutil.walk_packages(path):
full_name = obj.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if is_pkg:
results.update(import_recursive(full_name))
return results
def extend_instance(instance, *bases, **kwargs):
"""
Apply subclass (mixin) to a class object or its instance
By default, the mixin is placed at the start of bases
to ensure its called first as per MRO. If you wish to
have it injected last, which is useful for monkeypatching,
then you can specify 'last=True'. See here:
http://stackoverflow.com/a/10018792/1267398
:attr cls: Target object
:type cls: Class instance
:attr bases: List of new bases to subclass with
:attr last: Inject new bases after existing bases
:type last: bool
>>> class A(object): pass
>>> class B(object): pass
>>> a = A()
>>> b = B()
>>> isinstance(b, A)
False
>>> extend_instance(b, A)
>>> isinstance(b, A)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
assert not inspect.isclass(instance)
base_cls = instance.__class__
base_cls_name = instance.__class__.__name__
new_bases = (base_cls,)+bases if last else bases+(base_cls,)
new_cls = type(base_cls_name, tuple(new_bases), {})
setattr(instance, '__class__', new_cls)
def add_bases(cls, *bases):
"""
Add bases to class
>>> class Base(object): pass
>>> class A(Base): pass
>>> class B(Base): pass
>>> issubclass(A, B)
False
>>> add_bases(A, B)
>>> issubclass(A, B)
True
"""
assert inspect.isclass(cls), "Expected class object"
for mixin in bases:
assert inspect.isclass(mixin), "Expected class object for bases"
new_bases = (bases + cls.__bases__)
cls.__bases__ = new_bases
def subclass(cls, *bases, **kwargs):
"""
Add bases to class (late subclassing)
Annoyingly we cannot yet modify __bases__ of an existing
class, instead we must create another subclass, see here;
http://bugs.python.org/issue672115
>>> class A(object): pass
>>> class B(object): pass
>>> class C(object): pass
>>> issubclass(B, A)
False
>>> D = subclass(B, A)
>>> issubclass(D, A)
True
>>> issubclass(D, B)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
new_bases = (cls,)+bases if last else bases+(cls,)
new_cls = type(cls.__name__, tuple(new_bases), {})
return new_cls
def import_from_path(path):
"""
Imports a package, module or attribute from path
Thanks http://stackoverflow.com/a/14050282/1267398
>>> import_from_path('os.path')
<module 'posixpath' ...
>>> import_from_path('os.path.basename')
<function basename at ...
>>> import_from_path('os')
<module 'os' from ...
>>> import_from_path('getrektcunt')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc.dummylol')
Traceback (most recent call last):
ImportError:
"""
try:
return importlib.import_module(path)
except ImportError:
if '.' not in path:
raise
module_name, attr_name = path.rsplit('.', 1)
if not does_module_exist(module_name):
raise ImportError("No object found at '{}'".format(path))
mod = importlib.import_module(module_name)
if not hasattr(mod, attr_name):
raise ImportError("No object found at '{}'".format(path))
return getattr(mod, attr_name)
def does_module_exist(path):
"""
Check if Python module exists at path
>>> does_module_exist('os.path')
True
>>> does_module_exist('dummy.app')
False
"""
try:
importlib.import_module(path)
return True
except ImportError:
return False
def sort_dict_by_key(obj):
"""
Sort dict by its keys
>>> sort_dict_by_key(dict(c=1, b=2, a=3, d=4))
OrderedDict([('a', 3), ('b', 2), ('c', 1), ('d', 4)])
"""
sort_func = lambda x: x[0]
return OrderedDict(sorted(obj.items(), key=sort_func))
def generate_random_token(length=32):
"""
Generate random secure token
>>> len(generate_random_token())
32
>>> len(generate_random_token(6))
6
"""
chars = (string.ascii_lowercase + string.ascii_uppercase + string.digits)
return ''.join(random.choice(chars) for _ in range(length))
def default(*args, **kwargs):
"""
Return first argument which is "truthy"
>>> default(None, None, 1)
1
>>> default(None, None, 123)
123
>>> print(default(None, None))
None
"""
default = kwargs.get('default', None)
for arg in args:
if arg:
return arg
return default
def urljoin(*args):
"""
Joins given arguments into a url, removing duplicate slashes
Thanks http://stackoverflow.com/a/11326230/1267398
>>> urljoin('/lol', '///lol', '/lol//')
'/lol/lol/lol'
"""
value = "/".join(map(lambda x: str(x).strip('/'), args))
return "/{}".format(value)
def is_hex(value):
"""
Check if value is hex
>>> is_hex('abab')
True
>>> is_hex('gg')
False
"""
try:
int(value, 16)
except ValueError:
return False
else:
return True
def is_int(value):
"""
Check if value is an int
:type value: int, str, bytes, float, Decimal
>>> is_int(123), is_int('123'), is_int(Decimal('10'))
(True, True, True)
>>> is_int(1.1), is_int('1.1'), is_int(Decimal('10.1'))
(False, False, False)
>>> is_int(object)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(value, (int, str, bytes, float, Decimal))
if isinstance(value, int):
return True
elif isinstance(value, float):
return False
elif isinstance(value, Decimal):
return str(value).isdigit()
elif isinstance(value, (str, bytes)):
return value.isdigit()
raise ValueError() # pragma: nocover
def padded_split(value, sep, maxsplit=None, pad=None):
"""
Modified split() to include padding
See http://code.activestate.com/lists/python-ideas/3366/
:attr value: see str.split()
:attr sep: see str.split()
:attr maxsplit: see str.split()
:attr pad: Value to use for padding maxsplit
>>> padded_split('text/html', ';', 1)
['text/html', None]
>>> padded_split('text/html;q=1', ';', 1)
['text/html', 'q=1']
>>> padded_split('text/html;a=1;b=2', ';', 1)
['text/html', 'a=1;b=2']
>>> padded_split('text/html', ';', 1, True)
['text/html', True]
>>> padded_split('text/html;a=1;b=2', ';', 2)
['text/html', 'a=1', 'b=2']
>>> padded_split('text/html;a=1', ';', 2)
['text/html', 'a=1', None]
"""
result = value.split(sep, maxsplit)
if maxsplit is not None:
result.extend(
[pad] * (1+maxsplit-len(result)))
return result
def coerce_to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
"""
Coerce value to bytes
>>> a = coerce_to_bytes('hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(b'hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(None)
>>> assert a is None
>>> coerce_to_bytes(object())
Traceback (most recent call last):
...
TypeError: Cannot coerce to bytes
"""
PY2 = sys.version_info[0] == 2
if PY2: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
else: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
def get_exception():
"""
Workaround for the missing "as" keyword in py3k.
XXX: needs UT
"""
return sys.exc_info()[1]
def makelist(data):
"""
Thanks bottle
XXX: needs UT
"""
if isinstance(data, (list, set, tuple)):
return list(data)
elif data:
return [data]
else:
return []
def random_date_between(start_date, end_date):
"""Return random date between start/end"""
assert isinstance(start_date, datetime.date)
delta_secs = int((end_date - start_date).total_seconds())
delta = datetime.timedelta(seconds=random.randint(0, delta_secs))
return (start_date + delta)
def datetime_to_epoch(dt):
return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
class Tempfile(object):
"""
Tempfile wrapper with cleanup support
XXX: Needs UT
"""
def __init__(self):
self.paths = []
def mkstemp(self, *args, **kwargs):
path = tempfile.mkstemp(*args, **kwargs)
self.paths.append(path)
return path
def mkdtemp(self, *args, **kwargs):
path = tempfile.mkdtemp(*args, **kwargs)
self.paths.append(path)
return path
def cleanup(self):
"""Remove any created temp paths"""
for path in self.paths:
if isinstance(path, tuple):
os.close(path[0])
os.unlink(path[1])
else:
shutil.rmtree(path)
self.paths = []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.cleanup()
|
foxx/python-helpful
|
helpful.py
|
iter_ensure_instance
|
python
|
def iter_ensure_instance(iterable, types):
ensure_instance(iterable, Iterable)
[ ensure_instance(item, types) for item in iterable ]
|
Iterate over object and check each item type
>>> iter_ensure_instance([1,2,3], [str])
Traceback (most recent call last):
TypeError:
>>> iter_ensure_instance([1,2,3], int)
>>> iter_ensure_instance(1, int)
Traceback (most recent call last):
TypeError:
|
train
|
https://github.com/foxx/python-helpful/blob/e31ad9bdf45051d8b9a0d1808d214e2293c3bbed/helpful.py#L196-L209
|
[
"def ensure_instance(value, types):\n \"\"\"\n Ensure value is an instance of a certain type\n\n >>> ensure_instance(1, [str])\n Traceback (most recent call last):\n TypeError:\n\n >>> ensure_instance(1, str)\n Traceback (most recent call last):\n TypeError:\n\n >>> ensure_instance(1, int)\n >>> ensure_instance(1, (int, str))\n\n :attr types: Type of list of types\n \"\"\"\n if not isinstance(value, types):\n raise TypeError(\n \"expected instance of {}, got {}\".format(\n types, value))\n"
] |
import six
import os
import shutil
import warnings
import tempfile
import importlib
import pkgutil
import inspect
import string
import random
import sys
import itertools
import datetime
from decimal import Decimal
from collections import OrderedDict, Iterable
if six.PY2: # pragma: nocover
text_type = unicode
string_types = (str, unicode)
else: # pragma: nocover
string_types = (str, )
text_type = str
NoneType = type(None)
###########################################################
# Mixins
###########################################################
class ClassDictMixin():
"""
Dict which can be accessed via class attributes
Thanks http://www.goodcode.io/blog/python-dict-object/
"""
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if name in self:
del self[name]
else:
raise AttributeError("No such attribute: " + name)
def copy(self):
# XXX: needs UT
return self.__class__(**self)
def unique_iter(seq):
"""
See http://www.peterbe.com/plog/uniqifiers-benchmark
Originally f8 written by Dave Kirby
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def flatteniter(iter_lst):
"""
>>> flatteniter([[1,2,3], [4,5,6]])
[1, 2, 3, 4, 5, 6]
"""
return list(itertools.chain(*iter_lst))
class HashableDictMixin(object):
def __hash__(self):
"""
This /should/ allow object to be hashable, for use in a set
XXX: Needs UT
Thanks Raymond @ http://stackoverflow.com/a/16162138/1267398
"""
return hash((frozenset(self), frozenset(self.values())))
###########################################################
# Hashable dict
###########################################################
class ClassDict(ClassDictMixin, dict):
"""
>>> d = ClassDict(hello="world")
>>> d.hello
'world'
>>> d.get('hello')
'world'
>>> d.hello = 'wtf'
>>> d.hello
'wtf'
>>> d['hello']
'wtf'
>>> d.world
Traceback (most recent call last):
AttributeError:
>>> del d.hello
>>> del d.world
Traceback (most recent call last):
AttributeError:
>>> d.hello = 1
>>> b = d.copy()
>>> b.hello = 2
>>> b.hello == d.hello
False
"""
class HashableDict(HashableDictMixin, dict):
"""
>>> hash(HashableDict(a=1, b=2)) is not None
True
"""
class HashableOrderedDict(HashableDictMixin, OrderedDict):
"""
>>> hash(HashableOrderedDict(a=1, b=2)) is not None
True
"""
def ensure_class(obj):
"""
Ensure object is a class
>>> ensure_class(object)
>>> ensure_class(object())
Traceback (most recent call last):
TypeError:
>>> ensure_class(1)
Traceback (most recent call last):
TypeError:
"""
if not inspect.isclass(obj):
raise TypeError("Expected class, got {}".format(obj))
def iter_ensure_class(iterable):
"""
Ensure every item in iterable is a class
>>> iter_ensure_class([object, object])
>>> iter_ensure_class([object, object()])
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_class(item) for item in iterable ]
def ensure_subclass(value, types):
"""
Ensure value is a subclass of types
>>> class Hello(object): pass
>>> ensure_subclass(Hello, Hello)
>>> ensure_subclass(object, Hello)
Traceback (most recent call last):
TypeError:
"""
ensure_class(value)
if not issubclass(value, types):
raise TypeError(
"expected subclass of {}, not {}".format(
types, value))
def ensure_instance(value, types):
"""
Ensure value is an instance of a certain type
>>> ensure_instance(1, [str])
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, str)
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, int)
>>> ensure_instance(1, (int, str))
:attr types: Type of list of types
"""
if not isinstance(value, types):
raise TypeError(
"expected instance of {}, got {}".format(
types, value))
def touch(path, times=None):
"""
Implements unix utility `touch`
XXX: Needs UT
:attr fname: File path
:attr times: See `os.utime()` for args
https://docs.python.org/3.4/library/os.html#os.utime
"""
with open(path, 'a'):
os.utime(path, times)
def import_recursive(path):
"""
Recursively import all modules and packages
Thanks http://stackoverflow.com/a/25562415/1267398
XXX: Needs UT
:attr path: Path to package/module
"""
results = {}
obj = importlib.import_module(path)
results[path] = obj
path = getattr(obj, '__path__', os.path.dirname(obj.__file__))
for loader, name, is_pkg in pkgutil.walk_packages(path):
full_name = obj.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if is_pkg:
results.update(import_recursive(full_name))
return results
def extend_instance(instance, *bases, **kwargs):
"""
Apply subclass (mixin) to a class object or its instance
By default, the mixin is placed at the start of bases
to ensure its called first as per MRO. If you wish to
have it injected last, which is useful for monkeypatching,
then you can specify 'last=True'. See here:
http://stackoverflow.com/a/10018792/1267398
:attr cls: Target object
:type cls: Class instance
:attr bases: List of new bases to subclass with
:attr last: Inject new bases after existing bases
:type last: bool
>>> class A(object): pass
>>> class B(object): pass
>>> a = A()
>>> b = B()
>>> isinstance(b, A)
False
>>> extend_instance(b, A)
>>> isinstance(b, A)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
assert not inspect.isclass(instance)
base_cls = instance.__class__
base_cls_name = instance.__class__.__name__
new_bases = (base_cls,)+bases if last else bases+(base_cls,)
new_cls = type(base_cls_name, tuple(new_bases), {})
setattr(instance, '__class__', new_cls)
def add_bases(cls, *bases):
"""
Add bases to class
>>> class Base(object): pass
>>> class A(Base): pass
>>> class B(Base): pass
>>> issubclass(A, B)
False
>>> add_bases(A, B)
>>> issubclass(A, B)
True
"""
assert inspect.isclass(cls), "Expected class object"
for mixin in bases:
assert inspect.isclass(mixin), "Expected class object for bases"
new_bases = (bases + cls.__bases__)
cls.__bases__ = new_bases
def subclass(cls, *bases, **kwargs):
"""
Add bases to class (late subclassing)
Annoyingly we cannot yet modify __bases__ of an existing
class, instead we must create another subclass, see here;
http://bugs.python.org/issue672115
>>> class A(object): pass
>>> class B(object): pass
>>> class C(object): pass
>>> issubclass(B, A)
False
>>> D = subclass(B, A)
>>> issubclass(D, A)
True
>>> issubclass(D, B)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
new_bases = (cls,)+bases if last else bases+(cls,)
new_cls = type(cls.__name__, tuple(new_bases), {})
return new_cls
def import_from_path(path):
"""
Imports a package, module or attribute from path
Thanks http://stackoverflow.com/a/14050282/1267398
>>> import_from_path('os.path')
<module 'posixpath' ...
>>> import_from_path('os.path.basename')
<function basename at ...
>>> import_from_path('os')
<module 'os' from ...
>>> import_from_path('getrektcunt')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc.dummylol')
Traceback (most recent call last):
ImportError:
"""
try:
return importlib.import_module(path)
except ImportError:
if '.' not in path:
raise
module_name, attr_name = path.rsplit('.', 1)
if not does_module_exist(module_name):
raise ImportError("No object found at '{}'".format(path))
mod = importlib.import_module(module_name)
if not hasattr(mod, attr_name):
raise ImportError("No object found at '{}'".format(path))
return getattr(mod, attr_name)
def does_module_exist(path):
"""
Check if Python module exists at path
>>> does_module_exist('os.path')
True
>>> does_module_exist('dummy.app')
False
"""
try:
importlib.import_module(path)
return True
except ImportError:
return False
def sort_dict_by_key(obj):
"""
Sort dict by its keys
>>> sort_dict_by_key(dict(c=1, b=2, a=3, d=4))
OrderedDict([('a', 3), ('b', 2), ('c', 1), ('d', 4)])
"""
sort_func = lambda x: x[0]
return OrderedDict(sorted(obj.items(), key=sort_func))
def generate_random_token(length=32):
"""
Generate random secure token
>>> len(generate_random_token())
32
>>> len(generate_random_token(6))
6
"""
chars = (string.ascii_lowercase + string.ascii_uppercase + string.digits)
return ''.join(random.choice(chars) for _ in range(length))
def default(*args, **kwargs):
"""
Return first argument which is "truthy"
>>> default(None, None, 1)
1
>>> default(None, None, 123)
123
>>> print(default(None, None))
None
"""
default = kwargs.get('default', None)
for arg in args:
if arg:
return arg
return default
def urljoin(*args):
"""
Joins given arguments into a url, removing duplicate slashes
Thanks http://stackoverflow.com/a/11326230/1267398
>>> urljoin('/lol', '///lol', '/lol//')
'/lol/lol/lol'
"""
value = "/".join(map(lambda x: str(x).strip('/'), args))
return "/{}".format(value)
def is_hex(value):
"""
Check if value is hex
>>> is_hex('abab')
True
>>> is_hex('gg')
False
"""
try:
int(value, 16)
except ValueError:
return False
else:
return True
def is_int(value):
"""
Check if value is an int
:type value: int, str, bytes, float, Decimal
>>> is_int(123), is_int('123'), is_int(Decimal('10'))
(True, True, True)
>>> is_int(1.1), is_int('1.1'), is_int(Decimal('10.1'))
(False, False, False)
>>> is_int(object)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(value, (int, str, bytes, float, Decimal))
if isinstance(value, int):
return True
elif isinstance(value, float):
return False
elif isinstance(value, Decimal):
return str(value).isdigit()
elif isinstance(value, (str, bytes)):
return value.isdigit()
raise ValueError() # pragma: nocover
def padded_split(value, sep, maxsplit=None, pad=None):
"""
Modified split() to include padding
See http://code.activestate.com/lists/python-ideas/3366/
:attr value: see str.split()
:attr sep: see str.split()
:attr maxsplit: see str.split()
:attr pad: Value to use for padding maxsplit
>>> padded_split('text/html', ';', 1)
['text/html', None]
>>> padded_split('text/html;q=1', ';', 1)
['text/html', 'q=1']
>>> padded_split('text/html;a=1;b=2', ';', 1)
['text/html', 'a=1;b=2']
>>> padded_split('text/html', ';', 1, True)
['text/html', True]
>>> padded_split('text/html;a=1;b=2', ';', 2)
['text/html', 'a=1', 'b=2']
>>> padded_split('text/html;a=1', ';', 2)
['text/html', 'a=1', None]
"""
result = value.split(sep, maxsplit)
if maxsplit is not None:
result.extend(
[pad] * (1+maxsplit-len(result)))
return result
def coerce_to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
"""
Coerce value to bytes
>>> a = coerce_to_bytes('hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(b'hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(None)
>>> assert a is None
>>> coerce_to_bytes(object())
Traceback (most recent call last):
...
TypeError: Cannot coerce to bytes
"""
PY2 = sys.version_info[0] == 2
if PY2: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
else: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
def get_exception():
"""
Workaround for the missing "as" keyword in py3k.
XXX: needs UT
"""
return sys.exc_info()[1]
def makelist(data):
"""
Thanks bottle
XXX: needs UT
"""
if isinstance(data, (list, set, tuple)):
return list(data)
elif data:
return [data]
else:
return []
def random_date_between(start_date, end_date):
"""Return random date between start/end"""
assert isinstance(start_date, datetime.date)
delta_secs = int((end_date - start_date).total_seconds())
delta = datetime.timedelta(seconds=random.randint(0, delta_secs))
return (start_date + delta)
def datetime_to_epoch(dt):
return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
class Tempfile(object):
"""
Tempfile wrapper with cleanup support
XXX: Needs UT
"""
def __init__(self):
self.paths = []
def mkstemp(self, *args, **kwargs):
path = tempfile.mkstemp(*args, **kwargs)
self.paths.append(path)
return path
def mkdtemp(self, *args, **kwargs):
path = tempfile.mkdtemp(*args, **kwargs)
self.paths.append(path)
return path
def cleanup(self):
"""Remove any created temp paths"""
for path in self.paths:
if isinstance(path, tuple):
os.close(path[0])
os.unlink(path[1])
else:
shutil.rmtree(path)
self.paths = []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.cleanup()
|
foxx/python-helpful
|
helpful.py
|
import_recursive
|
python
|
def import_recursive(path):
results = {}
obj = importlib.import_module(path)
results[path] = obj
path = getattr(obj, '__path__', os.path.dirname(obj.__file__))
for loader, name, is_pkg in pkgutil.walk_packages(path):
full_name = obj.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if is_pkg:
results.update(import_recursive(full_name))
return results
|
Recursively import all modules and packages
Thanks http://stackoverflow.com/a/25562415/1267398
XXX: Needs UT
:attr path: Path to package/module
|
train
|
https://github.com/foxx/python-helpful/blob/e31ad9bdf45051d8b9a0d1808d214e2293c3bbed/helpful.py#L224-L241
| null |
import six
import os
import shutil
import warnings
import tempfile
import importlib
import pkgutil
import inspect
import string
import random
import sys
import itertools
import datetime
from decimal import Decimal
from collections import OrderedDict, Iterable
if six.PY2: # pragma: nocover
text_type = unicode
string_types = (str, unicode)
else: # pragma: nocover
string_types = (str, )
text_type = str
NoneType = type(None)
###########################################################
# Mixins
###########################################################
class ClassDictMixin():
"""
Dict which can be accessed via class attributes
Thanks http://www.goodcode.io/blog/python-dict-object/
"""
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if name in self:
del self[name]
else:
raise AttributeError("No such attribute: " + name)
def copy(self):
# XXX: needs UT
return self.__class__(**self)
def unique_iter(seq):
"""
See http://www.peterbe.com/plog/uniqifiers-benchmark
Originally f8 written by Dave Kirby
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def flatteniter(iter_lst):
"""
>>> flatteniter([[1,2,3], [4,5,6]])
[1, 2, 3, 4, 5, 6]
"""
return list(itertools.chain(*iter_lst))
class HashableDictMixin(object):
def __hash__(self):
"""
This /should/ allow object to be hashable, for use in a set
XXX: Needs UT
Thanks Raymond @ http://stackoverflow.com/a/16162138/1267398
"""
return hash((frozenset(self), frozenset(self.values())))
###########################################################
# Hashable dict
###########################################################
class ClassDict(ClassDictMixin, dict):
"""
>>> d = ClassDict(hello="world")
>>> d.hello
'world'
>>> d.get('hello')
'world'
>>> d.hello = 'wtf'
>>> d.hello
'wtf'
>>> d['hello']
'wtf'
>>> d.world
Traceback (most recent call last):
AttributeError:
>>> del d.hello
>>> del d.world
Traceback (most recent call last):
AttributeError:
>>> d.hello = 1
>>> b = d.copy()
>>> b.hello = 2
>>> b.hello == d.hello
False
"""
class HashableDict(HashableDictMixin, dict):
"""
>>> hash(HashableDict(a=1, b=2)) is not None
True
"""
class HashableOrderedDict(HashableDictMixin, OrderedDict):
"""
>>> hash(HashableOrderedDict(a=1, b=2)) is not None
True
"""
def ensure_class(obj):
"""
Ensure object is a class
>>> ensure_class(object)
>>> ensure_class(object())
Traceback (most recent call last):
TypeError:
>>> ensure_class(1)
Traceback (most recent call last):
TypeError:
"""
if not inspect.isclass(obj):
raise TypeError("Expected class, got {}".format(obj))
def iter_ensure_class(iterable):
"""
Ensure every item in iterable is a class
>>> iter_ensure_class([object, object])
>>> iter_ensure_class([object, object()])
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_class(item) for item in iterable ]
def ensure_subclass(value, types):
"""
Ensure value is a subclass of types
>>> class Hello(object): pass
>>> ensure_subclass(Hello, Hello)
>>> ensure_subclass(object, Hello)
Traceback (most recent call last):
TypeError:
"""
ensure_class(value)
if not issubclass(value, types):
raise TypeError(
"expected subclass of {}, not {}".format(
types, value))
def ensure_instance(value, types):
"""
Ensure value is an instance of a certain type
>>> ensure_instance(1, [str])
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, str)
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, int)
>>> ensure_instance(1, (int, str))
:attr types: Type of list of types
"""
if not isinstance(value, types):
raise TypeError(
"expected instance of {}, got {}".format(
types, value))
def iter_ensure_instance(iterable, types):
"""
Iterate over object and check each item type
>>> iter_ensure_instance([1,2,3], [str])
Traceback (most recent call last):
TypeError:
>>> iter_ensure_instance([1,2,3], int)
>>> iter_ensure_instance(1, int)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_instance(item, types) for item in iterable ]
def touch(path, times=None):
"""
Implements unix utility `touch`
XXX: Needs UT
:attr fname: File path
:attr times: See `os.utime()` for args
https://docs.python.org/3.4/library/os.html#os.utime
"""
with open(path, 'a'):
os.utime(path, times)
def extend_instance(instance, *bases, **kwargs):
"""
Apply subclass (mixin) to a class object or its instance
By default, the mixin is placed at the start of bases
to ensure its called first as per MRO. If you wish to
have it injected last, which is useful for monkeypatching,
then you can specify 'last=True'. See here:
http://stackoverflow.com/a/10018792/1267398
:attr cls: Target object
:type cls: Class instance
:attr bases: List of new bases to subclass with
:attr last: Inject new bases after existing bases
:type last: bool
>>> class A(object): pass
>>> class B(object): pass
>>> a = A()
>>> b = B()
>>> isinstance(b, A)
False
>>> extend_instance(b, A)
>>> isinstance(b, A)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
assert not inspect.isclass(instance)
base_cls = instance.__class__
base_cls_name = instance.__class__.__name__
new_bases = (base_cls,)+bases if last else bases+(base_cls,)
new_cls = type(base_cls_name, tuple(new_bases), {})
setattr(instance, '__class__', new_cls)
def add_bases(cls, *bases):
"""
Add bases to class
>>> class Base(object): pass
>>> class A(Base): pass
>>> class B(Base): pass
>>> issubclass(A, B)
False
>>> add_bases(A, B)
>>> issubclass(A, B)
True
"""
assert inspect.isclass(cls), "Expected class object"
for mixin in bases:
assert inspect.isclass(mixin), "Expected class object for bases"
new_bases = (bases + cls.__bases__)
cls.__bases__ = new_bases
def subclass(cls, *bases, **kwargs):
"""
Add bases to class (late subclassing)
Annoyingly we cannot yet modify __bases__ of an existing
class, instead we must create another subclass, see here;
http://bugs.python.org/issue672115
>>> class A(object): pass
>>> class B(object): pass
>>> class C(object): pass
>>> issubclass(B, A)
False
>>> D = subclass(B, A)
>>> issubclass(D, A)
True
>>> issubclass(D, B)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
new_bases = (cls,)+bases if last else bases+(cls,)
new_cls = type(cls.__name__, tuple(new_bases), {})
return new_cls
def import_from_path(path):
"""
Imports a package, module or attribute from path
Thanks http://stackoverflow.com/a/14050282/1267398
>>> import_from_path('os.path')
<module 'posixpath' ...
>>> import_from_path('os.path.basename')
<function basename at ...
>>> import_from_path('os')
<module 'os' from ...
>>> import_from_path('getrektcunt')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc.dummylol')
Traceback (most recent call last):
ImportError:
"""
try:
return importlib.import_module(path)
except ImportError:
if '.' not in path:
raise
module_name, attr_name = path.rsplit('.', 1)
if not does_module_exist(module_name):
raise ImportError("No object found at '{}'".format(path))
mod = importlib.import_module(module_name)
if not hasattr(mod, attr_name):
raise ImportError("No object found at '{}'".format(path))
return getattr(mod, attr_name)
def does_module_exist(path):
"""
Check if Python module exists at path
>>> does_module_exist('os.path')
True
>>> does_module_exist('dummy.app')
False
"""
try:
importlib.import_module(path)
return True
except ImportError:
return False
def sort_dict_by_key(obj):
"""
Sort dict by its keys
>>> sort_dict_by_key(dict(c=1, b=2, a=3, d=4))
OrderedDict([('a', 3), ('b', 2), ('c', 1), ('d', 4)])
"""
sort_func = lambda x: x[0]
return OrderedDict(sorted(obj.items(), key=sort_func))
def generate_random_token(length=32):
"""
Generate random secure token
>>> len(generate_random_token())
32
>>> len(generate_random_token(6))
6
"""
chars = (string.ascii_lowercase + string.ascii_uppercase + string.digits)
return ''.join(random.choice(chars) for _ in range(length))
def default(*args, **kwargs):
"""
Return first argument which is "truthy"
>>> default(None, None, 1)
1
>>> default(None, None, 123)
123
>>> print(default(None, None))
None
"""
default = kwargs.get('default', None)
for arg in args:
if arg:
return arg
return default
def urljoin(*args):
"""
Joins given arguments into a url, removing duplicate slashes
Thanks http://stackoverflow.com/a/11326230/1267398
>>> urljoin('/lol', '///lol', '/lol//')
'/lol/lol/lol'
"""
value = "/".join(map(lambda x: str(x).strip('/'), args))
return "/{}".format(value)
def is_hex(value):
"""
Check if value is hex
>>> is_hex('abab')
True
>>> is_hex('gg')
False
"""
try:
int(value, 16)
except ValueError:
return False
else:
return True
def is_int(value):
"""
Check if value is an int
:type value: int, str, bytes, float, Decimal
>>> is_int(123), is_int('123'), is_int(Decimal('10'))
(True, True, True)
>>> is_int(1.1), is_int('1.1'), is_int(Decimal('10.1'))
(False, False, False)
>>> is_int(object)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(value, (int, str, bytes, float, Decimal))
if isinstance(value, int):
return True
elif isinstance(value, float):
return False
elif isinstance(value, Decimal):
return str(value).isdigit()
elif isinstance(value, (str, bytes)):
return value.isdigit()
raise ValueError() # pragma: nocover
def padded_split(value, sep, maxsplit=None, pad=None):
"""
Modified split() to include padding
See http://code.activestate.com/lists/python-ideas/3366/
:attr value: see str.split()
:attr sep: see str.split()
:attr maxsplit: see str.split()
:attr pad: Value to use for padding maxsplit
>>> padded_split('text/html', ';', 1)
['text/html', None]
>>> padded_split('text/html;q=1', ';', 1)
['text/html', 'q=1']
>>> padded_split('text/html;a=1;b=2', ';', 1)
['text/html', 'a=1;b=2']
>>> padded_split('text/html', ';', 1, True)
['text/html', True]
>>> padded_split('text/html;a=1;b=2', ';', 2)
['text/html', 'a=1', 'b=2']
>>> padded_split('text/html;a=1', ';', 2)
['text/html', 'a=1', None]
"""
result = value.split(sep, maxsplit)
if maxsplit is not None:
result.extend(
[pad] * (1+maxsplit-len(result)))
return result
def coerce_to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
"""
Coerce value to bytes
>>> a = coerce_to_bytes('hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(b'hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(None)
>>> assert a is None
>>> coerce_to_bytes(object())
Traceback (most recent call last):
...
TypeError: Cannot coerce to bytes
"""
PY2 = sys.version_info[0] == 2
if PY2: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
else: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
def get_exception():
"""
Workaround for the missing "as" keyword in py3k.
XXX: needs UT
"""
return sys.exc_info()[1]
def makelist(data):
"""
Thanks bottle
XXX: needs UT
"""
if isinstance(data, (list, set, tuple)):
return list(data)
elif data:
return [data]
else:
return []
def random_date_between(start_date, end_date):
"""Return random date between start/end"""
assert isinstance(start_date, datetime.date)
delta_secs = int((end_date - start_date).total_seconds())
delta = datetime.timedelta(seconds=random.randint(0, delta_secs))
return (start_date + delta)
def datetime_to_epoch(dt):
return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
class Tempfile(object):
"""
Tempfile wrapper with cleanup support
XXX: Needs UT
"""
def __init__(self):
self.paths = []
def mkstemp(self, *args, **kwargs):
path = tempfile.mkstemp(*args, **kwargs)
self.paths.append(path)
return path
def mkdtemp(self, *args, **kwargs):
path = tempfile.mkdtemp(*args, **kwargs)
self.paths.append(path)
return path
def cleanup(self):
"""Remove any created temp paths"""
for path in self.paths:
if isinstance(path, tuple):
os.close(path[0])
os.unlink(path[1])
else:
shutil.rmtree(path)
self.paths = []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.cleanup()
|
foxx/python-helpful
|
helpful.py
|
extend_instance
|
python
|
def extend_instance(instance, *bases, **kwargs):
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
assert not inspect.isclass(instance)
base_cls = instance.__class__
base_cls_name = instance.__class__.__name__
new_bases = (base_cls,)+bases if last else bases+(base_cls,)
new_cls = type(base_cls_name, tuple(new_bases), {})
setattr(instance, '__class__', new_cls)
|
Apply subclass (mixin) to a class object or its instance
By default, the mixin is placed at the start of bases
to ensure its called first as per MRO. If you wish to
have it injected last, which is useful for monkeypatching,
then you can specify 'last=True'. See here:
http://stackoverflow.com/a/10018792/1267398
:attr cls: Target object
:type cls: Class instance
:attr bases: List of new bases to subclass with
:attr last: Inject new bases after existing bases
:type last: bool
>>> class A(object): pass
>>> class B(object): pass
>>> a = A()
>>> b = B()
>>> isinstance(b, A)
False
>>> extend_instance(b, A)
>>> isinstance(b, A)
True
|
train
|
https://github.com/foxx/python-helpful/blob/e31ad9bdf45051d8b9a0d1808d214e2293c3bbed/helpful.py#L244-L281
| null |
import six
import os
import shutil
import warnings
import tempfile
import importlib
import pkgutil
import inspect
import string
import random
import sys
import itertools
import datetime
from decimal import Decimal
from collections import OrderedDict, Iterable
if six.PY2: # pragma: nocover
text_type = unicode
string_types = (str, unicode)
else: # pragma: nocover
string_types = (str, )
text_type = str
NoneType = type(None)
###########################################################
# Mixins
###########################################################
class ClassDictMixin():
"""
Dict which can be accessed via class attributes
Thanks http://www.goodcode.io/blog/python-dict-object/
"""
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if name in self:
del self[name]
else:
raise AttributeError("No such attribute: " + name)
def copy(self):
# XXX: needs UT
return self.__class__(**self)
def unique_iter(seq):
"""
See http://www.peterbe.com/plog/uniqifiers-benchmark
Originally f8 written by Dave Kirby
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def flatteniter(iter_lst):
"""
>>> flatteniter([[1,2,3], [4,5,6]])
[1, 2, 3, 4, 5, 6]
"""
return list(itertools.chain(*iter_lst))
class HashableDictMixin(object):
def __hash__(self):
"""
This /should/ allow object to be hashable, for use in a set
XXX: Needs UT
Thanks Raymond @ http://stackoverflow.com/a/16162138/1267398
"""
return hash((frozenset(self), frozenset(self.values())))
###########################################################
# Hashable dict
###########################################################
class ClassDict(ClassDictMixin, dict):
"""
>>> d = ClassDict(hello="world")
>>> d.hello
'world'
>>> d.get('hello')
'world'
>>> d.hello = 'wtf'
>>> d.hello
'wtf'
>>> d['hello']
'wtf'
>>> d.world
Traceback (most recent call last):
AttributeError:
>>> del d.hello
>>> del d.world
Traceback (most recent call last):
AttributeError:
>>> d.hello = 1
>>> b = d.copy()
>>> b.hello = 2
>>> b.hello == d.hello
False
"""
class HashableDict(HashableDictMixin, dict):
"""
>>> hash(HashableDict(a=1, b=2)) is not None
True
"""
class HashableOrderedDict(HashableDictMixin, OrderedDict):
"""
>>> hash(HashableOrderedDict(a=1, b=2)) is not None
True
"""
def ensure_class(obj):
"""
Ensure object is a class
>>> ensure_class(object)
>>> ensure_class(object())
Traceback (most recent call last):
TypeError:
>>> ensure_class(1)
Traceback (most recent call last):
TypeError:
"""
if not inspect.isclass(obj):
raise TypeError("Expected class, got {}".format(obj))
def iter_ensure_class(iterable):
"""
Ensure every item in iterable is a class
>>> iter_ensure_class([object, object])
>>> iter_ensure_class([object, object()])
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_class(item) for item in iterable ]
def ensure_subclass(value, types):
"""
Ensure value is a subclass of types
>>> class Hello(object): pass
>>> ensure_subclass(Hello, Hello)
>>> ensure_subclass(object, Hello)
Traceback (most recent call last):
TypeError:
"""
ensure_class(value)
if not issubclass(value, types):
raise TypeError(
"expected subclass of {}, not {}".format(
types, value))
def ensure_instance(value, types):
"""
Ensure value is an instance of a certain type
>>> ensure_instance(1, [str])
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, str)
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, int)
>>> ensure_instance(1, (int, str))
:attr types: Type of list of types
"""
if not isinstance(value, types):
raise TypeError(
"expected instance of {}, got {}".format(
types, value))
def iter_ensure_instance(iterable, types):
    """
    Iterate over object and check each item type
    :attr iterable: any iterable of candidate values
    :attr types: type or tuple of types each item must satisfy
    :raises TypeError: if iterable is not Iterable, or any item fails the check
    >>> iter_ensure_instance([1,2,3], [str])
    Traceback (most recent call last):
    TypeError:
    >>> iter_ensure_instance([1,2,3], int)
    >>> iter_ensure_instance(1, int)
    Traceback (most recent call last):
    TypeError:
    """
    ensure_instance(iterable, Iterable)
    # List comprehension used purely for its validation side effect.
    [ ensure_instance(item, types) for item in iterable ]
def touch(path, times=None):
    """
    Implements unix utility `touch`
    Creates the file if missing (append-mode open) and updates its
    access/modification timestamps.
    XXX: Needs UT
    :attr path: File path
    :attr times: (atime, mtime) tuple; see `os.utime()` for args
    https://docs.python.org/3.4/library/os.html#os.utime
    """
    # Append mode creates the file without truncating existing content.
    with open(path, 'a'):
        os.utime(path, times)
def import_recursive(path):
    """
    Recursively import all modules and packages
    Thanks http://stackoverflow.com/a/25562415/1267398
    XXX: Needs UT
    :attr path: Path to package/module
    :returns: dict mapping dotted names to imported module objects
    """
    results = {}
    obj = importlib.import_module(path)
    results[path] = obj
    # Packages expose __path__ (a list of dirs); plain modules fall back
    # to their containing directory.
    # NOTE(review): pkgutil.walk_packages() expects an iterable of paths;
    # the plain-module fallback passes a single string, which would be
    # iterated per character -- confirm this is only called with packages.
    path = getattr(obj, '__path__', os.path.dirname(obj.__file__))
    for loader, name, is_pkg in pkgutil.walk_packages(path):
        full_name = obj.__name__ + '.' + name
        results[full_name] = importlib.import_module(full_name)
        if is_pkg:
            # Descend into sub-packages and merge their results.
            results.update(import_recursive(full_name))
    return results
def add_bases(cls, *bases):
    """
    Add bases to class
    >>> class Base(object): pass
    >>> class A(Base): pass
    >>> class B(Base): pass
    >>> issubclass(A, B)
    False
    >>> add_bases(A, B)
    >>> issubclass(A, B)
    True
    """
    assert inspect.isclass(cls), "Expected class object"
    for mixin in bases:
        assert inspect.isclass(mixin), "Expected class object for bases"
    # New bases are prepended so they take MRO precedence over existing ones.
    new_bases = (bases + cls.__bases__)
    # NOTE(review): CPython rejects __bases__ reassignment for some class
    # layouts (see bugs.python.org/issue672115) -- confirm all targets here
    # are compatible heap types.
    cls.__bases__ = new_bases
def subclass(cls, *bases, **kwargs):
    """Create a new subclass combining *cls* with *bases* (late subclassing).

    __bases__ of an existing class cannot always be reassigned
    (http://bugs.python.org/issue672115), so a fresh class with the same
    name is created instead.

    :attr cls: class to extend
    :attr bases: additional base classes
    :attr last: if True, append *bases* after *cls* in the MRO

    >>> class A(object): pass
    >>> class B(object): pass
    >>> D = subclass(B, A)
    >>> issubclass(D, A) and issubclass(D, B)
    True
    """
    append_last = kwargs.get('last', False)
    extra = tuple(bases)
    for candidate in extra:
        assert inspect.isclass(candidate), "bases must be classes"
    ordering = (cls,) + extra if append_last else extra + (cls,)
    return type(cls.__name__, tuple(ordering), {})
def import_from_path(path):
    """
    Imports a package, module or attribute from path
    Thanks http://stackoverflow.com/a/14050282/1267398
    >>> import_from_path('os.path')
    <module 'posixpath' ...
    >>> import_from_path('os.path.basename')
    <function basename at ...
    >>> import_from_path('os')
    <module 'os' from ...
    >>> import_from_path('getrektcunt')
    Traceback (most recent call last):
    ImportError:
    >>> import_from_path('os.dummyfunc')
    Traceback (most recent call last):
    ImportError:
    >>> import_from_path('os.dummyfunc.dummylol')
    Traceback (most recent call last):
    ImportError:
    """
    try:
        # Fast path: the whole dotted path names a module/package.
        return importlib.import_module(path)
    except ImportError:
        # Bare name that failed to import: nothing more to try.
        if '.' not in path:
            raise
        # Otherwise treat the final component as an attribute of the
        # parent module, e.g. 'os.path.basename'.
        module_name, attr_name = path.rsplit('.', 1)
        if not does_module_exist(module_name):
            raise ImportError("No object found at '{}'".format(path))
        mod = importlib.import_module(module_name)
        if not hasattr(mod, attr_name):
            raise ImportError("No object found at '{}'".format(path))
        return getattr(mod, attr_name)
def does_module_exist(path):
    """Return True if a Python module is importable at *path*.

    >>> does_module_exist('os.path')
    True
    >>> does_module_exist('dummy.app')
    False
    """
    try:
        importlib.import_module(path)
    except ImportError:
        return False
    return True
def sort_dict_by_key(obj):
    """Return an OrderedDict of *obj*'s items, sorted by key.

    >>> sort_dict_by_key(dict(c=1, b=2, a=3, d=4))
    OrderedDict([('a', 3), ('b', 2), ('c', 1), ('d', 4)])
    """
    return OrderedDict(
        (key, obj[key]) for key in sorted(obj))
def generate_random_token(length=32):
    """
    Generate random secure token
    Uses random.SystemRandom (OS entropy) rather than the module-level
    random functions: the default Mersenne-Twister PRNG is predictable
    and unsuitable for secrets despite the "secure" contract above.
    :attr length: number of alphanumeric characters in the token
    >>> len(generate_random_token())
    32
    >>> len(generate_random_token(6))
    6
    """
    chars = (string.ascii_lowercase + string.ascii_uppercase + string.digits)
    rng = random.SystemRandom()
    return ''.join(rng.choice(chars) for _ in range(length))
def default(*args, **kwargs):
    """Return the first truthy argument, or the fallback if none is truthy.

    :attr default: keyword-only fallback value (defaults to None)

    >>> default(None, None, 1)
    1
    >>> default(None, None, 123)
    123
    >>> print(default(None, None))
    None
    """
    fallback = kwargs.get('default', None)
    truthy = [arg for arg in args if arg]
    return truthy[0] if truthy else fallback
def urljoin(*args):
    """Join arguments into a rooted URL path, collapsing duplicate slashes.

    Thanks http://stackoverflow.com/a/11326230/1267398

    >>> urljoin('/lol', '///lol', '/lol//')
    '/lol/lol/lol'
    """
    parts = [str(arg).strip('/') for arg in args]
    return "/{}".format("/".join(parts))
def is_hex(value):
    """Return True if *value* parses as a base-16 integer.

    >>> is_hex('abab')
    True
    >>> is_hex('gg')
    False
    """
    try:
        int(value, 16)
        return True
    except ValueError:
        return False
def is_int(value):
    """Return True if *value* represents a whole number.

    :type value: int, str, bytes, float, Decimal
    :raises TypeError: for any other input type

    >>> is_int(123), is_int('123'), is_int(Decimal('10'))
    (True, True, True)
    >>> is_int(1.1), is_int('1.1'), is_int(Decimal('10.1'))
    (False, False, False)
    >>> is_int(object)
    Traceback (most recent call last):
    TypeError:
    """
    allowed = (int, str, bytes, float, Decimal)
    if not isinstance(value, allowed):
        raise TypeError(
            "expected instance of {}, got {}".format(allowed, value))
    if isinstance(value, (str, bytes)):
        return value.isdigit()
    if isinstance(value, Decimal):
        return str(value).isdigit()
    if isinstance(value, float):
        return False
    # Remaining case: plain int (or bool, an int subclass).
    return True
def padded_split(value, sep, maxsplit=None, pad=None):
    """Like str.split(), padding the result out to maxsplit+1 items.

    See http://code.activestate.com/lists/python-ideas/3366/

    :attr value: see str.split()
    :attr sep: see str.split()
    :attr maxsplit: see str.split()
    :attr pad: filler used when fewer than maxsplit splits occur

    >>> padded_split('text/html', ';', 1)
    ['text/html', None]
    >>> padded_split('text/html;q=1', ';', 1)
    ['text/html', 'q=1']
    >>> padded_split('text/html', ';', 1, True)
    ['text/html', True]
    >>> padded_split('text/html;a=1', ';', 2)
    ['text/html', 'a=1', None]
    """
    pieces = value.split(sep, maxsplit)
    if maxsplit is None:
        return pieces
    missing = (1 + maxsplit) - len(pieces)
    pieces.extend([pad] * missing)
    return pieces
def coerce_to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
    """
    Coerce value to bytes
    >>> a = coerce_to_bytes('hello')
    >>> assert isinstance(a, bytes)
    >>> a = coerce_to_bytes(b'hello')
    >>> assert isinstance(a, bytes)
    >>> a = coerce_to_bytes(None)
    >>> assert a is None
    >>> coerce_to_bytes(object())
    Traceback (most recent call last):
    ...
    TypeError: Cannot coerce to bytes
    :attr x: value to coerce; None passes through unchanged
    :attr charset: encoding for text input (default bound once at import time)
    :attr errors: encode error policy, see str.encode()
    """
    PY2 = sys.version_info[0] == 2
    if PY2: # pragma: nocover
        if x is None:
            return None
        # buffer/unicode builtins only exist on Python 2.
        if isinstance(x, (bytes, bytearray, buffer)):
            return bytes(x)
        if isinstance(x, unicode):
            return x.encode(charset, errors)
        raise TypeError('Cannot coerce to bytes')
    else: # pragma: nocover
        if x is None:
            return None
        # memoryview replaces buffer on Python 3; bytes() copies the view.
        if isinstance(x, (bytes, bytearray, memoryview)):
            return bytes(x)
        if isinstance(x, str):
            return x.encode(charset, errors)
        raise TypeError('Cannot coerce to bytes')
def get_exception():
    """
    Return the exception instance currently being handled.
    Workaround for the missing "as" keyword in py3k.
    XXX: needs UT
    """
    # sys.exc_info() -> (type, value, traceback); index 1 is the instance.
    return sys.exc_info()[1]
def makelist(data):
    """Coerce *data* into a list.

    Lists, sets and tuples convert directly; any other truthy value is
    wrapped in a one-item list; falsy values (including 0 and '') give [].
    Thanks bottle
    XXX: needs UT
    """
    if isinstance(data, (list, set, tuple)):
        return list(data)
    return [data] if data else []
def random_date_between(start_date, end_date):
    """Return a random date in the interval [start_date, end_date]."""
    assert isinstance(start_date, datetime.date)
    span_seconds = int((end_date - start_date).total_seconds())
    offset = datetime.timedelta(seconds=random.randint(0, span_seconds))
    return start_date + offset
def datetime_to_epoch(dt):
    """Return seconds between *dt* and the Unix epoch (1970-01-01).

    NOTE(review): the epoch reference is naive, so tz-aware datetimes
    would raise TypeError on subtraction -- confirm callers pass naive UTC.
    """
    return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
class Tempfile(object):
    """
    Tempfile wrapper with cleanup support
    Tracks every path handed out by mkstemp()/mkdtemp() so all of them
    can be removed with one cleanup() call (also run on context exit).
    XXX: Needs UT
    """
    def __init__(self):
        # mkstemp() entries are (fd, path) tuples; mkdtemp() entries are
        # directory path strings.
        self.paths = []
    def mkstemp(self, *args, **kwargs):
        """Create a tracked temp file; returns (fd, path), see tempfile.mkstemp."""
        path = tempfile.mkstemp(*args, **kwargs)
        self.paths.append(path)
        return path
    def mkdtemp(self, *args, **kwargs):
        """Create a tracked temp directory; returns its path, see tempfile.mkdtemp."""
        path = tempfile.mkdtemp(*args, **kwargs)
        self.paths.append(path)
        return path
    def cleanup(self):
        """Remove any created temp paths"""
        for path in self.paths:
            if isinstance(path, tuple):
                # (fd, path) from mkstemp: close the descriptor before unlink.
                os.close(path[0])
                os.unlink(path[1])
            else:
                shutil.rmtree(path)
        self.paths = []
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        # Always clean up, even when the with-block raised.
        self.cleanup()
|
foxx/python-helpful
|
helpful.py
|
add_bases
|
python
|
def add_bases(cls, *bases):
assert inspect.isclass(cls), "Expected class object"
for mixin in bases:
assert inspect.isclass(mixin), "Expected class object for bases"
new_bases = (bases + cls.__bases__)
cls.__bases__ = new_bases
|
Add bases to class
>>> class Base(object): pass
>>> class A(Base): pass
>>> class B(Base): pass
>>> issubclass(A, B)
False
>>> add_bases(A, B)
>>> issubclass(A, B)
True
|
train
|
https://github.com/foxx/python-helpful/blob/e31ad9bdf45051d8b9a0d1808d214e2293c3bbed/helpful.py#L284-L301
| null |
import six
import os
import shutil
import warnings
import tempfile
import importlib
import pkgutil
import inspect
import string
import random
import sys
import itertools
import datetime
from decimal import Decimal
from collections import OrderedDict, Iterable
if six.PY2: # pragma: nocover
text_type = unicode
string_types = (str, unicode)
else: # pragma: nocover
string_types = (str, )
text_type = str
NoneType = type(None)
###########################################################
# Mixins
###########################################################
class ClassDictMixin():
"""
Dict which can be accessed via class attributes
Thanks http://www.goodcode.io/blog/python-dict-object/
"""
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if name in self:
del self[name]
else:
raise AttributeError("No such attribute: " + name)
def copy(self):
# XXX: needs UT
return self.__class__(**self)
def unique_iter(seq):
"""
See http://www.peterbe.com/plog/uniqifiers-benchmark
Originally f8 written by Dave Kirby
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def flatteniter(iter_lst):
"""
>>> flatteniter([[1,2,3], [4,5,6]])
[1, 2, 3, 4, 5, 6]
"""
return list(itertools.chain(*iter_lst))
class HashableDictMixin(object):
def __hash__(self):
"""
This /should/ allow object to be hashable, for use in a set
XXX: Needs UT
Thanks Raymond @ http://stackoverflow.com/a/16162138/1267398
"""
return hash((frozenset(self), frozenset(self.values())))
###########################################################
# Hashable dict
###########################################################
class ClassDict(ClassDictMixin, dict):
"""
>>> d = ClassDict(hello="world")
>>> d.hello
'world'
>>> d.get('hello')
'world'
>>> d.hello = 'wtf'
>>> d.hello
'wtf'
>>> d['hello']
'wtf'
>>> d.world
Traceback (most recent call last):
AttributeError:
>>> del d.hello
>>> del d.world
Traceback (most recent call last):
AttributeError:
>>> d.hello = 1
>>> b = d.copy()
>>> b.hello = 2
>>> b.hello == d.hello
False
"""
class HashableDict(HashableDictMixin, dict):
"""
>>> hash(HashableDict(a=1, b=2)) is not None
True
"""
class HashableOrderedDict(HashableDictMixin, OrderedDict):
"""
>>> hash(HashableOrderedDict(a=1, b=2)) is not None
True
"""
def ensure_class(obj):
"""
Ensure object is a class
>>> ensure_class(object)
>>> ensure_class(object())
Traceback (most recent call last):
TypeError:
>>> ensure_class(1)
Traceback (most recent call last):
TypeError:
"""
if not inspect.isclass(obj):
raise TypeError("Expected class, got {}".format(obj))
def iter_ensure_class(iterable):
"""
Ensure every item in iterable is a class
>>> iter_ensure_class([object, object])
>>> iter_ensure_class([object, object()])
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_class(item) for item in iterable ]
def ensure_subclass(value, types):
"""
Ensure value is a subclass of types
>>> class Hello(object): pass
>>> ensure_subclass(Hello, Hello)
>>> ensure_subclass(object, Hello)
Traceback (most recent call last):
TypeError:
"""
ensure_class(value)
if not issubclass(value, types):
raise TypeError(
"expected subclass of {}, not {}".format(
types, value))
def ensure_instance(value, types):
"""
Ensure value is an instance of a certain type
>>> ensure_instance(1, [str])
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, str)
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, int)
>>> ensure_instance(1, (int, str))
:attr types: Type of list of types
"""
if not isinstance(value, types):
raise TypeError(
"expected instance of {}, got {}".format(
types, value))
def iter_ensure_instance(iterable, types):
"""
Iterate over object and check each item type
>>> iter_ensure_instance([1,2,3], [str])
Traceback (most recent call last):
TypeError:
>>> iter_ensure_instance([1,2,3], int)
>>> iter_ensure_instance(1, int)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_instance(item, types) for item in iterable ]
def touch(path, times=None):
"""
Implements unix utility `touch`
XXX: Needs UT
:attr fname: File path
:attr times: See `os.utime()` for args
https://docs.python.org/3.4/library/os.html#os.utime
"""
with open(path, 'a'):
os.utime(path, times)
def import_recursive(path):
"""
Recursively import all modules and packages
Thanks http://stackoverflow.com/a/25562415/1267398
XXX: Needs UT
:attr path: Path to package/module
"""
results = {}
obj = importlib.import_module(path)
results[path] = obj
path = getattr(obj, '__path__', os.path.dirname(obj.__file__))
for loader, name, is_pkg in pkgutil.walk_packages(path):
full_name = obj.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if is_pkg:
results.update(import_recursive(full_name))
return results
def extend_instance(instance, *bases, **kwargs):
    """Mix *bases* into the class of an existing *instance*, in place.

    A new class combining the instance's current class with *bases* is
    created and swapped in via ``__class__`` assignment, so the instance
    gains the mixin behaviour without being recreated.
    See http://stackoverflow.com/a/10018792/1267398

    :attr instance: object to extend (must not itself be a class)
    :attr bases: classes to mix in
    :attr last: if True, place *bases* after the current class in the MRO

    >>> class A(object): pass
    >>> class B(object): pass
    >>> b = B()
    >>> extend_instance(b, A)
    >>> isinstance(b, A)
    True
    """
    append_last = kwargs.get('last', False)
    extra = tuple(bases)
    for candidate in extra:
        assert inspect.isclass(candidate), "bases must be classes"
    assert not inspect.isclass(instance)
    current = instance.__class__
    ordering = (current,) + extra if append_last else extra + (current,)
    instance.__class__ = type(current.__name__, tuple(ordering), {})
def subclass(cls, *bases, **kwargs):
"""
Add bases to class (late subclassing)
Annoyingly we cannot yet modify __bases__ of an existing
class, instead we must create another subclass, see here;
http://bugs.python.org/issue672115
>>> class A(object): pass
>>> class B(object): pass
>>> class C(object): pass
>>> issubclass(B, A)
False
>>> D = subclass(B, A)
>>> issubclass(D, A)
True
>>> issubclass(D, B)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
new_bases = (cls,)+bases if last else bases+(cls,)
new_cls = type(cls.__name__, tuple(new_bases), {})
return new_cls
def import_from_path(path):
"""
Imports a package, module or attribute from path
Thanks http://stackoverflow.com/a/14050282/1267398
>>> import_from_path('os.path')
<module 'posixpath' ...
>>> import_from_path('os.path.basename')
<function basename at ...
>>> import_from_path('os')
<module 'os' from ...
>>> import_from_path('getrektcunt')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc.dummylol')
Traceback (most recent call last):
ImportError:
"""
try:
return importlib.import_module(path)
except ImportError:
if '.' not in path:
raise
module_name, attr_name = path.rsplit('.', 1)
if not does_module_exist(module_name):
raise ImportError("No object found at '{}'".format(path))
mod = importlib.import_module(module_name)
if not hasattr(mod, attr_name):
raise ImportError("No object found at '{}'".format(path))
return getattr(mod, attr_name)
def does_module_exist(path):
"""
Check if Python module exists at path
>>> does_module_exist('os.path')
True
>>> does_module_exist('dummy.app')
False
"""
try:
importlib.import_module(path)
return True
except ImportError:
return False
def sort_dict_by_key(obj):
"""
Sort dict by its keys
>>> sort_dict_by_key(dict(c=1, b=2, a=3, d=4))
OrderedDict([('a', 3), ('b', 2), ('c', 1), ('d', 4)])
"""
sort_func = lambda x: x[0]
return OrderedDict(sorted(obj.items(), key=sort_func))
def generate_random_token(length=32):
"""
Generate random secure token
>>> len(generate_random_token())
32
>>> len(generate_random_token(6))
6
"""
chars = (string.ascii_lowercase + string.ascii_uppercase + string.digits)
return ''.join(random.choice(chars) for _ in range(length))
def default(*args, **kwargs):
"""
Return first argument which is "truthy"
>>> default(None, None, 1)
1
>>> default(None, None, 123)
123
>>> print(default(None, None))
None
"""
default = kwargs.get('default', None)
for arg in args:
if arg:
return arg
return default
def urljoin(*args):
"""
Joins given arguments into a url, removing duplicate slashes
Thanks http://stackoverflow.com/a/11326230/1267398
>>> urljoin('/lol', '///lol', '/lol//')
'/lol/lol/lol'
"""
value = "/".join(map(lambda x: str(x).strip('/'), args))
return "/{}".format(value)
def is_hex(value):
"""
Check if value is hex
>>> is_hex('abab')
True
>>> is_hex('gg')
False
"""
try:
int(value, 16)
except ValueError:
return False
else:
return True
def is_int(value):
"""
Check if value is an int
:type value: int, str, bytes, float, Decimal
>>> is_int(123), is_int('123'), is_int(Decimal('10'))
(True, True, True)
>>> is_int(1.1), is_int('1.1'), is_int(Decimal('10.1'))
(False, False, False)
>>> is_int(object)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(value, (int, str, bytes, float, Decimal))
if isinstance(value, int):
return True
elif isinstance(value, float):
return False
elif isinstance(value, Decimal):
return str(value).isdigit()
elif isinstance(value, (str, bytes)):
return value.isdigit()
raise ValueError() # pragma: nocover
def padded_split(value, sep, maxsplit=None, pad=None):
"""
Modified split() to include padding
See http://code.activestate.com/lists/python-ideas/3366/
:attr value: see str.split()
:attr sep: see str.split()
:attr maxsplit: see str.split()
:attr pad: Value to use for padding maxsplit
>>> padded_split('text/html', ';', 1)
['text/html', None]
>>> padded_split('text/html;q=1', ';', 1)
['text/html', 'q=1']
>>> padded_split('text/html;a=1;b=2', ';', 1)
['text/html', 'a=1;b=2']
>>> padded_split('text/html', ';', 1, True)
['text/html', True]
>>> padded_split('text/html;a=1;b=2', ';', 2)
['text/html', 'a=1', 'b=2']
>>> padded_split('text/html;a=1', ';', 2)
['text/html', 'a=1', None]
"""
result = value.split(sep, maxsplit)
if maxsplit is not None:
result.extend(
[pad] * (1+maxsplit-len(result)))
return result
def coerce_to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
"""
Coerce value to bytes
>>> a = coerce_to_bytes('hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(b'hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(None)
>>> assert a is None
>>> coerce_to_bytes(object())
Traceback (most recent call last):
...
TypeError: Cannot coerce to bytes
"""
PY2 = sys.version_info[0] == 2
if PY2: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
else: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
def get_exception():
"""
Workaround for the missing "as" keyword in py3k.
XXX: needs UT
"""
return sys.exc_info()[1]
def makelist(data):
"""
Thanks bottle
XXX: needs UT
"""
if isinstance(data, (list, set, tuple)):
return list(data)
elif data:
return [data]
else:
return []
def random_date_between(start_date, end_date):
"""Return random date between start/end"""
assert isinstance(start_date, datetime.date)
delta_secs = int((end_date - start_date).total_seconds())
delta = datetime.timedelta(seconds=random.randint(0, delta_secs))
return (start_date + delta)
def datetime_to_epoch(dt):
return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
class Tempfile(object):
"""
Tempfile wrapper with cleanup support
XXX: Needs UT
"""
def __init__(self):
self.paths = []
def mkstemp(self, *args, **kwargs):
path = tempfile.mkstemp(*args, **kwargs)
self.paths.append(path)
return path
def mkdtemp(self, *args, **kwargs):
path = tempfile.mkdtemp(*args, **kwargs)
self.paths.append(path)
return path
def cleanup(self):
"""Remove any created temp paths"""
for path in self.paths:
if isinstance(path, tuple):
os.close(path[0])
os.unlink(path[1])
else:
shutil.rmtree(path)
self.paths = []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.cleanup()
|
foxx/python-helpful
|
helpful.py
|
subclass
|
python
|
def subclass(cls, *bases, **kwargs):
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
new_bases = (cls,)+bases if last else bases+(cls,)
new_cls = type(cls.__name__, tuple(new_bases), {})
return new_cls
|
Add bases to class (late subclassing)
Annoyingly we cannot yet modify __bases__ of an existing
class, instead we must create another subclass, see here;
http://bugs.python.org/issue672115
>>> class A(object): pass
>>> class B(object): pass
>>> class C(object): pass
>>> issubclass(B, A)
False
>>> D = subclass(B, A)
>>> issubclass(D, A)
True
>>> issubclass(D, B)
True
|
train
|
https://github.com/foxx/python-helpful/blob/e31ad9bdf45051d8b9a0d1808d214e2293c3bbed/helpful.py#L304-L329
| null |
import six
import os
import shutil
import warnings
import tempfile
import importlib
import pkgutil
import inspect
import string
import random
import sys
import itertools
import datetime
from decimal import Decimal
from collections import OrderedDict, Iterable
if six.PY2: # pragma: nocover
text_type = unicode
string_types = (str, unicode)
else: # pragma: nocover
string_types = (str, )
text_type = str
NoneType = type(None)
###########################################################
# Mixins
###########################################################
class ClassDictMixin():
"""
Dict which can be accessed via class attributes
Thanks http://www.goodcode.io/blog/python-dict-object/
"""
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if name in self:
del self[name]
else:
raise AttributeError("No such attribute: " + name)
def copy(self):
# XXX: needs UT
return self.__class__(**self)
def unique_iter(seq):
"""
See http://www.peterbe.com/plog/uniqifiers-benchmark
Originally f8 written by Dave Kirby
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def flatteniter(iter_lst):
"""
>>> flatteniter([[1,2,3], [4,5,6]])
[1, 2, 3, 4, 5, 6]
"""
return list(itertools.chain(*iter_lst))
class HashableDictMixin(object):
def __hash__(self):
"""
This /should/ allow object to be hashable, for use in a set
XXX: Needs UT
Thanks Raymond @ http://stackoverflow.com/a/16162138/1267398
"""
return hash((frozenset(self), frozenset(self.values())))
###########################################################
# Hashable dict
###########################################################
class ClassDict(ClassDictMixin, dict):
"""
>>> d = ClassDict(hello="world")
>>> d.hello
'world'
>>> d.get('hello')
'world'
>>> d.hello = 'wtf'
>>> d.hello
'wtf'
>>> d['hello']
'wtf'
>>> d.world
Traceback (most recent call last):
AttributeError:
>>> del d.hello
>>> del d.world
Traceback (most recent call last):
AttributeError:
>>> d.hello = 1
>>> b = d.copy()
>>> b.hello = 2
>>> b.hello == d.hello
False
"""
class HashableDict(HashableDictMixin, dict):
"""
>>> hash(HashableDict(a=1, b=2)) is not None
True
"""
class HashableOrderedDict(HashableDictMixin, OrderedDict):
"""
>>> hash(HashableOrderedDict(a=1, b=2)) is not None
True
"""
def ensure_class(obj):
"""
Ensure object is a class
>>> ensure_class(object)
>>> ensure_class(object())
Traceback (most recent call last):
TypeError:
>>> ensure_class(1)
Traceback (most recent call last):
TypeError:
"""
if not inspect.isclass(obj):
raise TypeError("Expected class, got {}".format(obj))
def iter_ensure_class(iterable):
"""
Ensure every item in iterable is a class
>>> iter_ensure_class([object, object])
>>> iter_ensure_class([object, object()])
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_class(item) for item in iterable ]
def ensure_subclass(value, types):
"""
Ensure value is a subclass of types
>>> class Hello(object): pass
>>> ensure_subclass(Hello, Hello)
>>> ensure_subclass(object, Hello)
Traceback (most recent call last):
TypeError:
"""
ensure_class(value)
if not issubclass(value, types):
raise TypeError(
"expected subclass of {}, not {}".format(
types, value))
def ensure_instance(value, types):
"""
Ensure value is an instance of a certain type
>>> ensure_instance(1, [str])
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, str)
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, int)
>>> ensure_instance(1, (int, str))
:attr types: Type of list of types
"""
if not isinstance(value, types):
raise TypeError(
"expected instance of {}, got {}".format(
types, value))
def iter_ensure_instance(iterable, types):
"""
Iterate over object and check each item type
>>> iter_ensure_instance([1,2,3], [str])
Traceback (most recent call last):
TypeError:
>>> iter_ensure_instance([1,2,3], int)
>>> iter_ensure_instance(1, int)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_instance(item, types) for item in iterable ]
def touch(path, times=None):
"""
Implements unix utility `touch`
XXX: Needs UT
:attr fname: File path
:attr times: See `os.utime()` for args
https://docs.python.org/3.4/library/os.html#os.utime
"""
with open(path, 'a'):
os.utime(path, times)
def import_recursive(path):
"""
Recursively import all modules and packages
Thanks http://stackoverflow.com/a/25562415/1267398
XXX: Needs UT
:attr path: Path to package/module
"""
results = {}
obj = importlib.import_module(path)
results[path] = obj
path = getattr(obj, '__path__', os.path.dirname(obj.__file__))
for loader, name, is_pkg in pkgutil.walk_packages(path):
full_name = obj.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if is_pkg:
results.update(import_recursive(full_name))
return results
def extend_instance(instance, *bases, **kwargs):
"""
Apply subclass (mixin) to a class object or its instance
By default, the mixin is placed at the start of bases
to ensure its called first as per MRO. If you wish to
have it injected last, which is useful for monkeypatching,
then you can specify 'last=True'. See here:
http://stackoverflow.com/a/10018792/1267398
:attr cls: Target object
:type cls: Class instance
:attr bases: List of new bases to subclass with
:attr last: Inject new bases after existing bases
:type last: bool
>>> class A(object): pass
>>> class B(object): pass
>>> a = A()
>>> b = B()
>>> isinstance(b, A)
False
>>> extend_instance(b, A)
>>> isinstance(b, A)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
assert not inspect.isclass(instance)
base_cls = instance.__class__
base_cls_name = instance.__class__.__name__
new_bases = (base_cls,)+bases if last else bases+(base_cls,)
new_cls = type(base_cls_name, tuple(new_bases), {})
setattr(instance, '__class__', new_cls)
def add_bases(cls, *bases):
"""
Add bases to class
>>> class Base(object): pass
>>> class A(Base): pass
>>> class B(Base): pass
>>> issubclass(A, B)
False
>>> add_bases(A, B)
>>> issubclass(A, B)
True
"""
assert inspect.isclass(cls), "Expected class object"
for mixin in bases:
assert inspect.isclass(mixin), "Expected class object for bases"
new_bases = (bases + cls.__bases__)
cls.__bases__ = new_bases
def import_from_path(path):
"""
Imports a package, module or attribute from path
Thanks http://stackoverflow.com/a/14050282/1267398
>>> import_from_path('os.path')
<module 'posixpath' ...
>>> import_from_path('os.path.basename')
<function basename at ...
>>> import_from_path('os')
<module 'os' from ...
>>> import_from_path('getrektcunt')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc.dummylol')
Traceback (most recent call last):
ImportError:
"""
try:
return importlib.import_module(path)
except ImportError:
if '.' not in path:
raise
module_name, attr_name = path.rsplit('.', 1)
if not does_module_exist(module_name):
raise ImportError("No object found at '{}'".format(path))
mod = importlib.import_module(module_name)
if not hasattr(mod, attr_name):
raise ImportError("No object found at '{}'".format(path))
return getattr(mod, attr_name)
def does_module_exist(path):
"""
Check if Python module exists at path
>>> does_module_exist('os.path')
True
>>> does_module_exist('dummy.app')
False
"""
try:
importlib.import_module(path)
return True
except ImportError:
return False
def sort_dict_by_key(obj):
"""
Sort dict by its keys
>>> sort_dict_by_key(dict(c=1, b=2, a=3, d=4))
OrderedDict([('a', 3), ('b', 2), ('c', 1), ('d', 4)])
"""
sort_func = lambda x: x[0]
return OrderedDict(sorted(obj.items(), key=sort_func))
def generate_random_token(length=32):
"""
Generate random secure token
>>> len(generate_random_token())
32
>>> len(generate_random_token(6))
6
"""
chars = (string.ascii_lowercase + string.ascii_uppercase + string.digits)
return ''.join(random.choice(chars) for _ in range(length))
def default(*args, **kwargs):
"""
Return first argument which is "truthy"
>>> default(None, None, 1)
1
>>> default(None, None, 123)
123
>>> print(default(None, None))
None
"""
default = kwargs.get('default', None)
for arg in args:
if arg:
return arg
return default
def urljoin(*args):
"""
Joins given arguments into a url, removing duplicate slashes
Thanks http://stackoverflow.com/a/11326230/1267398
>>> urljoin('/lol', '///lol', '/lol//')
'/lol/lol/lol'
"""
value = "/".join(map(lambda x: str(x).strip('/'), args))
return "/{}".format(value)
def is_hex(value):
"""
Check if value is hex
>>> is_hex('abab')
True
>>> is_hex('gg')
False
"""
try:
int(value, 16)
except ValueError:
return False
else:
return True
def is_int(value):
"""
Check if value is an int
:type value: int, str, bytes, float, Decimal
>>> is_int(123), is_int('123'), is_int(Decimal('10'))
(True, True, True)
>>> is_int(1.1), is_int('1.1'), is_int(Decimal('10.1'))
(False, False, False)
>>> is_int(object)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(value, (int, str, bytes, float, Decimal))
if isinstance(value, int):
return True
elif isinstance(value, float):
return False
elif isinstance(value, Decimal):
return str(value).isdigit()
elif isinstance(value, (str, bytes)):
return value.isdigit()
raise ValueError() # pragma: nocover
def padded_split(value, sep, maxsplit=None, pad=None):
"""
Modified split() to include padding
See http://code.activestate.com/lists/python-ideas/3366/
:attr value: see str.split()
:attr sep: see str.split()
:attr maxsplit: see str.split()
:attr pad: Value to use for padding maxsplit
>>> padded_split('text/html', ';', 1)
['text/html', None]
>>> padded_split('text/html;q=1', ';', 1)
['text/html', 'q=1']
>>> padded_split('text/html;a=1;b=2', ';', 1)
['text/html', 'a=1;b=2']
>>> padded_split('text/html', ';', 1, True)
['text/html', True]
>>> padded_split('text/html;a=1;b=2', ';', 2)
['text/html', 'a=1', 'b=2']
>>> padded_split('text/html;a=1', ';', 2)
['text/html', 'a=1', None]
"""
result = value.split(sep, maxsplit)
if maxsplit is not None:
result.extend(
[pad] * (1+maxsplit-len(result)))
return result
def coerce_to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
"""
Coerce value to bytes
>>> a = coerce_to_bytes('hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(b'hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(None)
>>> assert a is None
>>> coerce_to_bytes(object())
Traceback (most recent call last):
...
TypeError: Cannot coerce to bytes
"""
PY2 = sys.version_info[0] == 2
if PY2: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
else: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
def get_exception():
"""
Workaround for the missing "as" keyword in py3k.
XXX: needs UT
"""
return sys.exc_info()[1]
def makelist(data):
"""
Thanks bottle
XXX: needs UT
"""
if isinstance(data, (list, set, tuple)):
return list(data)
elif data:
return [data]
else:
return []
def random_date_between(start_date, end_date):
"""Return random date between start/end"""
assert isinstance(start_date, datetime.date)
delta_secs = int((end_date - start_date).total_seconds())
delta = datetime.timedelta(seconds=random.randint(0, delta_secs))
return (start_date + delta)
def datetime_to_epoch(dt):
return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
class Tempfile(object):
"""
Tempfile wrapper with cleanup support
XXX: Needs UT
"""
def __init__(self):
self.paths = []
def mkstemp(self, *args, **kwargs):
path = tempfile.mkstemp(*args, **kwargs)
self.paths.append(path)
return path
def mkdtemp(self, *args, **kwargs):
path = tempfile.mkdtemp(*args, **kwargs)
self.paths.append(path)
return path
def cleanup(self):
"""Remove any created temp paths"""
for path in self.paths:
if isinstance(path, tuple):
os.close(path[0])
os.unlink(path[1])
else:
shutil.rmtree(path)
self.paths = []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.cleanup()
|
foxx/python-helpful
|
helpful.py
|
import_from_path
|
python
|
def import_from_path(path):
try:
return importlib.import_module(path)
except ImportError:
if '.' not in path:
raise
module_name, attr_name = path.rsplit('.', 1)
if not does_module_exist(module_name):
raise ImportError("No object found at '{}'".format(path))
mod = importlib.import_module(module_name)
if not hasattr(mod, attr_name):
raise ImportError("No object found at '{}'".format(path))
return getattr(mod, attr_name)
|
Imports a package, module or attribute from path
Thanks http://stackoverflow.com/a/14050282/1267398
>>> import_from_path('os.path')
<module 'posixpath' ...
>>> import_from_path('os.path.basename')
<function basename at ...
>>> import_from_path('os')
<module 'os' from ...
>>> import_from_path('getrektcunt')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc.dummylol')
Traceback (most recent call last):
ImportError:
|
train
|
https://github.com/foxx/python-helpful/blob/e31ad9bdf45051d8b9a0d1808d214e2293c3bbed/helpful.py#L332-L365
|
[
"def does_module_exist(path):\n \"\"\"\n Check if Python module exists at path\n\n >>> does_module_exist('os.path')\n True\n >>> does_module_exist('dummy.app')\n False\n \"\"\"\n try:\n importlib.import_module(path)\n return True\n except ImportError:\n return False\n"
] |
import six
import os
import shutil
import warnings
import tempfile
import importlib
import pkgutil
import inspect
import string
import random
import sys
import itertools
import datetime
from decimal import Decimal
from collections import OrderedDict, Iterable
if six.PY2: # pragma: nocover
text_type = unicode
string_types = (str, unicode)
else: # pragma: nocover
string_types = (str, )
text_type = str
NoneType = type(None)
###########################################################
# Mixins
###########################################################
class ClassDictMixin():
"""
Dict which can be accessed via class attributes
Thanks http://www.goodcode.io/blog/python-dict-object/
"""
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if name in self:
del self[name]
else:
raise AttributeError("No such attribute: " + name)
def copy(self):
# XXX: needs UT
return self.__class__(**self)
def unique_iter(seq):
"""
See http://www.peterbe.com/plog/uniqifiers-benchmark
Originally f8 written by Dave Kirby
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def flatteniter(iter_lst):
"""
>>> flatteniter([[1,2,3], [4,5,6]])
[1, 2, 3, 4, 5, 6]
"""
return list(itertools.chain(*iter_lst))
class HashableDictMixin(object):
def __hash__(self):
"""
This /should/ allow object to be hashable, for use in a set
XXX: Needs UT
Thanks Raymond @ http://stackoverflow.com/a/16162138/1267398
"""
return hash((frozenset(self), frozenset(self.values())))
###########################################################
# Hashable dict
###########################################################
class ClassDict(ClassDictMixin, dict):
"""
>>> d = ClassDict(hello="world")
>>> d.hello
'world'
>>> d.get('hello')
'world'
>>> d.hello = 'wtf'
>>> d.hello
'wtf'
>>> d['hello']
'wtf'
>>> d.world
Traceback (most recent call last):
AttributeError:
>>> del d.hello
>>> del d.world
Traceback (most recent call last):
AttributeError:
>>> d.hello = 1
>>> b = d.copy()
>>> b.hello = 2
>>> b.hello == d.hello
False
"""
class HashableDict(HashableDictMixin, dict):
"""
>>> hash(HashableDict(a=1, b=2)) is not None
True
"""
class HashableOrderedDict(HashableDictMixin, OrderedDict):
"""
>>> hash(HashableOrderedDict(a=1, b=2)) is not None
True
"""
def ensure_class(obj):
"""
Ensure object is a class
>>> ensure_class(object)
>>> ensure_class(object())
Traceback (most recent call last):
TypeError:
>>> ensure_class(1)
Traceback (most recent call last):
TypeError:
"""
if not inspect.isclass(obj):
raise TypeError("Expected class, got {}".format(obj))
def iter_ensure_class(iterable):
"""
Ensure every item in iterable is a class
>>> iter_ensure_class([object, object])
>>> iter_ensure_class([object, object()])
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_class(item) for item in iterable ]
def ensure_subclass(value, types):
"""
Ensure value is a subclass of types
>>> class Hello(object): pass
>>> ensure_subclass(Hello, Hello)
>>> ensure_subclass(object, Hello)
Traceback (most recent call last):
TypeError:
"""
ensure_class(value)
if not issubclass(value, types):
raise TypeError(
"expected subclass of {}, not {}".format(
types, value))
def ensure_instance(value, types):
"""
Ensure value is an instance of a certain type
>>> ensure_instance(1, [str])
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, str)
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, int)
>>> ensure_instance(1, (int, str))
:attr types: Type of list of types
"""
if not isinstance(value, types):
raise TypeError(
"expected instance of {}, got {}".format(
types, value))
def iter_ensure_instance(iterable, types):
"""
Iterate over object and check each item type
>>> iter_ensure_instance([1,2,3], [str])
Traceback (most recent call last):
TypeError:
>>> iter_ensure_instance([1,2,3], int)
>>> iter_ensure_instance(1, int)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_instance(item, types) for item in iterable ]
def touch(path, times=None):
"""
Implements unix utility `touch`
XXX: Needs UT
:attr fname: File path
:attr times: See `os.utime()` for args
https://docs.python.org/3.4/library/os.html#os.utime
"""
with open(path, 'a'):
os.utime(path, times)
def import_recursive(path):
"""
Recursively import all modules and packages
Thanks http://stackoverflow.com/a/25562415/1267398
XXX: Needs UT
:attr path: Path to package/module
"""
results = {}
obj = importlib.import_module(path)
results[path] = obj
path = getattr(obj, '__path__', os.path.dirname(obj.__file__))
for loader, name, is_pkg in pkgutil.walk_packages(path):
full_name = obj.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if is_pkg:
results.update(import_recursive(full_name))
return results
def extend_instance(instance, *bases, **kwargs):
"""
Apply subclass (mixin) to a class object or its instance
By default, the mixin is placed at the start of bases
to ensure its called first as per MRO. If you wish to
have it injected last, which is useful for monkeypatching,
then you can specify 'last=True'. See here:
http://stackoverflow.com/a/10018792/1267398
:attr cls: Target object
:type cls: Class instance
:attr bases: List of new bases to subclass with
:attr last: Inject new bases after existing bases
:type last: bool
>>> class A(object): pass
>>> class B(object): pass
>>> a = A()
>>> b = B()
>>> isinstance(b, A)
False
>>> extend_instance(b, A)
>>> isinstance(b, A)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
assert not inspect.isclass(instance)
base_cls = instance.__class__
base_cls_name = instance.__class__.__name__
new_bases = (base_cls,)+bases if last else bases+(base_cls,)
new_cls = type(base_cls_name, tuple(new_bases), {})
setattr(instance, '__class__', new_cls)
def add_bases(cls, *bases):
"""
Add bases to class
>>> class Base(object): pass
>>> class A(Base): pass
>>> class B(Base): pass
>>> issubclass(A, B)
False
>>> add_bases(A, B)
>>> issubclass(A, B)
True
"""
assert inspect.isclass(cls), "Expected class object"
for mixin in bases:
assert inspect.isclass(mixin), "Expected class object for bases"
new_bases = (bases + cls.__bases__)
cls.__bases__ = new_bases
def subclass(cls, *bases, **kwargs):
"""
Add bases to class (late subclassing)
Annoyingly we cannot yet modify __bases__ of an existing
class, instead we must create another subclass, see here;
http://bugs.python.org/issue672115
>>> class A(object): pass
>>> class B(object): pass
>>> class C(object): pass
>>> issubclass(B, A)
False
>>> D = subclass(B, A)
>>> issubclass(D, A)
True
>>> issubclass(D, B)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
new_bases = (cls,)+bases if last else bases+(cls,)
new_cls = type(cls.__name__, tuple(new_bases), {})
return new_cls
def does_module_exist(path):
"""
Check if Python module exists at path
>>> does_module_exist('os.path')
True
>>> does_module_exist('dummy.app')
False
"""
try:
importlib.import_module(path)
return True
except ImportError:
return False
def sort_dict_by_key(obj):
"""
Sort dict by its keys
>>> sort_dict_by_key(dict(c=1, b=2, a=3, d=4))
OrderedDict([('a', 3), ('b', 2), ('c', 1), ('d', 4)])
"""
sort_func = lambda x: x[0]
return OrderedDict(sorted(obj.items(), key=sort_func))
def generate_random_token(length=32):
"""
Generate random secure token
>>> len(generate_random_token())
32
>>> len(generate_random_token(6))
6
"""
chars = (string.ascii_lowercase + string.ascii_uppercase + string.digits)
return ''.join(random.choice(chars) for _ in range(length))
def default(*args, **kwargs):
"""
Return first argument which is "truthy"
>>> default(None, None, 1)
1
>>> default(None, None, 123)
123
>>> print(default(None, None))
None
"""
default = kwargs.get('default', None)
for arg in args:
if arg:
return arg
return default
def urljoin(*args):
"""
Joins given arguments into a url, removing duplicate slashes
Thanks http://stackoverflow.com/a/11326230/1267398
>>> urljoin('/lol', '///lol', '/lol//')
'/lol/lol/lol'
"""
value = "/".join(map(lambda x: str(x).strip('/'), args))
return "/{}".format(value)
def is_hex(value):
"""
Check if value is hex
>>> is_hex('abab')
True
>>> is_hex('gg')
False
"""
try:
int(value, 16)
except ValueError:
return False
else:
return True
def is_int(value):
"""
Check if value is an int
:type value: int, str, bytes, float, Decimal
>>> is_int(123), is_int('123'), is_int(Decimal('10'))
(True, True, True)
>>> is_int(1.1), is_int('1.1'), is_int(Decimal('10.1'))
(False, False, False)
>>> is_int(object)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(value, (int, str, bytes, float, Decimal))
if isinstance(value, int):
return True
elif isinstance(value, float):
return False
elif isinstance(value, Decimal):
return str(value).isdigit()
elif isinstance(value, (str, bytes)):
return value.isdigit()
raise ValueError() # pragma: nocover
def padded_split(value, sep, maxsplit=None, pad=None):
"""
Modified split() to include padding
See http://code.activestate.com/lists/python-ideas/3366/
:attr value: see str.split()
:attr sep: see str.split()
:attr maxsplit: see str.split()
:attr pad: Value to use for padding maxsplit
>>> padded_split('text/html', ';', 1)
['text/html', None]
>>> padded_split('text/html;q=1', ';', 1)
['text/html', 'q=1']
>>> padded_split('text/html;a=1;b=2', ';', 1)
['text/html', 'a=1;b=2']
>>> padded_split('text/html', ';', 1, True)
['text/html', True]
>>> padded_split('text/html;a=1;b=2', ';', 2)
['text/html', 'a=1', 'b=2']
>>> padded_split('text/html;a=1', ';', 2)
['text/html', 'a=1', None]
"""
result = value.split(sep, maxsplit)
if maxsplit is not None:
result.extend(
[pad] * (1+maxsplit-len(result)))
return result
def coerce_to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
"""
Coerce value to bytes
>>> a = coerce_to_bytes('hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(b'hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(None)
>>> assert a is None
>>> coerce_to_bytes(object())
Traceback (most recent call last):
...
TypeError: Cannot coerce to bytes
"""
PY2 = sys.version_info[0] == 2
if PY2: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
else: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
def get_exception():
"""
Workaround for the missing "as" keyword in py3k.
XXX: needs UT
"""
return sys.exc_info()[1]
def makelist(data):
"""
Thanks bottle
XXX: needs UT
"""
if isinstance(data, (list, set, tuple)):
return list(data)
elif data:
return [data]
else:
return []
def random_date_between(start_date, end_date):
"""Return random date between start/end"""
assert isinstance(start_date, datetime.date)
delta_secs = int((end_date - start_date).total_seconds())
delta = datetime.timedelta(seconds=random.randint(0, delta_secs))
return (start_date + delta)
def datetime_to_epoch(dt):
return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
class Tempfile(object):
"""
Tempfile wrapper with cleanup support
XXX: Needs UT
"""
def __init__(self):
self.paths = []
def mkstemp(self, *args, **kwargs):
path = tempfile.mkstemp(*args, **kwargs)
self.paths.append(path)
return path
def mkdtemp(self, *args, **kwargs):
path = tempfile.mkdtemp(*args, **kwargs)
self.paths.append(path)
return path
def cleanup(self):
"""Remove any created temp paths"""
for path in self.paths:
if isinstance(path, tuple):
os.close(path[0])
os.unlink(path[1])
else:
shutil.rmtree(path)
self.paths = []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.cleanup()
|
foxx/python-helpful
|
helpful.py
|
sort_dict_by_key
|
python
|
def sort_dict_by_key(obj):
sort_func = lambda x: x[0]
return OrderedDict(sorted(obj.items(), key=sort_func))
|
Sort dict by its keys
>>> sort_dict_by_key(dict(c=1, b=2, a=3, d=4))
OrderedDict([('a', 3), ('b', 2), ('c', 1), ('d', 4)])
|
train
|
https://github.com/foxx/python-helpful/blob/e31ad9bdf45051d8b9a0d1808d214e2293c3bbed/helpful.py#L384-L392
| null |
import six
import os
import shutil
import warnings
import tempfile
import importlib
import pkgutil
import inspect
import string
import random
import sys
import itertools
import datetime
from decimal import Decimal
from collections import OrderedDict, Iterable
if six.PY2: # pragma: nocover
text_type = unicode
string_types = (str, unicode)
else: # pragma: nocover
string_types = (str, )
text_type = str
NoneType = type(None)
###########################################################
# Mixins
###########################################################
class ClassDictMixin():
"""
Dict which can be accessed via class attributes
Thanks http://www.goodcode.io/blog/python-dict-object/
"""
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if name in self:
del self[name]
else:
raise AttributeError("No such attribute: " + name)
def copy(self):
# XXX: needs UT
return self.__class__(**self)
def unique_iter(seq):
"""
See http://www.peterbe.com/plog/uniqifiers-benchmark
Originally f8 written by Dave Kirby
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def flatteniter(iter_lst):
"""
>>> flatteniter([[1,2,3], [4,5,6]])
[1, 2, 3, 4, 5, 6]
"""
return list(itertools.chain(*iter_lst))
class HashableDictMixin(object):
def __hash__(self):
"""
This /should/ allow object to be hashable, for use in a set
XXX: Needs UT
Thanks Raymond @ http://stackoverflow.com/a/16162138/1267398
"""
return hash((frozenset(self), frozenset(self.values())))
###########################################################
# Hashable dict
###########################################################
class ClassDict(ClassDictMixin, dict):
"""
>>> d = ClassDict(hello="world")
>>> d.hello
'world'
>>> d.get('hello')
'world'
>>> d.hello = 'wtf'
>>> d.hello
'wtf'
>>> d['hello']
'wtf'
>>> d.world
Traceback (most recent call last):
AttributeError:
>>> del d.hello
>>> del d.world
Traceback (most recent call last):
AttributeError:
>>> d.hello = 1
>>> b = d.copy()
>>> b.hello = 2
>>> b.hello == d.hello
False
"""
class HashableDict(HashableDictMixin, dict):
"""
>>> hash(HashableDict(a=1, b=2)) is not None
True
"""
class HashableOrderedDict(HashableDictMixin, OrderedDict):
"""
>>> hash(HashableOrderedDict(a=1, b=2)) is not None
True
"""
def ensure_class(obj):
"""
Ensure object is a class
>>> ensure_class(object)
>>> ensure_class(object())
Traceback (most recent call last):
TypeError:
>>> ensure_class(1)
Traceback (most recent call last):
TypeError:
"""
if not inspect.isclass(obj):
raise TypeError("Expected class, got {}".format(obj))
def iter_ensure_class(iterable):
"""
Ensure every item in iterable is a class
>>> iter_ensure_class([object, object])
>>> iter_ensure_class([object, object()])
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_class(item) for item in iterable ]
def ensure_subclass(value, types):
"""
Ensure value is a subclass of types
>>> class Hello(object): pass
>>> ensure_subclass(Hello, Hello)
>>> ensure_subclass(object, Hello)
Traceback (most recent call last):
TypeError:
"""
ensure_class(value)
if not issubclass(value, types):
raise TypeError(
"expected subclass of {}, not {}".format(
types, value))
def ensure_instance(value, types):
"""
Ensure value is an instance of a certain type
>>> ensure_instance(1, [str])
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, str)
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, int)
>>> ensure_instance(1, (int, str))
:attr types: Type of list of types
"""
if not isinstance(value, types):
raise TypeError(
"expected instance of {}, got {}".format(
types, value))
def iter_ensure_instance(iterable, types):
"""
Iterate over object and check each item type
>>> iter_ensure_instance([1,2,3], [str])
Traceback (most recent call last):
TypeError:
>>> iter_ensure_instance([1,2,3], int)
>>> iter_ensure_instance(1, int)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_instance(item, types) for item in iterable ]
def touch(path, times=None):
"""
Implements unix utility `touch`
XXX: Needs UT
:attr fname: File path
:attr times: See `os.utime()` for args
https://docs.python.org/3.4/library/os.html#os.utime
"""
with open(path, 'a'):
os.utime(path, times)
def import_recursive(path):
"""
Recursively import all modules and packages
Thanks http://stackoverflow.com/a/25562415/1267398
XXX: Needs UT
:attr path: Path to package/module
"""
results = {}
obj = importlib.import_module(path)
results[path] = obj
path = getattr(obj, '__path__', os.path.dirname(obj.__file__))
for loader, name, is_pkg in pkgutil.walk_packages(path):
full_name = obj.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if is_pkg:
results.update(import_recursive(full_name))
return results
def extend_instance(instance, *bases, **kwargs):
"""
Apply subclass (mixin) to a class object or its instance
By default, the mixin is placed at the start of bases
to ensure its called first as per MRO. If you wish to
have it injected last, which is useful for monkeypatching,
then you can specify 'last=True'. See here:
http://stackoverflow.com/a/10018792/1267398
:attr cls: Target object
:type cls: Class instance
:attr bases: List of new bases to subclass with
:attr last: Inject new bases after existing bases
:type last: bool
>>> class A(object): pass
>>> class B(object): pass
>>> a = A()
>>> b = B()
>>> isinstance(b, A)
False
>>> extend_instance(b, A)
>>> isinstance(b, A)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
assert not inspect.isclass(instance)
base_cls = instance.__class__
base_cls_name = instance.__class__.__name__
new_bases = (base_cls,)+bases if last else bases+(base_cls,)
new_cls = type(base_cls_name, tuple(new_bases), {})
setattr(instance, '__class__', new_cls)
def add_bases(cls, *bases):
"""
Add bases to class
>>> class Base(object): pass
>>> class A(Base): pass
>>> class B(Base): pass
>>> issubclass(A, B)
False
>>> add_bases(A, B)
>>> issubclass(A, B)
True
"""
assert inspect.isclass(cls), "Expected class object"
for mixin in bases:
assert inspect.isclass(mixin), "Expected class object for bases"
new_bases = (bases + cls.__bases__)
cls.__bases__ = new_bases
def subclass(cls, *bases, **kwargs):
"""
Add bases to class (late subclassing)
Annoyingly we cannot yet modify __bases__ of an existing
class, instead we must create another subclass, see here;
http://bugs.python.org/issue672115
>>> class A(object): pass
>>> class B(object): pass
>>> class C(object): pass
>>> issubclass(B, A)
False
>>> D = subclass(B, A)
>>> issubclass(D, A)
True
>>> issubclass(D, B)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
new_bases = (cls,)+bases if last else bases+(cls,)
new_cls = type(cls.__name__, tuple(new_bases), {})
return new_cls
def import_from_path(path):
"""
Imports a package, module or attribute from path
Thanks http://stackoverflow.com/a/14050282/1267398
>>> import_from_path('os.path')
<module 'posixpath' ...
>>> import_from_path('os.path.basename')
<function basename at ...
>>> import_from_path('os')
<module 'os' from ...
>>> import_from_path('getrektcunt')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc.dummylol')
Traceback (most recent call last):
ImportError:
"""
try:
return importlib.import_module(path)
except ImportError:
if '.' not in path:
raise
module_name, attr_name = path.rsplit('.', 1)
if not does_module_exist(module_name):
raise ImportError("No object found at '{}'".format(path))
mod = importlib.import_module(module_name)
if not hasattr(mod, attr_name):
raise ImportError("No object found at '{}'".format(path))
return getattr(mod, attr_name)
def does_module_exist(path):
"""
Check if Python module exists at path
>>> does_module_exist('os.path')
True
>>> does_module_exist('dummy.app')
False
"""
try:
importlib.import_module(path)
return True
except ImportError:
return False
def generate_random_token(length=32):
"""
Generate random secure token
>>> len(generate_random_token())
32
>>> len(generate_random_token(6))
6
"""
chars = (string.ascii_lowercase + string.ascii_uppercase + string.digits)
return ''.join(random.choice(chars) for _ in range(length))
def default(*args, **kwargs):
"""
Return first argument which is "truthy"
>>> default(None, None, 1)
1
>>> default(None, None, 123)
123
>>> print(default(None, None))
None
"""
default = kwargs.get('default', None)
for arg in args:
if arg:
return arg
return default
def urljoin(*args):
"""
Joins given arguments into a url, removing duplicate slashes
Thanks http://stackoverflow.com/a/11326230/1267398
>>> urljoin('/lol', '///lol', '/lol//')
'/lol/lol/lol'
"""
value = "/".join(map(lambda x: str(x).strip('/'), args))
return "/{}".format(value)
def is_hex(value):
"""
Check if value is hex
>>> is_hex('abab')
True
>>> is_hex('gg')
False
"""
try:
int(value, 16)
except ValueError:
return False
else:
return True
def is_int(value):
"""
Check if value is an int
:type value: int, str, bytes, float, Decimal
>>> is_int(123), is_int('123'), is_int(Decimal('10'))
(True, True, True)
>>> is_int(1.1), is_int('1.1'), is_int(Decimal('10.1'))
(False, False, False)
>>> is_int(object)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(value, (int, str, bytes, float, Decimal))
if isinstance(value, int):
return True
elif isinstance(value, float):
return False
elif isinstance(value, Decimal):
return str(value).isdigit()
elif isinstance(value, (str, bytes)):
return value.isdigit()
raise ValueError() # pragma: nocover
def padded_split(value, sep, maxsplit=None, pad=None):
"""
Modified split() to include padding
See http://code.activestate.com/lists/python-ideas/3366/
:attr value: see str.split()
:attr sep: see str.split()
:attr maxsplit: see str.split()
:attr pad: Value to use for padding maxsplit
>>> padded_split('text/html', ';', 1)
['text/html', None]
>>> padded_split('text/html;q=1', ';', 1)
['text/html', 'q=1']
>>> padded_split('text/html;a=1;b=2', ';', 1)
['text/html', 'a=1;b=2']
>>> padded_split('text/html', ';', 1, True)
['text/html', True]
>>> padded_split('text/html;a=1;b=2', ';', 2)
['text/html', 'a=1', 'b=2']
>>> padded_split('text/html;a=1', ';', 2)
['text/html', 'a=1', None]
"""
result = value.split(sep, maxsplit)
if maxsplit is not None:
result.extend(
[pad] * (1+maxsplit-len(result)))
return result
def coerce_to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
"""
Coerce value to bytes
>>> a = coerce_to_bytes('hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(b'hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(None)
>>> assert a is None
>>> coerce_to_bytes(object())
Traceback (most recent call last):
...
TypeError: Cannot coerce to bytes
"""
PY2 = sys.version_info[0] == 2
if PY2: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
else: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
def get_exception():
"""
Workaround for the missing "as" keyword in py3k.
XXX: needs UT
"""
return sys.exc_info()[1]
def makelist(data):
"""
Thanks bottle
XXX: needs UT
"""
if isinstance(data, (list, set, tuple)):
return list(data)
elif data:
return [data]
else:
return []
def random_date_between(start_date, end_date):
"""Return random date between start/end"""
assert isinstance(start_date, datetime.date)
delta_secs = int((end_date - start_date).total_seconds())
delta = datetime.timedelta(seconds=random.randint(0, delta_secs))
return (start_date + delta)
def datetime_to_epoch(dt):
return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
class Tempfile(object):
"""
Tempfile wrapper with cleanup support
XXX: Needs UT
"""
def __init__(self):
self.paths = []
def mkstemp(self, *args, **kwargs):
path = tempfile.mkstemp(*args, **kwargs)
self.paths.append(path)
return path
def mkdtemp(self, *args, **kwargs):
path = tempfile.mkdtemp(*args, **kwargs)
self.paths.append(path)
return path
def cleanup(self):
"""Remove any created temp paths"""
for path in self.paths:
if isinstance(path, tuple):
os.close(path[0])
os.unlink(path[1])
else:
shutil.rmtree(path)
self.paths = []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.cleanup()
|
foxx/python-helpful
|
helpful.py
|
generate_random_token
|
python
|
def generate_random_token(length=32):
chars = (string.ascii_lowercase + string.ascii_uppercase + string.digits)
return ''.join(random.choice(chars) for _ in range(length))
|
Generate random secure token
>>> len(generate_random_token())
32
>>> len(generate_random_token(6))
6
|
train
|
https://github.com/foxx/python-helpful/blob/e31ad9bdf45051d8b9a0d1808d214e2293c3bbed/helpful.py#L395-L405
| null |
import six
import os
import shutil
import warnings
import tempfile
import importlib
import pkgutil
import inspect
import string
import random
import sys
import itertools
import datetime
from decimal import Decimal
from collections import OrderedDict, Iterable
if six.PY2: # pragma: nocover
text_type = unicode
string_types = (str, unicode)
else: # pragma: nocover
string_types = (str, )
text_type = str
NoneType = type(None)
###########################################################
# Mixins
###########################################################
class ClassDictMixin():
"""
Dict which can be accessed via class attributes
Thanks http://www.goodcode.io/blog/python-dict-object/
"""
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if name in self:
del self[name]
else:
raise AttributeError("No such attribute: " + name)
def copy(self):
# XXX: needs UT
return self.__class__(**self)
def unique_iter(seq):
"""
See http://www.peterbe.com/plog/uniqifiers-benchmark
Originally f8 written by Dave Kirby
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def flatteniter(iter_lst):
"""
>>> flatteniter([[1,2,3], [4,5,6]])
[1, 2, 3, 4, 5, 6]
"""
return list(itertools.chain(*iter_lst))
class HashableDictMixin(object):
def __hash__(self):
"""
This /should/ allow object to be hashable, for use in a set
XXX: Needs UT
Thanks Raymond @ http://stackoverflow.com/a/16162138/1267398
"""
return hash((frozenset(self), frozenset(self.values())))
###########################################################
# Hashable dict
###########################################################
class ClassDict(ClassDictMixin, dict):
"""
>>> d = ClassDict(hello="world")
>>> d.hello
'world'
>>> d.get('hello')
'world'
>>> d.hello = 'wtf'
>>> d.hello
'wtf'
>>> d['hello']
'wtf'
>>> d.world
Traceback (most recent call last):
AttributeError:
>>> del d.hello
>>> del d.world
Traceback (most recent call last):
AttributeError:
>>> d.hello = 1
>>> b = d.copy()
>>> b.hello = 2
>>> b.hello == d.hello
False
"""
class HashableDict(HashableDictMixin, dict):
"""
>>> hash(HashableDict(a=1, b=2)) is not None
True
"""
class HashableOrderedDict(HashableDictMixin, OrderedDict):
"""
>>> hash(HashableOrderedDict(a=1, b=2)) is not None
True
"""
def ensure_class(obj):
"""
Ensure object is a class
>>> ensure_class(object)
>>> ensure_class(object())
Traceback (most recent call last):
TypeError:
>>> ensure_class(1)
Traceback (most recent call last):
TypeError:
"""
if not inspect.isclass(obj):
raise TypeError("Expected class, got {}".format(obj))
def iter_ensure_class(iterable):
"""
Ensure every item in iterable is a class
>>> iter_ensure_class([object, object])
>>> iter_ensure_class([object, object()])
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_class(item) for item in iterable ]
def ensure_subclass(value, types):
"""
Ensure value is a subclass of types
>>> class Hello(object): pass
>>> ensure_subclass(Hello, Hello)
>>> ensure_subclass(object, Hello)
Traceback (most recent call last):
TypeError:
"""
ensure_class(value)
if not issubclass(value, types):
raise TypeError(
"expected subclass of {}, not {}".format(
types, value))
def ensure_instance(value, types):
"""
Ensure value is an instance of a certain type
>>> ensure_instance(1, [str])
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, str)
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, int)
>>> ensure_instance(1, (int, str))
:attr types: Type of list of types
"""
if not isinstance(value, types):
raise TypeError(
"expected instance of {}, got {}".format(
types, value))
def iter_ensure_instance(iterable, types):
"""
Iterate over object and check each item type
>>> iter_ensure_instance([1,2,3], [str])
Traceback (most recent call last):
TypeError:
>>> iter_ensure_instance([1,2,3], int)
>>> iter_ensure_instance(1, int)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_instance(item, types) for item in iterable ]
def touch(path, times=None):
"""
Implements unix utility `touch`
XXX: Needs UT
:attr fname: File path
:attr times: See `os.utime()` for args
https://docs.python.org/3.4/library/os.html#os.utime
"""
with open(path, 'a'):
os.utime(path, times)
def import_recursive(path):
"""
Recursively import all modules and packages
Thanks http://stackoverflow.com/a/25562415/1267398
XXX: Needs UT
:attr path: Path to package/module
"""
results = {}
obj = importlib.import_module(path)
results[path] = obj
path = getattr(obj, '__path__', os.path.dirname(obj.__file__))
for loader, name, is_pkg in pkgutil.walk_packages(path):
full_name = obj.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if is_pkg:
results.update(import_recursive(full_name))
return results
def extend_instance(instance, *bases, **kwargs):
"""
Apply subclass (mixin) to a class object or its instance
By default, the mixin is placed at the start of bases
to ensure its called first as per MRO. If you wish to
have it injected last, which is useful for monkeypatching,
then you can specify 'last=True'. See here:
http://stackoverflow.com/a/10018792/1267398
:attr cls: Target object
:type cls: Class instance
:attr bases: List of new bases to subclass with
:attr last: Inject new bases after existing bases
:type last: bool
>>> class A(object): pass
>>> class B(object): pass
>>> a = A()
>>> b = B()
>>> isinstance(b, A)
False
>>> extend_instance(b, A)
>>> isinstance(b, A)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
assert not inspect.isclass(instance)
base_cls = instance.__class__
base_cls_name = instance.__class__.__name__
new_bases = (base_cls,)+bases if last else bases+(base_cls,)
new_cls = type(base_cls_name, tuple(new_bases), {})
setattr(instance, '__class__', new_cls)
def add_bases(cls, *bases):
"""
Add bases to class
>>> class Base(object): pass
>>> class A(Base): pass
>>> class B(Base): pass
>>> issubclass(A, B)
False
>>> add_bases(A, B)
>>> issubclass(A, B)
True
"""
assert inspect.isclass(cls), "Expected class object"
for mixin in bases:
assert inspect.isclass(mixin), "Expected class object for bases"
new_bases = (bases + cls.__bases__)
cls.__bases__ = new_bases
def subclass(cls, *bases, **kwargs):
"""
Add bases to class (late subclassing)
Annoyingly we cannot yet modify __bases__ of an existing
class, instead we must create another subclass, see here;
http://bugs.python.org/issue672115
>>> class A(object): pass
>>> class B(object): pass
>>> class C(object): pass
>>> issubclass(B, A)
False
>>> D = subclass(B, A)
>>> issubclass(D, A)
True
>>> issubclass(D, B)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
new_bases = (cls,)+bases if last else bases+(cls,)
new_cls = type(cls.__name__, tuple(new_bases), {})
return new_cls
def import_from_path(path):
"""
Imports a package, module or attribute from path
Thanks http://stackoverflow.com/a/14050282/1267398
>>> import_from_path('os.path')
<module 'posixpath' ...
>>> import_from_path('os.path.basename')
<function basename at ...
>>> import_from_path('os')
<module 'os' from ...
>>> import_from_path('getrektcunt')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc.dummylol')
Traceback (most recent call last):
ImportError:
"""
try:
return importlib.import_module(path)
except ImportError:
if '.' not in path:
raise
module_name, attr_name = path.rsplit('.', 1)
if not does_module_exist(module_name):
raise ImportError("No object found at '{}'".format(path))
mod = importlib.import_module(module_name)
if not hasattr(mod, attr_name):
raise ImportError("No object found at '{}'".format(path))
return getattr(mod, attr_name)
def does_module_exist(path):
"""
Check if Python module exists at path
>>> does_module_exist('os.path')
True
>>> does_module_exist('dummy.app')
False
"""
try:
importlib.import_module(path)
return True
except ImportError:
return False
def sort_dict_by_key(obj):
"""
Sort dict by its keys
>>> sort_dict_by_key(dict(c=1, b=2, a=3, d=4))
OrderedDict([('a', 3), ('b', 2), ('c', 1), ('d', 4)])
"""
sort_func = lambda x: x[0]
return OrderedDict(sorted(obj.items(), key=sort_func))
def default(*args, **kwargs):
"""
Return first argument which is "truthy"
>>> default(None, None, 1)
1
>>> default(None, None, 123)
123
>>> print(default(None, None))
None
"""
default = kwargs.get('default', None)
for arg in args:
if arg:
return arg
return default
def urljoin(*args):
"""
Joins given arguments into a url, removing duplicate slashes
Thanks http://stackoverflow.com/a/11326230/1267398
>>> urljoin('/lol', '///lol', '/lol//')
'/lol/lol/lol'
"""
value = "/".join(map(lambda x: str(x).strip('/'), args))
return "/{}".format(value)
def is_hex(value):
"""
Check if value is hex
>>> is_hex('abab')
True
>>> is_hex('gg')
False
"""
try:
int(value, 16)
except ValueError:
return False
else:
return True
def is_int(value):
"""
Check if value is an int
:type value: int, str, bytes, float, Decimal
>>> is_int(123), is_int('123'), is_int(Decimal('10'))
(True, True, True)
>>> is_int(1.1), is_int('1.1'), is_int(Decimal('10.1'))
(False, False, False)
>>> is_int(object)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(value, (int, str, bytes, float, Decimal))
if isinstance(value, int):
return True
elif isinstance(value, float):
return False
elif isinstance(value, Decimal):
return str(value).isdigit()
elif isinstance(value, (str, bytes)):
return value.isdigit()
raise ValueError() # pragma: nocover
def padded_split(value, sep, maxsplit=None, pad=None):
"""
Modified split() to include padding
See http://code.activestate.com/lists/python-ideas/3366/
:attr value: see str.split()
:attr sep: see str.split()
:attr maxsplit: see str.split()
:attr pad: Value to use for padding maxsplit
>>> padded_split('text/html', ';', 1)
['text/html', None]
>>> padded_split('text/html;q=1', ';', 1)
['text/html', 'q=1']
>>> padded_split('text/html;a=1;b=2', ';', 1)
['text/html', 'a=1;b=2']
>>> padded_split('text/html', ';', 1, True)
['text/html', True]
>>> padded_split('text/html;a=1;b=2', ';', 2)
['text/html', 'a=1', 'b=2']
>>> padded_split('text/html;a=1', ';', 2)
['text/html', 'a=1', None]
"""
result = value.split(sep, maxsplit)
if maxsplit is not None:
result.extend(
[pad] * (1+maxsplit-len(result)))
return result
def coerce_to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
"""
Coerce value to bytes
>>> a = coerce_to_bytes('hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(b'hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(None)
>>> assert a is None
>>> coerce_to_bytes(object())
Traceback (most recent call last):
...
TypeError: Cannot coerce to bytes
"""
PY2 = sys.version_info[0] == 2
if PY2: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
else: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
def get_exception():
"""
Workaround for the missing "as" keyword in py3k.
XXX: needs UT
"""
return sys.exc_info()[1]
def makelist(data):
"""
Thanks bottle
XXX: needs UT
"""
if isinstance(data, (list, set, tuple)):
return list(data)
elif data:
return [data]
else:
return []
def random_date_between(start_date, end_date):
"""Return random date between start/end"""
assert isinstance(start_date, datetime.date)
delta_secs = int((end_date - start_date).total_seconds())
delta = datetime.timedelta(seconds=random.randint(0, delta_secs))
return (start_date + delta)
def datetime_to_epoch(dt):
return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
class Tempfile(object):
"""
Tempfile wrapper with cleanup support
XXX: Needs UT
"""
def __init__(self):
self.paths = []
def mkstemp(self, *args, **kwargs):
path = tempfile.mkstemp(*args, **kwargs)
self.paths.append(path)
return path
def mkdtemp(self, *args, **kwargs):
path = tempfile.mkdtemp(*args, **kwargs)
self.paths.append(path)
return path
def cleanup(self):
"""Remove any created temp paths"""
for path in self.paths:
if isinstance(path, tuple):
os.close(path[0])
os.unlink(path[1])
else:
shutil.rmtree(path)
self.paths = []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.cleanup()
|
foxx/python-helpful
|
helpful.py
|
default
|
python
|
def default(*args, **kwargs):
default = kwargs.get('default', None)
for arg in args:
if arg:
return arg
return default
|
Return first argument which is "truthy"
>>> default(None, None, 1)
1
>>> default(None, None, 123)
123
>>> print(default(None, None))
None
|
train
|
https://github.com/foxx/python-helpful/blob/e31ad9bdf45051d8b9a0d1808d214e2293c3bbed/helpful.py#L408-L423
| null |
import six
import os
import shutil
import warnings
import tempfile
import importlib
import pkgutil
import inspect
import string
import random
import sys
import itertools
import datetime
from decimal import Decimal
from collections import OrderedDict, Iterable
if six.PY2: # pragma: nocover
text_type = unicode
string_types = (str, unicode)
else: # pragma: nocover
string_types = (str, )
text_type = str
NoneType = type(None)
###########################################################
# Mixins
###########################################################
class ClassDictMixin():
"""
Dict which can be accessed via class attributes
Thanks http://www.goodcode.io/blog/python-dict-object/
"""
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if name in self:
del self[name]
else:
raise AttributeError("No such attribute: " + name)
def copy(self):
# XXX: needs UT
return self.__class__(**self)
def unique_iter(seq):
"""
See http://www.peterbe.com/plog/uniqifiers-benchmark
Originally f8 written by Dave Kirby
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def flatteniter(iter_lst):
"""
>>> flatteniter([[1,2,3], [4,5,6]])
[1, 2, 3, 4, 5, 6]
"""
return list(itertools.chain(*iter_lst))
class HashableDictMixin(object):
def __hash__(self):
"""
This /should/ allow object to be hashable, for use in a set
XXX: Needs UT
Thanks Raymond @ http://stackoverflow.com/a/16162138/1267398
"""
return hash((frozenset(self), frozenset(self.values())))
###########################################################
# Hashable dict
###########################################################
class ClassDict(ClassDictMixin, dict):
"""
>>> d = ClassDict(hello="world")
>>> d.hello
'world'
>>> d.get('hello')
'world'
>>> d.hello = 'wtf'
>>> d.hello
'wtf'
>>> d['hello']
'wtf'
>>> d.world
Traceback (most recent call last):
AttributeError:
>>> del d.hello
>>> del d.world
Traceback (most recent call last):
AttributeError:
>>> d.hello = 1
>>> b = d.copy()
>>> b.hello = 2
>>> b.hello == d.hello
False
"""
class HashableDict(HashableDictMixin, dict):
"""
>>> hash(HashableDict(a=1, b=2)) is not None
True
"""
class HashableOrderedDict(HashableDictMixin, OrderedDict):
"""
>>> hash(HashableOrderedDict(a=1, b=2)) is not None
True
"""
def ensure_class(obj):
"""
Ensure object is a class
>>> ensure_class(object)
>>> ensure_class(object())
Traceback (most recent call last):
TypeError:
>>> ensure_class(1)
Traceback (most recent call last):
TypeError:
"""
if not inspect.isclass(obj):
raise TypeError("Expected class, got {}".format(obj))
def iter_ensure_class(iterable):
"""
Ensure every item in iterable is a class
>>> iter_ensure_class([object, object])
>>> iter_ensure_class([object, object()])
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_class(item) for item in iterable ]
def ensure_subclass(value, types):
"""
Ensure value is a subclass of types
>>> class Hello(object): pass
>>> ensure_subclass(Hello, Hello)
>>> ensure_subclass(object, Hello)
Traceback (most recent call last):
TypeError:
"""
ensure_class(value)
if not issubclass(value, types):
raise TypeError(
"expected subclass of {}, not {}".format(
types, value))
def ensure_instance(value, types):
"""
Ensure value is an instance of a certain type
>>> ensure_instance(1, [str])
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, str)
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, int)
>>> ensure_instance(1, (int, str))
:attr types: Type of list of types
"""
if not isinstance(value, types):
raise TypeError(
"expected instance of {}, got {}".format(
types, value))
def iter_ensure_instance(iterable, types):
"""
Iterate over object and check each item type
>>> iter_ensure_instance([1,2,3], [str])
Traceback (most recent call last):
TypeError:
>>> iter_ensure_instance([1,2,3], int)
>>> iter_ensure_instance(1, int)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_instance(item, types) for item in iterable ]
def touch(path, times=None):
"""
Implements unix utility `touch`
XXX: Needs UT
:attr fname: File path
:attr times: See `os.utime()` for args
https://docs.python.org/3.4/library/os.html#os.utime
"""
with open(path, 'a'):
os.utime(path, times)
def import_recursive(path):
"""
Recursively import all modules and packages
Thanks http://stackoverflow.com/a/25562415/1267398
XXX: Needs UT
:attr path: Path to package/module
"""
results = {}
obj = importlib.import_module(path)
results[path] = obj
path = getattr(obj, '__path__', os.path.dirname(obj.__file__))
for loader, name, is_pkg in pkgutil.walk_packages(path):
full_name = obj.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if is_pkg:
results.update(import_recursive(full_name))
return results
def extend_instance(instance, *bases, **kwargs):
"""
Apply subclass (mixin) to a class object or its instance
By default, the mixin is placed at the start of bases
to ensure its called first as per MRO. If you wish to
have it injected last, which is useful for monkeypatching,
then you can specify 'last=True'. See here:
http://stackoverflow.com/a/10018792/1267398
:attr cls: Target object
:type cls: Class instance
:attr bases: List of new bases to subclass with
:attr last: Inject new bases after existing bases
:type last: bool
>>> class A(object): pass
>>> class B(object): pass
>>> a = A()
>>> b = B()
>>> isinstance(b, A)
False
>>> extend_instance(b, A)
>>> isinstance(b, A)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
assert not inspect.isclass(instance)
base_cls = instance.__class__
base_cls_name = instance.__class__.__name__
new_bases = (base_cls,)+bases if last else bases+(base_cls,)
new_cls = type(base_cls_name, tuple(new_bases), {})
setattr(instance, '__class__', new_cls)
def add_bases(cls, *bases):
"""
Add bases to class
>>> class Base(object): pass
>>> class A(Base): pass
>>> class B(Base): pass
>>> issubclass(A, B)
False
>>> add_bases(A, B)
>>> issubclass(A, B)
True
"""
assert inspect.isclass(cls), "Expected class object"
for mixin in bases:
assert inspect.isclass(mixin), "Expected class object for bases"
new_bases = (bases + cls.__bases__)
cls.__bases__ = new_bases
def subclass(cls, *bases, **kwargs):
"""
Add bases to class (late subclassing)
Annoyingly we cannot yet modify __bases__ of an existing
class, instead we must create another subclass, see here;
http://bugs.python.org/issue672115
>>> class A(object): pass
>>> class B(object): pass
>>> class C(object): pass
>>> issubclass(B, A)
False
>>> D = subclass(B, A)
>>> issubclass(D, A)
True
>>> issubclass(D, B)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
new_bases = (cls,)+bases if last else bases+(cls,)
new_cls = type(cls.__name__, tuple(new_bases), {})
return new_cls
def import_from_path(path):
"""
Imports a package, module or attribute from path
Thanks http://stackoverflow.com/a/14050282/1267398
>>> import_from_path('os.path')
<module 'posixpath' ...
>>> import_from_path('os.path.basename')
<function basename at ...
>>> import_from_path('os')
<module 'os' from ...
>>> import_from_path('getrektcunt')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc.dummylol')
Traceback (most recent call last):
ImportError:
"""
try:
return importlib.import_module(path)
except ImportError:
if '.' not in path:
raise
module_name, attr_name = path.rsplit('.', 1)
if not does_module_exist(module_name):
raise ImportError("No object found at '{}'".format(path))
mod = importlib.import_module(module_name)
if not hasattr(mod, attr_name):
raise ImportError("No object found at '{}'".format(path))
return getattr(mod, attr_name)
def does_module_exist(path):
"""
Check if Python module exists at path
>>> does_module_exist('os.path')
True
>>> does_module_exist('dummy.app')
False
"""
try:
importlib.import_module(path)
return True
except ImportError:
return False
def sort_dict_by_key(obj):
"""
Sort dict by its keys
>>> sort_dict_by_key(dict(c=1, b=2, a=3, d=4))
OrderedDict([('a', 3), ('b', 2), ('c', 1), ('d', 4)])
"""
sort_func = lambda x: x[0]
return OrderedDict(sorted(obj.items(), key=sort_func))
def generate_random_token(length=32):
"""
Generate random secure token
>>> len(generate_random_token())
32
>>> len(generate_random_token(6))
6
"""
chars = (string.ascii_lowercase + string.ascii_uppercase + string.digits)
return ''.join(random.choice(chars) for _ in range(length))
def urljoin(*args):
"""
Joins given arguments into a url, removing duplicate slashes
Thanks http://stackoverflow.com/a/11326230/1267398
>>> urljoin('/lol', '///lol', '/lol//')
'/lol/lol/lol'
"""
value = "/".join(map(lambda x: str(x).strip('/'), args))
return "/{}".format(value)
def is_hex(value):
"""
Check if value is hex
>>> is_hex('abab')
True
>>> is_hex('gg')
False
"""
try:
int(value, 16)
except ValueError:
return False
else:
return True
def is_int(value):
"""
Check if value is an int
:type value: int, str, bytes, float, Decimal
>>> is_int(123), is_int('123'), is_int(Decimal('10'))
(True, True, True)
>>> is_int(1.1), is_int('1.1'), is_int(Decimal('10.1'))
(False, False, False)
>>> is_int(object)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(value, (int, str, bytes, float, Decimal))
if isinstance(value, int):
return True
elif isinstance(value, float):
return False
elif isinstance(value, Decimal):
return str(value).isdigit()
elif isinstance(value, (str, bytes)):
return value.isdigit()
raise ValueError() # pragma: nocover
def padded_split(value, sep, maxsplit=None, pad=None):
"""
Modified split() to include padding
See http://code.activestate.com/lists/python-ideas/3366/
:attr value: see str.split()
:attr sep: see str.split()
:attr maxsplit: see str.split()
:attr pad: Value to use for padding maxsplit
>>> padded_split('text/html', ';', 1)
['text/html', None]
>>> padded_split('text/html;q=1', ';', 1)
['text/html', 'q=1']
>>> padded_split('text/html;a=1;b=2', ';', 1)
['text/html', 'a=1;b=2']
>>> padded_split('text/html', ';', 1, True)
['text/html', True]
>>> padded_split('text/html;a=1;b=2', ';', 2)
['text/html', 'a=1', 'b=2']
>>> padded_split('text/html;a=1', ';', 2)
['text/html', 'a=1', None]
"""
result = value.split(sep, maxsplit)
if maxsplit is not None:
result.extend(
[pad] * (1+maxsplit-len(result)))
return result
def coerce_to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
"""
Coerce value to bytes
>>> a = coerce_to_bytes('hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(b'hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(None)
>>> assert a is None
>>> coerce_to_bytes(object())
Traceback (most recent call last):
...
TypeError: Cannot coerce to bytes
"""
PY2 = sys.version_info[0] == 2
if PY2: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
else: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
def get_exception():
"""
Workaround for the missing "as" keyword in py3k.
XXX: needs UT
"""
return sys.exc_info()[1]
def makelist(data):
"""
Thanks bottle
XXX: needs UT
"""
if isinstance(data, (list, set, tuple)):
return list(data)
elif data:
return [data]
else:
return []
def random_date_between(start_date, end_date):
"""Return random date between start/end"""
assert isinstance(start_date, datetime.date)
delta_secs = int((end_date - start_date).total_seconds())
delta = datetime.timedelta(seconds=random.randint(0, delta_secs))
return (start_date + delta)
def datetime_to_epoch(dt):
return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
class Tempfile(object):
"""
Tempfile wrapper with cleanup support
XXX: Needs UT
"""
def __init__(self):
self.paths = []
def mkstemp(self, *args, **kwargs):
path = tempfile.mkstemp(*args, **kwargs)
self.paths.append(path)
return path
def mkdtemp(self, *args, **kwargs):
path = tempfile.mkdtemp(*args, **kwargs)
self.paths.append(path)
return path
def cleanup(self):
"""Remove any created temp paths"""
for path in self.paths:
if isinstance(path, tuple):
os.close(path[0])
os.unlink(path[1])
else:
shutil.rmtree(path)
self.paths = []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.cleanup()
|
foxx/python-helpful
|
helpful.py
|
urljoin
|
python
|
def urljoin(*args):
value = "/".join(map(lambda x: str(x).strip('/'), args))
return "/{}".format(value)
|
Joins given arguments into a url, removing duplicate slashes
Thanks http://stackoverflow.com/a/11326230/1267398
>>> urljoin('/lol', '///lol', '/lol//')
'/lol/lol/lol'
|
train
|
https://github.com/foxx/python-helpful/blob/e31ad9bdf45051d8b9a0d1808d214e2293c3bbed/helpful.py#L426-L435
| null |
import six
import os
import shutil
import warnings
import tempfile
import importlib
import pkgutil
import inspect
import string
import random
import sys
import itertools
import datetime
from decimal import Decimal
from collections import OrderedDict, Iterable
if six.PY2: # pragma: nocover
text_type = unicode
string_types = (str, unicode)
else: # pragma: nocover
string_types = (str, )
text_type = str
NoneType = type(None)
###########################################################
# Mixins
###########################################################
class ClassDictMixin():
"""
Dict which can be accessed via class attributes
Thanks http://www.goodcode.io/blog/python-dict-object/
"""
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if name in self:
del self[name]
else:
raise AttributeError("No such attribute: " + name)
def copy(self):
# XXX: needs UT
return self.__class__(**self)
def unique_iter(seq):
"""
See http://www.peterbe.com/plog/uniqifiers-benchmark
Originally f8 written by Dave Kirby
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def flatteniter(iter_lst):
"""
>>> flatteniter([[1,2,3], [4,5,6]])
[1, 2, 3, 4, 5, 6]
"""
return list(itertools.chain(*iter_lst))
class HashableDictMixin(object):
def __hash__(self):
"""
This /should/ allow object to be hashable, for use in a set
XXX: Needs UT
Thanks Raymond @ http://stackoverflow.com/a/16162138/1267398
"""
return hash((frozenset(self), frozenset(self.values())))
###########################################################
# Hashable dict
###########################################################
class ClassDict(ClassDictMixin, dict):
"""
>>> d = ClassDict(hello="world")
>>> d.hello
'world'
>>> d.get('hello')
'world'
>>> d.hello = 'wtf'
>>> d.hello
'wtf'
>>> d['hello']
'wtf'
>>> d.world
Traceback (most recent call last):
AttributeError:
>>> del d.hello
>>> del d.world
Traceback (most recent call last):
AttributeError:
>>> d.hello = 1
>>> b = d.copy()
>>> b.hello = 2
>>> b.hello == d.hello
False
"""
class HashableDict(HashableDictMixin, dict):
"""
>>> hash(HashableDict(a=1, b=2)) is not None
True
"""
class HashableOrderedDict(HashableDictMixin, OrderedDict):
"""
>>> hash(HashableOrderedDict(a=1, b=2)) is not None
True
"""
def ensure_class(obj):
"""
Ensure object is a class
>>> ensure_class(object)
>>> ensure_class(object())
Traceback (most recent call last):
TypeError:
>>> ensure_class(1)
Traceback (most recent call last):
TypeError:
"""
if not inspect.isclass(obj):
raise TypeError("Expected class, got {}".format(obj))
def iter_ensure_class(iterable):
"""
Ensure every item in iterable is a class
>>> iter_ensure_class([object, object])
>>> iter_ensure_class([object, object()])
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_class(item) for item in iterable ]
def ensure_subclass(value, types):
"""
Ensure value is a subclass of types
>>> class Hello(object): pass
>>> ensure_subclass(Hello, Hello)
>>> ensure_subclass(object, Hello)
Traceback (most recent call last):
TypeError:
"""
ensure_class(value)
if not issubclass(value, types):
raise TypeError(
"expected subclass of {}, not {}".format(
types, value))
def ensure_instance(value, types):
"""
Ensure value is an instance of a certain type
>>> ensure_instance(1, [str])
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, str)
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, int)
>>> ensure_instance(1, (int, str))
:attr types: Type of list of types
"""
if not isinstance(value, types):
raise TypeError(
"expected instance of {}, got {}".format(
types, value))
def iter_ensure_instance(iterable, types):
"""
Iterate over object and check each item type
>>> iter_ensure_instance([1,2,3], [str])
Traceback (most recent call last):
TypeError:
>>> iter_ensure_instance([1,2,3], int)
>>> iter_ensure_instance(1, int)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_instance(item, types) for item in iterable ]
def touch(path, times=None):
"""
Implements unix utility `touch`
XXX: Needs UT
:attr fname: File path
:attr times: See `os.utime()` for args
https://docs.python.org/3.4/library/os.html#os.utime
"""
with open(path, 'a'):
os.utime(path, times)
def import_recursive(path):
"""
Recursively import all modules and packages
Thanks http://stackoverflow.com/a/25562415/1267398
XXX: Needs UT
:attr path: Path to package/module
"""
results = {}
obj = importlib.import_module(path)
results[path] = obj
path = getattr(obj, '__path__', os.path.dirname(obj.__file__))
for loader, name, is_pkg in pkgutil.walk_packages(path):
full_name = obj.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if is_pkg:
results.update(import_recursive(full_name))
return results
def extend_instance(instance, *bases, **kwargs):
"""
Apply subclass (mixin) to a class object or its instance
By default, the mixin is placed at the start of bases
to ensure its called first as per MRO. If you wish to
have it injected last, which is useful for monkeypatching,
then you can specify 'last=True'. See here:
http://stackoverflow.com/a/10018792/1267398
:attr cls: Target object
:type cls: Class instance
:attr bases: List of new bases to subclass with
:attr last: Inject new bases after existing bases
:type last: bool
>>> class A(object): pass
>>> class B(object): pass
>>> a = A()
>>> b = B()
>>> isinstance(b, A)
False
>>> extend_instance(b, A)
>>> isinstance(b, A)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
assert not inspect.isclass(instance)
base_cls = instance.__class__
base_cls_name = instance.__class__.__name__
new_bases = (base_cls,)+bases if last else bases+(base_cls,)
new_cls = type(base_cls_name, tuple(new_bases), {})
setattr(instance, '__class__', new_cls)
def add_bases(cls, *bases):
"""
Add bases to class
>>> class Base(object): pass
>>> class A(Base): pass
>>> class B(Base): pass
>>> issubclass(A, B)
False
>>> add_bases(A, B)
>>> issubclass(A, B)
True
"""
assert inspect.isclass(cls), "Expected class object"
for mixin in bases:
assert inspect.isclass(mixin), "Expected class object for bases"
new_bases = (bases + cls.__bases__)
cls.__bases__ = new_bases
def subclass(cls, *bases, **kwargs):
"""
Add bases to class (late subclassing)
Annoyingly we cannot yet modify __bases__ of an existing
class, instead we must create another subclass, see here;
http://bugs.python.org/issue672115
>>> class A(object): pass
>>> class B(object): pass
>>> class C(object): pass
>>> issubclass(B, A)
False
>>> D = subclass(B, A)
>>> issubclass(D, A)
True
>>> issubclass(D, B)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
new_bases = (cls,)+bases if last else bases+(cls,)
new_cls = type(cls.__name__, tuple(new_bases), {})
return new_cls
def import_from_path(path):
    """
    Import a package, module, or module attribute addressed by a
    dotted *path*.

    Tries a straight module import first; if that fails and the path
    is dotted, the final segment is resolved as an attribute of the
    parent module.  Raises ImportError if nothing is found.

    Thanks http://stackoverflow.com/a/14050282/1267398

    >>> import_from_path('os.path.basename')
    <function basename at ...
    """
    try:
        return importlib.import_module(path)
    except ImportError:
        if '.' not in path:
            raise
        # Fall back: last segment may be an attribute of the parent.
        parent_path, _, attr_name = path.rpartition('.')
        if not does_module_exist(parent_path):
            raise ImportError("No object found at '{}'".format(path))
        parent = importlib.import_module(parent_path)
        try:
            return getattr(parent, attr_name)
        except AttributeError:
            raise ImportError("No object found at '{}'".format(path))
def does_module_exist(path):
    """
    Return True if a Python module can be imported from *path*.

    Note: this actually imports the module (and runs its top-level
    code) as a side effect of the check.

    >>> does_module_exist('os.path')
    True
    >>> does_module_exist('dummy.app')
    False
    """
    try:
        importlib.import_module(path)
    except ImportError:
        return False
    return True
def sort_dict_by_key(obj):
    """
    Return an OrderedDict with *obj*'s items sorted by key.

    >>> sort_dict_by_key(dict(c=1, b=2, a=3, d=4))
    OrderedDict([('a', 3), ('b', 2), ('c', 1), ('d', 4)])
    """
    return OrderedDict((key, obj[key]) for key in sorted(obj))
def generate_random_token(length=32):
    """
    Generate a random secure token of *length* alphanumeric characters.

    Uses ``random.SystemRandom`` (backed by ``os.urandom``) rather than
    the default Mersenne Twister generator: the module-level ``random``
    functions are predictable and unsuitable for security tokens.

    :attr length: number of characters in the token (default 32)

    >>> len(generate_random_token())
    32
    >>> len(generate_random_token(6))
    6
    """
    chars = (string.ascii_lowercase + string.ascii_uppercase + string.digits)
    rng = random.SystemRandom()
    return ''.join(rng.choice(chars) for _ in range(length))
def default(*args, **kwargs):
    """
    Return the first positional argument that is "truthy".

    If no argument is truthy, returns the ``default`` keyword argument
    (None if not given).

    >>> default(None, None, 1)
    1
    >>> print(default(None, None))
    None
    """
    fallback = kwargs.get('default', None)
    return next((arg for arg in args if arg), fallback)
def is_hex(value):
    """
    Return True if *value* parses as a base-16 integer.

    >>> is_hex('abab')
    True
    >>> is_hex('gg')
    False
    """
    try:
        int(value, 16)
        return True
    except ValueError:
        return False
def is_int(value):
    """
    Check if value represents an integer.

    Note: for str/bytes/Decimal inputs only digit-only forms count
    (no sign, no whitespace), mirroring ``isdigit()``.

    :type value: int, str, bytes, float, Decimal

    >>> is_int(123), is_int('123'), is_int(Decimal('10'))
    (True, True, True)
    >>> is_int(1.1), is_int('1.1'), is_int(Decimal('10.1'))
    (False, False, False)
    """
    accepted = (int, str, bytes, float, Decimal)
    if not isinstance(value, accepted):
        raise TypeError(
            "expected instance of {}, got {}".format(accepted, value))
    if isinstance(value, int):
        return True
    if isinstance(value, float):
        return False
    if isinstance(value, Decimal):
        return str(value).isdigit()
    # str / bytes remain
    return value.isdigit()
def padded_split(value, sep, maxsplit=None, pad=None):
    """
    Like str.split() but pads the result to maxsplit+1 entries.

    See http://code.activestate.com/lists/python-ideas/3366/

    :attr value: see str.split()
    :attr sep: see str.split()
    :attr maxsplit: see str.split()
    :attr pad: value used to pad short results

    >>> padded_split('text/html', ';', 1)
    ['text/html', None]
    >>> padded_split('text/html;a=1', ';', 2)
    ['text/html', 'a=1', None]
    """
    if maxsplit is None:
        return value.split(sep)
    pieces = value.split(sep, maxsplit)
    shortfall = (maxsplit + 1) - len(pieces)
    pieces += [pad] * shortfall
    return pieces
def coerce_to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
    """
    Coerce *x* to bytes (None passes through unchanged).

    Byte-like objects are copied into a fresh bytes object; text is
    encoded with *charset*/*errors*.  Anything else raises TypeError.

    >>> coerce_to_bytes('hello')
    b'hello'
    >>> coerce_to_bytes(None) is None
    True
    """
    if x is None:
        return None
    # Pick the byte-like / text types for the running interpreter;
    # the py2-only names are never evaluated under py3.
    if sys.version_info[0] == 2:  # pragma: nocover
        byte_like = (bytes, bytearray, buffer)
        text_cls = unicode
    else:  # pragma: nocover
        byte_like = (bytes, bytearray, memoryview)
        text_cls = str
    if isinstance(x, byte_like):
        return bytes(x)
    if isinstance(x, text_cls):
        return x.encode(charset, errors)
    raise TypeError('Cannot coerce to bytes')
def get_exception():
    """
    Return the exception currently being handled.

    Workaround for the missing "as" keyword in py3k.
    XXX: needs UT
    """
    _, current_exc, _ = sys.exc_info()
    return current_exc
def makelist(data):
    """
    Coerce *data* into a list.

    list/set/tuple are converted element-wise; any other truthy value
    is wrapped in a one-element list; falsy values yield [].
    Thanks bottle.
    """
    if isinstance(data, (list, set, tuple)):
        return list(data)
    return [data] if data else []
def random_date_between(start_date, end_date):
    """Return a random date between start_date and end_date (inclusive,
    at whole-second granularity)."""
    assert isinstance(start_date, datetime.date)
    span_seconds = int((end_date - start_date).total_seconds())
    offset = datetime.timedelta(seconds=random.randint(0, span_seconds))
    return start_date + offset
def datetime_to_epoch(dt):
    """Return seconds between the Unix epoch and naive datetime *dt*
    (presumably UTC -- tz-aware datetimes would raise on subtraction)."""
    epoch = datetime.datetime(1970, 1, 1)
    return (dt - epoch).total_seconds()
class Tempfile(object):
    """
    Context-manager wrapper around ``tempfile`` that removes everything
    it created on cleanup/exit.
    XXX: Needs UT
    """

    def __init__(self):
        # Each entry is either an (fd, path) tuple from mkstemp
        # or a directory path from mkdtemp.
        self.paths = []

    def mkstemp(self, *args, **kwargs):
        """Create a temp file; returns (fd, path) and tracks it."""
        created = tempfile.mkstemp(*args, **kwargs)
        self.paths.append(created)
        return created

    def mkdtemp(self, *args, **kwargs):
        """Create a temp directory; returns its path and tracks it."""
        created = tempfile.mkdtemp(*args, **kwargs)
        self.paths.append(created)
        return created

    def cleanup(self):
        """Remove any created temp paths"""
        for entry in self.paths:
            if isinstance(entry, tuple):
                # mkstemp leaves the fd open; close before unlinking.
                os.close(entry[0])
                os.unlink(entry[1])
            else:
                shutil.rmtree(entry)
        self.paths = []

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.cleanup()
|
foxx/python-helpful
|
helpful.py
|
is_int
|
python
|
def is_int(value):
ensure_instance(value, (int, str, bytes, float, Decimal))
if isinstance(value, int):
return True
elif isinstance(value, float):
return False
elif isinstance(value, Decimal):
return str(value).isdigit()
elif isinstance(value, (str, bytes)):
return value.isdigit()
raise ValueError()
|
Check if value is an int
:type value: int, str, bytes, float, Decimal
>>> is_int(123), is_int('123'), is_int(Decimal('10'))
(True, True, True)
>>> is_int(1.1), is_int('1.1'), is_int(Decimal('10.1'))
(False, False, False)
>>> is_int(object)
Traceback (most recent call last):
TypeError:
|
train
|
https://github.com/foxx/python-helpful/blob/e31ad9bdf45051d8b9a0d1808d214e2293c3bbed/helpful.py#L455-L478
|
[
"def ensure_instance(value, types):\n \"\"\"\n Ensure value is an instance of a certain type\n\n >>> ensure_instance(1, [str])\n Traceback (most recent call last):\n TypeError:\n\n >>> ensure_instance(1, str)\n Traceback (most recent call last):\n TypeError:\n\n >>> ensure_instance(1, int)\n >>> ensure_instance(1, (int, str))\n\n :attr types: Type of list of types\n \"\"\"\n if not isinstance(value, types):\n raise TypeError(\n \"expected instance of {}, got {}\".format(\n types, value))\n"
] |
import six
import os
import shutil
import warnings
import tempfile
import importlib
import pkgutil
import inspect
import string
import random
import sys
import itertools
import datetime
from decimal import Decimal
from collections import OrderedDict, Iterable
if six.PY2: # pragma: nocover
text_type = unicode
string_types = (str, unicode)
else: # pragma: nocover
string_types = (str, )
text_type = str
NoneType = type(None)
###########################################################
# Mixins
###########################################################
class ClassDictMixin():
    """
    Mixin for dict subclasses exposing keys as attributes.
    Thanks http://www.goodcode.io/blog/python-dict-object/
    """
    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails.
        try:
            return self[name]
        except KeyError:
            raise AttributeError("No such attribute: " + name)
    def __setattr__(self, name, value):
        # Every attribute write becomes an item write.
        self[name] = value
    def __delattr__(self, name):
        try:
            del self[name]
        except KeyError:
            raise AttributeError("No such attribute: " + name)
    def copy(self):
        # Shallow copy preserving the concrete subclass;
        # requires string keys (passed as keyword args).
        return self.__class__(**self)
def unique_iter(seq):
    """
    Return the items of *seq* as a list with duplicates removed,
    preserving first-seen order.

    See http://www.peterbe.com/plog/uniqifiers-benchmark
    Originally f8 written by Dave Kirby
    """
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
def flatteniter(iter_lst):
    """
    Flatten one level of nesting into a list.

    >>> flatteniter([[1,2,3], [4,5,6]])
    [1, 2, 3, 4, 5, 6]
    """
    return [item for inner in iter_lst for item in inner]
class HashableDictMixin(object):
    # Mixin giving dict subclasses a __hash__ so instances can be
    # stored in sets or used as dict keys.
    def __hash__(self):
        """
        Hash derived from the frozensets of the dict's keys and values.

        NOTE(review): assumes all values are hashable, and ignores which
        value belongs to which key -- two different mappings over the same
        key/value pools hash equal (legal, but confirm it is intended).
        Also note dict's __eq__ is unchanged, so equal-hashing instances
        may still compare unequal.
        XXX: Needs UT
        Thanks Raymond @ http://stackoverflow.com/a/16162138/1267398
        """
        return hash((frozenset(self), frozenset(self.values())))
###########################################################
# Hashable dict
###########################################################
class ClassDict(ClassDictMixin, dict):
"""
>>> d = ClassDict(hello="world")
>>> d.hello
'world'
>>> d.get('hello')
'world'
>>> d.hello = 'wtf'
>>> d.hello
'wtf'
>>> d['hello']
'wtf'
>>> d.world
Traceback (most recent call last):
AttributeError:
>>> del d.hello
>>> del d.world
Traceback (most recent call last):
AttributeError:
>>> d.hello = 1
>>> b = d.copy()
>>> b.hello = 2
>>> b.hello == d.hello
False
"""
class HashableDict(HashableDictMixin, dict):
"""
>>> hash(HashableDict(a=1, b=2)) is not None
True
"""
class HashableOrderedDict(HashableDictMixin, OrderedDict):
"""
>>> hash(HashableOrderedDict(a=1, b=2)) is not None
True
"""
def ensure_class(obj):
    """
    Raise TypeError unless *obj* is a class.

    >>> ensure_class(object)
    >>> ensure_class(object())
    Traceback (most recent call last):
    TypeError:
    """
    if inspect.isclass(obj):
        return
    raise TypeError("Expected class, got {}".format(obj))
def iter_ensure_class(iterable):
    """
    Raise TypeError unless every item in *iterable* is a class.

    >>> iter_ensure_class([object, object])
    >>> iter_ensure_class([object, object()])
    Traceback (most recent call last):
    TypeError:
    """
    if not isinstance(iterable, Iterable):
        raise TypeError(
            "expected instance of {}, got {}".format(Iterable, iterable))
    for item in iterable:
        if not inspect.isclass(item):
            raise TypeError("Expected class, got {}".format(item))
def ensure_subclass(value, types):
    """
    Raise TypeError unless *value* is a class and a subclass of *types*.

    >>> class Hello(object): pass
    >>> ensure_subclass(Hello, Hello)
    >>> ensure_subclass(object, Hello)
    Traceback (most recent call last):
    TypeError:
    """
    if not inspect.isclass(value):
        raise TypeError("Expected class, got {}".format(value))
    if not issubclass(value, types):
        raise TypeError(
            "expected subclass of {}, not {}".format(types, value))
def ensure_instance(value, types):
    """
    Raise TypeError unless *value* is an instance of *types*.

    :attr types: a type or tuple of types (as accepted by isinstance)

    >>> ensure_instance(1, int)
    >>> ensure_instance(1, (int, str))
    >>> ensure_instance(1, str)
    Traceback (most recent call last):
    TypeError:
    """
    if isinstance(value, types):
        return
    raise TypeError(
        "expected instance of {}, got {}".format(types, value))
def iter_ensure_instance(iterable, types):
    """
    Raise TypeError unless *iterable* is iterable and every item
    is an instance of *types*.

    >>> iter_ensure_instance([1,2,3], int)
    >>> iter_ensure_instance([1,2,3], [str])
    Traceback (most recent call last):
    TypeError:
    """
    if not isinstance(iterable, Iterable):
        raise TypeError(
            "expected instance of {}, got {}".format(Iterable, iterable))
    for item in iterable:
        if not isinstance(item, types):
            raise TypeError(
                "expected instance of {}, got {}".format(types, item))
def touch(path, times=None):
    """
    Implements unix utility `touch`

    Creates the file if it does not exist, then updates its
    access/modification times.

    XXX: Needs UT
    :attr path: File path
    :attr times: See `os.utime()` for args
        https://docs.python.org/3.4/library/os.html#os.utime
    """
    # 'a' creates the file without truncating any existing content.
    with open(path, 'a'):
        os.utime(path, times)
def import_recursive(path):
    """
    Recursively import all modules and packages under *path*.

    Returns a dict mapping dotted names to the imported module objects.
    Thanks http://stackoverflow.com/a/25562415/1267398
    XXX: Needs UT

    :attr path: dotted path to a package or module
    """
    results = {}
    obj = importlib.import_module(path)
    results[path] = obj
    search_paths = getattr(obj, '__path__', None)
    if search_paths is None:
        # Plain modules have no __path__. walk_packages() expects an
        # *iterable of directory paths*; the original passed a bare
        # string here, which gets iterated per-character.
        search_paths = [os.path.dirname(obj.__file__)]
    for loader, name, is_pkg in pkgutil.walk_packages(search_paths):
        full_name = obj.__name__ + '.' + name
        results[full_name] = importlib.import_module(full_name)
        if is_pkg:
            results.update(import_recursive(full_name))
    return results
def extend_instance(instance, *bases, **kwargs):
    """
    Inject extra base classes (mixins) into a single instance.

    Builds a new subclass combining the instance's current class with
    *bases*, then swaps the instance's ``__class__`` to it.  By default
    the mixins come first in the MRO so they are called first; pass
    last=True to append them after the existing class instead (useful
    for monkeypatching).  See http://stackoverflow.com/a/10018792/1267398

    :attr instance: object to modify (must not itself be a class)
    :attr bases: classes to mix in
    :attr last: inject new bases after the existing class
    :type last: bool

    >>> class A(object): pass
    >>> class B(object): pass
    >>> b = B()
    >>> extend_instance(b, A)
    >>> isinstance(b, A)
    True
    """
    inject_last = kwargs.get('last', False)
    mixins = tuple(bases)
    for mixin in mixins:
        assert inspect.isclass(mixin), "bases must be classes"
    assert not inspect.isclass(instance)
    current = instance.__class__
    ordered = ((current,) + mixins) if inject_last else (mixins + (current,))
    instance.__class__ = type(current.__name__, ordered, {})
def add_bases(cls, *bases):
"""
Add bases to class
>>> class Base(object): pass
>>> class A(Base): pass
>>> class B(Base): pass
>>> issubclass(A, B)
False
>>> add_bases(A, B)
>>> issubclass(A, B)
True
"""
assert inspect.isclass(cls), "Expected class object"
for mixin in bases:
assert inspect.isclass(mixin), "Expected class object for bases"
new_bases = (bases + cls.__bases__)
cls.__bases__ = new_bases
def subclass(cls, *bases, **kwargs):
"""
Add bases to class (late subclassing)
Annoyingly we cannot yet modify __bases__ of an existing
class, instead we must create another subclass, see here;
http://bugs.python.org/issue672115
>>> class A(object): pass
>>> class B(object): pass
>>> class C(object): pass
>>> issubclass(B, A)
False
>>> D = subclass(B, A)
>>> issubclass(D, A)
True
>>> issubclass(D, B)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
new_bases = (cls,)+bases if last else bases+(cls,)
new_cls = type(cls.__name__, tuple(new_bases), {})
return new_cls
def import_from_path(path):
"""
Imports a package, module or attribute from path
Thanks http://stackoverflow.com/a/14050282/1267398
>>> import_from_path('os.path')
<module 'posixpath' ...
>>> import_from_path('os.path.basename')
<function basename at ...
>>> import_from_path('os')
<module 'os' from ...
>>> import_from_path('getrektcunt')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc.dummylol')
Traceback (most recent call last):
ImportError:
"""
try:
return importlib.import_module(path)
except ImportError:
if '.' not in path:
raise
module_name, attr_name = path.rsplit('.', 1)
if not does_module_exist(module_name):
raise ImportError("No object found at '{}'".format(path))
mod = importlib.import_module(module_name)
if not hasattr(mod, attr_name):
raise ImportError("No object found at '{}'".format(path))
return getattr(mod, attr_name)
def does_module_exist(path):
"""
Check if Python module exists at path
>>> does_module_exist('os.path')
True
>>> does_module_exist('dummy.app')
False
"""
try:
importlib.import_module(path)
return True
except ImportError:
return False
def sort_dict_by_key(obj):
"""
Sort dict by its keys
>>> sort_dict_by_key(dict(c=1, b=2, a=3, d=4))
OrderedDict([('a', 3), ('b', 2), ('c', 1), ('d', 4)])
"""
sort_func = lambda x: x[0]
return OrderedDict(sorted(obj.items(), key=sort_func))
def generate_random_token(length=32):
"""
Generate random secure token
>>> len(generate_random_token())
32
>>> len(generate_random_token(6))
6
"""
chars = (string.ascii_lowercase + string.ascii_uppercase + string.digits)
return ''.join(random.choice(chars) for _ in range(length))
def default(*args, **kwargs):
"""
Return first argument which is "truthy"
>>> default(None, None, 1)
1
>>> default(None, None, 123)
123
>>> print(default(None, None))
None
"""
default = kwargs.get('default', None)
for arg in args:
if arg:
return arg
return default
def urljoin(*args):
    """
    Join the arguments into a root-relative URL path, collapsing
    duplicate slashes.  The result always begins with '/'.

    Thanks http://stackoverflow.com/a/11326230/1267398

    >>> urljoin('/lol', '///lol', '/lol//')
    '/lol/lol/lol'
    """
    segments = [str(part).strip('/') for part in args]
    return "/" + "/".join(segments)
def is_hex(value):
"""
Check if value is hex
>>> is_hex('abab')
True
>>> is_hex('gg')
False
"""
try:
int(value, 16)
except ValueError:
return False
else:
return True
# pragma: nocover
def padded_split(value, sep, maxsplit=None, pad=None):
"""
Modified split() to include padding
See http://code.activestate.com/lists/python-ideas/3366/
:attr value: see str.split()
:attr sep: see str.split()
:attr maxsplit: see str.split()
:attr pad: Value to use for padding maxsplit
>>> padded_split('text/html', ';', 1)
['text/html', None]
>>> padded_split('text/html;q=1', ';', 1)
['text/html', 'q=1']
>>> padded_split('text/html;a=1;b=2', ';', 1)
['text/html', 'a=1;b=2']
>>> padded_split('text/html', ';', 1, True)
['text/html', True]
>>> padded_split('text/html;a=1;b=2', ';', 2)
['text/html', 'a=1', 'b=2']
>>> padded_split('text/html;a=1', ';', 2)
['text/html', 'a=1', None]
"""
result = value.split(sep, maxsplit)
if maxsplit is not None:
result.extend(
[pad] * (1+maxsplit-len(result)))
return result
def coerce_to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
"""
Coerce value to bytes
>>> a = coerce_to_bytes('hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(b'hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(None)
>>> assert a is None
>>> coerce_to_bytes(object())
Traceback (most recent call last):
...
TypeError: Cannot coerce to bytes
"""
PY2 = sys.version_info[0] == 2
if PY2: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
else: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
def get_exception():
"""
Workaround for the missing "as" keyword in py3k.
XXX: needs UT
"""
return sys.exc_info()[1]
def makelist(data):
"""
Thanks bottle
XXX: needs UT
"""
if isinstance(data, (list, set, tuple)):
return list(data)
elif data:
return [data]
else:
return []
def random_date_between(start_date, end_date):
"""Return random date between start/end"""
assert isinstance(start_date, datetime.date)
delta_secs = int((end_date - start_date).total_seconds())
delta = datetime.timedelta(seconds=random.randint(0, delta_secs))
return (start_date + delta)
def datetime_to_epoch(dt):
return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
class Tempfile(object):
"""
Tempfile wrapper with cleanup support
XXX: Needs UT
"""
def __init__(self):
self.paths = []
def mkstemp(self, *args, **kwargs):
path = tempfile.mkstemp(*args, **kwargs)
self.paths.append(path)
return path
def mkdtemp(self, *args, **kwargs):
path = tempfile.mkdtemp(*args, **kwargs)
self.paths.append(path)
return path
def cleanup(self):
"""Remove any created temp paths"""
for path in self.paths:
if isinstance(path, tuple):
os.close(path[0])
os.unlink(path[1])
else:
shutil.rmtree(path)
self.paths = []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.cleanup()
|
foxx/python-helpful
|
helpful.py
|
padded_split
|
python
|
def padded_split(value, sep, maxsplit=None, pad=None):
result = value.split(sep, maxsplit)
if maxsplit is not None:
result.extend(
[pad] * (1+maxsplit-len(result)))
return result
|
Modified split() to include padding
See http://code.activestate.com/lists/python-ideas/3366/
:attr value: see str.split()
:attr sep: see str.split()
:attr maxsplit: see str.split()
:attr pad: Value to use for padding maxsplit
>>> padded_split('text/html', ';', 1)
['text/html', None]
>>> padded_split('text/html;q=1', ';', 1)
['text/html', 'q=1']
>>> padded_split('text/html;a=1;b=2', ';', 1)
['text/html', 'a=1;b=2']
>>> padded_split('text/html', ';', 1, True)
['text/html', True]
>>> padded_split('text/html;a=1;b=2', ';', 2)
['text/html', 'a=1', 'b=2']
>>> padded_split('text/html;a=1', ';', 2)
['text/html', 'a=1', None]
|
train
|
https://github.com/foxx/python-helpful/blob/e31ad9bdf45051d8b9a0d1808d214e2293c3bbed/helpful.py#L481-L508
| null |
import six
import os
import shutil
import warnings
import tempfile
import importlib
import pkgutil
import inspect
import string
import random
import sys
import itertools
import datetime
from decimal import Decimal
from collections import OrderedDict, Iterable
if six.PY2: # pragma: nocover
text_type = unicode
string_types = (str, unicode)
else: # pragma: nocover
string_types = (str, )
text_type = str
NoneType = type(None)
###########################################################
# Mixins
###########################################################
class ClassDictMixin():
"""
Dict which can be accessed via class attributes
Thanks http://www.goodcode.io/blog/python-dict-object/
"""
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if name in self:
del self[name]
else:
raise AttributeError("No such attribute: " + name)
def copy(self):
# XXX: needs UT
return self.__class__(**self)
def unique_iter(seq):
"""
See http://www.peterbe.com/plog/uniqifiers-benchmark
Originally f8 written by Dave Kirby
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def flatteniter(iter_lst):
"""
>>> flatteniter([[1,2,3], [4,5,6]])
[1, 2, 3, 4, 5, 6]
"""
return list(itertools.chain(*iter_lst))
class HashableDictMixin(object):
def __hash__(self):
"""
This /should/ allow object to be hashable, for use in a set
XXX: Needs UT
Thanks Raymond @ http://stackoverflow.com/a/16162138/1267398
"""
return hash((frozenset(self), frozenset(self.values())))
###########################################################
# Hashable dict
###########################################################
class ClassDict(ClassDictMixin, dict):
"""
>>> d = ClassDict(hello="world")
>>> d.hello
'world'
>>> d.get('hello')
'world'
>>> d.hello = 'wtf'
>>> d.hello
'wtf'
>>> d['hello']
'wtf'
>>> d.world
Traceback (most recent call last):
AttributeError:
>>> del d.hello
>>> del d.world
Traceback (most recent call last):
AttributeError:
>>> d.hello = 1
>>> b = d.copy()
>>> b.hello = 2
>>> b.hello == d.hello
False
"""
class HashableDict(HashableDictMixin, dict):
"""
>>> hash(HashableDict(a=1, b=2)) is not None
True
"""
class HashableOrderedDict(HashableDictMixin, OrderedDict):
"""
>>> hash(HashableOrderedDict(a=1, b=2)) is not None
True
"""
def ensure_class(obj):
"""
Ensure object is a class
>>> ensure_class(object)
>>> ensure_class(object())
Traceback (most recent call last):
TypeError:
>>> ensure_class(1)
Traceback (most recent call last):
TypeError:
"""
if not inspect.isclass(obj):
raise TypeError("Expected class, got {}".format(obj))
def iter_ensure_class(iterable):
"""
Ensure every item in iterable is a class
>>> iter_ensure_class([object, object])
>>> iter_ensure_class([object, object()])
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_class(item) for item in iterable ]
def ensure_subclass(value, types):
"""
Ensure value is a subclass of types
>>> class Hello(object): pass
>>> ensure_subclass(Hello, Hello)
>>> ensure_subclass(object, Hello)
Traceback (most recent call last):
TypeError:
"""
ensure_class(value)
if not issubclass(value, types):
raise TypeError(
"expected subclass of {}, not {}".format(
types, value))
def ensure_instance(value, types):
"""
Ensure value is an instance of a certain type
>>> ensure_instance(1, [str])
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, str)
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, int)
>>> ensure_instance(1, (int, str))
:attr types: Type of list of types
"""
if not isinstance(value, types):
raise TypeError(
"expected instance of {}, got {}".format(
types, value))
def iter_ensure_instance(iterable, types):
"""
Iterate over object and check each item type
>>> iter_ensure_instance([1,2,3], [str])
Traceback (most recent call last):
TypeError:
>>> iter_ensure_instance([1,2,3], int)
>>> iter_ensure_instance(1, int)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_instance(item, types) for item in iterable ]
def touch(path, times=None):
"""
Implements unix utility `touch`
XXX: Needs UT
:attr fname: File path
:attr times: See `os.utime()` for args
https://docs.python.org/3.4/library/os.html#os.utime
"""
with open(path, 'a'):
os.utime(path, times)
def import_recursive(path):
"""
Recursively import all modules and packages
Thanks http://stackoverflow.com/a/25562415/1267398
XXX: Needs UT
:attr path: Path to package/module
"""
results = {}
obj = importlib.import_module(path)
results[path] = obj
path = getattr(obj, '__path__', os.path.dirname(obj.__file__))
for loader, name, is_pkg in pkgutil.walk_packages(path):
full_name = obj.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if is_pkg:
results.update(import_recursive(full_name))
return results
def extend_instance(instance, *bases, **kwargs):
"""
Apply subclass (mixin) to a class object or its instance
By default, the mixin is placed at the start of bases
to ensure its called first as per MRO. If you wish to
have it injected last, which is useful for monkeypatching,
then you can specify 'last=True'. See here:
http://stackoverflow.com/a/10018792/1267398
:attr cls: Target object
:type cls: Class instance
:attr bases: List of new bases to subclass with
:attr last: Inject new bases after existing bases
:type last: bool
>>> class A(object): pass
>>> class B(object): pass
>>> a = A()
>>> b = B()
>>> isinstance(b, A)
False
>>> extend_instance(b, A)
>>> isinstance(b, A)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
assert not inspect.isclass(instance)
base_cls = instance.__class__
base_cls_name = instance.__class__.__name__
new_bases = (base_cls,)+bases if last else bases+(base_cls,)
new_cls = type(base_cls_name, tuple(new_bases), {})
setattr(instance, '__class__', new_cls)
def add_bases(cls, *bases):
"""
Add bases to class
>>> class Base(object): pass
>>> class A(Base): pass
>>> class B(Base): pass
>>> issubclass(A, B)
False
>>> add_bases(A, B)
>>> issubclass(A, B)
True
"""
assert inspect.isclass(cls), "Expected class object"
for mixin in bases:
assert inspect.isclass(mixin), "Expected class object for bases"
new_bases = (bases + cls.__bases__)
cls.__bases__ = new_bases
def subclass(cls, *bases, **kwargs):
"""
Add bases to class (late subclassing)
Annoyingly we cannot yet modify __bases__ of an existing
class, instead we must create another subclass, see here;
http://bugs.python.org/issue672115
>>> class A(object): pass
>>> class B(object): pass
>>> class C(object): pass
>>> issubclass(B, A)
False
>>> D = subclass(B, A)
>>> issubclass(D, A)
True
>>> issubclass(D, B)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
new_bases = (cls,)+bases if last else bases+(cls,)
new_cls = type(cls.__name__, tuple(new_bases), {})
return new_cls
def import_from_path(path):
"""
Imports a package, module or attribute from path
Thanks http://stackoverflow.com/a/14050282/1267398
>>> import_from_path('os.path')
<module 'posixpath' ...
>>> import_from_path('os.path.basename')
<function basename at ...
>>> import_from_path('os')
<module 'os' from ...
>>> import_from_path('getrektcunt')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc.dummylol')
Traceback (most recent call last):
ImportError:
"""
try:
return importlib.import_module(path)
except ImportError:
if '.' not in path:
raise
module_name, attr_name = path.rsplit('.', 1)
if not does_module_exist(module_name):
raise ImportError("No object found at '{}'".format(path))
mod = importlib.import_module(module_name)
if not hasattr(mod, attr_name):
raise ImportError("No object found at '{}'".format(path))
return getattr(mod, attr_name)
def does_module_exist(path):
"""
Check if Python module exists at path
>>> does_module_exist('os.path')
True
>>> does_module_exist('dummy.app')
False
"""
try:
importlib.import_module(path)
return True
except ImportError:
return False
def sort_dict_by_key(obj):
"""
Sort dict by its keys
>>> sort_dict_by_key(dict(c=1, b=2, a=3, d=4))
OrderedDict([('a', 3), ('b', 2), ('c', 1), ('d', 4)])
"""
sort_func = lambda x: x[0]
return OrderedDict(sorted(obj.items(), key=sort_func))
def generate_random_token(length=32):
"""
Generate random secure token
>>> len(generate_random_token())
32
>>> len(generate_random_token(6))
6
"""
chars = (string.ascii_lowercase + string.ascii_uppercase + string.digits)
return ''.join(random.choice(chars) for _ in range(length))
def default(*args, **kwargs):
"""
Return first argument which is "truthy"
>>> default(None, None, 1)
1
>>> default(None, None, 123)
123
>>> print(default(None, None))
None
"""
default = kwargs.get('default', None)
for arg in args:
if arg:
return arg
return default
def urljoin(*args):
"""
Joins given arguments into a url, removing duplicate slashes
Thanks http://stackoverflow.com/a/11326230/1267398
>>> urljoin('/lol', '///lol', '/lol//')
'/lol/lol/lol'
"""
value = "/".join(map(lambda x: str(x).strip('/'), args))
return "/{}".format(value)
def is_hex(value):
"""
Check if value is hex
>>> is_hex('abab')
True
>>> is_hex('gg')
False
"""
try:
int(value, 16)
except ValueError:
return False
else:
return True
def is_int(value):
"""
Check if value is an int
:type value: int, str, bytes, float, Decimal
>>> is_int(123), is_int('123'), is_int(Decimal('10'))
(True, True, True)
>>> is_int(1.1), is_int('1.1'), is_int(Decimal('10.1'))
(False, False, False)
>>> is_int(object)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(value, (int, str, bytes, float, Decimal))
if isinstance(value, int):
return True
elif isinstance(value, float):
return False
elif isinstance(value, Decimal):
return str(value).isdigit()
elif isinstance(value, (str, bytes)):
return value.isdigit()
raise ValueError() # pragma: nocover
def coerce_to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
"""
Coerce value to bytes
>>> a = coerce_to_bytes('hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(b'hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(None)
>>> assert a is None
>>> coerce_to_bytes(object())
Traceback (most recent call last):
...
TypeError: Cannot coerce to bytes
"""
PY2 = sys.version_info[0] == 2
if PY2: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
else: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
def get_exception():
"""
Workaround for the missing "as" keyword in py3k.
XXX: needs UT
"""
return sys.exc_info()[1]
def makelist(data):
"""
Thanks bottle
XXX: needs UT
"""
if isinstance(data, (list, set, tuple)):
return list(data)
elif data:
return [data]
else:
return []
def random_date_between(start_date, end_date):
"""Return random date between start/end"""
assert isinstance(start_date, datetime.date)
delta_secs = int((end_date - start_date).total_seconds())
delta = datetime.timedelta(seconds=random.randint(0, delta_secs))
return (start_date + delta)
def datetime_to_epoch(dt):
return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
class Tempfile(object):
"""
Tempfile wrapper with cleanup support
XXX: Needs UT
"""
def __init__(self):
self.paths = []
def mkstemp(self, *args, **kwargs):
path = tempfile.mkstemp(*args, **kwargs)
self.paths.append(path)
return path
def mkdtemp(self, *args, **kwargs):
path = tempfile.mkdtemp(*args, **kwargs)
self.paths.append(path)
return path
def cleanup(self):
"""Remove any created temp paths"""
for path in self.paths:
if isinstance(path, tuple):
os.close(path[0])
os.unlink(path[1])
else:
shutil.rmtree(path)
self.paths = []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.cleanup()
|
foxx/python-helpful
|
helpful.py
|
coerce_to_bytes
|
python
|
def coerce_to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
PY2 = sys.version_info[0] == 2
if PY2: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
else: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
|
Coerce value to bytes
>>> a = coerce_to_bytes('hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(b'hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(None)
>>> assert a is None
>>> coerce_to_bytes(object())
Traceback (most recent call last):
...
TypeError: Cannot coerce to bytes
|
train
|
https://github.com/foxx/python-helpful/blob/e31ad9bdf45051d8b9a0d1808d214e2293c3bbed/helpful.py#L511-L543
| null |
import six
import os
import shutil
import warnings
import tempfile
import importlib
import pkgutil
import inspect
import string
import random
import sys
import itertools
import datetime
from decimal import Decimal
from collections import OrderedDict, Iterable
if six.PY2: # pragma: nocover
text_type = unicode
string_types = (str, unicode)
else: # pragma: nocover
string_types = (str, )
text_type = str
NoneType = type(None)
###########################################################
# Mixins
###########################################################
class ClassDictMixin():
"""
Dict which can be accessed via class attributes
Thanks http://www.goodcode.io/blog/python-dict-object/
"""
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if name in self:
del self[name]
else:
raise AttributeError("No such attribute: " + name)
def copy(self):
# XXX: needs UT
return self.__class__(**self)
def unique_iter(seq):
"""
See http://www.peterbe.com/plog/uniqifiers-benchmark
Originally f8 written by Dave Kirby
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def flatteniter(iter_lst):
"""
>>> flatteniter([[1,2,3], [4,5,6]])
[1, 2, 3, 4, 5, 6]
"""
return list(itertools.chain(*iter_lst))
class HashableDictMixin(object):
def __hash__(self):
"""
This /should/ allow object to be hashable, for use in a set
XXX: Needs UT
Thanks Raymond @ http://stackoverflow.com/a/16162138/1267398
"""
return hash((frozenset(self), frozenset(self.values())))
###########################################################
# Hashable dict
###########################################################
class ClassDict(ClassDictMixin, dict):
"""
>>> d = ClassDict(hello="world")
>>> d.hello
'world'
>>> d.get('hello')
'world'
>>> d.hello = 'wtf'
>>> d.hello
'wtf'
>>> d['hello']
'wtf'
>>> d.world
Traceback (most recent call last):
AttributeError:
>>> del d.hello
>>> del d.world
Traceback (most recent call last):
AttributeError:
>>> d.hello = 1
>>> b = d.copy()
>>> b.hello = 2
>>> b.hello == d.hello
False
"""
class HashableDict(HashableDictMixin, dict):
"""
>>> hash(HashableDict(a=1, b=2)) is not None
True
"""
class HashableOrderedDict(HashableDictMixin, OrderedDict):
"""
>>> hash(HashableOrderedDict(a=1, b=2)) is not None
True
"""
def ensure_class(obj):
"""
Ensure object is a class
>>> ensure_class(object)
>>> ensure_class(object())
Traceback (most recent call last):
TypeError:
>>> ensure_class(1)
Traceback (most recent call last):
TypeError:
"""
if not inspect.isclass(obj):
raise TypeError("Expected class, got {}".format(obj))
def iter_ensure_class(iterable):
"""
Ensure every item in iterable is a class
>>> iter_ensure_class([object, object])
>>> iter_ensure_class([object, object()])
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_class(item) for item in iterable ]
def ensure_subclass(value, types):
"""
Ensure value is a subclass of types
>>> class Hello(object): pass
>>> ensure_subclass(Hello, Hello)
>>> ensure_subclass(object, Hello)
Traceback (most recent call last):
TypeError:
"""
ensure_class(value)
if not issubclass(value, types):
raise TypeError(
"expected subclass of {}, not {}".format(
types, value))
def ensure_instance(value, types):
"""
Ensure value is an instance of a certain type
>>> ensure_instance(1, [str])
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, str)
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, int)
>>> ensure_instance(1, (int, str))
:attr types: Type of list of types
"""
if not isinstance(value, types):
raise TypeError(
"expected instance of {}, got {}".format(
types, value))
def iter_ensure_instance(iterable, types):
"""
Iterate over object and check each item type
>>> iter_ensure_instance([1,2,3], [str])
Traceback (most recent call last):
TypeError:
>>> iter_ensure_instance([1,2,3], int)
>>> iter_ensure_instance(1, int)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_instance(item, types) for item in iterable ]
def touch(path, times=None):
"""
Implements unix utility `touch`
XXX: Needs UT
:attr fname: File path
:attr times: See `os.utime()` for args
https://docs.python.org/3.4/library/os.html#os.utime
"""
with open(path, 'a'):
os.utime(path, times)
def import_recursive(path):
"""
Recursively import all modules and packages
Thanks http://stackoverflow.com/a/25562415/1267398
XXX: Needs UT
:attr path: Path to package/module
"""
results = {}
obj = importlib.import_module(path)
results[path] = obj
path = getattr(obj, '__path__', os.path.dirname(obj.__file__))
for loader, name, is_pkg in pkgutil.walk_packages(path):
full_name = obj.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if is_pkg:
results.update(import_recursive(full_name))
return results
def extend_instance(instance, *bases, **kwargs):
"""
Apply subclass (mixin) to a class object or its instance
By default, the mixin is placed at the start of bases
to ensure its called first as per MRO. If you wish to
have it injected last, which is useful for monkeypatching,
then you can specify 'last=True'. See here:
http://stackoverflow.com/a/10018792/1267398
:attr cls: Target object
:type cls: Class instance
:attr bases: List of new bases to subclass with
:attr last: Inject new bases after existing bases
:type last: bool
>>> class A(object): pass
>>> class B(object): pass
>>> a = A()
>>> b = B()
>>> isinstance(b, A)
False
>>> extend_instance(b, A)
>>> isinstance(b, A)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
assert not inspect.isclass(instance)
base_cls = instance.__class__
base_cls_name = instance.__class__.__name__
new_bases = (base_cls,)+bases if last else bases+(base_cls,)
new_cls = type(base_cls_name, tuple(new_bases), {})
setattr(instance, '__class__', new_cls)
def add_bases(cls, *bases):
"""
Add bases to class
>>> class Base(object): pass
>>> class A(Base): pass
>>> class B(Base): pass
>>> issubclass(A, B)
False
>>> add_bases(A, B)
>>> issubclass(A, B)
True
"""
assert inspect.isclass(cls), "Expected class object"
for mixin in bases:
assert inspect.isclass(mixin), "Expected class object for bases"
new_bases = (bases + cls.__bases__)
cls.__bases__ = new_bases
def subclass(cls, *bases, **kwargs):
"""
Add bases to class (late subclassing)
Annoyingly we cannot yet modify __bases__ of an existing
class, instead we must create another subclass, see here;
http://bugs.python.org/issue672115
>>> class A(object): pass
>>> class B(object): pass
>>> class C(object): pass
>>> issubclass(B, A)
False
>>> D = subclass(B, A)
>>> issubclass(D, A)
True
>>> issubclass(D, B)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
new_bases = (cls,)+bases if last else bases+(cls,)
new_cls = type(cls.__name__, tuple(new_bases), {})
return new_cls
def import_from_path(path):
"""
Imports a package, module or attribute from path
Thanks http://stackoverflow.com/a/14050282/1267398
>>> import_from_path('os.path')
<module 'posixpath' ...
>>> import_from_path('os.path.basename')
<function basename at ...
>>> import_from_path('os')
<module 'os' from ...
>>> import_from_path('getrektcunt')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc.dummylol')
Traceback (most recent call last):
ImportError:
"""
try:
return importlib.import_module(path)
except ImportError:
if '.' not in path:
raise
module_name, attr_name = path.rsplit('.', 1)
if not does_module_exist(module_name):
raise ImportError("No object found at '{}'".format(path))
mod = importlib.import_module(module_name)
if not hasattr(mod, attr_name):
raise ImportError("No object found at '{}'".format(path))
return getattr(mod, attr_name)
def does_module_exist(path):
"""
Check if Python module exists at path
>>> does_module_exist('os.path')
True
>>> does_module_exist('dummy.app')
False
"""
try:
importlib.import_module(path)
return True
except ImportError:
return False
def sort_dict_by_key(obj):
"""
Sort dict by its keys
>>> sort_dict_by_key(dict(c=1, b=2, a=3, d=4))
OrderedDict([('a', 3), ('b', 2), ('c', 1), ('d', 4)])
"""
sort_func = lambda x: x[0]
return OrderedDict(sorted(obj.items(), key=sort_func))
def generate_random_token(length=32):
"""
Generate random secure token
>>> len(generate_random_token())
32
>>> len(generate_random_token(6))
6
"""
chars = (string.ascii_lowercase + string.ascii_uppercase + string.digits)
return ''.join(random.choice(chars) for _ in range(length))
def default(*args, **kwargs):
"""
Return first argument which is "truthy"
>>> default(None, None, 1)
1
>>> default(None, None, 123)
123
>>> print(default(None, None))
None
"""
default = kwargs.get('default', None)
for arg in args:
if arg:
return arg
return default
def urljoin(*args):
"""
Joins given arguments into a url, removing duplicate slashes
Thanks http://stackoverflow.com/a/11326230/1267398
>>> urljoin('/lol', '///lol', '/lol//')
'/lol/lol/lol'
"""
value = "/".join(map(lambda x: str(x).strip('/'), args))
return "/{}".format(value)
def is_hex(value):
"""
Check if value is hex
>>> is_hex('abab')
True
>>> is_hex('gg')
False
"""
try:
int(value, 16)
except ValueError:
return False
else:
return True
def is_int(value):
"""
Check if value is an int
:type value: int, str, bytes, float, Decimal
>>> is_int(123), is_int('123'), is_int(Decimal('10'))
(True, True, True)
>>> is_int(1.1), is_int('1.1'), is_int(Decimal('10.1'))
(False, False, False)
>>> is_int(object)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(value, (int, str, bytes, float, Decimal))
if isinstance(value, int):
return True
elif isinstance(value, float):
return False
elif isinstance(value, Decimal):
return str(value).isdigit()
elif isinstance(value, (str, bytes)):
return value.isdigit()
raise ValueError() # pragma: nocover
def padded_split(value, sep, maxsplit=None, pad=None):
"""
Modified split() to include padding
See http://code.activestate.com/lists/python-ideas/3366/
:attr value: see str.split()
:attr sep: see str.split()
:attr maxsplit: see str.split()
:attr pad: Value to use for padding maxsplit
>>> padded_split('text/html', ';', 1)
['text/html', None]
>>> padded_split('text/html;q=1', ';', 1)
['text/html', 'q=1']
>>> padded_split('text/html;a=1;b=2', ';', 1)
['text/html', 'a=1;b=2']
>>> padded_split('text/html', ';', 1, True)
['text/html', True]
>>> padded_split('text/html;a=1;b=2', ';', 2)
['text/html', 'a=1', 'b=2']
>>> padded_split('text/html;a=1', ';', 2)
['text/html', 'a=1', None]
"""
result = value.split(sep, maxsplit)
if maxsplit is not None:
result.extend(
[pad] * (1+maxsplit-len(result)))
return result
def get_exception():
"""
Workaround for the missing "as" keyword in py3k.
XXX: needs UT
"""
return sys.exc_info()[1]
def makelist(data):
"""
Thanks bottle
XXX: needs UT
"""
if isinstance(data, (list, set, tuple)):
return list(data)
elif data:
return [data]
else:
return []
def random_date_between(start_date, end_date):
"""Return random date between start/end"""
assert isinstance(start_date, datetime.date)
delta_secs = int((end_date - start_date).total_seconds())
delta = datetime.timedelta(seconds=random.randint(0, delta_secs))
return (start_date + delta)
def datetime_to_epoch(dt):
return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
class Tempfile(object):
"""
Tempfile wrapper with cleanup support
XXX: Needs UT
"""
def __init__(self):
self.paths = []
def mkstemp(self, *args, **kwargs):
path = tempfile.mkstemp(*args, **kwargs)
self.paths.append(path)
return path
def mkdtemp(self, *args, **kwargs):
path = tempfile.mkdtemp(*args, **kwargs)
self.paths.append(path)
return path
def cleanup(self):
"""Remove any created temp paths"""
for path in self.paths:
if isinstance(path, tuple):
os.close(path[0])
os.unlink(path[1])
else:
shutil.rmtree(path)
self.paths = []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.cleanup()
|
foxx/python-helpful
|
helpful.py
|
random_date_between
|
python
|
def random_date_between(start_date, end_date):
assert isinstance(start_date, datetime.date)
delta_secs = int((end_date - start_date).total_seconds())
delta = datetime.timedelta(seconds=random.randint(0, delta_secs))
return (start_date + delta)
|
Return random date between start/end
|
train
|
https://github.com/foxx/python-helpful/blob/e31ad9bdf45051d8b9a0d1808d214e2293c3bbed/helpful.py#L567-L572
| null |
import six
import os
import shutil
import warnings
import tempfile
import importlib
import pkgutil
import inspect
import string
import random
import sys
import itertools
import datetime
from decimal import Decimal
from collections import OrderedDict, Iterable
if six.PY2: # pragma: nocover
text_type = unicode
string_types = (str, unicode)
else: # pragma: nocover
string_types = (str, )
text_type = str
NoneType = type(None)
###########################################################
# Mixins
###########################################################
class ClassDictMixin():
"""
Dict which can be accessed via class attributes
Thanks http://www.goodcode.io/blog/python-dict-object/
"""
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
if name in self:
del self[name]
else:
raise AttributeError("No such attribute: " + name)
def copy(self):
# XXX: needs UT
return self.__class__(**self)
def unique_iter(seq):
"""
See http://www.peterbe.com/plog/uniqifiers-benchmark
Originally f8 written by Dave Kirby
"""
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
def flatteniter(iter_lst):
"""
>>> flatteniter([[1,2,3], [4,5,6]])
[1, 2, 3, 4, 5, 6]
"""
return list(itertools.chain(*iter_lst))
class HashableDictMixin(object):
def __hash__(self):
"""
This /should/ allow object to be hashable, for use in a set
XXX: Needs UT
Thanks Raymond @ http://stackoverflow.com/a/16162138/1267398
"""
return hash((frozenset(self), frozenset(self.values())))
###########################################################
# Hashable dict
###########################################################
class ClassDict(ClassDictMixin, dict):
"""
>>> d = ClassDict(hello="world")
>>> d.hello
'world'
>>> d.get('hello')
'world'
>>> d.hello = 'wtf'
>>> d.hello
'wtf'
>>> d['hello']
'wtf'
>>> d.world
Traceback (most recent call last):
AttributeError:
>>> del d.hello
>>> del d.world
Traceback (most recent call last):
AttributeError:
>>> d.hello = 1
>>> b = d.copy()
>>> b.hello = 2
>>> b.hello == d.hello
False
"""
class HashableDict(HashableDictMixin, dict):
"""
>>> hash(HashableDict(a=1, b=2)) is not None
True
"""
class HashableOrderedDict(HashableDictMixin, OrderedDict):
"""
>>> hash(HashableOrderedDict(a=1, b=2)) is not None
True
"""
def ensure_class(obj):
"""
Ensure object is a class
>>> ensure_class(object)
>>> ensure_class(object())
Traceback (most recent call last):
TypeError:
>>> ensure_class(1)
Traceback (most recent call last):
TypeError:
"""
if not inspect.isclass(obj):
raise TypeError("Expected class, got {}".format(obj))
def iter_ensure_class(iterable):
"""
Ensure every item in iterable is a class
>>> iter_ensure_class([object, object])
>>> iter_ensure_class([object, object()])
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_class(item) for item in iterable ]
def ensure_subclass(value, types):
"""
Ensure value is a subclass of types
>>> class Hello(object): pass
>>> ensure_subclass(Hello, Hello)
>>> ensure_subclass(object, Hello)
Traceback (most recent call last):
TypeError:
"""
ensure_class(value)
if not issubclass(value, types):
raise TypeError(
"expected subclass of {}, not {}".format(
types, value))
def ensure_instance(value, types):
"""
Ensure value is an instance of a certain type
>>> ensure_instance(1, [str])
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, str)
Traceback (most recent call last):
TypeError:
>>> ensure_instance(1, int)
>>> ensure_instance(1, (int, str))
:attr types: Type of list of types
"""
if not isinstance(value, types):
raise TypeError(
"expected instance of {}, got {}".format(
types, value))
def iter_ensure_instance(iterable, types):
"""
Iterate over object and check each item type
>>> iter_ensure_instance([1,2,3], [str])
Traceback (most recent call last):
TypeError:
>>> iter_ensure_instance([1,2,3], int)
>>> iter_ensure_instance(1, int)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(iterable, Iterable)
[ ensure_instance(item, types) for item in iterable ]
def touch(path, times=None):
"""
Implements unix utility `touch`
XXX: Needs UT
:attr fname: File path
:attr times: See `os.utime()` for args
https://docs.python.org/3.4/library/os.html#os.utime
"""
with open(path, 'a'):
os.utime(path, times)
def import_recursive(path):
"""
Recursively import all modules and packages
Thanks http://stackoverflow.com/a/25562415/1267398
XXX: Needs UT
:attr path: Path to package/module
"""
results = {}
obj = importlib.import_module(path)
results[path] = obj
path = getattr(obj, '__path__', os.path.dirname(obj.__file__))
for loader, name, is_pkg in pkgutil.walk_packages(path):
full_name = obj.__name__ + '.' + name
results[full_name] = importlib.import_module(full_name)
if is_pkg:
results.update(import_recursive(full_name))
return results
def extend_instance(instance, *bases, **kwargs):
"""
Apply subclass (mixin) to a class object or its instance
By default, the mixin is placed at the start of bases
to ensure its called first as per MRO. If you wish to
have it injected last, which is useful for monkeypatching,
then you can specify 'last=True'. See here:
http://stackoverflow.com/a/10018792/1267398
:attr cls: Target object
:type cls: Class instance
:attr bases: List of new bases to subclass with
:attr last: Inject new bases after existing bases
:type last: bool
>>> class A(object): pass
>>> class B(object): pass
>>> a = A()
>>> b = B()
>>> isinstance(b, A)
False
>>> extend_instance(b, A)
>>> isinstance(b, A)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
assert not inspect.isclass(instance)
base_cls = instance.__class__
base_cls_name = instance.__class__.__name__
new_bases = (base_cls,)+bases if last else bases+(base_cls,)
new_cls = type(base_cls_name, tuple(new_bases), {})
setattr(instance, '__class__', new_cls)
def add_bases(cls, *bases):
"""
Add bases to class
>>> class Base(object): pass
>>> class A(Base): pass
>>> class B(Base): pass
>>> issubclass(A, B)
False
>>> add_bases(A, B)
>>> issubclass(A, B)
True
"""
assert inspect.isclass(cls), "Expected class object"
for mixin in bases:
assert inspect.isclass(mixin), "Expected class object for bases"
new_bases = (bases + cls.__bases__)
cls.__bases__ = new_bases
def subclass(cls, *bases, **kwargs):
"""
Add bases to class (late subclassing)
Annoyingly we cannot yet modify __bases__ of an existing
class, instead we must create another subclass, see here;
http://bugs.python.org/issue672115
>>> class A(object): pass
>>> class B(object): pass
>>> class C(object): pass
>>> issubclass(B, A)
False
>>> D = subclass(B, A)
>>> issubclass(D, A)
True
>>> issubclass(D, B)
True
"""
last = kwargs.get('last', False)
bases = tuple(bases)
for base in bases:
assert inspect.isclass(base), "bases must be classes"
new_bases = (cls,)+bases if last else bases+(cls,)
new_cls = type(cls.__name__, tuple(new_bases), {})
return new_cls
def import_from_path(path):
"""
Imports a package, module or attribute from path
Thanks http://stackoverflow.com/a/14050282/1267398
>>> import_from_path('os.path')
<module 'posixpath' ...
>>> import_from_path('os.path.basename')
<function basename at ...
>>> import_from_path('os')
<module 'os' from ...
>>> import_from_path('getrektcunt')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc')
Traceback (most recent call last):
ImportError:
>>> import_from_path('os.dummyfunc.dummylol')
Traceback (most recent call last):
ImportError:
"""
try:
return importlib.import_module(path)
except ImportError:
if '.' not in path:
raise
module_name, attr_name = path.rsplit('.', 1)
if not does_module_exist(module_name):
raise ImportError("No object found at '{}'".format(path))
mod = importlib.import_module(module_name)
if not hasattr(mod, attr_name):
raise ImportError("No object found at '{}'".format(path))
return getattr(mod, attr_name)
def does_module_exist(path):
"""
Check if Python module exists at path
>>> does_module_exist('os.path')
True
>>> does_module_exist('dummy.app')
False
"""
try:
importlib.import_module(path)
return True
except ImportError:
return False
def sort_dict_by_key(obj):
"""
Sort dict by its keys
>>> sort_dict_by_key(dict(c=1, b=2, a=3, d=4))
OrderedDict([('a', 3), ('b', 2), ('c', 1), ('d', 4)])
"""
sort_func = lambda x: x[0]
return OrderedDict(sorted(obj.items(), key=sort_func))
def generate_random_token(length=32):
"""
Generate random secure token
>>> len(generate_random_token())
32
>>> len(generate_random_token(6))
6
"""
chars = (string.ascii_lowercase + string.ascii_uppercase + string.digits)
return ''.join(random.choice(chars) for _ in range(length))
def default(*args, **kwargs):
"""
Return first argument which is "truthy"
>>> default(None, None, 1)
1
>>> default(None, None, 123)
123
>>> print(default(None, None))
None
"""
default = kwargs.get('default', None)
for arg in args:
if arg:
return arg
return default
def urljoin(*args):
"""
Joins given arguments into a url, removing duplicate slashes
Thanks http://stackoverflow.com/a/11326230/1267398
>>> urljoin('/lol', '///lol', '/lol//')
'/lol/lol/lol'
"""
value = "/".join(map(lambda x: str(x).strip('/'), args))
return "/{}".format(value)
def is_hex(value):
"""
Check if value is hex
>>> is_hex('abab')
True
>>> is_hex('gg')
False
"""
try:
int(value, 16)
except ValueError:
return False
else:
return True
def is_int(value):
"""
Check if value is an int
:type value: int, str, bytes, float, Decimal
>>> is_int(123), is_int('123'), is_int(Decimal('10'))
(True, True, True)
>>> is_int(1.1), is_int('1.1'), is_int(Decimal('10.1'))
(False, False, False)
>>> is_int(object)
Traceback (most recent call last):
TypeError:
"""
ensure_instance(value, (int, str, bytes, float, Decimal))
if isinstance(value, int):
return True
elif isinstance(value, float):
return False
elif isinstance(value, Decimal):
return str(value).isdigit()
elif isinstance(value, (str, bytes)):
return value.isdigit()
raise ValueError() # pragma: nocover
def padded_split(value, sep, maxsplit=None, pad=None):
"""
Modified split() to include padding
See http://code.activestate.com/lists/python-ideas/3366/
:attr value: see str.split()
:attr sep: see str.split()
:attr maxsplit: see str.split()
:attr pad: Value to use for padding maxsplit
>>> padded_split('text/html', ';', 1)
['text/html', None]
>>> padded_split('text/html;q=1', ';', 1)
['text/html', 'q=1']
>>> padded_split('text/html;a=1;b=2', ';', 1)
['text/html', 'a=1;b=2']
>>> padded_split('text/html', ';', 1, True)
['text/html', True]
>>> padded_split('text/html;a=1;b=2', ';', 2)
['text/html', 'a=1', 'b=2']
>>> padded_split('text/html;a=1', ';', 2)
['text/html', 'a=1', None]
"""
result = value.split(sep, maxsplit)
if maxsplit is not None:
result.extend(
[pad] * (1+maxsplit-len(result)))
return result
def coerce_to_bytes(x, charset=sys.getdefaultencoding(), errors='strict'):
"""
Coerce value to bytes
>>> a = coerce_to_bytes('hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(b'hello')
>>> assert isinstance(a, bytes)
>>> a = coerce_to_bytes(None)
>>> assert a is None
>>> coerce_to_bytes(object())
Traceback (most recent call last):
...
TypeError: Cannot coerce to bytes
"""
PY2 = sys.version_info[0] == 2
if PY2: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, buffer)):
return bytes(x)
if isinstance(x, unicode):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
else: # pragma: nocover
if x is None:
return None
if isinstance(x, (bytes, bytearray, memoryview)):
return bytes(x)
if isinstance(x, str):
return x.encode(charset, errors)
raise TypeError('Cannot coerce to bytes')
def get_exception():
"""
Workaround for the missing "as" keyword in py3k.
XXX: needs UT
"""
return sys.exc_info()[1]
def makelist(data):
"""
Thanks bottle
XXX: needs UT
"""
if isinstance(data, (list, set, tuple)):
return list(data)
elif data:
return [data]
else:
return []
def datetime_to_epoch(dt):
return (dt - datetime.datetime(1970, 1, 1)).total_seconds()
class Tempfile(object):
"""
Tempfile wrapper with cleanup support
XXX: Needs UT
"""
def __init__(self):
self.paths = []
def mkstemp(self, *args, **kwargs):
path = tempfile.mkstemp(*args, **kwargs)
self.paths.append(path)
return path
def mkdtemp(self, *args, **kwargs):
path = tempfile.mkdtemp(*args, **kwargs)
self.paths.append(path)
return path
def cleanup(self):
"""Remove any created temp paths"""
for path in self.paths:
if isinstance(path, tuple):
os.close(path[0])
os.unlink(path[1])
else:
shutil.rmtree(path)
self.paths = []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.cleanup()
|
foxx/python-helpful
|
helpful.py
|
Tempfile.cleanup
|
python
|
def cleanup(self):
for path in self.paths:
if isinstance(path, tuple):
os.close(path[0])
os.unlink(path[1])
else:
shutil.rmtree(path)
self.paths = []
|
Remove any created temp paths
|
train
|
https://github.com/foxx/python-helpful/blob/e31ad9bdf45051d8b9a0d1808d214e2293c3bbed/helpful.py#L599-L607
| null |
class Tempfile(object):
"""
Tempfile wrapper with cleanup support
XXX: Needs UT
"""
def __init__(self):
self.paths = []
def mkstemp(self, *args, **kwargs):
path = tempfile.mkstemp(*args, **kwargs)
self.paths.append(path)
return path
def mkdtemp(self, *args, **kwargs):
path = tempfile.mkdtemp(*args, **kwargs)
self.paths.append(path)
return path
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.cleanup()
|
dstufft/crust
|
crust/utils.py
|
subclass_exception
|
python
|
def subclass_exception(name, parents, module, attached_to=None):
class_dict = {'__module__': module}
if attached_to is not None:
def __reduce__(self):
# Exceptions are special - they've got state that isn't
# in self.__dict__. We assume it is all in self.args.
return (unpickle_inner_exception, (attached_to, name), self.args)
def __setstate__(self, args):
self.args = args
class_dict['__reduce__'] = __reduce__
class_dict['__setstate__'] = __setstate__
return type(name, parents, class_dict)
|
Create exception subclass.
If 'attached_to' is supplied, the exception will be created in a way that
allows it to be pickled, assuming the returned exception class will be added
as an attribute to the 'attached_to' class.
|
train
|
https://github.com/dstufft/crust/blob/5d4011ecace12fd3f68a03a17dbefb78390a9fc0/crust/utils.py#L7-L28
| null |
def unpickle_inner_exception(klass, exception_name):
# Get the exception class from the class it is attached to:
exception = getattr(klass, exception_name)
return exception.__new__(exception)
|
dstufft/crust
|
crust/resources.py
|
Resource.save
|
python
|
def save(self, force_insert=False, force_update=False):
if force_insert and force_update:
raise ValueError("Cannot force both insert and updating in resource saving.")
data = {}
for name, field in self._meta.fields.items():
if field.serialize:
data[name] = field.dehydrate(getattr(self, name, None))
insert = True if force_insert or self.resource_uri is None else False
if insert:
resp = self._meta.api.http_resource("POST", self._meta.resource_name, data=self._meta.api.resource_serialize(data))
else:
resp = self._meta.api.http_resource("PUT", self.resource_uri, data=self._meta.api.resource_serialize(data))
if "Location" in resp.headers:
resp = self._meta.api.http_resource("GET", resp.headers["Location"])
elif resp.status_code == 204:
resp = self._meta.api.http_resource("GET", self.resource_uri)
else:
return
data = self._meta.api.resource_deserialize(resp.text)
# Update local values from the API Response
self.__init__(**data)
|
Saves the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
that the "save" must be a POST or PUT respectively. Normally, they
should not be set.
|
train
|
https://github.com/dstufft/crust/blob/5d4011ecace12fd3f68a03a17dbefb78390a9fc0/crust/resources.py#L129-L164
| null |
class Resource(six.with_metaclass(ResourceBase, object)):
def __init__(self, resource_uri=None, *args, **kwargs):
self.resource_uri = resource_uri
for name, field in self._meta.fields.items():
val = kwargs.pop(name, None)
setattr(self, name, field.hydrate(val))
def __repr__(self):
try:
u = six.text_type(self)
except (UnicodeEncodeError, UnicodeDecodeError):
u = "[Bad Unicode data]"
return "<%s: %s>" % (self.__class__.__name__, u)
def __str__(self):
if not six.PY3 and hasattr(self, "__unicode__"):
return self.encode("utf-8")
return "%s object" % self.__class__.__name__
def delete(self):
"""
Deletes the current instance. Override this in a subclass if you want to
control the deleting process.
"""
if self.resource_uri is None:
raise ValueError("{0} object cannot be deleted because resource_uri attribute cannot be None".format(self._meta.resource_name))
self._meta.api.http_resource("DELETE", self.resource_uri)
|
dstufft/crust
|
crust/resources.py
|
Resource.delete
|
python
|
def delete(self):
if self.resource_uri is None:
raise ValueError("{0} object cannot be deleted because resource_uri attribute cannot be None".format(self._meta.resource_name))
self._meta.api.http_resource("DELETE", self.resource_uri)
|
Deletes the current instance. Override this in a subclass if you want to
control the deleting process.
|
train
|
https://github.com/dstufft/crust/blob/5d4011ecace12fd3f68a03a17dbefb78390a9fc0/crust/resources.py#L166-L174
| null |
class Resource(six.with_metaclass(ResourceBase, object)):
def __init__(self, resource_uri=None, *args, **kwargs):
self.resource_uri = resource_uri
for name, field in self._meta.fields.items():
val = kwargs.pop(name, None)
setattr(self, name, field.hydrate(val))
def __repr__(self):
try:
u = six.text_type(self)
except (UnicodeEncodeError, UnicodeDecodeError):
u = "[Bad Unicode data]"
return "<%s: %s>" % (self.__class__.__name__, u)
def __str__(self):
if not six.PY3 and hasattr(self, "__unicode__"):
return self.encode("utf-8")
return "%s object" % self.__class__.__name__
def save(self, force_insert=False, force_update=False):
"""
Saves the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
that the "save" must be a POST or PUT respectively. Normally, they
should not be set.
"""
if force_insert and force_update:
raise ValueError("Cannot force both insert and updating in resource saving.")
data = {}
for name, field in self._meta.fields.items():
if field.serialize:
data[name] = field.dehydrate(getattr(self, name, None))
insert = True if force_insert or self.resource_uri is None else False
if insert:
resp = self._meta.api.http_resource("POST", self._meta.resource_name, data=self._meta.api.resource_serialize(data))
else:
resp = self._meta.api.http_resource("PUT", self.resource_uri, data=self._meta.api.resource_serialize(data))
if "Location" in resp.headers:
resp = self._meta.api.http_resource("GET", resp.headers["Location"])
elif resp.status_code == 204:
resp = self._meta.api.http_resource("GET", self.resource_uri)
else:
return
data = self._meta.api.resource_deserialize(resp.text)
# Update local values from the API Response
self.__init__(**data)
|
dstufft/crust
|
crust/api.py
|
Api.http_resource
|
python
|
def http_resource(self, method, url, params=None, data=None):
url = urllib_parse.urljoin(self.url, url)
url = url if url.endswith("/") else url + "/"
headers = None
if method.lower() in self.unsupported_methods:
headers = {"X-HTTP-Method-Override": method.upper()}
method = "POST"
r = self.session.request(method, url, params=params, data=data, headers=headers)
r.raise_for_status()
return r
|
Makes an HTTP request.
|
train
|
https://github.com/dstufft/crust/blob/5d4011ecace12fd3f68a03a17dbefb78390a9fc0/crust/api.py#L69-L87
| null |
class Api(object):
resources = {}
unsupported_methods = []
def __init__(self, session=None, *args, **kwargs):
super(Api, self).__init__(*args, **kwargs)
if session is None:
session = requests.session()
self.session = session
self.unsupported_methods = [method.lower() for method in self.unsupported_methods]
# Initialize the APIs
for cls in self.resources.values():
cls._meta.api = self
self.configure()
def __getattr__(self, name):
if name in self.resources:
return self.resources[name]
raise AttributeError("'{0}' object has no attribute '{1}'".format(self.__class__.__name__, name))
def configure(self):
self.session.headers.update({"Content-Type": "application/json", "Accept": "application/json"})
@classmethod
def bind(cls, resource):
instance = resource()
cls.resources[instance._meta.resource_name] = resource
return resource
@staticmethod
def resource_serialize(o):
"""
Returns JSON serialization of given object.
"""
return json.dumps(o)
@staticmethod
def resource_deserialize(s):
"""
Returns dict deserialization of a given JSON string.
"""
try:
return json.loads(s)
except ValueError:
raise ResponseError("The API Response was not valid.")
|
dstufft/crust
|
crust/query.py
|
Query.clone
|
python
|
def clone(self, klass=None, memo=None, **kwargs):
obj = Empty()
obj.__class__ = klass or self.__class__
obj.resource = self.resource
obj.filters = self.filters.copy()
obj.order_by = self.order_by
obj.low_mark = self.low_mark
obj.high_mark = self.high_mark
obj.__dict__.update(kwargs)
return obj
|
Creates a copy of the current instance. The 'kwargs' parameter can be
used by clients to update attributes after copying has taken place.
|
train
|
https://github.com/dstufft/crust/blob/5d4011ecace12fd3f68a03a17dbefb78390a9fc0/crust/query.py#L35-L53
| null |
class Query(object):
"""
A single API query.
"""
def __init__(self, resource, *args, **kwargs):
super(Query, self).__init__(*args, **kwargs)
self.resource = resource
self.filters = {}
self.order_by = None
self.low_mark = 0
self.high_mark = None
def add_filters(self, **filters):
"""
Adjusts the filters that should be applied to the request to the API.
"""
self.filters.update(filters)
def add_ordering(self, ordering=None):
"""
Adds items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-').
If 'ordering' is empty, all ordering is cleared from the query.
"""
if ordering is not None:
self.order_by = ordering
else:
self.clear_ordering()
def clear_ordering(self):
"""
Removes any ordering settings.
"""
self.order_by = None
def set_limits(self, low=None, high=None):
"""
Adjusts the limits on the rows retrieved. We use low/high to set these,
as it makes it more Pythonic to read and write. When the API query is
created, they are converted to the appropriate offset and limit values.
Any limits passed in here are applied relative to the existing
constraints. So low is added to the current low value and both will be
clamped to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
def results(self, limit=100):
"""
Yields the results from the API, efficiently handling the pagination and
properly passing all paramaters.
"""
limited = True if self.high_mark is not None else False
rmax = self.high_mark - self.low_mark if limited else None
rnum = 0
params = self.get_params()
params["offset"] = self.low_mark
params["limit"] = limit
while not limited and rmax is None or rnum < rmax:
if limited or rmax is not None:
rleft = rmax - rnum
params["limit"] = rleft if rleft < limit else limit
r = self.resource._meta.api.http_resource("GET", self.resource._meta.resource_name, params=params)
data = self.resource._meta.api.resource_deserialize(r.text)
if not limited:
rmax = data["meta"]["total_count"]
if data["meta"]["total_count"] < rmax:
rmax = data["meta"]["total_count"]
params["offset"] = data["meta"]["offset"] + data["meta"]["limit"]
for item in data["objects"]:
rnum += 1
yield item
def delete(self):
"""
Deletes the results of this query, it first fetches all the items to be
deletes and then issues a PATCH against the list uri of the resource.
"""
uris = [obj["resource_uri"] for obj in self.results()]
data = self.resource._meta.api.resource_serialize({"objects": [], "deleted_objects": uris})
self.resource._meta.api.http_resource("PATCH", self.resource._meta.resource_name, data=data)
return len(uris)
def get_params(self):
params = {}
# Apply filters
params.update(self.filters)
# Apply Ordering
if self.order_by is not None:
params["order_by"] = self.order_by
return params
def get_count(self):
"""
Gets the total_count using the current filter constraints.
"""
params = self.get_params()
params["offset"] = self.low_mark
params["limit"] = 1
r = self.resource._meta.api.http_resource("GET", self.resource._meta.resource_name, params=params)
data = self.resource._meta.api.resource_deserialize(r.text)
number = data["meta"]["total_count"]
# Apply offset and limit constraints manually, since using limit/offset
# in the API doesn't change the total_count output.
number = max(0, number - self.low_mark)
if self.high_mark is not None:
number = min(number, self.high_mark - self.low_mark)
return number
def can_filter(self):
"""
Returns True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.low_mark and self.high_mark is None
def has_results(self):
q = self.clone()
q.clear_ordering()
q.set_limits(high=1)
return bool(list(q.results()))
|
dstufft/crust
|
crust/query.py
|
Query.set_limits
|
python
|
def set_limits(self, low=None, high=None):
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
|
Adjusts the limits on the rows retrieved. We use low/high to set these,
as it makes it more Pythonic to read and write. When the API query is
created, they are converted to the appropriate offset and limit values.
Any limits passed in here are applied relative to the existing
constraints. So low is added to the current low value and both will be
clamped to any existing high value.
|
train
|
https://github.com/dstufft/crust/blob/5d4011ecace12fd3f68a03a17dbefb78390a9fc0/crust/query.py#L80-L99
| null |
class Query(object):
"""
A single API query.
"""
def __init__(self, resource, *args, **kwargs):
super(Query, self).__init__(*args, **kwargs)
self.resource = resource
self.filters = {}
self.order_by = None
self.low_mark = 0
self.high_mark = None
def clone(self, klass=None, memo=None, **kwargs):
"""
Creates a copy of the current instance. The 'kwargs' parameter can be
used by clients to update attributes after copying has taken place.
"""
obj = Empty()
obj.__class__ = klass or self.__class__
obj.resource = self.resource
obj.filters = self.filters.copy()
obj.order_by = self.order_by
obj.low_mark = self.low_mark
obj.high_mark = self.high_mark
obj.__dict__.update(kwargs)
return obj
def add_filters(self, **filters):
"""
Adjusts the filters that should be applied to the request to the API.
"""
self.filters.update(filters)
def add_ordering(self, ordering=None):
"""
Adds items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-').
If 'ordering' is empty, all ordering is cleared from the query.
"""
if ordering is not None:
self.order_by = ordering
else:
self.clear_ordering()
def clear_ordering(self):
"""
Removes any ordering settings.
"""
self.order_by = None
def results(self, limit=100):
"""
Yields the results from the API, efficiently handling the pagination and
properly passing all paramaters.
"""
limited = True if self.high_mark is not None else False
rmax = self.high_mark - self.low_mark if limited else None
rnum = 0
params = self.get_params()
params["offset"] = self.low_mark
params["limit"] = limit
while not limited and rmax is None or rnum < rmax:
if limited or rmax is not None:
rleft = rmax - rnum
params["limit"] = rleft if rleft < limit else limit
r = self.resource._meta.api.http_resource("GET", self.resource._meta.resource_name, params=params)
data = self.resource._meta.api.resource_deserialize(r.text)
if not limited:
rmax = data["meta"]["total_count"]
if data["meta"]["total_count"] < rmax:
rmax = data["meta"]["total_count"]
params["offset"] = data["meta"]["offset"] + data["meta"]["limit"]
for item in data["objects"]:
rnum += 1
yield item
def delete(self):
"""
Deletes the results of this query, it first fetches all the items to be
deletes and then issues a PATCH against the list uri of the resource.
"""
uris = [obj["resource_uri"] for obj in self.results()]
data = self.resource._meta.api.resource_serialize({"objects": [], "deleted_objects": uris})
self.resource._meta.api.http_resource("PATCH", self.resource._meta.resource_name, data=data)
return len(uris)
def get_params(self):
params = {}
# Apply filters
params.update(self.filters)
# Apply Ordering
if self.order_by is not None:
params["order_by"] = self.order_by
return params
def get_count(self):
"""
Gets the total_count using the current filter constraints.
"""
params = self.get_params()
params["offset"] = self.low_mark
params["limit"] = 1
r = self.resource._meta.api.http_resource("GET", self.resource._meta.resource_name, params=params)
data = self.resource._meta.api.resource_deserialize(r.text)
number = data["meta"]["total_count"]
# Apply offset and limit constraints manually, since using limit/offset
# in the API doesn't change the total_count output.
number = max(0, number - self.low_mark)
if self.high_mark is not None:
number = min(number, self.high_mark - self.low_mark)
return number
def can_filter(self):
"""
Returns True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.low_mark and self.high_mark is None
def has_results(self):
q = self.clone()
q.clear_ordering()
q.set_limits(high=1)
return bool(list(q.results()))
|
dstufft/crust
|
crust/query.py
|
Query.results
|
python
|
def results(self, limit=100):
limited = True if self.high_mark is not None else False
rmax = self.high_mark - self.low_mark if limited else None
rnum = 0
params = self.get_params()
params["offset"] = self.low_mark
params["limit"] = limit
while not limited and rmax is None or rnum < rmax:
if limited or rmax is not None:
rleft = rmax - rnum
params["limit"] = rleft if rleft < limit else limit
r = self.resource._meta.api.http_resource("GET", self.resource._meta.resource_name, params=params)
data = self.resource._meta.api.resource_deserialize(r.text)
if not limited:
rmax = data["meta"]["total_count"]
if data["meta"]["total_count"] < rmax:
rmax = data["meta"]["total_count"]
params["offset"] = data["meta"]["offset"] + data["meta"]["limit"]
for item in data["objects"]:
rnum += 1
yield item
|
Yields the results from the API, efficiently handling the pagination and
properly passing all paramaters.
|
train
|
https://github.com/dstufft/crust/blob/5d4011ecace12fd3f68a03a17dbefb78390a9fc0/crust/query.py#L101-L132
|
[
"def get_params(self):\n params = {}\n\n # Apply filters\n params.update(self.filters)\n\n # Apply Ordering\n if self.order_by is not None:\n params[\"order_by\"] = self.order_by\n\n return params\n"
] |
class Query(object):
"""
A single API query.
"""
def __init__(self, resource, *args, **kwargs):
super(Query, self).__init__(*args, **kwargs)
self.resource = resource
self.filters = {}
self.order_by = None
self.low_mark = 0
self.high_mark = None
def clone(self, klass=None, memo=None, **kwargs):
"""
Creates a copy of the current instance. The 'kwargs' parameter can be
used by clients to update attributes after copying has taken place.
"""
obj = Empty()
obj.__class__ = klass or self.__class__
obj.resource = self.resource
obj.filters = self.filters.copy()
obj.order_by = self.order_by
obj.low_mark = self.low_mark
obj.high_mark = self.high_mark
obj.__dict__.update(kwargs)
return obj
def add_filters(self, **filters):
"""
Adjusts the filters that should be applied to the request to the API.
"""
self.filters.update(filters)
def add_ordering(self, ordering=None):
"""
Adds items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-').
If 'ordering' is empty, all ordering is cleared from the query.
"""
if ordering is not None:
self.order_by = ordering
else:
self.clear_ordering()
def clear_ordering(self):
"""
Removes any ordering settings.
"""
self.order_by = None
def set_limits(self, low=None, high=None):
"""
Adjusts the limits on the rows retrieved. We use low/high to set these,
as it makes it more Pythonic to read and write. When the API query is
created, they are converted to the appropriate offset and limit values.
Any limits passed in here are applied relative to the existing
constraints. So low is added to the current low value and both will be
clamped to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
def delete(self):
"""
Deletes the results of this query, it first fetches all the items to be
deletes and then issues a PATCH against the list uri of the resource.
"""
uris = [obj["resource_uri"] for obj in self.results()]
data = self.resource._meta.api.resource_serialize({"objects": [], "deleted_objects": uris})
self.resource._meta.api.http_resource("PATCH", self.resource._meta.resource_name, data=data)
return len(uris)
def get_params(self):
params = {}
# Apply filters
params.update(self.filters)
# Apply Ordering
if self.order_by is not None:
params["order_by"] = self.order_by
return params
def get_count(self):
"""
Gets the total_count using the current filter constraints.
"""
params = self.get_params()
params["offset"] = self.low_mark
params["limit"] = 1
r = self.resource._meta.api.http_resource("GET", self.resource._meta.resource_name, params=params)
data = self.resource._meta.api.resource_deserialize(r.text)
number = data["meta"]["total_count"]
# Apply offset and limit constraints manually, since using limit/offset
# in the API doesn't change the total_count output.
number = max(0, number - self.low_mark)
if self.high_mark is not None:
number = min(number, self.high_mark - self.low_mark)
return number
def can_filter(self):
"""
Returns True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.low_mark and self.high_mark is None
def has_results(self):
q = self.clone()
q.clear_ordering()
q.set_limits(high=1)
return bool(list(q.results()))
|
dstufft/crust
|
crust/query.py
|
Query.delete
|
python
|
def delete(self):
uris = [obj["resource_uri"] for obj in self.results()]
data = self.resource._meta.api.resource_serialize({"objects": [], "deleted_objects": uris})
self.resource._meta.api.http_resource("PATCH", self.resource._meta.resource_name, data=data)
return len(uris)
|
Deletes the results of this query, it first fetches all the items to be
deletes and then issues a PATCH against the list uri of the resource.
|
train
|
https://github.com/dstufft/crust/blob/5d4011ecace12fd3f68a03a17dbefb78390a9fc0/crust/query.py#L134-L143
|
[
"def results(self, limit=100):\n \"\"\"\n Yields the results from the API, efficiently handling the pagination and\n properly passing all paramaters.\n \"\"\"\n limited = True if self.high_mark is not None else False\n rmax = self.high_mark - self.low_mark if limited else None\n rnum = 0\n\n params = self.get_params()\n params[\"offset\"] = self.low_mark\n params[\"limit\"] = limit\n\n while not limited and rmax is None or rnum < rmax:\n if limited or rmax is not None:\n rleft = rmax - rnum\n params[\"limit\"] = rleft if rleft < limit else limit\n\n r = self.resource._meta.api.http_resource(\"GET\", self.resource._meta.resource_name, params=params)\n data = self.resource._meta.api.resource_deserialize(r.text)\n\n if not limited:\n rmax = data[\"meta\"][\"total_count\"]\n\n if data[\"meta\"][\"total_count\"] < rmax:\n rmax = data[\"meta\"][\"total_count\"]\n\n params[\"offset\"] = data[\"meta\"][\"offset\"] + data[\"meta\"][\"limit\"]\n\n for item in data[\"objects\"]:\n rnum += 1\n yield item\n"
] |
class Query(object):
"""
A single API query.
"""
def __init__(self, resource, *args, **kwargs):
super(Query, self).__init__(*args, **kwargs)
self.resource = resource
self.filters = {}
self.order_by = None
self.low_mark = 0
self.high_mark = None
def clone(self, klass=None, memo=None, **kwargs):
"""
Creates a copy of the current instance. The 'kwargs' parameter can be
used by clients to update attributes after copying has taken place.
"""
obj = Empty()
obj.__class__ = klass or self.__class__
obj.resource = self.resource
obj.filters = self.filters.copy()
obj.order_by = self.order_by
obj.low_mark = self.low_mark
obj.high_mark = self.high_mark
obj.__dict__.update(kwargs)
return obj
def add_filters(self, **filters):
"""
Adjusts the filters that should be applied to the request to the API.
"""
self.filters.update(filters)
def add_ordering(self, ordering=None):
"""
Adds items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-').
If 'ordering' is empty, all ordering is cleared from the query.
"""
if ordering is not None:
self.order_by = ordering
else:
self.clear_ordering()
def clear_ordering(self):
"""
Removes any ordering settings.
"""
self.order_by = None
def set_limits(self, low=None, high=None):
"""
Adjusts the limits on the rows retrieved. We use low/high to set these,
as it makes it more Pythonic to read and write. When the API query is
created, they are converted to the appropriate offset and limit values.
Any limits passed in here are applied relative to the existing
constraints. So low is added to the current low value and both will be
clamped to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
def results(self, limit=100):
"""
Yields the results from the API, efficiently handling the pagination and
properly passing all paramaters.
"""
limited = True if self.high_mark is not None else False
rmax = self.high_mark - self.low_mark if limited else None
rnum = 0
params = self.get_params()
params["offset"] = self.low_mark
params["limit"] = limit
while not limited and rmax is None or rnum < rmax:
if limited or rmax is not None:
rleft = rmax - rnum
params["limit"] = rleft if rleft < limit else limit
r = self.resource._meta.api.http_resource("GET", self.resource._meta.resource_name, params=params)
data = self.resource._meta.api.resource_deserialize(r.text)
if not limited:
rmax = data["meta"]["total_count"]
if data["meta"]["total_count"] < rmax:
rmax = data["meta"]["total_count"]
params["offset"] = data["meta"]["offset"] + data["meta"]["limit"]
for item in data["objects"]:
rnum += 1
yield item
def get_params(self):
params = {}
# Apply filters
params.update(self.filters)
# Apply Ordering
if self.order_by is not None:
params["order_by"] = self.order_by
return params
def get_count(self):
"""
Gets the total_count using the current filter constraints.
"""
params = self.get_params()
params["offset"] = self.low_mark
params["limit"] = 1
r = self.resource._meta.api.http_resource("GET", self.resource._meta.resource_name, params=params)
data = self.resource._meta.api.resource_deserialize(r.text)
number = data["meta"]["total_count"]
# Apply offset and limit constraints manually, since using limit/offset
# in the API doesn't change the total_count output.
number = max(0, number - self.low_mark)
if self.high_mark is not None:
number = min(number, self.high_mark - self.low_mark)
return number
def can_filter(self):
"""
Returns True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.low_mark and self.high_mark is None
def has_results(self):
q = self.clone()
q.clear_ordering()
q.set_limits(high=1)
return bool(list(q.results()))
|
dstufft/crust
|
crust/query.py
|
Query.get_count
|
python
|
def get_count(self):
params = self.get_params()
params["offset"] = self.low_mark
params["limit"] = 1
r = self.resource._meta.api.http_resource("GET", self.resource._meta.resource_name, params=params)
data = self.resource._meta.api.resource_deserialize(r.text)
number = data["meta"]["total_count"]
# Apply offset and limit constraints manually, since using limit/offset
# in the API doesn't change the total_count output.
number = max(0, number - self.low_mark)
if self.high_mark is not None:
number = min(number, self.high_mark - self.low_mark)
return number
|
Gets the total_count using the current filter constraints.
|
train
|
https://github.com/dstufft/crust/blob/5d4011ecace12fd3f68a03a17dbefb78390a9fc0/crust/query.py#L157-L176
|
[
"def get_params(self):\n params = {}\n\n # Apply filters\n params.update(self.filters)\n\n # Apply Ordering\n if self.order_by is not None:\n params[\"order_by\"] = self.order_by\n\n return params\n"
] |
class Query(object):
"""
A single API query.
"""
def __init__(self, resource, *args, **kwargs):
super(Query, self).__init__(*args, **kwargs)
self.resource = resource
self.filters = {}
self.order_by = None
self.low_mark = 0
self.high_mark = None
def clone(self, klass=None, memo=None, **kwargs):
"""
Creates a copy of the current instance. The 'kwargs' parameter can be
used by clients to update attributes after copying has taken place.
"""
obj = Empty()
obj.__class__ = klass or self.__class__
obj.resource = self.resource
obj.filters = self.filters.copy()
obj.order_by = self.order_by
obj.low_mark = self.low_mark
obj.high_mark = self.high_mark
obj.__dict__.update(kwargs)
return obj
def add_filters(self, **filters):
"""
Adjusts the filters that should be applied to the request to the API.
"""
self.filters.update(filters)
def add_ordering(self, ordering=None):
"""
Adds items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-').
If 'ordering' is empty, all ordering is cleared from the query.
"""
if ordering is not None:
self.order_by = ordering
else:
self.clear_ordering()
def clear_ordering(self):
"""
Removes any ordering settings.
"""
self.order_by = None
def set_limits(self, low=None, high=None):
"""
Adjusts the limits on the rows retrieved. We use low/high to set these,
as it makes it more Pythonic to read and write. When the API query is
created, they are converted to the appropriate offset and limit values.
Any limits passed in here are applied relative to the existing
constraints. So low is added to the current low value and both will be
clamped to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
def results(self, limit=100):
"""
Yields the results from the API, efficiently handling the pagination and
properly passing all paramaters.
"""
limited = True if self.high_mark is not None else False
rmax = self.high_mark - self.low_mark if limited else None
rnum = 0
params = self.get_params()
params["offset"] = self.low_mark
params["limit"] = limit
while not limited and rmax is None or rnum < rmax:
if limited or rmax is not None:
rleft = rmax - rnum
params["limit"] = rleft if rleft < limit else limit
r = self.resource._meta.api.http_resource("GET", self.resource._meta.resource_name, params=params)
data = self.resource._meta.api.resource_deserialize(r.text)
if not limited:
rmax = data["meta"]["total_count"]
if data["meta"]["total_count"] < rmax:
rmax = data["meta"]["total_count"]
params["offset"] = data["meta"]["offset"] + data["meta"]["limit"]
for item in data["objects"]:
rnum += 1
yield item
def delete(self):
"""
Deletes the results of this query, it first fetches all the items to be
deletes and then issues a PATCH against the list uri of the resource.
"""
uris = [obj["resource_uri"] for obj in self.results()]
data = self.resource._meta.api.resource_serialize({"objects": [], "deleted_objects": uris})
self.resource._meta.api.http_resource("PATCH", self.resource._meta.resource_name, data=data)
return len(uris)
def get_params(self):
params = {}
# Apply filters
params.update(self.filters)
# Apply Ordering
if self.order_by is not None:
params["order_by"] = self.order_by
return params
def can_filter(self):
"""
Returns True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.low_mark and self.high_mark is None
def has_results(self):
q = self.clone()
q.clear_ordering()
q.set_limits(high=1)
return bool(list(q.results()))
|
dstufft/crust
|
crust/query.py
|
QuerySet.iterator
|
python
|
def iterator(self):
for item in self.query.results():
obj = self.resource(**item)
yield obj
|
An iterator over the results from applying this QuerySet to the api.
|
train
|
https://github.com/dstufft/crust/blob/5d4011ecace12fd3f68a03a17dbefb78390a9fc0/crust/query.py#L335-L343
| null |
class QuerySet(object):
"""
Represents a lazy api lookup for a set of objects.
"""
def __init__(self, resource, query=None, *args, **kwargs):
super(QuerySet, self).__init__(*args, **kwargs)
self.resource = resource
self.query = query or Query(self.resource)
self._result_cache = None
self._iter = None
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k, v in six.iteritems(self.__dict__):
if k in ("_iter", "_result_cache"):
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
len(self)
obj_dict = self.__dict__.copy()
obj_dict["_iter"] = None
return obj_dict
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __len__(self):
# Since __len__ is called quite frequently (for example, as part of
# list(qs), we make some effort here to be as efficient as possible
# whilst not messing up any existing iterators against the QuerySet.
if self._result_cache is None:
if self._iter:
self._result_cache = list(self._iter)
else:
self._result_cache = list(self.iterator())
elif self._iter:
self._result_cache.extend(self._iter)
return len(self._result_cache)
def __iter__(self):
if self._result_cache is None:
self._iter = self.iterator()
self._result_cache = []
if self._iter:
return self._result_iter()
# Python's list iterator is better than our version when we're just
# iterating over the cache.
return iter(self._result_cache)
def __nonzero__(self):
if self._result_cache is not None:
return bool(self._result_cache)
try:
next(iter(self))
except StopIteration:
return False
return True
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice,) + six.integer_types):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
if self._iter is not None:
# The result cache has only been partially populated, so we may
# need to fill it out a bit more.
if isinstance(k, slice):
if k.stop is not None:
# Some people insist on passing in strings here.
bound = int(k.stop)
else:
bound = None
else:
bound = k + 1
if len(self._result_cache) < bound:
self._fill_cache(bound - len(self._result_cache))
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return k.step and list(qs)[::k.step] or qs
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
###############################
# METHODS THAT DO API QUERIES #
###############################
def count(self):
"""
Returns the number of records as an integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid an api call.
"""
if self._result_cache is not None and not self._iter:
return len(self._result_cache)
return self.query.get_count()
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter():
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.resource.DoesNotExist(
"%s matching query does not exist. "
"Lookup parameters were %s" %
(self.resource._meta.resource_name, kwargs))
raise self.resource.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s! "
"Lookup parameters were %s" %
(self.resource._meta.resource_name, num, kwargs))
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the api
and returning the created object.
"""
obj = self.resource(**kwargs)
obj.save(force_insert=True)
return obj
def get_or_create(self, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
assert kwargs, "get_or_create() must be passed at least one keyword argument"
defaults = kwargs.pop("defaults", {})
lookup = kwargs.copy()
try:
return self.get(**lookup), False
except self.resource.DoesNotExist:
params = dict([(k, v) for k, v in kwargs.items()])
params.update(defaults)
obj = self.create(**params)
return obj, True
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
assert self.query.can_filter(), "Cannot use 'limit' or 'offset' with delete."
del_query = self._clone()
# Disable non-supported fields.
del_query.query.clear_ordering()
return del_query.query.delete()
def exists(self):
if self._result_cache is None:
return self.query.has_results()
return bool(self._result_cache)
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one.
"""
return self._clone()
def filter(self, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
if kwargs:
assert self.query.can_filter(), "Cannot filter a query once a slice has been taken."
clone = self._clone()
clone.query.add_filters(**kwargs)
return clone
def order_by(self, field_name=None):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), "Cannot reorder a query once a slice has been taken."
clone = self._clone()
clone.query.clear_ordering()
if field_name is not None:
clone.query.add_ordering(field_name)
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause.
"""
if self.query.order_by:
return True
else:
return False
###################
# PRIVATE METHODS #
###################
def _result_iter(self):
pos = 0
while True:
upper = len(self._result_cache)
while pos < upper:
yield self._result_cache[pos]
pos += 1
if not self._iter:
raise StopIteration
if len(self._result_cache) <= pos:
self._fill_cache()
def _fill_cache(self, num=None):
"""
Fills the result cache with 'num' more entries (or until the results
iterator is exhausted).
"""
if self._iter:
try:
for i in range(num or ITER_CHUNK_SIZE):
self._result_cache.append(next(self._iter))
except StopIteration:
self._iter = None
def _clone(self, klass=None, setup=False, **kwargs):
if klass is None:
klass = self.__class__
query = self.query.clone()
c = klass(resource=self.resource, query=query)
c.__dict__.update(kwargs)
return c
|
dstufft/crust
|
crust/query.py
|
QuerySet.count
|
python
|
def count(self):
if self._result_cache is not None and not self._iter:
return len(self._result_cache)
return self.query.get_count()
|
Returns the number of records as an integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid an api call.
|
train
|
https://github.com/dstufft/crust/blob/5d4011ecace12fd3f68a03a17dbefb78390a9fc0/crust/query.py#L345-L355
| null |
class QuerySet(object):
"""
Represents a lazy api lookup for a set of objects.
"""
def __init__(self, resource, query=None, *args, **kwargs):
super(QuerySet, self).__init__(*args, **kwargs)
self.resource = resource
self.query = query or Query(self.resource)
self._result_cache = None
self._iter = None
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k, v in six.iteritems(self.__dict__):
if k in ("_iter", "_result_cache"):
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
len(self)
obj_dict = self.__dict__.copy()
obj_dict["_iter"] = None
return obj_dict
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __len__(self):
# Since __len__ is called quite frequently (for example, as part of
# list(qs), we make some effort here to be as efficient as possible
# whilst not messing up any existing iterators against the QuerySet.
if self._result_cache is None:
if self._iter:
self._result_cache = list(self._iter)
else:
self._result_cache = list(self.iterator())
elif self._iter:
self._result_cache.extend(self._iter)
return len(self._result_cache)
def __iter__(self):
if self._result_cache is None:
self._iter = self.iterator()
self._result_cache = []
if self._iter:
return self._result_iter()
# Python's list iterator is better than our version when we're just
# iterating over the cache.
return iter(self._result_cache)
def __nonzero__(self):
if self._result_cache is not None:
return bool(self._result_cache)
try:
next(iter(self))
except StopIteration:
return False
return True
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice,) + six.integer_types):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
if self._iter is not None:
# The result cache has only been partially populated, so we may
# need to fill it out a bit more.
if isinstance(k, slice):
if k.stop is not None:
# Some people insist on passing in strings here.
bound = int(k.stop)
else:
bound = None
else:
bound = k + 1
if len(self._result_cache) < bound:
self._fill_cache(bound - len(self._result_cache))
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return k.step and list(qs)[::k.step] or qs
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
###############################
# METHODS THAT DO API QUERIES #
###############################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the api.
"""
for item in self.query.results():
obj = self.resource(**item)
yield obj
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter():
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.resource.DoesNotExist(
"%s matching query does not exist. "
"Lookup parameters were %s" %
(self.resource._meta.resource_name, kwargs))
raise self.resource.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s! "
"Lookup parameters were %s" %
(self.resource._meta.resource_name, num, kwargs))
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the api
and returning the created object.
"""
obj = self.resource(**kwargs)
obj.save(force_insert=True)
return obj
def get_or_create(self, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
assert kwargs, "get_or_create() must be passed at least one keyword argument"
defaults = kwargs.pop("defaults", {})
lookup = kwargs.copy()
try:
return self.get(**lookup), False
except self.resource.DoesNotExist:
params = dict([(k, v) for k, v in kwargs.items()])
params.update(defaults)
obj = self.create(**params)
return obj, True
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
assert self.query.can_filter(), "Cannot use 'limit' or 'offset' with delete."
del_query = self._clone()
# Disable non-supported fields.
del_query.query.clear_ordering()
return del_query.query.delete()
def exists(self):
if self._result_cache is None:
return self.query.has_results()
return bool(self._result_cache)
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one.
"""
return self._clone()
def filter(self, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
if kwargs:
assert self.query.can_filter(), "Cannot filter a query once a slice has been taken."
clone = self._clone()
clone.query.add_filters(**kwargs)
return clone
def order_by(self, field_name=None):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), "Cannot reorder a query once a slice has been taken."
clone = self._clone()
clone.query.clear_ordering()
if field_name is not None:
clone.query.add_ordering(field_name)
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause.
"""
if self.query.order_by:
return True
else:
return False
###################
# PRIVATE METHODS #
###################
def _result_iter(self):
pos = 0
while True:
upper = len(self._result_cache)
while pos < upper:
yield self._result_cache[pos]
pos += 1
if not self._iter:
raise StopIteration
if len(self._result_cache) <= pos:
self._fill_cache()
def _fill_cache(self, num=None):
"""
Fills the result cache with 'num' more entries (or until the results
iterator is exhausted).
"""
if self._iter:
try:
for i in range(num or ITER_CHUNK_SIZE):
self._result_cache.append(next(self._iter))
except StopIteration:
self._iter = None
def _clone(self, klass=None, setup=False, **kwargs):
if klass is None:
klass = self.__class__
query = self.query.clone()
c = klass(resource=self.resource, query=query)
c.__dict__.update(kwargs)
return c
|
dstufft/crust
|
crust/query.py
|
QuerySet.get
|
python
|
def get(self, *args, **kwargs):
clone = self.filter(*args, **kwargs)
if self.query.can_filter():
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.resource.DoesNotExist(
"%s matching query does not exist. "
"Lookup parameters were %s" %
(self.resource._meta.resource_name, kwargs))
raise self.resource.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s! "
"Lookup parameters were %s" %
(self.resource._meta.resource_name, num, kwargs))
|
Performs the query and returns a single object matching the given
keyword arguments.
|
train
|
https://github.com/dstufft/crust/blob/5d4011ecace12fd3f68a03a17dbefb78390a9fc0/crust/query.py#L357-L380
|
[
"def filter(self, **kwargs):\n \"\"\"\n Returns a new QuerySet instance with the args ANDed to the existing\n set.\n \"\"\"\n if kwargs:\n assert self.query.can_filter(), \"Cannot filter a query once a slice has been taken.\"\n\n clone = self._clone()\n clone.query.add_filters(**kwargs)\n\n return clone\n"
] |
class QuerySet(object):
"""
Represents a lazy api lookup for a set of objects.
"""
def __init__(self, resource, query=None, *args, **kwargs):
super(QuerySet, self).__init__(*args, **kwargs)
self.resource = resource
self.query = query or Query(self.resource)
self._result_cache = None
self._iter = None
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k, v in six.iteritems(self.__dict__):
if k in ("_iter", "_result_cache"):
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
len(self)
obj_dict = self.__dict__.copy()
obj_dict["_iter"] = None
return obj_dict
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __len__(self):
# Since __len__ is called quite frequently (for example, as part of
# list(qs), we make some effort here to be as efficient as possible
# whilst not messing up any existing iterators against the QuerySet.
if self._result_cache is None:
if self._iter:
self._result_cache = list(self._iter)
else:
self._result_cache = list(self.iterator())
elif self._iter:
self._result_cache.extend(self._iter)
return len(self._result_cache)
def __iter__(self):
if self._result_cache is None:
self._iter = self.iterator()
self._result_cache = []
if self._iter:
return self._result_iter()
# Python's list iterator is better than our version when we're just
# iterating over the cache.
return iter(self._result_cache)
def __nonzero__(self):
if self._result_cache is not None:
return bool(self._result_cache)
try:
next(iter(self))
except StopIteration:
return False
return True
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice,) + six.integer_types):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
if self._iter is not None:
# The result cache has only been partially populated, so we may
# need to fill it out a bit more.
if isinstance(k, slice):
if k.stop is not None:
# Some people insist on passing in strings here.
bound = int(k.stop)
else:
bound = None
else:
bound = k + 1
if len(self._result_cache) < bound:
self._fill_cache(bound - len(self._result_cache))
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return k.step and list(qs)[::k.step] or qs
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
###############################
# METHODS THAT DO API QUERIES #
###############################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the api.
"""
for item in self.query.results():
obj = self.resource(**item)
yield obj
def count(self):
"""
Returns the number of records as an integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid an api call.
"""
if self._result_cache is not None and not self._iter:
return len(self._result_cache)
return self.query.get_count()
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the api
and returning the created object.
"""
obj = self.resource(**kwargs)
obj.save(force_insert=True)
return obj
def get_or_create(self, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
assert kwargs, "get_or_create() must be passed at least one keyword argument"
defaults = kwargs.pop("defaults", {})
lookup = kwargs.copy()
try:
return self.get(**lookup), False
except self.resource.DoesNotExist:
params = dict([(k, v) for k, v in kwargs.items()])
params.update(defaults)
obj = self.create(**params)
return obj, True
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
assert self.query.can_filter(), "Cannot use 'limit' or 'offset' with delete."
del_query = self._clone()
# Disable non-supported fields.
del_query.query.clear_ordering()
return del_query.query.delete()
def exists(self):
if self._result_cache is None:
return self.query.has_results()
return bool(self._result_cache)
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one.
"""
return self._clone()
def filter(self, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
if kwargs:
assert self.query.can_filter(), "Cannot filter a query once a slice has been taken."
clone = self._clone()
clone.query.add_filters(**kwargs)
return clone
def order_by(self, field_name=None):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), "Cannot reorder a query once a slice has been taken."
clone = self._clone()
clone.query.clear_ordering()
if field_name is not None:
clone.query.add_ordering(field_name)
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause.
"""
if self.query.order_by:
return True
else:
return False
###################
# PRIVATE METHODS #
###################
def _result_iter(self):
pos = 0
while True:
upper = len(self._result_cache)
while pos < upper:
yield self._result_cache[pos]
pos += 1
if not self._iter:
raise StopIteration
if len(self._result_cache) <= pos:
self._fill_cache()
def _fill_cache(self, num=None):
"""
Fills the result cache with 'num' more entries (or until the results
iterator is exhausted).
"""
if self._iter:
try:
for i in range(num or ITER_CHUNK_SIZE):
self._result_cache.append(next(self._iter))
except StopIteration:
self._iter = None
def _clone(self, klass=None, setup=False, **kwargs):
if klass is None:
klass = self.__class__
query = self.query.clone()
c = klass(resource=self.resource, query=query)
c.__dict__.update(kwargs)
return c
|
dstufft/crust
|
crust/query.py
|
QuerySet.create
|
python
|
def create(self, **kwargs):
obj = self.resource(**kwargs)
obj.save(force_insert=True)
return obj
|
Creates a new object with the given kwargs, saving it to the api
and returning the created object.
|
train
|
https://github.com/dstufft/crust/blob/5d4011ecace12fd3f68a03a17dbefb78390a9fc0/crust/query.py#L382-L389
| null |
class QuerySet(object):
"""
Represents a lazy api lookup for a set of objects.
"""
def __init__(self, resource, query=None, *args, **kwargs):
super(QuerySet, self).__init__(*args, **kwargs)
self.resource = resource
self.query = query or Query(self.resource)
self._result_cache = None
self._iter = None
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k, v in six.iteritems(self.__dict__):
if k in ("_iter", "_result_cache"):
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
len(self)
obj_dict = self.__dict__.copy()
obj_dict["_iter"] = None
return obj_dict
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __len__(self):
# Since __len__ is called quite frequently (for example, as part of
# list(qs), we make some effort here to be as efficient as possible
# whilst not messing up any existing iterators against the QuerySet.
if self._result_cache is None:
if self._iter:
self._result_cache = list(self._iter)
else:
self._result_cache = list(self.iterator())
elif self._iter:
self._result_cache.extend(self._iter)
return len(self._result_cache)
def __iter__(self):
if self._result_cache is None:
self._iter = self.iterator()
self._result_cache = []
if self._iter:
return self._result_iter()
# Python's list iterator is better than our version when we're just
# iterating over the cache.
return iter(self._result_cache)
def __nonzero__(self):
if self._result_cache is not None:
return bool(self._result_cache)
try:
next(iter(self))
except StopIteration:
return False
return True
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice,) + six.integer_types):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
if self._iter is not None:
# The result cache has only been partially populated, so we may
# need to fill it out a bit more.
if isinstance(k, slice):
if k.stop is not None:
# Some people insist on passing in strings here.
bound = int(k.stop)
else:
bound = None
else:
bound = k + 1
if len(self._result_cache) < bound:
self._fill_cache(bound - len(self._result_cache))
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return k.step and list(qs)[::k.step] or qs
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
###############################
# METHODS THAT DO API QUERIES #
###############################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the api.
"""
for item in self.query.results():
obj = self.resource(**item)
yield obj
def count(self):
"""
Returns the number of records as an integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid an api call.
"""
if self._result_cache is not None and not self._iter:
return len(self._result_cache)
return self.query.get_count()
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter():
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.resource.DoesNotExist(
"%s matching query does not exist. "
"Lookup parameters were %s" %
(self.resource._meta.resource_name, kwargs))
raise self.resource.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s! "
"Lookup parameters were %s" %
(self.resource._meta.resource_name, num, kwargs))
def get_or_create(self, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
assert kwargs, "get_or_create() must be passed at least one keyword argument"
defaults = kwargs.pop("defaults", {})
lookup = kwargs.copy()
try:
return self.get(**lookup), False
except self.resource.DoesNotExist:
params = dict([(k, v) for k, v in kwargs.items()])
params.update(defaults)
obj = self.create(**params)
return obj, True
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
assert self.query.can_filter(), "Cannot use 'limit' or 'offset' with delete."
del_query = self._clone()
# Disable non-supported fields.
del_query.query.clear_ordering()
return del_query.query.delete()
def exists(self):
if self._result_cache is None:
return self.query.has_results()
return bool(self._result_cache)
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one.
"""
return self._clone()
def filter(self, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
if kwargs:
assert self.query.can_filter(), "Cannot filter a query once a slice has been taken."
clone = self._clone()
clone.query.add_filters(**kwargs)
return clone
def order_by(self, field_name=None):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), "Cannot reorder a query once a slice has been taken."
clone = self._clone()
clone.query.clear_ordering()
if field_name is not None:
clone.query.add_ordering(field_name)
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause.
"""
if self.query.order_by:
return True
else:
return False
###################
# PRIVATE METHODS #
###################
def _result_iter(self):
pos = 0
while True:
upper = len(self._result_cache)
while pos < upper:
yield self._result_cache[pos]
pos += 1
if not self._iter:
raise StopIteration
if len(self._result_cache) <= pos:
self._fill_cache()
def _fill_cache(self, num=None):
"""
Fills the result cache with 'num' more entries (or until the results
iterator is exhausted).
"""
if self._iter:
try:
for i in range(num or ITER_CHUNK_SIZE):
self._result_cache.append(next(self._iter))
except StopIteration:
self._iter = None
def _clone(self, klass=None, setup=False, **kwargs):
if klass is None:
klass = self.__class__
query = self.query.clone()
c = klass(resource=self.resource, query=query)
c.__dict__.update(kwargs)
return c
|
dstufft/crust
|
crust/query.py
|
QuerySet.get_or_create
|
python
|
def get_or_create(self, **kwargs):
assert kwargs, "get_or_create() must be passed at least one keyword argument"
defaults = kwargs.pop("defaults", {})
lookup = kwargs.copy()
try:
return self.get(**lookup), False
except self.resource.DoesNotExist:
params = dict([(k, v) for k, v in kwargs.items()])
params.update(defaults)
obj = self.create(**params)
return obj, True
|
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
|
train
|
https://github.com/dstufft/crust/blob/5d4011ecace12fd3f68a03a17dbefb78390a9fc0/crust/query.py#L391-L409
|
[
"def get(self, *args, **kwargs):\n \"\"\"\n Performs the query and returns a single object matching the given\n keyword arguments.\n \"\"\"\n clone = self.filter(*args, **kwargs)\n\n if self.query.can_filter():\n clone = clone.order_by()\n\n num = len(clone)\n\n if num == 1:\n return clone._result_cache[0]\n if not num:\n raise self.resource.DoesNotExist(\n \"%s matching query does not exist. \"\n \"Lookup parameters were %s\" %\n (self.resource._meta.resource_name, kwargs))\n\n raise self.resource.MultipleObjectsReturned(\n \"get() returned more than one %s -- it returned %s! \"\n \"Lookup parameters were %s\" %\n (self.resource._meta.resource_name, num, kwargs))\n",
"def create(self, **kwargs):\n \"\"\"\n Creates a new object with the given kwargs, saving it to the api\n and returning the created object.\n \"\"\"\n obj = self.resource(**kwargs)\n obj.save(force_insert=True)\n return obj\n"
] |
class QuerySet(object):
"""
Represents a lazy api lookup for a set of objects.
"""
def __init__(self, resource, query=None, *args, **kwargs):
super(QuerySet, self).__init__(*args, **kwargs)
self.resource = resource
self.query = query or Query(self.resource)
self._result_cache = None
self._iter = None
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k, v in six.iteritems(self.__dict__):
if k in ("_iter", "_result_cache"):
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
len(self)
obj_dict = self.__dict__.copy()
obj_dict["_iter"] = None
return obj_dict
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __len__(self):
# Since __len__ is called quite frequently (for example, as part of
# list(qs), we make some effort here to be as efficient as possible
# whilst not messing up any existing iterators against the QuerySet.
if self._result_cache is None:
if self._iter:
self._result_cache = list(self._iter)
else:
self._result_cache = list(self.iterator())
elif self._iter:
self._result_cache.extend(self._iter)
return len(self._result_cache)
def __iter__(self):
if self._result_cache is None:
self._iter = self.iterator()
self._result_cache = []
if self._iter:
return self._result_iter()
# Python's list iterator is better than our version when we're just
# iterating over the cache.
return iter(self._result_cache)
def __nonzero__(self):
if self._result_cache is not None:
return bool(self._result_cache)
try:
next(iter(self))
except StopIteration:
return False
return True
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice,) + six.integer_types):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
if self._iter is not None:
# The result cache has only been partially populated, so we may
# need to fill it out a bit more.
if isinstance(k, slice):
if k.stop is not None:
# Some people insist on passing in strings here.
bound = int(k.stop)
else:
bound = None
else:
bound = k + 1
if len(self._result_cache) < bound:
self._fill_cache(bound - len(self._result_cache))
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return k.step and list(qs)[::k.step] or qs
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
###############################
# METHODS THAT DO API QUERIES #
###############################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the api.
"""
for item in self.query.results():
obj = self.resource(**item)
yield obj
def count(self):
"""
Returns the number of records as an integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid an api call.
"""
if self._result_cache is not None and not self._iter:
return len(self._result_cache)
return self.query.get_count()
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter():
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.resource.DoesNotExist(
"%s matching query does not exist. "
"Lookup parameters were %s" %
(self.resource._meta.resource_name, kwargs))
raise self.resource.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s! "
"Lookup parameters were %s" %
(self.resource._meta.resource_name, num, kwargs))
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the api
and returning the created object.
"""
obj = self.resource(**kwargs)
obj.save(force_insert=True)
return obj
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
assert self.query.can_filter(), "Cannot use 'limit' or 'offset' with delete."
del_query = self._clone()
# Disable non-supported fields.
del_query.query.clear_ordering()
return del_query.query.delete()
def exists(self):
if self._result_cache is None:
return self.query.has_results()
return bool(self._result_cache)
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one.
"""
return self._clone()
def filter(self, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
if kwargs:
assert self.query.can_filter(), "Cannot filter a query once a slice has been taken."
clone = self._clone()
clone.query.add_filters(**kwargs)
return clone
def order_by(self, field_name=None):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), "Cannot reorder a query once a slice has been taken."
clone = self._clone()
clone.query.clear_ordering()
if field_name is not None:
clone.query.add_ordering(field_name)
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause.
"""
if self.query.order_by:
return True
else:
return False
###################
# PRIVATE METHODS #
###################
def _result_iter(self):
pos = 0
while True:
upper = len(self._result_cache)
while pos < upper:
yield self._result_cache[pos]
pos += 1
if not self._iter:
raise StopIteration
if len(self._result_cache) <= pos:
self._fill_cache()
def _fill_cache(self, num=None):
"""
Fills the result cache with 'num' more entries (or until the results
iterator is exhausted).
"""
if self._iter:
try:
for i in range(num or ITER_CHUNK_SIZE):
self._result_cache.append(next(self._iter))
except StopIteration:
self._iter = None
def _clone(self, klass=None, setup=False, **kwargs):
if klass is None:
klass = self.__class__
query = self.query.clone()
c = klass(resource=self.resource, query=query)
c.__dict__.update(kwargs)
return c
|
dstufft/crust
|
crust/query.py
|
QuerySet.delete
|
python
|
def delete(self):
assert self.query.can_filter(), "Cannot use 'limit' or 'offset' with delete."
del_query = self._clone()
# Disable non-supported fields.
del_query.query.clear_ordering()
return del_query.query.delete()
|
Deletes the records in the current QuerySet.
|
train
|
https://github.com/dstufft/crust/blob/5d4011ecace12fd3f68a03a17dbefb78390a9fc0/crust/query.py#L411-L422
| null |
class QuerySet(object):
"""
Represents a lazy api lookup for a set of objects.
"""
def __init__(self, resource, query=None, *args, **kwargs):
super(QuerySet, self).__init__(*args, **kwargs)
self.resource = resource
self.query = query or Query(self.resource)
self._result_cache = None
self._iter = None
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k, v in six.iteritems(self.__dict__):
if k in ("_iter", "_result_cache"):
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
len(self)
obj_dict = self.__dict__.copy()
obj_dict["_iter"] = None
return obj_dict
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __len__(self):
# Since __len__ is called quite frequently (for example, as part of
# list(qs), we make some effort here to be as efficient as possible
# whilst not messing up any existing iterators against the QuerySet.
if self._result_cache is None:
if self._iter:
self._result_cache = list(self._iter)
else:
self._result_cache = list(self.iterator())
elif self._iter:
self._result_cache.extend(self._iter)
return len(self._result_cache)
def __iter__(self):
if self._result_cache is None:
self._iter = self.iterator()
self._result_cache = []
if self._iter:
return self._result_iter()
# Python's list iterator is better than our version when we're just
# iterating over the cache.
return iter(self._result_cache)
def __nonzero__(self):
if self._result_cache is not None:
return bool(self._result_cache)
try:
next(iter(self))
except StopIteration:
return False
return True
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice,) + six.integer_types):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
if self._iter is not None:
# The result cache has only been partially populated, so we may
# need to fill it out a bit more.
if isinstance(k, slice):
if k.stop is not None:
# Some people insist on passing in strings here.
bound = int(k.stop)
else:
bound = None
else:
bound = k + 1
if len(self._result_cache) < bound:
self._fill_cache(bound - len(self._result_cache))
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return k.step and list(qs)[::k.step] or qs
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
###############################
# METHODS THAT DO API QUERIES #
###############################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the api.
"""
for item in self.query.results():
obj = self.resource(**item)
yield obj
def count(self):
"""
Returns the number of records as an integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid an api call.
"""
if self._result_cache is not None and not self._iter:
return len(self._result_cache)
return self.query.get_count()
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter():
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.resource.DoesNotExist(
"%s matching query does not exist. "
"Lookup parameters were %s" %
(self.resource._meta.resource_name, kwargs))
raise self.resource.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s! "
"Lookup parameters were %s" %
(self.resource._meta.resource_name, num, kwargs))
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the api
and returning the created object.
"""
obj = self.resource(**kwargs)
obj.save(force_insert=True)
return obj
def get_or_create(self, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
assert kwargs, "get_or_create() must be passed at least one keyword argument"
defaults = kwargs.pop("defaults", {})
lookup = kwargs.copy()
try:
return self.get(**lookup), False
except self.resource.DoesNotExist:
params = dict([(k, v) for k, v in kwargs.items()])
params.update(defaults)
obj = self.create(**params)
return obj, True
def exists(self):
if self._result_cache is None:
return self.query.has_results()
return bool(self._result_cache)
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one.
"""
return self._clone()
def filter(self, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
if kwargs:
assert self.query.can_filter(), "Cannot filter a query once a slice has been taken."
clone = self._clone()
clone.query.add_filters(**kwargs)
return clone
def order_by(self, field_name=None):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), "Cannot reorder a query once a slice has been taken."
clone = self._clone()
clone.query.clear_ordering()
if field_name is not None:
clone.query.add_ordering(field_name)
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause.
"""
if self.query.order_by:
return True
else:
return False
###################
# PRIVATE METHODS #
###################
def _result_iter(self):
pos = 0
while True:
upper = len(self._result_cache)
while pos < upper:
yield self._result_cache[pos]
pos += 1
if not self._iter:
raise StopIteration
if len(self._result_cache) <= pos:
self._fill_cache()
def _fill_cache(self, num=None):
"""
Fills the result cache with 'num' more entries (or until the results
iterator is exhausted).
"""
if self._iter:
try:
for i in range(num or ITER_CHUNK_SIZE):
self._result_cache.append(next(self._iter))
except StopIteration:
self._iter = None
def _clone(self, klass=None, setup=False, **kwargs):
if klass is None:
klass = self.__class__
query = self.query.clone()
c = klass(resource=self.resource, query=query)
c.__dict__.update(kwargs)
return c
|
dstufft/crust
|
crust/query.py
|
QuerySet.filter
|
python
|
def filter(self, **kwargs):
if kwargs:
assert self.query.can_filter(), "Cannot filter a query once a slice has been taken."
clone = self._clone()
clone.query.add_filters(**kwargs)
return clone
|
Returns a new QuerySet instance with the args ANDed to the existing
set.
|
train
|
https://github.com/dstufft/crust/blob/5d4011ecace12fd3f68a03a17dbefb78390a9fc0/crust/query.py#L439-L450
|
[
"def _clone(self, klass=None, setup=False, **kwargs):\n if klass is None:\n klass = self.__class__\n\n query = self.query.clone()\n\n c = klass(resource=self.resource, query=query)\n c.__dict__.update(kwargs)\n\n return c\n"
] |
class QuerySet(object):
"""
Represents a lazy api lookup for a set of objects.
"""
def __init__(self, resource, query=None, *args, **kwargs):
super(QuerySet, self).__init__(*args, **kwargs)
self.resource = resource
self.query = query or Query(self.resource)
self._result_cache = None
self._iter = None
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k, v in six.iteritems(self.__dict__):
if k in ("_iter", "_result_cache"):
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
len(self)
obj_dict = self.__dict__.copy()
obj_dict["_iter"] = None
return obj_dict
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __len__(self):
# Since __len__ is called quite frequently (for example, as part of
# list(qs), we make some effort here to be as efficient as possible
# whilst not messing up any existing iterators against the QuerySet.
if self._result_cache is None:
if self._iter:
self._result_cache = list(self._iter)
else:
self._result_cache = list(self.iterator())
elif self._iter:
self._result_cache.extend(self._iter)
return len(self._result_cache)
def __iter__(self):
if self._result_cache is None:
self._iter = self.iterator()
self._result_cache = []
if self._iter:
return self._result_iter()
# Python's list iterator is better than our version when we're just
# iterating over the cache.
return iter(self._result_cache)
def __nonzero__(self):
if self._result_cache is not None:
return bool(self._result_cache)
try:
next(iter(self))
except StopIteration:
return False
return True
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice,) + six.integer_types):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
if self._iter is not None:
# The result cache has only been partially populated, so we may
# need to fill it out a bit more.
if isinstance(k, slice):
if k.stop is not None:
# Some people insist on passing in strings here.
bound = int(k.stop)
else:
bound = None
else:
bound = k + 1
if len(self._result_cache) < bound:
self._fill_cache(bound - len(self._result_cache))
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return k.step and list(qs)[::k.step] or qs
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
###############################
# METHODS THAT DO API QUERIES #
###############################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the api.
"""
for item in self.query.results():
obj = self.resource(**item)
yield obj
def count(self):
"""
Returns the number of records as an integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid an api call.
"""
if self._result_cache is not None and not self._iter:
return len(self._result_cache)
return self.query.get_count()
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter():
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.resource.DoesNotExist(
"%s matching query does not exist. "
"Lookup parameters were %s" %
(self.resource._meta.resource_name, kwargs))
raise self.resource.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s! "
"Lookup parameters were %s" %
(self.resource._meta.resource_name, num, kwargs))
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the api
and returning the created object.
"""
obj = self.resource(**kwargs)
obj.save(force_insert=True)
return obj
def get_or_create(self, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
assert kwargs, "get_or_create() must be passed at least one keyword argument"
defaults = kwargs.pop("defaults", {})
lookup = kwargs.copy()
try:
return self.get(**lookup), False
except self.resource.DoesNotExist:
params = dict([(k, v) for k, v in kwargs.items()])
params.update(defaults)
obj = self.create(**params)
return obj, True
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
assert self.query.can_filter(), "Cannot use 'limit' or 'offset' with delete."
del_query = self._clone()
# Disable non-supported fields.
del_query.query.clear_ordering()
return del_query.query.delete()
def exists(self):
if self._result_cache is None:
return self.query.has_results()
return bool(self._result_cache)
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one.
"""
return self._clone()
def order_by(self, field_name=None):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), "Cannot reorder a query once a slice has been taken."
clone = self._clone()
clone.query.clear_ordering()
if field_name is not None:
clone.query.add_ordering(field_name)
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause.
"""
if self.query.order_by:
return True
else:
return False
###################
# PRIVATE METHODS #
###################
def _result_iter(self):
pos = 0
while True:
upper = len(self._result_cache)
while pos < upper:
yield self._result_cache[pos]
pos += 1
if not self._iter:
raise StopIteration
if len(self._result_cache) <= pos:
self._fill_cache()
def _fill_cache(self, num=None):
"""
Fills the result cache with 'num' more entries (or until the results
iterator is exhausted).
"""
if self._iter:
try:
for i in range(num or ITER_CHUNK_SIZE):
self._result_cache.append(next(self._iter))
except StopIteration:
self._iter = None
def _clone(self, klass=None, setup=False, **kwargs):
if klass is None:
klass = self.__class__
query = self.query.clone()
c = klass(resource=self.resource, query=query)
c.__dict__.update(kwargs)
return c
|
dstufft/crust
|
crust/query.py
|
QuerySet.order_by
|
python
|
def order_by(self, field_name=None):
assert self.query.can_filter(), "Cannot reorder a query once a slice has been taken."
clone = self._clone()
clone.query.clear_ordering()
if field_name is not None:
clone.query.add_ordering(field_name)
return clone
|
Returns a new QuerySet instance with the ordering changed.
|
train
|
https://github.com/dstufft/crust/blob/5d4011ecace12fd3f68a03a17dbefb78390a9fc0/crust/query.py#L452-L464
| null |
class QuerySet(object):
"""
Represents a lazy api lookup for a set of objects.
"""
def __init__(self, resource, query=None, *args, **kwargs):
super(QuerySet, self).__init__(*args, **kwargs)
self.resource = resource
self.query = query or Query(self.resource)
self._result_cache = None
self._iter = None
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k, v in six.iteritems(self.__dict__):
if k in ("_iter", "_result_cache"):
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
len(self)
obj_dict = self.__dict__.copy()
obj_dict["_iter"] = None
return obj_dict
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __len__(self):
# Since __len__ is called quite frequently (for example, as part of
# list(qs), we make some effort here to be as efficient as possible
# whilst not messing up any existing iterators against the QuerySet.
if self._result_cache is None:
if self._iter:
self._result_cache = list(self._iter)
else:
self._result_cache = list(self.iterator())
elif self._iter:
self._result_cache.extend(self._iter)
return len(self._result_cache)
def __iter__(self):
if self._result_cache is None:
self._iter = self.iterator()
self._result_cache = []
if self._iter:
return self._result_iter()
# Python's list iterator is better than our version when we're just
# iterating over the cache.
return iter(self._result_cache)
def __nonzero__(self):
if self._result_cache is not None:
return bool(self._result_cache)
try:
next(iter(self))
except StopIteration:
return False
return True
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice,) + six.integer_types):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
if self._iter is not None:
# The result cache has only been partially populated, so we may
# need to fill it out a bit more.
if isinstance(k, slice):
if k.stop is not None:
# Some people insist on passing in strings here.
bound = int(k.stop)
else:
bound = None
else:
bound = k + 1
if len(self._result_cache) < bound:
self._fill_cache(bound - len(self._result_cache))
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return k.step and list(qs)[::k.step] or qs
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
###############################
# METHODS THAT DO API QUERIES #
###############################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the api.
"""
for item in self.query.results():
obj = self.resource(**item)
yield obj
def count(self):
"""
Returns the number of records as an integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid an api call.
"""
if self._result_cache is not None and not self._iter:
return len(self._result_cache)
return self.query.get_count()
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter():
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.resource.DoesNotExist(
"%s matching query does not exist. "
"Lookup parameters were %s" %
(self.resource._meta.resource_name, kwargs))
raise self.resource.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s! "
"Lookup parameters were %s" %
(self.resource._meta.resource_name, num, kwargs))
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the api
and returning the created object.
"""
obj = self.resource(**kwargs)
obj.save(force_insert=True)
return obj
def get_or_create(self, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
assert kwargs, "get_or_create() must be passed at least one keyword argument"
defaults = kwargs.pop("defaults", {})
lookup = kwargs.copy()
try:
return self.get(**lookup), False
except self.resource.DoesNotExist:
params = dict([(k, v) for k, v in kwargs.items()])
params.update(defaults)
obj = self.create(**params)
return obj, True
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
assert self.query.can_filter(), "Cannot use 'limit' or 'offset' with delete."
del_query = self._clone()
# Disable non-supported fields.
del_query.query.clear_ordering()
return del_query.query.delete()
def exists(self):
if self._result_cache is None:
return self.query.has_results()
return bool(self._result_cache)
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one.
"""
return self._clone()
def filter(self, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
if kwargs:
assert self.query.can_filter(), "Cannot filter a query once a slice has been taken."
clone = self._clone()
clone.query.add_filters(**kwargs)
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause.
"""
if self.query.order_by:
return True
else:
return False
###################
# PRIVATE METHODS #
###################
def _result_iter(self):
pos = 0
while True:
upper = len(self._result_cache)
while pos < upper:
yield self._result_cache[pos]
pos += 1
if not self._iter:
raise StopIteration
if len(self._result_cache) <= pos:
self._fill_cache()
def _fill_cache(self, num=None):
"""
Fills the result cache with 'num' more entries (or until the results
iterator is exhausted).
"""
if self._iter:
try:
for i in range(num or ITER_CHUNK_SIZE):
self._result_cache.append(next(self._iter))
except StopIteration:
self._iter = None
def _clone(self, klass=None, setup=False, **kwargs):
if klass is None:
klass = self.__class__
query = self.query.clone()
c = klass(resource=self.resource, query=query)
c.__dict__.update(kwargs)
return c
|
dstufft/crust
|
crust/query.py
|
QuerySet._fill_cache
|
python
|
def _fill_cache(self, num=None):
if self._iter:
try:
for i in range(num or ITER_CHUNK_SIZE):
self._result_cache.append(next(self._iter))
except StopIteration:
self._iter = None
|
Fills the result cache with 'num' more entries (or until the results
iterator is exhausted).
|
train
|
https://github.com/dstufft/crust/blob/5d4011ecace12fd3f68a03a17dbefb78390a9fc0/crust/query.py#L501-L511
| null |
class QuerySet(object):
"""
Represents a lazy api lookup for a set of objects.
"""
def __init__(self, resource, query=None, *args, **kwargs):
super(QuerySet, self).__init__(*args, **kwargs)
self.resource = resource
self.query = query or Query(self.resource)
self._result_cache = None
self._iter = None
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k, v in six.iteritems(self.__dict__):
if k in ("_iter", "_result_cache"):
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
len(self)
obj_dict = self.__dict__.copy()
obj_dict["_iter"] = None
return obj_dict
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __len__(self):
# Since __len__ is called quite frequently (for example, as part of
# list(qs), we make some effort here to be as efficient as possible
# whilst not messing up any existing iterators against the QuerySet.
if self._result_cache is None:
if self._iter:
self._result_cache = list(self._iter)
else:
self._result_cache = list(self.iterator())
elif self._iter:
self._result_cache.extend(self._iter)
return len(self._result_cache)
def __iter__(self):
if self._result_cache is None:
self._iter = self.iterator()
self._result_cache = []
if self._iter:
return self._result_iter()
# Python's list iterator is better than our version when we're just
# iterating over the cache.
return iter(self._result_cache)
def __nonzero__(self):
if self._result_cache is not None:
return bool(self._result_cache)
try:
next(iter(self))
except StopIteration:
return False
return True
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice,) + six.integer_types):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
if self._iter is not None:
# The result cache has only been partially populated, so we may
# need to fill it out a bit more.
if isinstance(k, slice):
if k.stop is not None:
# Some people insist on passing in strings here.
bound = int(k.stop)
else:
bound = None
else:
bound = k + 1
if len(self._result_cache) < bound:
self._fill_cache(bound - len(self._result_cache))
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return k.step and list(qs)[::k.step] or qs
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
###############################
# METHODS THAT DO API QUERIES #
###############################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the api.
"""
for item in self.query.results():
obj = self.resource(**item)
yield obj
def count(self):
"""
Returns the number of records as an integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid an api call.
"""
if self._result_cache is not None and not self._iter:
return len(self._result_cache)
return self.query.get_count()
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter():
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.resource.DoesNotExist(
"%s matching query does not exist. "
"Lookup parameters were %s" %
(self.resource._meta.resource_name, kwargs))
raise self.resource.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s! "
"Lookup parameters were %s" %
(self.resource._meta.resource_name, num, kwargs))
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the api
and returning the created object.
"""
obj = self.resource(**kwargs)
obj.save(force_insert=True)
return obj
def get_or_create(self, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
assert kwargs, "get_or_create() must be passed at least one keyword argument"
defaults = kwargs.pop("defaults", {})
lookup = kwargs.copy()
try:
return self.get(**lookup), False
except self.resource.DoesNotExist:
params = dict([(k, v) for k, v in kwargs.items()])
params.update(defaults)
obj = self.create(**params)
return obj, True
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
assert self.query.can_filter(), "Cannot use 'limit' or 'offset' with delete."
del_query = self._clone()
# Disable non-supported fields.
del_query.query.clear_ordering()
return del_query.query.delete()
def exists(self):
if self._result_cache is None:
return self.query.has_results()
return bool(self._result_cache)
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one.
"""
return self._clone()
def filter(self, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
if kwargs:
assert self.query.can_filter(), "Cannot filter a query once a slice has been taken."
clone = self._clone()
clone.query.add_filters(**kwargs)
return clone
def order_by(self, field_name=None):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), "Cannot reorder a query once a slice has been taken."
clone = self._clone()
clone.query.clear_ordering()
if field_name is not None:
clone.query.add_ordering(field_name)
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause.
"""
if self.query.order_by:
return True
else:
return False
###################
# PRIVATE METHODS #
###################
def _result_iter(self):
pos = 0
while True:
upper = len(self._result_cache)
while pos < upper:
yield self._result_cache[pos]
pos += 1
if not self._iter:
raise StopIteration
if len(self._result_cache) <= pos:
self._fill_cache()
def _clone(self, klass=None, setup=False, **kwargs):
if klass is None:
klass = self.__class__
query = self.query.clone()
c = klass(resource=self.resource, query=query)
c.__dict__.update(kwargs)
return c
|
Othernet-Project/conz
|
conz/console.py
|
Console.pstd
|
python
|
def pstd(self, *args, **kwargs):
kwargs['file'] = self.out
self.print(*args, **kwargs)
sys.stdout.flush()
|
Console to STDOUT
|
train
|
https://github.com/Othernet-Project/conz/blob/051214fa95a837c21595b03426a2c54c522d07a0/conz/console.py#L67-L71
|
[
"def print(self, *args, **kwargs):\n \"\"\" Thin wrapper around print\n\n All other methods must go through this method for all printing needs.\n \"\"\"\n print(*args, **kwargs)\n"
] |
class Console:
"""
Wrapper around print with helper methods that cover typical ``print()``
usage in console programs.
"""
ProgressEnd = progress.ProgressEnd
ProgressOK = progress.ProgressOK
ProgressAbrt = progress.ProgressAbrt
color = ansi_colors.color
def __init__(self, verbose=False, stdout=sys.stdout, stderr=sys.stderr,
debug=False):
"""
``verbose`` flag controls suppression of verbose outputs (those printed
using ``pverb()`` method). The verbose output is usually a helpful
message for interactive applications, but may break other scripts in
pipes.
``stdout`` and ``stderrr`` are the default STDOUT file for all
``print()`` calls.
To enable debugging (e.g., printing stack traces), use the ``debug``
argument and set it to ``True``.
"""
self.verbose = verbose
self.out = stdout
self.err = stderr
self.register_signals()
self.debug = debug
def print(self, *args, **kwargs):
""" Thin wrapper around print
All other methods must go through this method for all printing needs.
"""
print(*args, **kwargs)
def perr(self, *args, **kwargs):
""" Console to STERR """
kwargs['file'] = self.err
self.print(*args, **kwargs)
sys.stderr.flush()
def pok(self, val, ok='OK'):
""" Print val: OK in green on STDOUT """
self.pstd(self.color.green('{}: {}'.format(val, ok)))
def png(self, val, ng='ERR'):
""" Print val: ERR in red on STDOUT """
self.pstd(self.color.red('{}: {}'.format(val, ng)))
def pwa(self, val, wa='WARN'):
""" Print val: WARN in yellow on STDOUT """
self.pstd(self.color.yellow('{}: {}'.format(val, wa)))
def pverr(self, val, msg, *args, **kwargs):
kwargs.setdefault('file', self.err)
self.print('{}: {}'.format(val, msg), *args, **kwargs)
def pverb(self, *args, **kwargs):
""" Console verbose message to STDOUT """
if not self.verbose:
return
self.pstd(*args, **kwargs)
def quit(self, code=0):
sys.exit(code)
def read(self, prompt='', clean=lambda x: x):
""" Display a prompt and ask user for input
A function to clean the user input can be passed as ``clean`` argument.
This function takes a single value, which is the string user entered,
and returns a cleaned value. Default is a pass-through function, which
is an equivalent of::
def clean(val):
return val
"""
ans = read(prompt + ' ')
return clean(ans)
def rvpl(self, prompt, error='Entered value is invalid', intro=None,
validator=lambda x: x != '', clean=lambda x: x.strip(),
strict=True, default=None):
""" Start a read-validate-print loop
The RVPL will read the user input, validate it, and loop until the
entered value passes the validation, then return it.
Error message can be customized using the ``error`` argument. If the
value is a callable, it will be called with the value and it will be
expected to return a printable message. Exceptions raised by the
``error`` function are not trapped.
When ``intro`` is passed, it is printed above the prompt.
The ``validator`` argument is is a function that validates the user
input. Default validator simply validates if user entered any value.
The ``clean`` argument specifies a function for the ``read()`` method
with the same semantics.
"""
if intro:
self.pstd(utils.rewrap_long(intro))
val = self.read(prompt, clean)
while not validator(val):
if not strict:
return default
if hasattr(error, '__call__'):
self.perr(error(val))
else:
self.perr(error)
val = self.read(prompt, clean)
return val
def yesno(self, prompt, error='Please type either y or n', intro=None,
default=None):
""" Ask user for yes or no answer
The prompt will include a typical '(y/n):' at the end. Depending on
whether ``default`` was specified, this may also be '(Y/n):' or
'(y/N):'.
The ``default`` argument can be ``True`` or ``False``, with meaning of
'yes' and 'no' respectively. Default is ``None`` which means no
default. When default value is specified, malformed or empty response
will cause the ``default`` value to be returned.
Optional ``intro`` text can be specified which will be shown above the
prompt.
"""
if default is None:
prompt += ' (y/n):'
else:
if default is True:
prompt += ' (Y/n):'
default = 'y'
if default is False:
prompt += ' (y/N):'
default = 'n'
validator = lambda x: x in ['y', 'yes', 'n', 'no']
val = self.rvpl(prompt, error=error, intro=intro, validator=validator,
clean=lambda x: x.strip().lower(),
strict=default is None, default=default)
return val in ['y', 'yes']
def menu(self, choices, prompt='Please choose from the provided options:',
error='Invalid choice', intro=None, strict=True, default=None,
numerator=lambda x: [i + 1 for i in range(x)],
formatter=lambda x, y: '{0:>3}) {1}'.format(x, y),
clean=utils.safeint):
""" Print a menu
The choices must be an iterable of two-tuples where the first value is
the value of the menu item, and the second is the label for that
matches the value.
The menu will be printed with numeric choices. For example::
1) foo
2) bar
Formatting of the number is controlled by the formatter function which
can be overridden by passing the ``formatter`` argument.
The numbers used for the menu are generated using the numerator
function which can be specified using the ``numerator`` function. This
function must take the number of choices and return the same number of
items that will be used as choice characters as a list.
The cleaner function is passed to ``pvpl()`` method can be customized
using ``clean`` argument. This function should generally be customized
whenever ``numerator`` is customized, as default cleaner converts
input to integers to match the default numerator.
Optional ``intro`` argument can be passed to print a message above the
menu.
The return value of this method is the value user has chosen. The
prompt will keep asking the user for input until a valid choice is
selected. Each time an invalid selection is made, error message is
printed. This message can be customized using ``error`` argument.
If ``strict`` argument is set, then only values in choices are allowed,
otherwise any value will be allowed. The ``default`` argument can be
used to define what value is returned in case user select an invalid
value when strict checking is off.
"""
numbers = list(numerator(len(choices)))
labels = (label for _, label in choices)
values = [value for value, _ in choices]
# Print intro and menu itself
if intro:
self.pstd('\n' + utils.rewrap_long(intro))
for n, label in zip(numbers, labels):
self.pstd(formatter(n, label))
# Define the validator
validator = lambda x: x in numbers
val = self.rvpl(prompt, error=error, validator=validator, clean=clean,
strict=strict, default=default)
if not strict and val == default:
return val
return values[numbers.index(val)]
def readpipe(self, chunk=None):
""" Return iterator that iterates over STDIN line by line
If ``chunk`` is set to a positive non-zero integer value, then the
reads are performed in chunks of that many lines, and returned as a
list. Otherwise the lines are returned one by one.
"""
read = []
while True:
l = sys.stdin.readline()
if not l:
if read:
yield read
return
return
if not chunk:
yield l
else:
read.append(l)
if len(read) == chunk:
yield read
@property
def interm(self):
return hasattr(sys.stdin, 'isatty') and sys.stdin.isatty()
@property
def outterm(self):
return hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
def register_signals(self):
signal.signal(signal.SIGINT, self.onint)
signal.signal(signal.SIGPIPE, self.onpipe)
def onint(self, signum, exc):
self.perr('\nQuitting program due to keyboard interrupt')
self.quit(1)
def onpipe(self, signup, exc):
self.quit(1)
def error(self, msg='Program error: {err}', exit=None):
""" Error handler factory
This function takes a message with optional ``{err}`` placeholder and
returns a function that takes an exception object, prints the error
message to STDERR and optionally quits.
If no message is supplied (e.g., passing ``None`` or ``False`` or empty
string), then nothing is output to STDERR.
The ``exit`` argument can be set to a non-zero value, in which case the
program quits after printing the message using its value as return
value of the program.
The returned function can be used with the ``progress()`` context
manager as error handler.
"""
def handler(exc):
if msg:
self.perr(msg.format(err=exc))
if exit is not None:
self.quit(exit)
return handler
@contextlib.contextmanager
def progress(self, msg, onerror=None, sep='...', end='DONE', abrt='FAIL',
prog='.', excs=(Exception,), reraise=True):
""" Context manager for handling interactive prog indication
This context manager streamlines presenting banners and prog
indicators. To start the prog, pass ``msg`` argument as a start
message. For example::
printer = Console(verbose=True)
with printer.progress('Checking files') as prog:
# Do some checks
if errors:
prog.abrt()
prog.end()
The context manager returns a ``Progress`` instance, which provides
methods like ``abrt()`` (abort), ``end()`` (end), and ``prog()`` (print
prog indicator).
The prog methods like ``abrt()`` and ``end()`` will raise an
exception that interrupts the prog. These exceptions are
``ProgressEnd`` exception subclasses and are ``ProgressAbrt`` and
``ProgressOK`` respectively. They are silenced and not handled in any
way as they only serve the purpose of flow control.
Other exceptions are trapped and ``abrt()`` is called. The exceptions
that should be trapped can be customized using the ``excs`` argument,
which should be a tuple of exception classes.
If a handler function is passed using ``onerror`` argument, then this
function takes the raised exception and handles it. By default, the
``error()`` factory is called with no arguments to generate the default
error handler. If string is passed, then ``error()`` factory is called
with that string.
Finally, when prog is aborted either naturally or when exception is
raised, it is possible to reraise the ``ProgressAbrt`` exception. This
is done using the ``reraise`` flag. Default is to reraise.
"""
if not onerror:
onerror = self.error()
if type(onerror) is str:
onerror = self.error(msg=onerror)
self.pverb(msg, end=sep)
prog = progress.Progress(self.pverb, end=end, abrt=abrt, prog=prog)
try:
yield prog
prog.end()
except self.ProgressOK:
pass
except self.ProgressAbrt as err:
if reraise:
raise err
except KeyboardInterrupt:
raise
except excs as err:
prog.abrt(noraise=True)
if onerror:
onerror(err)
if self.debug:
traceback.print_exc()
if reraise:
raise self.ProgressAbrt()
|
Othernet-Project/conz
|
conz/console.py
|
Console.perr
|
python
|
def perr(self, *args, **kwargs):
kwargs['file'] = self.err
self.print(*args, **kwargs)
sys.stderr.flush()
|
Console to STERR
|
train
|
https://github.com/Othernet-Project/conz/blob/051214fa95a837c21595b03426a2c54c522d07a0/conz/console.py#L73-L77
|
[
"def print(self, *args, **kwargs):\n \"\"\" Thin wrapper around print\n\n All other methods must go through this method for all printing needs.\n \"\"\"\n print(*args, **kwargs)\n"
] |
class Console:
"""
Wrapper around print with helper methods that cover typical ``print()``
usage in console programs.
"""
ProgressEnd = progress.ProgressEnd
ProgressOK = progress.ProgressOK
ProgressAbrt = progress.ProgressAbrt
color = ansi_colors.color
def __init__(self, verbose=False, stdout=sys.stdout, stderr=sys.stderr,
debug=False):
"""
``verbose`` flag controls suppression of verbose outputs (those printed
using ``pverb()`` method). The verbose output is usually a helpful
message for interactive applications, but may break other scripts in
pipes.
``stdout`` and ``stderrr`` are the default STDOUT file for all
``print()`` calls.
To enable debugging (e.g., printing stack traces), use the ``debug``
argument and set it to ``True``.
"""
self.verbose = verbose
self.out = stdout
self.err = stderr
self.register_signals()
self.debug = debug
def print(self, *args, **kwargs):
""" Thin wrapper around print
All other methods must go through this method for all printing needs.
"""
print(*args, **kwargs)
def pstd(self, *args, **kwargs):
""" Console to STDOUT """
kwargs['file'] = self.out
self.print(*args, **kwargs)
sys.stdout.flush()
def pok(self, val, ok='OK'):
""" Print val: OK in green on STDOUT """
self.pstd(self.color.green('{}: {}'.format(val, ok)))
def png(self, val, ng='ERR'):
""" Print val: ERR in red on STDOUT """
self.pstd(self.color.red('{}: {}'.format(val, ng)))
def pwa(self, val, wa='WARN'):
""" Print val: WARN in yellow on STDOUT """
self.pstd(self.color.yellow('{}: {}'.format(val, wa)))
def pverr(self, val, msg, *args, **kwargs):
kwargs.setdefault('file', self.err)
self.print('{}: {}'.format(val, msg), *args, **kwargs)
def pverb(self, *args, **kwargs):
""" Console verbose message to STDOUT """
if not self.verbose:
return
self.pstd(*args, **kwargs)
def quit(self, code=0):
sys.exit(code)
def read(self, prompt='', clean=lambda x: x):
""" Display a prompt and ask user for input
A function to clean the user input can be passed as ``clean`` argument.
This function takes a single value, which is the string user entered,
and returns a cleaned value. Default is a pass-through function, which
is an equivalent of::
def clean(val):
return val
"""
ans = read(prompt + ' ')
return clean(ans)
def rvpl(self, prompt, error='Entered value is invalid', intro=None,
validator=lambda x: x != '', clean=lambda x: x.strip(),
strict=True, default=None):
""" Start a read-validate-print loop
The RVPL will read the user input, validate it, and loop until the
entered value passes the validation, then return it.
Error message can be customized using the ``error`` argument. If the
value is a callable, it will be called with the value and it will be
expected to return a printable message. Exceptions raised by the
``error`` function are not trapped.
When ``intro`` is passed, it is printed above the prompt.
The ``validator`` argument is is a function that validates the user
input. Default validator simply validates if user entered any value.
The ``clean`` argument specifies a function for the ``read()`` method
with the same semantics.
"""
if intro:
self.pstd(utils.rewrap_long(intro))
val = self.read(prompt, clean)
while not validator(val):
if not strict:
return default
if hasattr(error, '__call__'):
self.perr(error(val))
else:
self.perr(error)
val = self.read(prompt, clean)
return val
def yesno(self, prompt, error='Please type either y or n', intro=None,
default=None):
""" Ask user for yes or no answer
The prompt will include a typical '(y/n):' at the end. Depending on
whether ``default`` was specified, this may also be '(Y/n):' or
'(y/N):'.
The ``default`` argument can be ``True`` or ``False``, with meaning of
'yes' and 'no' respectively. Default is ``None`` which means no
default. When default value is specified, malformed or empty response
will cause the ``default`` value to be returned.
Optional ``intro`` text can be specified which will be shown above the
prompt.
"""
if default is None:
prompt += ' (y/n):'
else:
if default is True:
prompt += ' (Y/n):'
default = 'y'
if default is False:
prompt += ' (y/N):'
default = 'n'
validator = lambda x: x in ['y', 'yes', 'n', 'no']
val = self.rvpl(prompt, error=error, intro=intro, validator=validator,
clean=lambda x: x.strip().lower(),
strict=default is None, default=default)
return val in ['y', 'yes']
def menu(self, choices, prompt='Please choose from the provided options:',
error='Invalid choice', intro=None, strict=True, default=None,
numerator=lambda x: [i + 1 for i in range(x)],
formatter=lambda x, y: '{0:>3}) {1}'.format(x, y),
clean=utils.safeint):
""" Print a menu
The choices must be an iterable of two-tuples where the first value is
the value of the menu item, and the second is the label for that
matches the value.
The menu will be printed with numeric choices. For example::
1) foo
2) bar
Formatting of the number is controlled by the formatter function which
can be overridden by passing the ``formatter`` argument.
The numbers used for the menu are generated using the numerator
function which can be specified using the ``numerator`` function. This
function must take the number of choices and return the same number of
items that will be used as choice characters as a list.
The cleaner function is passed to ``pvpl()`` method can be customized
using ``clean`` argument. This function should generally be customized
whenever ``numerator`` is customized, as default cleaner converts
input to integers to match the default numerator.
Optional ``intro`` argument can be passed to print a message above the
menu.
The return value of this method is the value user has chosen. The
prompt will keep asking the user for input until a valid choice is
selected. Each time an invalid selection is made, error message is
printed. This message can be customized using ``error`` argument.
If ``strict`` argument is set, then only values in choices are allowed,
otherwise any value will be allowed. The ``default`` argument can be
used to define what value is returned in case user select an invalid
value when strict checking is off.
"""
numbers = list(numerator(len(choices)))
labels = (label for _, label in choices)
values = [value for value, _ in choices]
# Print intro and menu itself
if intro:
self.pstd('\n' + utils.rewrap_long(intro))
for n, label in zip(numbers, labels):
self.pstd(formatter(n, label))
# Define the validator
validator = lambda x: x in numbers
val = self.rvpl(prompt, error=error, validator=validator, clean=clean,
strict=strict, default=default)
if not strict and val == default:
return val
return values[numbers.index(val)]
def readpipe(self, chunk=None):
""" Return iterator that iterates over STDIN line by line
If ``chunk`` is set to a positive non-zero integer value, then the
reads are performed in chunks of that many lines, and returned as a
list. Otherwise the lines are returned one by one.
"""
read = []
while True:
l = sys.stdin.readline()
if not l:
if read:
yield read
return
return
if not chunk:
yield l
else:
read.append(l)
if len(read) == chunk:
yield read
@property
def interm(self):
return hasattr(sys.stdin, 'isatty') and sys.stdin.isatty()
@property
def outterm(self):
return hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
def register_signals(self):
signal.signal(signal.SIGINT, self.onint)
signal.signal(signal.SIGPIPE, self.onpipe)
def onint(self, signum, exc):
self.perr('\nQuitting program due to keyboard interrupt')
self.quit(1)
def onpipe(self, signup, exc):
self.quit(1)
def error(self, msg='Program error: {err}', exit=None):
""" Error handler factory
This function takes a message with optional ``{err}`` placeholder and
returns a function that takes an exception object, prints the error
message to STDERR and optionally quits.
If no message is supplied (e.g., passing ``None`` or ``False`` or empty
string), then nothing is output to STDERR.
The ``exit`` argument can be set to a non-zero value, in which case the
program quits after printing the message using its value as return
value of the program.
The returned function can be used with the ``progress()`` context
manager as error handler.
"""
def handler(exc):
if msg:
self.perr(msg.format(err=exc))
if exit is not None:
self.quit(exit)
return handler
@contextlib.contextmanager
def progress(self, msg, onerror=None, sep='...', end='DONE', abrt='FAIL',
prog='.', excs=(Exception,), reraise=True):
""" Context manager for handling interactive prog indication
This context manager streamlines presenting banners and prog
indicators. To start the prog, pass ``msg`` argument as a start
message. For example::
printer = Console(verbose=True)
with printer.progress('Checking files') as prog:
# Do some checks
if errors:
prog.abrt()
prog.end()
The context manager returns a ``Progress`` instance, which provides
methods like ``abrt()`` (abort), ``end()`` (end), and ``prog()`` (print
prog indicator).
The prog methods like ``abrt()`` and ``end()`` will raise an
exception that interrupts the prog. These exceptions are
``ProgressEnd`` exception subclasses and are ``ProgressAbrt`` and
``ProgressOK`` respectively. They are silenced and not handled in any
way as they only serve the purpose of flow control.
Other exceptions are trapped and ``abrt()`` is called. The exceptions
that should be trapped can be customized using the ``excs`` argument,
which should be a tuple of exception classes.
If a handler function is passed using ``onerror`` argument, then this
function takes the raised exception and handles it. By default, the
``error()`` factory is called with no arguments to generate the default
error handler. If string is passed, then ``error()`` factory is called
with that string.
Finally, when prog is aborted either naturally or when exception is
raised, it is possible to reraise the ``ProgressAbrt`` exception. This
is done using the ``reraise`` flag. Default is to reraise.
"""
if not onerror:
onerror = self.error()
if type(onerror) is str:
onerror = self.error(msg=onerror)
self.pverb(msg, end=sep)
prog = progress.Progress(self.pverb, end=end, abrt=abrt, prog=prog)
try:
yield prog
prog.end()
except self.ProgressOK:
pass
except self.ProgressAbrt as err:
if reraise:
raise err
except KeyboardInterrupt:
raise
except excs as err:
prog.abrt(noraise=True)
if onerror:
onerror(err)
if self.debug:
traceback.print_exc()
if reraise:
raise self.ProgressAbrt()
|
Othernet-Project/conz
|
conz/console.py
|
Console.pok
|
python
|
def pok(self, val, ok='OK'):
self.pstd(self.color.green('{}: {}'.format(val, ok)))
|
Print val: OK in green on STDOUT
|
train
|
https://github.com/Othernet-Project/conz/blob/051214fa95a837c21595b03426a2c54c522d07a0/conz/console.py#L79-L81
|
[
"def pstd(self, *args, **kwargs):\n \"\"\" Console to STDOUT \"\"\"\n kwargs['file'] = self.out\n self.print(*args, **kwargs)\n sys.stdout.flush()\n"
] |
class Console:
"""
Wrapper around print with helper methods that cover typical ``print()``
usage in console programs.
"""
ProgressEnd = progress.ProgressEnd
ProgressOK = progress.ProgressOK
ProgressAbrt = progress.ProgressAbrt
color = ansi_colors.color
def __init__(self, verbose=False, stdout=sys.stdout, stderr=sys.stderr,
debug=False):
"""
``verbose`` flag controls suppression of verbose outputs (those printed
using ``pverb()`` method). The verbose output is usually a helpful
message for interactive applications, but may break other scripts in
pipes.
``stdout`` and ``stderrr`` are the default STDOUT file for all
``print()`` calls.
To enable debugging (e.g., printing stack traces), use the ``debug``
argument and set it to ``True``.
"""
self.verbose = verbose
self.out = stdout
self.err = stderr
self.register_signals()
self.debug = debug
def print(self, *args, **kwargs):
""" Thin wrapper around print
All other methods must go through this method for all printing needs.
"""
print(*args, **kwargs)
def pstd(self, *args, **kwargs):
""" Console to STDOUT """
kwargs['file'] = self.out
self.print(*args, **kwargs)
sys.stdout.flush()
def perr(self, *args, **kwargs):
""" Console to STERR """
kwargs['file'] = self.err
self.print(*args, **kwargs)
sys.stderr.flush()
def png(self, val, ng='ERR'):
""" Print val: ERR in red on STDOUT """
self.pstd(self.color.red('{}: {}'.format(val, ng)))
def pwa(self, val, wa='WARN'):
""" Print val: WARN in yellow on STDOUT """
self.pstd(self.color.yellow('{}: {}'.format(val, wa)))
def pverr(self, val, msg, *args, **kwargs):
kwargs.setdefault('file', self.err)
self.print('{}: {}'.format(val, msg), *args, **kwargs)
def pverb(self, *args, **kwargs):
""" Console verbose message to STDOUT """
if not self.verbose:
return
self.pstd(*args, **kwargs)
def quit(self, code=0):
sys.exit(code)
def read(self, prompt='', clean=lambda x: x):
""" Display a prompt and ask user for input
A function to clean the user input can be passed as ``clean`` argument.
This function takes a single value, which is the string user entered,
and returns a cleaned value. Default is a pass-through function, which
is an equivalent of::
def clean(val):
return val
"""
ans = read(prompt + ' ')
return clean(ans)
def rvpl(self, prompt, error='Entered value is invalid', intro=None,
validator=lambda x: x != '', clean=lambda x: x.strip(),
strict=True, default=None):
""" Start a read-validate-print loop
The RVPL will read the user input, validate it, and loop until the
entered value passes the validation, then return it.
Error message can be customized using the ``error`` argument. If the
value is a callable, it will be called with the value and it will be
expected to return a printable message. Exceptions raised by the
``error`` function are not trapped.
When ``intro`` is passed, it is printed above the prompt.
The ``validator`` argument is is a function that validates the user
input. Default validator simply validates if user entered any value.
The ``clean`` argument specifies a function for the ``read()`` method
with the same semantics.
"""
if intro:
self.pstd(utils.rewrap_long(intro))
val = self.read(prompt, clean)
while not validator(val):
if not strict:
return default
if hasattr(error, '__call__'):
self.perr(error(val))
else:
self.perr(error)
val = self.read(prompt, clean)
return val
def yesno(self, prompt, error='Please type either y or n', intro=None,
default=None):
""" Ask user for yes or no answer
The prompt will include a typical '(y/n):' at the end. Depending on
whether ``default`` was specified, this may also be '(Y/n):' or
'(y/N):'.
The ``default`` argument can be ``True`` or ``False``, with meaning of
'yes' and 'no' respectively. Default is ``None`` which means no
default. When default value is specified, malformed or empty response
will cause the ``default`` value to be returned.
Optional ``intro`` text can be specified which will be shown above the
prompt.
"""
if default is None:
prompt += ' (y/n):'
else:
if default is True:
prompt += ' (Y/n):'
default = 'y'
if default is False:
prompt += ' (y/N):'
default = 'n'
validator = lambda x: x in ['y', 'yes', 'n', 'no']
val = self.rvpl(prompt, error=error, intro=intro, validator=validator,
clean=lambda x: x.strip().lower(),
strict=default is None, default=default)
return val in ['y', 'yes']
def menu(self, choices, prompt='Please choose from the provided options:',
error='Invalid choice', intro=None, strict=True, default=None,
numerator=lambda x: [i + 1 for i in range(x)],
formatter=lambda x, y: '{0:>3}) {1}'.format(x, y),
clean=utils.safeint):
""" Print a menu
The choices must be an iterable of two-tuples where the first value is
the value of the menu item, and the second is the label for that
matches the value.
The menu will be printed with numeric choices. For example::
1) foo
2) bar
Formatting of the number is controlled by the formatter function which
can be overridden by passing the ``formatter`` argument.
The numbers used for the menu are generated using the numerator
function which can be specified using the ``numerator`` function. This
function must take the number of choices and return the same number of
items that will be used as choice characters as a list.
The cleaner function is passed to ``pvpl()`` method can be customized
using ``clean`` argument. This function should generally be customized
whenever ``numerator`` is customized, as default cleaner converts
input to integers to match the default numerator.
Optional ``intro`` argument can be passed to print a message above the
menu.
The return value of this method is the value user has chosen. The
prompt will keep asking the user for input until a valid choice is
selected. Each time an invalid selection is made, error message is
printed. This message can be customized using ``error`` argument.
If ``strict`` argument is set, then only values in choices are allowed,
otherwise any value will be allowed. The ``default`` argument can be
used to define what value is returned in case user select an invalid
value when strict checking is off.
"""
numbers = list(numerator(len(choices)))
labels = (label for _, label in choices)
values = [value for value, _ in choices]
# Print intro and menu itself
if intro:
self.pstd('\n' + utils.rewrap_long(intro))
for n, label in zip(numbers, labels):
self.pstd(formatter(n, label))
# Define the validator
validator = lambda x: x in numbers
val = self.rvpl(prompt, error=error, validator=validator, clean=clean,
strict=strict, default=default)
if not strict and val == default:
return val
return values[numbers.index(val)]
def readpipe(self, chunk=None):
""" Return iterator that iterates over STDIN line by line
If ``chunk`` is set to a positive non-zero integer value, then the
reads are performed in chunks of that many lines, and returned as a
list. Otherwise the lines are returned one by one.
"""
read = []
while True:
l = sys.stdin.readline()
if not l:
if read:
yield read
return
return
if not chunk:
yield l
else:
read.append(l)
if len(read) == chunk:
yield read
@property
def interm(self):
return hasattr(sys.stdin, 'isatty') and sys.stdin.isatty()
@property
def outterm(self):
return hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
def register_signals(self):
signal.signal(signal.SIGINT, self.onint)
signal.signal(signal.SIGPIPE, self.onpipe)
def onint(self, signum, exc):
self.perr('\nQuitting program due to keyboard interrupt')
self.quit(1)
def onpipe(self, signup, exc):
self.quit(1)
def error(self, msg='Program error: {err}', exit=None):
""" Error handler factory
This function takes a message with optional ``{err}`` placeholder and
returns a function that takes an exception object, prints the error
message to STDERR and optionally quits.
If no message is supplied (e.g., passing ``None`` or ``False`` or empty
string), then nothing is output to STDERR.
The ``exit`` argument can be set to a non-zero value, in which case the
program quits after printing the message using its value as return
value of the program.
The returned function can be used with the ``progress()`` context
manager as error handler.
"""
def handler(exc):
if msg:
self.perr(msg.format(err=exc))
if exit is not None:
self.quit(exit)
return handler
@contextlib.contextmanager
def progress(self, msg, onerror=None, sep='...', end='DONE', abrt='FAIL',
             prog='.', excs=(Exception,), reraise=True):
    """ Context manager for handling interactive progress indication

    This context manager streamlines presenting banners and progress
    indicators. To start the progress, pass ``msg`` argument as a start
    message. For example::

        printer = Console(verbose=True)
        with printer.progress('Checking files') as prog:
            # Do some checks
            if errors:
                prog.abrt()
            prog.end()

    The context manager returns a ``Progress`` instance, which provides
    methods like ``abrt()`` (abort), ``end()`` (end), and ``prog()`` (print
    progress indicator).

    The progress methods like ``abrt()`` and ``end()`` will raise an
    exception that interrupts the progress. These exceptions are
    ``ProgressEnd`` exception subclasses and are ``ProgressAbrt`` and
    ``ProgressOK`` respectively. They are silenced and not handled in any
    way as they only serve the purpose of flow control.

    Other exceptions are trapped and ``abrt()`` is called. The exceptions
    that should be trapped can be customized using the ``excs`` argument,
    which should be a tuple of exception classes.

    If a handler function is passed using ``onerror`` argument, then this
    function takes the raised exception and handles it. By default, the
    ``error()`` factory is called with no arguments to generate the default
    error handler. If string is passed, then ``error()`` factory is called
    with that string.

    Finally, when progress is aborted either naturally or when exception is
    raised, it is possible to reraise the ``ProgressAbrt`` exception. This
    is done using the ``reraise`` flag. Default is to reraise.
    """
    if not onerror:
        onerror = self.error()
    # A string ``onerror`` is shorthand for an error() handler with that
    # message template.
    if type(onerror) is str:
        onerror = self.error(msg=onerror)
    # Print the banner without a newline so DONE/FAIL lands on the same line.
    self.pverb(msg, end=sep)
    # The ``prog`` parameter (indicator character) is deliberately rebound
    # here to a Progress instance; ``progress`` below is the module, not
    # this method.
    prog = progress.Progress(self.pverb, end=end, abrt=abrt, prog=prog)
    try:
        yield prog
        # Falling off the end of the with-block counts as success.
        prog.end()
    except self.ProgressOK:
        # Flow-control signal raised by prog.end(); nothing to do.
        pass
    except self.ProgressAbrt as err:
        if reraise:
            raise err
    except KeyboardInterrupt:
        # Let the SIGINT handler / caller deal with user interruption.
        raise
    except excs as err:
        # Any other trapped exception aborts the indicator, invokes the
        # error handler, and (optionally) resurfaces as ProgressAbrt.
        prog.abrt(noraise=True)
        if onerror:
            onerror(err)
        if self.debug:
            traceback.print_exc()
        if reraise:
            raise self.ProgressAbrt()
|
Othernet-Project/conz
|
conz/console.py
|
Console.png
|
python
|
def png(self, val, ng='ERR'):
self.pstd(self.color.red('{}: {}'.format(val, ng)))
|
Print val: ERR in red on STDOUT
|
train
|
https://github.com/Othernet-Project/conz/blob/051214fa95a837c21595b03426a2c54c522d07a0/conz/console.py#L83-L85
|
[
"def pstd(self, *args, **kwargs):\n \"\"\" Console to STDOUT \"\"\"\n kwargs['file'] = self.out\n self.print(*args, **kwargs)\n sys.stdout.flush()\n"
] |
class Console:
    """
    Wrapper around print with helper methods that cover typical ``print()``
    usage in console programs.
    """

    # Flow-control exceptions raised/caught by the progress() context
    # manager, re-exported so callers can reference them via this class.
    ProgressEnd = progress.ProgressEnd
    ProgressOK = progress.ProgressOK
    ProgressAbrt = progress.ProgressAbrt
    # ANSI color helper used by pok()/pwa() and similar methods.
    color = ansi_colors.color

    def __init__(self, verbose=False, stdout=sys.stdout, stderr=sys.stderr,
                 debug=False):
        """
        ``verbose`` flag controls suppression of verbose outputs (those printed
        using ``pverb()`` method). The verbose output is usually a helpful
        message for interactive applications, but may break other scripts in
        pipes.

        ``stdout`` and ``stderr`` are the default output files for all
        ``print()`` calls.

        To enable debugging (e.g., printing stack traces), use the ``debug``
        argument and set it to ``True``.
        """
        self.verbose = verbose
        self.out = stdout
        self.err = stderr
        # Install SIGINT/SIGPIPE handlers as soon as the console is created.
        self.register_signals()
        self.debug = debug

    def print(self, *args, **kwargs):
        """ Thin wrapper around print

        All other methods must go through this method for all printing needs.
        """
        print(*args, **kwargs)

    def pstd(self, *args, **kwargs):
        """ Print to STDOUT """
        kwargs['file'] = self.out
        self.print(*args, **kwargs)
        # Flush so output appears immediately even when piped/buffered.
        sys.stdout.flush()

    def perr(self, *args, **kwargs):
        """ Print to STDERR """
        kwargs['file'] = self.err
        self.print(*args, **kwargs)
        sys.stderr.flush()

    def pok(self, val, ok='OK'):
        """ Print val: OK in green on STDOUT """
        self.pstd(self.color.green('{}: {}'.format(val, ok)))

    def pwa(self, val, wa='WARN'):
        """ Print val: WARN in yellow on STDOUT """
        self.pstd(self.color.yellow('{}: {}'.format(val, wa)))

    def pverr(self, val, msg, *args, **kwargs):
        """ Print ``val: msg``, to STDERR unless ``file`` is overridden. """
        kwargs.setdefault('file', self.err)
        self.print('{}: {}'.format(val, msg), *args, **kwargs)

    def pverb(self, *args, **kwargs):
        """ Print verbose message to STDOUT (no-op unless ``verbose``). """
        if not self.verbose:
            return
        self.pstd(*args, **kwargs)

    def quit(self, code=0):
        # Terminate the program with the given exit status.
        sys.exit(code)

    def read(self, prompt='', clean=lambda x: x):
        """ Display a prompt and ask user for input

        A function to clean the user input can be passed as ``clean`` argument.
        This function takes a single value, which is the string user entered,
        and returns a cleaned value. Default is a pass-through function, which
        is an equivalent of::

            def clean(val):
                return val
        """
        # ``read`` here resolves to a module-level input helper, not this
        # method -- presumably imported at the top of the file; TODO confirm.
        ans = read(prompt + ' ')
        return clean(ans)

    def rvpl(self, prompt, error='Entered value is invalid', intro=None,
             validator=lambda x: x != '', clean=lambda x: x.strip(),
             strict=True, default=None):
        """ Start a read-validate-print loop

        The RVPL will read the user input, validate it, and loop until the
        entered value passes the validation, then return it.

        Error message can be customized using the ``error`` argument. If the
        value is a callable, it will be called with the value and it will be
        expected to return a printable message. Exceptions raised by the
        ``error`` function are not trapped.

        When ``intro`` is passed, it is printed above the prompt.

        The ``validator`` argument is a function that validates the user
        input. Default validator simply validates if user entered any value.

        The ``clean`` argument specifies a function for the ``read()`` method
        with the same semantics.
        """
        if intro:
            self.pstd(utils.rewrap_long(intro))
        val = self.read(prompt, clean)
        while not validator(val):
            # Non-strict mode bails out with the default on first failure.
            if not strict:
                return default
            # ``error`` may be a callable rendering a message from the
            # invalid value, or a plain printable message.
            if hasattr(error, '__call__'):
                self.perr(error(val))
            else:
                self.perr(error)
            val = self.read(prompt, clean)
        return val

    def yesno(self, prompt, error='Please type either y or n', intro=None,
              default=None):
        """ Ask user for yes or no answer

        The prompt will include a typical '(y/n):' at the end. Depending on
        whether ``default`` was specified, this may also be '(Y/n):' or
        '(y/N):'.

        The ``default`` argument can be ``True`` or ``False``, with meaning of
        'yes' and 'no' respectively. Default is ``None`` which means no
        default. When default value is specified, malformed or empty response
        will cause the ``default`` value to be returned.

        Optional ``intro`` text can be specified which will be shown above the
        prompt.
        """
        if default is None:
            prompt += ' (y/n):'
        else:
            # Capitalize the default choice in the prompt and convert the
            # boolean default into the string form rvpl() will return.
            if default is True:
                prompt += ' (Y/n):'
                default = 'y'
            if default is False:
                prompt += ' (y/N):'
                default = 'n'
        validator = lambda x: x in ['y', 'yes', 'n', 'no']
        # Strict only when there is no default to fall back on.
        val = self.rvpl(prompt, error=error, intro=intro, validator=validator,
                        clean=lambda x: x.strip().lower(),
                        strict=default is None, default=default)
        return val in ['y', 'yes']

    def menu(self, choices, prompt='Please choose from the provided options:',
             error='Invalid choice', intro=None, strict=True, default=None,
             numerator=lambda x: [i + 1 for i in range(x)],
             formatter=lambda x, y: '{0:>3}) {1}'.format(x, y),
             clean=utils.safeint):
        """ Print a menu

        The choices must be an iterable of two-tuples where the first value is
        the value of the menu item, and the second is the label that matches
        the value.

        The menu will be printed with numeric choices. For example::

            1) foo
            2) bar

        Formatting of the number is controlled by the formatter function which
        can be overridden by passing the ``formatter`` argument.

        The numbers used for the menu are generated using the numerator
        function which can be specified using the ``numerator`` argument. This
        function must take the number of choices and return the same number of
        items that will be used as choice characters as a list.

        The cleaner function passed to the ``rvpl()`` method can be customized
        using ``clean`` argument. This function should generally be customized
        whenever ``numerator`` is customized, as the default cleaner converts
        input to integers to match the default numerator.

        Optional ``intro`` argument can be passed to print a message above the
        menu.

        The return value of this method is the value user has chosen. The
        prompt will keep asking the user for input until a valid choice is
        selected. Each time an invalid selection is made, error message is
        printed. This message can be customized using ``error`` argument.

        If ``strict`` argument is set, then only values in choices are allowed,
        otherwise any value will be allowed. The ``default`` argument can be
        used to define what value is returned in case user select an invalid
        value when strict checking is off.
        """
        numbers = list(numerator(len(choices)))
        labels = (label for _, label in choices)
        values = [value for value, _ in choices]
        # Print intro and menu itself
        if intro:
            self.pstd('\n' + utils.rewrap_long(intro))
        for n, label in zip(numbers, labels):
            self.pstd(formatter(n, label))
        # Define the validator
        validator = lambda x: x in numbers
        val = self.rvpl(prompt, error=error, validator=validator, clean=clean,
                        strict=strict, default=default)
        # In non-strict mode an invalid answer yields the default verbatim,
        # which may not map to any menu entry.
        if not strict and val == default:
            return val
        return values[numbers.index(val)]

    def readpipe(self, chunk=None):
        """ Return iterator that iterates over STDIN line by line

        If ``chunk`` is set to a positive non-zero integer value, then the
        reads are performed in chunks of that many lines, and returned as a
        list. Otherwise the lines are returned one by one.
        """
        read = []
        while True:
            l = sys.stdin.readline()
            if not l:
                # EOF: flush any partially filled chunk before stopping.
                if read:
                    yield read
                    return
                return
            if not chunk:
                yield l
            else:
                read.append(l)
                if len(read) == chunk:
                    yield read
                    # NOTE(review): ``read`` is never reset after yielding a
                    # full chunk, so ``len(read) == chunk`` can only be true
                    # once and later lines pile onto the already-yielded
                    # list -- looks like a missing ``read = []``; confirm
                    # against upstream before relying on chunked mode.

    @property
    def interm(self):
        # True when STDIN is attached to an interactive terminal.
        return hasattr(sys.stdin, 'isatty') and sys.stdin.isatty()

    @property
    def outterm(self):
        # True when STDOUT is attached to an interactive terminal.
        return hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()

    def register_signals(self):
        # Route Ctrl-C and broken-pipe conditions through our handlers.
        signal.signal(signal.SIGINT, self.onint)
        signal.signal(signal.SIGPIPE, self.onpipe)

    def onint(self, signum, exc):
        """Signal handler for SIGINT: announce the interrupt, exit with 1."""
        self.perr('\nQuitting program due to keyboard interrupt')
        self.quit(1)

    def onpipe(self, signup, exc):
        """Signal handler for SIGPIPE (reader end closed): exit silently."""
        # NOTE(review): ``signup`` is presumably a typo for ``signum``; kept
        # as-is since handlers receive arguments positionally.
        self.quit(1)

    def error(self, msg='Program error: {err}', exit=None):
        """ Error handler factory

        This function takes a message with optional ``{err}`` placeholder and
        returns a function that takes an exception object, prints the error
        message to STDERR and optionally quits.

        If no message is supplied (e.g., passing ``None`` or ``False`` or empty
        string), then nothing is output to STDERR.

        The ``exit`` argument can be set to a non-zero value, in which case the
        program quits after printing the message using its value as return
        value of the program.

        The returned function can be used with the ``progress()`` context
        manager as error handler.
        """
        def handler(exc):
            # Falsy message template means "stay quiet".
            if msg:
                self.perr(msg.format(err=exc))
            # Only ``None`` means "do not quit"; 0 is a legitimate exit code.
            if exit is not None:
                self.quit(exit)
        return handler

    @contextlib.contextmanager
    def progress(self, msg, onerror=None, sep='...', end='DONE', abrt='FAIL',
                 prog='.', excs=(Exception,), reraise=True):
        """ Context manager for handling interactive progress indication

        This context manager streamlines presenting banners and progress
        indicators. To start the progress, pass ``msg`` argument as a start
        message. For example::

            printer = Console(verbose=True)
            with printer.progress('Checking files') as prog:
                # Do some checks
                if errors:
                    prog.abrt()
                prog.end()

        The context manager returns a ``Progress`` instance, which provides
        methods like ``abrt()`` (abort), ``end()`` (end), and ``prog()`` (print
        progress indicator).

        The progress methods like ``abrt()`` and ``end()`` will raise an
        exception that interrupts the progress. These exceptions are
        ``ProgressEnd`` exception subclasses and are ``ProgressAbrt`` and
        ``ProgressOK`` respectively. They are silenced and not handled in any
        way as they only serve the purpose of flow control.

        Other exceptions are trapped and ``abrt()`` is called. The exceptions
        that should be trapped can be customized using the ``excs`` argument,
        which should be a tuple of exception classes.

        If a handler function is passed using ``onerror`` argument, then this
        function takes the raised exception and handles it. By default, the
        ``error()`` factory is called with no arguments to generate the default
        error handler. If string is passed, then ``error()`` factory is called
        with that string.

        Finally, when progress is aborted either naturally or when exception is
        raised, it is possible to reraise the ``ProgressAbrt`` exception. This
        is done using the ``reraise`` flag. Default is to reraise.
        """
        if not onerror:
            onerror = self.error()
        # A string ``onerror`` is shorthand for an error() handler with that
        # message template.
        if type(onerror) is str:
            onerror = self.error(msg=onerror)
        # Print the banner without a newline so DONE/FAIL lands on the same
        # line.
        self.pverb(msg, end=sep)
        # The ``prog`` parameter (indicator character) is deliberately
        # rebound to a Progress instance; ``progress`` below is the module,
        # not this method.
        prog = progress.Progress(self.pverb, end=end, abrt=abrt, prog=prog)
        try:
            yield prog
            # Falling off the end of the with-block counts as success.
            prog.end()
        except self.ProgressOK:
            # Flow-control signal raised by prog.end(); nothing to do.
            pass
        except self.ProgressAbrt as err:
            if reraise:
                raise err
        except KeyboardInterrupt:
            # Let the SIGINT handler / caller deal with user interruption.
            raise
        except excs as err:
            # Any other trapped exception aborts the indicator, invokes the
            # error handler, and (optionally) resurfaces as ProgressAbrt.
            prog.abrt(noraise=True)
            if onerror:
                onerror(err)
            if self.debug:
                traceback.print_exc()
            if reraise:
                raise self.ProgressAbrt()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.