repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
joerick/pyinstrument | pyinstrument/frame.py | Frame.add_children | python | def add_children(self, frames, after=None):
'''
Convenience method to add multiple frames at once.
'''
if after is not None:
# if there's an 'after' parameter, add the frames in reverse so the order is
# preserved.
for frame in reversed(frames):
self.add_child(frame, after=after)
else:
for frame in frames:
self.add_child(frame) | Convenience method to add multiple frames at once. | train | https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/frame.py#L115-L126 | [
"def add_child(self, frame, after=None):\n '''\n Adds a child frame, updating the parent link.\n Optionally, insert the frame in a specific position by passing the frame to insert\n this one after.\n '''\n frame.remove_from_parent()\n frame.parent = self\n if after is None:\n self._ch... | class Frame(BaseFrame):
"""
Object that represents a stack frame in the parsed tree
"""
def __init__(self, identifier='', parent=None, children=None, self_time=0):
super(Frame, self).__init__(parent=parent, self_time=self_time)
self.identifier = identifier
self._children = []
self._time = None
if children:
for child in children:
self.add_child(child)
def add_child(self, frame, after=None):
'''
Adds a child frame, updating the parent link.
Optionally, insert the frame in a specific position by passing the frame to insert
this one after.
'''
frame.remove_from_parent()
frame.parent = self
if after is None:
self._children.append(frame)
else:
index = self._children.index(after) + 1
self._children.insert(index, frame)
self._invalidate_time_caches()
@property
def children(self):
# Return an immutable copy (this property should only be mutated using methods)
# Also, returning a copy avoid problems when mutating while iterating, which happens a lot
# in processors!
return tuple(self._children)
@property
def function(self):
if self.identifier:
return self.identifier.split('\x00')[0]
@property
def file_path(self):
if self.identifier:
return self.identifier.split('\x00')[1]
@property
def line_no(self):
if self.identifier:
return int(self.identifier.split('\x00')[2])
@property
def file_path_short(self):
""" Return the path resolved against the closest entry in sys.path """
if not hasattr(self, '_file_path_short'):
if self.file_path:
result = None
for path in sys.path:
# On Windows, if self.file_path and path are on different drives, relpath
# will result in exception, because it cannot compute a relpath in this case.
# The root cause is that on Windows, there is no root dir like '/' on Linux.
try:
candidate = os.path.relpath(self.file_path, path)
except ValueError:
continue
if not result or (len(candidate.split(os.sep)) < len(result.split(os.sep))):
result = candidate
self._file_path_short = result
else:
self._file_path_short = None
return self._file_path_short
@property
def is_application_code(self):
if self.identifier:
return (('%slib%s' % (os.sep, os.sep)) not in self.file_path
and '<frozen importlib._bootstrap' not in self.file_path)
@property
def code_position_short(self):
if self.identifier:
return '%s:%i' % (self.file_path_short, self.line_no)
def time(self):
if self._time is None:
# can't use a sum(<generator>) expression here sadly, because this method
# recurses down the call tree, and the generator uses an extra stack frame,
# meaning we hit the stack limit when the profiled code is 500 frames deep.
self._time = self.self_time
for child in self.children:
self._time += child.time()
return self._time
# pylint: disable=W0212
def _invalidate_time_caches(self):
self._time = None
# null all the parent's caches also.
frame = self
while frame.parent is not None:
frame = frame.parent
frame._time = None
def __repr__(self):
return 'Frame(identifier=%s, time=%f, len(children)=%d), group=%r' % (
self.identifier, self.time(), len(self.children), self.group
)
|
joerick/pyinstrument | pyinstrument/frame.py | Frame.file_path_short | python | def file_path_short(self):
if not hasattr(self, '_file_path_short'):
if self.file_path:
result = None
for path in sys.path:
# On Windows, if self.file_path and path are on different drives, relpath
# will result in exception, because it cannot compute a relpath in this case.
# The root cause is that on Windows, there is no root dir like '/' on Linux.
try:
candidate = os.path.relpath(self.file_path, path)
except ValueError:
continue
if not result or (len(candidate.split(os.sep)) < len(result.split(os.sep))):
result = candidate
self._file_path_short = result
else:
self._file_path_short = None
return self._file_path_short | Return the path resolved against the closest entry in sys.path | train | https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/frame.py#L151-L173 | null | class Frame(BaseFrame):
"""
Object that represents a stack frame in the parsed tree
"""
def __init__(self, identifier='', parent=None, children=None, self_time=0):
super(Frame, self).__init__(parent=parent, self_time=self_time)
self.identifier = identifier
self._children = []
self._time = None
if children:
for child in children:
self.add_child(child)
def add_child(self, frame, after=None):
'''
Adds a child frame, updating the parent link.
Optionally, insert the frame in a specific position by passing the frame to insert
this one after.
'''
frame.remove_from_parent()
frame.parent = self
if after is None:
self._children.append(frame)
else:
index = self._children.index(after) + 1
self._children.insert(index, frame)
self._invalidate_time_caches()
def add_children(self, frames, after=None):
'''
Convenience method to add multiple frames at once.
'''
if after is not None:
# if there's an 'after' parameter, add the frames in reverse so the order is
# preserved.
for frame in reversed(frames):
self.add_child(frame, after=after)
else:
for frame in frames:
self.add_child(frame)
@property
def children(self):
# Return an immutable copy (this property should only be mutated using methods)
# Also, returning a copy avoid problems when mutating while iterating, which happens a lot
# in processors!
return tuple(self._children)
@property
def function(self):
if self.identifier:
return self.identifier.split('\x00')[0]
@property
def file_path(self):
if self.identifier:
return self.identifier.split('\x00')[1]
@property
def line_no(self):
if self.identifier:
return int(self.identifier.split('\x00')[2])
@property
@property
def is_application_code(self):
if self.identifier:
return (('%slib%s' % (os.sep, os.sep)) not in self.file_path
and '<frozen importlib._bootstrap' not in self.file_path)
@property
def code_position_short(self):
if self.identifier:
return '%s:%i' % (self.file_path_short, self.line_no)
def time(self):
if self._time is None:
# can't use a sum(<generator>) expression here sadly, because this method
# recurses down the call tree, and the generator uses an extra stack frame,
# meaning we hit the stack limit when the profiled code is 500 frames deep.
self._time = self.self_time
for child in self.children:
self._time += child.time()
return self._time
# pylint: disable=W0212
def _invalidate_time_caches(self):
self._time = None
# null all the parent's caches also.
frame = self
while frame.parent is not None:
frame = frame.parent
frame._time = None
def __repr__(self):
return 'Frame(identifier=%s, time=%f, len(children)=%d), group=%r' % (
self.identifier, self.time(), len(self.children), self.group
)
|
joerick/pyinstrument | pyinstrument/frame.py | FrameGroup.exit_frames | python | def exit_frames(self):
'''
Returns a list of frames whose children include a frame outside of the group
'''
if self._exit_frames is None:
exit_frames = []
for frame in self.frames:
if any(c.group != self for c in frame.children):
exit_frames.append(frame)
self._exit_frames = exit_frames
return self._exit_frames | Returns a list of frames whose children include a frame outside of the group | train | https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/frame.py#L286-L297 | null | class FrameGroup(object):
def __init__(self, root, **kwargs):
super(FrameGroup, self).__init__(**kwargs)
self.root = root
self.id = str(uuid.uuid4())
self._frames = []
self._exit_frames = None
self._libraries = None
self.add_frame(root)
@property
def libraries(self):
if self._libraries is None:
libraries = []
for frame in self.frames:
library = frame.file_path_short.split(os.sep)[0]
library, _ = os.path.splitext(library)
if library and library not in libraries:
libraries.append(library)
self._libraries = libraries
return self._libraries
@property
def frames(self):
return tuple(self._frames)
# pylint: disable=W0212
def add_frame(self, frame):
if frame.group:
frame.group._frames.remove(frame)
self._frames.append(frame)
frame.group = self
@property
def __repr__(self):
return 'FrameGroup(len(frames)=%d)' % len(self.frames)
|
joerick/pyinstrument | pyinstrument/profiler.py | Profiler.first_interesting_frame | python | def first_interesting_frame(self):
root_frame = self.root_frame()
frame = root_frame
while len(frame.children) <= 1:
if frame.children:
frame = frame.children[0]
else:
# there are no branches
return root_frame
return frame | Traverse down the frame hierarchy until a frame is found with more than one child | train | https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/profiler.py#L119-L133 | null | class Profiler(object):
# pylint: disable=W0613
@deprecated_option('use_signal')
@deprecated_option('recorder')
def __init__(self, interval=0.001, use_signal=None, recorder=None):
self.interval = interval
self.last_profile_time = 0.0
self.frame_records = []
self._start_time = None
self._start_process_time = None
self.last_session = None
def start(self, caller_frame=None):
self.last_profile_time = timer()
self._start_time = time.time()
if process_time:
self._start_process_time = process_time()
if caller_frame is None:
caller_frame = inspect.currentframe().f_back
self._start_call_stack = self._call_stack_for_frame(caller_frame)
setstatprofile(self._profile, self.interval)
def stop(self):
setstatprofile(None)
if process_time:
cpu_time = process_time() - self._start_process_time
self._start_process_time = None
else:
cpu_time = None
self.last_session = ProfilerSession(
frame_records=self.frame_records,
start_time=self._start_time,
duration=time.time() - self._start_time,
sample_count=len(self.frame_records),
program=' '.join(sys.argv),
start_call_stack=self._start_call_stack,
cpu_time=cpu_time,
)
return self.last_session
def __enter__(self):
self.start(caller_frame=inspect.currentframe().f_back)
return self
def __exit__(self, *args):
self.stop()
# pylint: disable=W0613
def _profile(self, frame, event, arg):
now = timer()
time_since_last_profile = now - self.last_profile_time
if time_since_last_profile < self.interval:
return
if event == 'call':
frame = frame.f_back
self.frame_records.append((self._call_stack_for_frame(frame), time_since_last_profile))
self.last_profile_time = now
def _call_stack_for_frame(self, frame):
call_stack = []
while frame is not None:
identifier = '%s\x00%s\x00%i' % (
frame.f_code.co_name, frame.f_code.co_filename, frame.f_code.co_firstlineno
)
call_stack.append(identifier)
frame = frame.f_back
# we iterated from the leaf to the root, we actually want the call stack
# starting at the root, so reverse this array
call_stack.reverse()
return call_stack
@deprecated_option('root')
def output_text(self, root=None, unicode=False, color=False, show_all=False, timeline=False):
return renderers.ConsoleRenderer(unicode=unicode, color=color, show_all=show_all, timeline=timeline).render(self.last_session)
@deprecated_option('root')
def output_html(self, root=None):
return renderers.HTMLRenderer().render(self.last_session)
def open_in_browser(self):
return renderers.HTMLRenderer().open_in_browser(self.last_session)
@deprecated_option('root')
def output(self, renderer, root=None):
return renderer.render(self.last_session)
@deprecated
def root_frame(self):
if self.last_session:
return self.last_session.root_frame()
@deprecated
@deprecated
def starting_frame(self, root=False):
if root:
return self.root_frame()
else:
return self.first_interesting_frame()
|
joerick/pyinstrument | pyinstrument/processors.py | aggregate_repeated_calls | python | def aggregate_repeated_calls(frame, options):
'''
Converts a timeline into a time-aggregate summary.
Adds together calls along the same call stack, so that repeated calls appear as the same
frame. Removes time-linearity - frames are sorted according to total time spent.
Useful for outputs that display a summary of execution (e.g. text and html outputs)
'''
if frame is None:
return None
children_by_identifier = {}
# iterate over a copy of the children since it's going to mutate while we're iterating
for child in frame.children:
if child.identifier in children_by_identifier:
aggregate_frame = children_by_identifier[child.identifier]
# combine the two frames, putting the children and self_time into the aggregate frame.
aggregate_frame.self_time += child.self_time
if child.children:
aggregate_frame.add_children(child.children)
# remove this frame, it's been incorporated into aggregate_frame
child.remove_from_parent()
else:
# never seen this identifier before. It becomes the aggregate frame.
children_by_identifier[child.identifier] = child
# recurse into the children
for child in frame.children:
aggregate_repeated_calls(child, options=options)
# sort the children by time
# it's okay to use the internal _children list, sinde we're not changing the tree
# structure.
frame._children.sort(key=methodcaller('time'), reverse=True) # pylint: disable=W0212
return frame | Converts a timeline into a time-aggregate summary.
Adds together calls along the same call stack, so that repeated calls appear as the same
frame. Removes time-linearity - frames are sorted according to total time spent.
Useful for outputs that display a summary of execution (e.g. text and html outputs) | train | https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/processors.py#L31-L70 | [
"def aggregate_repeated_calls(frame, options):\n '''\n Converts a timeline into a time-aggregate summary.\n\n Adds together calls along the same call stack, so that repeated calls appear as the same\n frame. Removes time-linearity - frames are sorted according to total time spent.\n\n Useful for outp... | '''
Processors are functions that take a Frame object, and mutate the tree to perform some task.
They can mutate the tree in-place, but also can change the root frame, they should always be
called like:
frame = processor(frame, options=...)
'''
import re
from operator import methodcaller
from pyinstrument.frame import FrameGroup, SelfTimeFrame
def remove_importlib(frame, options):
if frame is None:
return None
for child in frame.children:
remove_importlib(child, options=options)
if '<frozen importlib._bootstrap' in child.file_path:
# remove this node, moving the self_time and children up to the parent
frame.self_time += child.self_time
frame.add_children(child.children, after=child)
child.remove_from_parent()
return frame
def group_library_frames_processor(frame, options):
if frame is None:
return None
hide_regex = options.get('hide_regex', r'.*[\\\/]lib[\\\/].*')
show_regex = options.get('show_regex')
def should_be_hidden(frame):
return ((hide_regex and re.match(hide_regex, frame.file_path))
and not (show_regex and re.match(show_regex, frame.file_path)))
def add_frames_to_group(frame, group):
group.add_frame(frame)
for child in frame.children:
if should_be_hidden(child):
add_frames_to_group(child, group)
for child in frame.children:
if not child.group and (should_be_hidden(child)
and any(should_be_hidden(cc) for cc in child.children)):
group = FrameGroup(child)
add_frames_to_group(child, group)
group_library_frames_processor(child, options=options)
return frame
def merge_consecutive_self_time(frame, options):
'''
Combines consecutive 'self time' frames
'''
if frame is None:
return None
previous_self_time_frame = None
for child in frame.children:
if isinstance(child, SelfTimeFrame):
if previous_self_time_frame:
# merge
previous_self_time_frame.self_time += child.self_time
child.remove_from_parent()
else:
# keep a reference, maybe it'll be added to on the next loop
previous_self_time_frame = child
else:
previous_self_time_frame = None
for child in frame.children:
merge_consecutive_self_time(child, options=options)
return frame
def remove_unnecessary_self_time_nodes(frame, options):
'''
When a frame has only one child, and that is a self-time frame, remove that node, since it's
unnecessary - it clutters the output and offers no additional information.
'''
if frame is None:
return None
if len(frame.children) == 1 and isinstance(frame.children[0], SelfTimeFrame):
child = frame.children[0]
frame.self_time += child.self_time
child.remove_from_parent()
for child in frame.children:
remove_unnecessary_self_time_nodes(child, options=options)
return frame
def remove_irrelevant_nodes(frame, options, total_time=None):
'''
Remove nodes that represent less than e.g. 1% of the output
'''
if frame is None:
return None
if total_time is None:
total_time = frame.time()
filter_threshold = options.get('filter_threshold', 0.01)
for child in frame.children:
proportion_of_total = child.time() / total_time
if proportion_of_total < filter_threshold:
frame.self_time += child.time()
child.remove_from_parent()
for child in frame.children:
remove_irrelevant_nodes(child, options=options, total_time=total_time)
return frame
|
joerick/pyinstrument | pyinstrument/processors.py | merge_consecutive_self_time | python | def merge_consecutive_self_time(frame, options):
'''
Combines consecutive 'self time' frames
'''
if frame is None:
return None
previous_self_time_frame = None
for child in frame.children:
if isinstance(child, SelfTimeFrame):
if previous_self_time_frame:
# merge
previous_self_time_frame.self_time += child.self_time
child.remove_from_parent()
else:
# keep a reference, maybe it'll be added to on the next loop
previous_self_time_frame = child
else:
previous_self_time_frame = None
for child in frame.children:
merge_consecutive_self_time(child, options=options)
return frame | Combines consecutive 'self time' frames | train | https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/processors.py#L101-L125 | [
"def merge_consecutive_self_time(frame, options):\n '''\n Combines consecutive 'self time' frames\n '''\n if frame is None:\n return None\n\n previous_self_time_frame = None\n\n for child in frame.children:\n if isinstance(child, SelfTimeFrame):\n if previous_self_time_fra... | '''
Processors are functions that take a Frame object, and mutate the tree to perform some task.
They can mutate the tree in-place, but also can change the root frame, they should always be
called like:
frame = processor(frame, options=...)
'''
import re
from operator import methodcaller
from pyinstrument.frame import FrameGroup, SelfTimeFrame
def remove_importlib(frame, options):
if frame is None:
return None
for child in frame.children:
remove_importlib(child, options=options)
if '<frozen importlib._bootstrap' in child.file_path:
# remove this node, moving the self_time and children up to the parent
frame.self_time += child.self_time
frame.add_children(child.children, after=child)
child.remove_from_parent()
return frame
def aggregate_repeated_calls(frame, options):
'''
Converts a timeline into a time-aggregate summary.
Adds together calls along the same call stack, so that repeated calls appear as the same
frame. Removes time-linearity - frames are sorted according to total time spent.
Useful for outputs that display a summary of execution (e.g. text and html outputs)
'''
if frame is None:
return None
children_by_identifier = {}
# iterate over a copy of the children since it's going to mutate while we're iterating
for child in frame.children:
if child.identifier in children_by_identifier:
aggregate_frame = children_by_identifier[child.identifier]
# combine the two frames, putting the children and self_time into the aggregate frame.
aggregate_frame.self_time += child.self_time
if child.children:
aggregate_frame.add_children(child.children)
# remove this frame, it's been incorporated into aggregate_frame
child.remove_from_parent()
else:
# never seen this identifier before. It becomes the aggregate frame.
children_by_identifier[child.identifier] = child
# recurse into the children
for child in frame.children:
aggregate_repeated_calls(child, options=options)
# sort the children by time
# it's okay to use the internal _children list, sinde we're not changing the tree
# structure.
frame._children.sort(key=methodcaller('time'), reverse=True) # pylint: disable=W0212
return frame
def group_library_frames_processor(frame, options):
if frame is None:
return None
hide_regex = options.get('hide_regex', r'.*[\\\/]lib[\\\/].*')
show_regex = options.get('show_regex')
def should_be_hidden(frame):
return ((hide_regex and re.match(hide_regex, frame.file_path))
and not (show_regex and re.match(show_regex, frame.file_path)))
def add_frames_to_group(frame, group):
group.add_frame(frame)
for child in frame.children:
if should_be_hidden(child):
add_frames_to_group(child, group)
for child in frame.children:
if not child.group and (should_be_hidden(child)
and any(should_be_hidden(cc) for cc in child.children)):
group = FrameGroup(child)
add_frames_to_group(child, group)
group_library_frames_processor(child, options=options)
return frame
def merge_consecutive_self_time(frame, options):
'''
Combines consecutive 'self time' frames
'''
if frame is None:
return None
previous_self_time_frame = None
for child in frame.children:
if isinstance(child, SelfTimeFrame):
if previous_self_time_frame:
# merge
previous_self_time_frame.self_time += child.self_time
child.remove_from_parent()
else:
# keep a reference, maybe it'll be added to on the next loop
previous_self_time_frame = child
else:
previous_self_time_frame = None
for child in frame.children:
merge_consecutive_self_time(child, options=options)
return frame
def remove_unnecessary_self_time_nodes(frame, options):
'''
When a frame has only one child, and that is a self-time frame, remove that node, since it's
unnecessary - it clutters the output and offers no additional information.
'''
if frame is None:
return None
if len(frame.children) == 1 and isinstance(frame.children[0], SelfTimeFrame):
child = frame.children[0]
frame.self_time += child.self_time
child.remove_from_parent()
for child in frame.children:
remove_unnecessary_self_time_nodes(child, options=options)
return frame
def remove_irrelevant_nodes(frame, options, total_time=None):
'''
Remove nodes that represent less than e.g. 1% of the output
'''
if frame is None:
return None
if total_time is None:
total_time = frame.time()
filter_threshold = options.get('filter_threshold', 0.01)
for child in frame.children:
proportion_of_total = child.time() / total_time
if proportion_of_total < filter_threshold:
frame.self_time += child.time()
child.remove_from_parent()
for child in frame.children:
remove_irrelevant_nodes(child, options=options, total_time=total_time)
return frame
|
joerick/pyinstrument | pyinstrument/processors.py | remove_unnecessary_self_time_nodes | python | def remove_unnecessary_self_time_nodes(frame, options):
'''
When a frame has only one child, and that is a self-time frame, remove that node, since it's
unnecessary - it clutters the output and offers no additional information.
'''
if frame is None:
return None
if len(frame.children) == 1 and isinstance(frame.children[0], SelfTimeFrame):
child = frame.children[0]
frame.self_time += child.self_time
child.remove_from_parent()
for child in frame.children:
remove_unnecessary_self_time_nodes(child, options=options)
return frame | When a frame has only one child, and that is a self-time frame, remove that node, since it's
unnecessary - it clutters the output and offers no additional information. | train | https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/processors.py#L128-L144 | [
"def remove_unnecessary_self_time_nodes(frame, options):\n '''\n When a frame has only one child, and that is a self-time frame, remove that node, since it's\n unnecessary - it clutters the output and offers no additional information.\n '''\n if frame is None:\n return None\n\n if len(frame... | '''
Processors are functions that take a Frame object, and mutate the tree to perform some task.
They can mutate the tree in-place, but also can change the root frame, they should always be
called like:
frame = processor(frame, options=...)
'''
import re
from operator import methodcaller
from pyinstrument.frame import FrameGroup, SelfTimeFrame
def remove_importlib(frame, options):
if frame is None:
return None
for child in frame.children:
remove_importlib(child, options=options)
if '<frozen importlib._bootstrap' in child.file_path:
# remove this node, moving the self_time and children up to the parent
frame.self_time += child.self_time
frame.add_children(child.children, after=child)
child.remove_from_parent()
return frame
def aggregate_repeated_calls(frame, options):
'''
Converts a timeline into a time-aggregate summary.
Adds together calls along the same call stack, so that repeated calls appear as the same
frame. Removes time-linearity - frames are sorted according to total time spent.
Useful for outputs that display a summary of execution (e.g. text and html outputs)
'''
if frame is None:
return None
children_by_identifier = {}
# iterate over a copy of the children since it's going to mutate while we're iterating
for child in frame.children:
if child.identifier in children_by_identifier:
aggregate_frame = children_by_identifier[child.identifier]
# combine the two frames, putting the children and self_time into the aggregate frame.
aggregate_frame.self_time += child.self_time
if child.children:
aggregate_frame.add_children(child.children)
# remove this frame, it's been incorporated into aggregate_frame
child.remove_from_parent()
else:
# never seen this identifier before. It becomes the aggregate frame.
children_by_identifier[child.identifier] = child
# recurse into the children
for child in frame.children:
aggregate_repeated_calls(child, options=options)
# sort the children by time
# it's okay to use the internal _children list, sinde we're not changing the tree
# structure.
frame._children.sort(key=methodcaller('time'), reverse=True) # pylint: disable=W0212
return frame
def group_library_frames_processor(frame, options):
if frame is None:
return None
hide_regex = options.get('hide_regex', r'.*[\\\/]lib[\\\/].*')
show_regex = options.get('show_regex')
def should_be_hidden(frame):
return ((hide_regex and re.match(hide_regex, frame.file_path))
and not (show_regex and re.match(show_regex, frame.file_path)))
def add_frames_to_group(frame, group):
group.add_frame(frame)
for child in frame.children:
if should_be_hidden(child):
add_frames_to_group(child, group)
for child in frame.children:
if not child.group and (should_be_hidden(child)
and any(should_be_hidden(cc) for cc in child.children)):
group = FrameGroup(child)
add_frames_to_group(child, group)
group_library_frames_processor(child, options=options)
return frame
def merge_consecutive_self_time(frame, options):
'''
Combines consecutive 'self time' frames
'''
if frame is None:
return None
previous_self_time_frame = None
for child in frame.children:
if isinstance(child, SelfTimeFrame):
if previous_self_time_frame:
# merge
previous_self_time_frame.self_time += child.self_time
child.remove_from_parent()
else:
# keep a reference, maybe it'll be added to on the next loop
previous_self_time_frame = child
else:
previous_self_time_frame = None
for child in frame.children:
merge_consecutive_self_time(child, options=options)
return frame
def remove_unnecessary_self_time_nodes(frame, options):
'''
When a frame has only one child, and that is a self-time frame, remove that node, since it's
unnecessary - it clutters the output and offers no additional information.
'''
if frame is None:
return None
if len(frame.children) == 1 and isinstance(frame.children[0], SelfTimeFrame):
child = frame.children[0]
frame.self_time += child.self_time
child.remove_from_parent()
for child in frame.children:
remove_unnecessary_self_time_nodes(child, options=options)
return frame
def remove_irrelevant_nodes(frame, options, total_time=None):
'''
Remove nodes that represent less than e.g. 1% of the output
'''
if frame is None:
return None
if total_time is None:
total_time = frame.time()
filter_threshold = options.get('filter_threshold', 0.01)
for child in frame.children:
proportion_of_total = child.time() / total_time
if proportion_of_total < filter_threshold:
frame.self_time += child.time()
child.remove_from_parent()
for child in frame.children:
remove_irrelevant_nodes(child, options=options, total_time=total_time)
return frame
|
joerick/pyinstrument | pyinstrument/processors.py | remove_irrelevant_nodes | python | def remove_irrelevant_nodes(frame, options, total_time=None):
'''
Remove nodes that represent less than e.g. 1% of the output
'''
if frame is None:
return None
if total_time is None:
total_time = frame.time()
filter_threshold = options.get('filter_threshold', 0.01)
for child in frame.children:
proportion_of_total = child.time() / total_time
if proportion_of_total < filter_threshold:
frame.self_time += child.time()
child.remove_from_parent()
for child in frame.children:
remove_irrelevant_nodes(child, options=options, total_time=total_time)
return frame | Remove nodes that represent less than e.g. 1% of the output | train | https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/processors.py#L147-L169 | [
"def remove_irrelevant_nodes(frame, options, total_time=None):\n '''\n Remove nodes that represent less than e.g. 1% of the output\n '''\n if frame is None:\n return None\n\n if total_time is None:\n total_time = frame.time()\n\n filter_threshold = options.get('filter_threshold', 0.0... | '''
Processors are functions that take a Frame object, and mutate the tree to perform some task.
They can mutate the tree in-place, but also can change the root frame, they should always be
called like:
frame = processor(frame, options=...)
'''
import re
from operator import methodcaller
from pyinstrument.frame import FrameGroup, SelfTimeFrame
def remove_importlib(frame, options):
if frame is None:
return None
for child in frame.children:
remove_importlib(child, options=options)
if '<frozen importlib._bootstrap' in child.file_path:
# remove this node, moving the self_time and children up to the parent
frame.self_time += child.self_time
frame.add_children(child.children, after=child)
child.remove_from_parent()
return frame
def aggregate_repeated_calls(frame, options):
'''
Converts a timeline into a time-aggregate summary.
Adds together calls along the same call stack, so that repeated calls appear as the same
frame. Removes time-linearity - frames are sorted according to total time spent.
Useful for outputs that display a summary of execution (e.g. text and html outputs)
'''
if frame is None:
return None
children_by_identifier = {}
# iterate over a copy of the children since it's going to mutate while we're iterating
for child in frame.children:
if child.identifier in children_by_identifier:
aggregate_frame = children_by_identifier[child.identifier]
# combine the two frames, putting the children and self_time into the aggregate frame.
aggregate_frame.self_time += child.self_time
if child.children:
aggregate_frame.add_children(child.children)
# remove this frame, it's been incorporated into aggregate_frame
child.remove_from_parent()
else:
# never seen this identifier before. It becomes the aggregate frame.
children_by_identifier[child.identifier] = child
# recurse into the children
for child in frame.children:
aggregate_repeated_calls(child, options=options)
# sort the children by time
# it's okay to use the internal _children list, sinde we're not changing the tree
# structure.
frame._children.sort(key=methodcaller('time'), reverse=True) # pylint: disable=W0212
return frame
def group_library_frames_processor(frame, options):
if frame is None:
return None
hide_regex = options.get('hide_regex', r'.*[\\\/]lib[\\\/].*')
show_regex = options.get('show_regex')
def should_be_hidden(frame):
return ((hide_regex and re.match(hide_regex, frame.file_path))
and not (show_regex and re.match(show_regex, frame.file_path)))
def add_frames_to_group(frame, group):
group.add_frame(frame)
for child in frame.children:
if should_be_hidden(child):
add_frames_to_group(child, group)
for child in frame.children:
if not child.group and (should_be_hidden(child)
and any(should_be_hidden(cc) for cc in child.children)):
group = FrameGroup(child)
add_frames_to_group(child, group)
group_library_frames_processor(child, options=options)
return frame
def merge_consecutive_self_time(frame, options):
'''
Combines consecutive 'self time' frames
'''
if frame is None:
return None
previous_self_time_frame = None
for child in frame.children:
if isinstance(child, SelfTimeFrame):
if previous_self_time_frame:
# merge
previous_self_time_frame.self_time += child.self_time
child.remove_from_parent()
else:
# keep a reference, maybe it'll be added to on the next loop
previous_self_time_frame = child
else:
previous_self_time_frame = None
for child in frame.children:
merge_consecutive_self_time(child, options=options)
return frame
def remove_unnecessary_self_time_nodes(frame, options):
'''
When a frame has only one child, and that is a self-time frame, remove that node, since it's
unnecessary - it clutters the output and offers no additional information.
'''
if frame is None:
return None
if len(frame.children) == 1 and isinstance(frame.children[0], SelfTimeFrame):
child = frame.children[0]
frame.self_time += child.self_time
child.remove_from_parent()
for child in frame.children:
remove_unnecessary_self_time_nodes(child, options=options)
return frame
def remove_irrelevant_nodes(frame, options, total_time=None):
'''
Remove nodes that represent less than e.g. 1% of the output
'''
if frame is None:
return None
if total_time is None:
total_time = frame.time()
filter_threshold = options.get('filter_threshold', 0.01)
for child in frame.children:
proportion_of_total = child.time() / total_time
if proportion_of_total < filter_threshold:
frame.self_time += child.time()
child.remove_from_parent()
for child in frame.children:
remove_irrelevant_nodes(child, options=options, total_time=total_time)
return frame
|
joerick/pyinstrument | pyinstrument/vendor/decorator.py | decorate | python | def decorate(func, caller, extras=()):
evaldict = dict(_call_=caller, _func_=func)
es = ''
for i, extra in enumerate(extras):
ex = '_e%d_' % i
evaldict[ex] = extra
es += ex + ', '
fun = FunctionMaker.create(
func, "return _call_(_func_, %s%%(shortsignature)s)" % es,
evaldict, __wrapped__=func)
if hasattr(func, '__qualname__'):
fun.__qualname__ = func.__qualname__
return fun | decorate(func, caller) decorates a function using a caller. | train | https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/vendor/decorator.py#L219-L234 | [
"def create(cls, obj, body, evaldict, defaults=None,\n doc=None, module=None, addsource=True, **attrs):\n \"\"\"\n Create a function from the strings name, signature and body.\n evaldict is the evaluation dictionary. If addsource is true an\n attribute __source__ is added to the result. The at... | # ######################### LICENSE ############################ #
# Copyright (c) 2005-2018, Michele Simionato
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in bytecode form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
Decorator module, see http://pypi.python.org/pypi/decorator
for the documentation.
"""
from __future__ import print_function
import re
import sys
import inspect
import operator
import itertools
import collections
__version__ = '4.3.1'
if sys.version >= '3':
from inspect import getfullargspec
def get_init(cls):
return cls.__init__
else:
FullArgSpec = collections.namedtuple(
'FullArgSpec', 'args varargs varkw defaults '
'kwonlyargs kwonlydefaults annotations')
def getfullargspec(f):
"A quick and dirty replacement for getfullargspec for Python 2.X"
return FullArgSpec._make(inspect.getargspec(f) + ([], None, {}))
def get_init(cls):
return cls.__init__.__func__
try:
iscoroutinefunction = inspect.iscoroutinefunction
except AttributeError:
# let's assume there are no coroutine functions in old Python
def iscoroutinefunction(f):
return False
DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')
# basic functionality
class FunctionMaker(object):
"""
An object with the ability to create functions with a given signature.
It has attributes name, doc, module, signature, defaults, dict and
methods update and make.
"""
# Atomic get-and-increment provided by the GIL
_compile_count = itertools.count()
# make pylint happy
args = varargs = varkw = defaults = kwonlyargs = kwonlydefaults = ()
def __init__(self, func=None, name=None, signature=None,
defaults=None, doc=None, module=None, funcdict=None):
self.shortsignature = signature
if func:
# func can be a class or a callable, but not an instance method
self.name = func.__name__
if self.name == '<lambda>': # small hack for lambda functions
self.name = '_lambda_'
self.doc = func.__doc__
self.module = func.__module__
if inspect.isfunction(func):
argspec = getfullargspec(func)
self.annotations = getattr(func, '__annotations__', {})
for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
'kwonlydefaults'):
setattr(self, a, getattr(argspec, a))
for i, arg in enumerate(self.args):
setattr(self, 'arg%d' % i, arg)
allargs = list(self.args)
allshortargs = list(self.args)
if self.varargs:
allargs.append('*' + self.varargs)
allshortargs.append('*' + self.varargs)
elif self.kwonlyargs:
allargs.append('*') # single star syntax
for a in self.kwonlyargs:
allargs.append('%s=None' % a)
allshortargs.append('%s=%s' % (a, a))
if self.varkw:
allargs.append('**' + self.varkw)
allshortargs.append('**' + self.varkw)
self.signature = ', '.join(allargs)
self.shortsignature = ', '.join(allshortargs)
self.dict = func.__dict__.copy()
# func=None happens when decorating a caller
if name:
self.name = name
if signature is not None:
self.signature = signature
if defaults:
self.defaults = defaults
if doc:
self.doc = doc
if module:
self.module = module
if funcdict:
self.dict = funcdict
# check existence required attributes
assert hasattr(self, 'name')
if not hasattr(self, 'signature'):
raise TypeError('You are decorating a non function: %s' % func)
def update(self, func, **kw):
"Update the signature of func with the data in self"
func.__name__ = self.name
func.__doc__ = getattr(self, 'doc', None)
func.__dict__ = getattr(self, 'dict', {})
func.__defaults__ = self.defaults
func.__kwdefaults__ = self.kwonlydefaults or None
func.__annotations__ = getattr(self, 'annotations', None)
try:
frame = sys._getframe(3)
except AttributeError: # for IronPython and similar implementations
callermodule = '?'
else:
callermodule = frame.f_globals.get('__name__', '?')
func.__module__ = getattr(self, 'module', callermodule)
func.__dict__.update(kw)
def make(self, src_templ, evaldict=None, addsource=False, **attrs):
"Make a new function from a given template and update the signature"
src = src_templ % vars(self) # expand name and signature
evaldict = evaldict or {}
mo = DEF.search(src)
if mo is None:
raise SyntaxError('not a valid function template\n%s' % src)
name = mo.group(1) # extract the function name
names = set([name] + [arg.strip(' *') for arg in
self.shortsignature.split(',')])
for n in names:
if n in ('_func_', '_call_'):
raise NameError('%s is overridden in\n%s' % (n, src))
if not src.endswith('\n'): # add a newline for old Pythons
src += '\n'
# Ensure each generated function has a unique filename for profilers
# (such as cProfile) that depend on the tuple of (<filename>,
# <definition line>, <function name>) being unique.
filename = '<%s:decorator-gen-%d>' % (
__file__, next(self._compile_count))
try:
code = compile(src, filename, 'single')
exec(code, evaldict)
except Exception:
print('Error in generated code:', file=sys.stderr)
print(src, file=sys.stderr)
raise
func = evaldict[name]
if addsource:
attrs['__source__'] = src
self.update(func, **attrs)
return func
@classmethod
def create(cls, obj, body, evaldict, defaults=None,
doc=None, module=None, addsource=True, **attrs):
"""
Create a function from the strings name, signature and body.
evaldict is the evaluation dictionary. If addsource is true an
attribute __source__ is added to the result. The attributes attrs
are added, if any.
"""
if isinstance(obj, str): # "name(signature)"
name, rest = obj.strip().split('(', 1)
signature = rest[:-1] # strip a right parens
func = None
else: # a function
name = None
signature = None
func = obj
self = cls(func, name, signature, defaults, doc, module)
ibody = '\n'.join(' ' + line for line in body.splitlines())
caller = evaldict.get('_call_') # when called from `decorate`
if caller and iscoroutinefunction(caller):
body = ('async def %(name)s(%(signature)s):\n' + ibody).replace(
'return', 'return await')
else:
body = 'def %(name)s(%(signature)s):\n' + ibody
return self.make(body, evaldict, addsource, **attrs)
def decorator(caller, _func=None):
"""decorator(caller) converts a caller function into a decorator"""
if _func is not None: # return a decorated function
# this is obsolete behavior; you should use decorate instead
return decorate(_func, caller)
# else return a decorator function
defaultargs, defaults = '', ()
if inspect.isclass(caller):
name = caller.__name__.lower()
doc = 'decorator(%s) converts functions/generators into ' \
'factories of %s objects' % (caller.__name__, caller.__name__)
elif inspect.isfunction(caller):
if caller.__name__ == '<lambda>':
name = '_lambda_'
else:
name = caller.__name__
doc = caller.__doc__
nargs = caller.__code__.co_argcount
ndefs = len(caller.__defaults__ or ())
defaultargs = ', '.join(caller.__code__.co_varnames[nargs-ndefs:nargs])
if defaultargs:
defaultargs += ','
defaults = caller.__defaults__
else: # assume caller is an object with a __call__ method
name = caller.__class__.__name__.lower()
doc = caller.__call__.__doc__
evaldict = dict(_call=caller, _decorate_=decorate)
dec = FunctionMaker.create(
'%s(%s func)' % (name, defaultargs),
'if func is None: return lambda func: _decorate_(func, _call, (%s))\n'
'return _decorate_(func, _call, (%s))' % (defaultargs, defaultargs),
evaldict, doc=doc, module=caller.__module__, __wrapped__=caller)
if defaults:
dec.__defaults__ = defaults + (None,)
return dec
# ####################### contextmanager ####################### #
try: # Python >= 3.2
from contextlib import _GeneratorContextManager
except ImportError: # Python >= 2.5
from contextlib import GeneratorContextManager as _GeneratorContextManager
class ContextManager(_GeneratorContextManager):
def __call__(self, func):
"""Context manager decorator"""
return FunctionMaker.create(
func, "with _self_: return _func_(%(shortsignature)s)",
dict(_self_=self, _func_=func), __wrapped__=func)
init = getfullargspec(_GeneratorContextManager.__init__)
n_args = len(init.args)
if n_args == 2 and not init.varargs: # (self, genobj) Python 2.7
def __init__(self, g, *a, **k):
return _GeneratorContextManager.__init__(self, g(*a, **k))
ContextManager.__init__ = __init__
elif n_args == 2 and init.varargs: # (self, gen, *a, **k) Python 3.4
pass
elif n_args == 4: # (self, gen, args, kwds) Python 3.5
def __init__(self, g, *a, **k):
return _GeneratorContextManager.__init__(self, g, a, k)
ContextManager.__init__ = __init__
_contextmanager = decorator(ContextManager)
def contextmanager(func):
# Enable Pylint config: contextmanager-decorators=decorator.contextmanager
return _contextmanager(func)
# ############################ dispatch_on ############################ #
def append(a, vancestors):
"""
Append ``a`` to the list of the virtual ancestors, unless it is already
included.
"""
add = True
for j, va in enumerate(vancestors):
if issubclass(va, a):
add = False
break
if issubclass(a, va):
vancestors[j] = a
add = False
if add:
vancestors.append(a)
# inspired from simplegeneric by P.J. Eby and functools.singledispatch
def dispatch_on(*dispatch_args):
"""
Factory of decorators turning a function into a generic function
dispatching on the given arguments.
"""
assert dispatch_args, 'No dispatch args passed'
dispatch_str = '(%s,)' % ', '.join(dispatch_args)
def check(arguments, wrong=operator.ne, msg=''):
"""Make sure one passes the expected number of arguments"""
if wrong(len(arguments), len(dispatch_args)):
raise TypeError('Expected %d arguments, got %d%s' %
(len(dispatch_args), len(arguments), msg))
def gen_func_dec(func):
"""Decorator turning a function into a generic function"""
# first check the dispatch arguments
argset = set(getfullargspec(func).args)
if not set(dispatch_args) <= argset:
raise NameError('Unknown dispatch arguments %s' % dispatch_str)
typemap = {}
def vancestors(*types):
"""
Get a list of sets of virtual ancestors for the given types
"""
check(types)
ras = [[] for _ in range(len(dispatch_args))]
for types_ in typemap:
for t, type_, ra in zip(types, types_, ras):
if issubclass(t, type_) and type_ not in t.mro():
append(type_, ra)
return [set(ra) for ra in ras]
def ancestors(*types):
"""
Get a list of virtual MROs, one for each type
"""
check(types)
lists = []
for t, vas in zip(types, vancestors(*types)):
n_vas = len(vas)
if n_vas > 1:
raise RuntimeError(
'Ambiguous dispatch for %s: %s' % (t, vas))
elif n_vas == 1:
va, = vas
mro = type('t', (t, va), {}).mro()[1:]
else:
mro = t.mro()
lists.append(mro[:-1]) # discard t and object
return lists
def register(*types):
"""
Decorator to register an implementation for the given types
"""
check(types)
def dec(f):
check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)
typemap[types] = f
return f
return dec
def dispatch_info(*types):
"""
An utility to introspect the dispatch algorithm
"""
check(types)
lst = []
for anc in itertools.product(*ancestors(*types)):
lst.append(tuple(a.__name__ for a in anc))
return lst
def _dispatch(dispatch_args, *args, **kw):
types = tuple(type(arg) for arg in dispatch_args)
try: # fast path
f = typemap[types]
except KeyError:
pass
else:
return f(*args, **kw)
combinations = itertools.product(*ancestors(*types))
next(combinations) # the first one has been already tried
for types_ in combinations:
f = typemap.get(types_)
if f is not None:
return f(*args, **kw)
# else call the default implementation
return func(*args, **kw)
return FunctionMaker.create(
func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str,
dict(_f_=_dispatch), register=register, default=func,
typemap=typemap, vancestors=vancestors, ancestors=ancestors,
dispatch_info=dispatch_info, __wrapped__=func)
gen_func_dec.__name__ = 'dispatch_on' + dispatch_str
return gen_func_dec
|
joerick/pyinstrument | pyinstrument/vendor/decorator.py | dispatch_on | python | def dispatch_on(*dispatch_args):
assert dispatch_args, 'No dispatch args passed'
dispatch_str = '(%s,)' % ', '.join(dispatch_args)
def check(arguments, wrong=operator.ne, msg=''):
"""Make sure one passes the expected number of arguments"""
if wrong(len(arguments), len(dispatch_args)):
raise TypeError('Expected %d arguments, got %d%s' %
(len(dispatch_args), len(arguments), msg))
def gen_func_dec(func):
"""Decorator turning a function into a generic function"""
# first check the dispatch arguments
argset = set(getfullargspec(func).args)
if not set(dispatch_args) <= argset:
raise NameError('Unknown dispatch arguments %s' % dispatch_str)
typemap = {}
def vancestors(*types):
"""
Get a list of sets of virtual ancestors for the given types
"""
check(types)
ras = [[] for _ in range(len(dispatch_args))]
for types_ in typemap:
for t, type_, ra in zip(types, types_, ras):
if issubclass(t, type_) and type_ not in t.mro():
append(type_, ra)
return [set(ra) for ra in ras]
def ancestors(*types):
"""
Get a list of virtual MROs, one for each type
"""
check(types)
lists = []
for t, vas in zip(types, vancestors(*types)):
n_vas = len(vas)
if n_vas > 1:
raise RuntimeError(
'Ambiguous dispatch for %s: %s' % (t, vas))
elif n_vas == 1:
va, = vas
mro = type('t', (t, va), {}).mro()[1:]
else:
mro = t.mro()
lists.append(mro[:-1]) # discard t and object
return lists
def register(*types):
"""
Decorator to register an implementation for the given types
"""
check(types)
def dec(f):
check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)
typemap[types] = f
return f
return dec
def dispatch_info(*types):
"""
An utility to introspect the dispatch algorithm
"""
check(types)
lst = []
for anc in itertools.product(*ancestors(*types)):
lst.append(tuple(a.__name__ for a in anc))
return lst
def _dispatch(dispatch_args, *args, **kw):
types = tuple(type(arg) for arg in dispatch_args)
try: # fast path
f = typemap[types]
except KeyError:
pass
else:
return f(*args, **kw)
combinations = itertools.product(*ancestors(*types))
next(combinations) # the first one has been already tried
for types_ in combinations:
f = typemap.get(types_)
if f is not None:
return f(*args, **kw)
# else call the default implementation
return func(*args, **kw)
return FunctionMaker.create(
func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str,
dict(_f_=_dispatch), register=register, default=func,
typemap=typemap, vancestors=vancestors, ancestors=ancestors,
dispatch_info=dispatch_info, __wrapped__=func)
gen_func_dec.__name__ = 'dispatch_on' + dispatch_str
return gen_func_dec | Factory of decorators turning a function into a generic function
dispatching on the given arguments. | train | https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/vendor/decorator.py#L331-L433 | null | # ######################### LICENSE ############################ #
# Copyright (c) 2005-2018, Michele Simionato
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in bytecode form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
Decorator module, see http://pypi.python.org/pypi/decorator
for the documentation.
"""
from __future__ import print_function
import re
import sys
import inspect
import operator
import itertools
import collections
__version__ = '4.3.1'
if sys.version >= '3':
from inspect import getfullargspec
def get_init(cls):
return cls.__init__
else:
FullArgSpec = collections.namedtuple(
'FullArgSpec', 'args varargs varkw defaults '
'kwonlyargs kwonlydefaults annotations')
def getfullargspec(f):
"A quick and dirty replacement for getfullargspec for Python 2.X"
return FullArgSpec._make(inspect.getargspec(f) + ([], None, {}))
def get_init(cls):
return cls.__init__.__func__
try:
iscoroutinefunction = inspect.iscoroutinefunction
except AttributeError:
# let's assume there are no coroutine functions in old Python
def iscoroutinefunction(f):
return False
DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')
# basic functionality
class FunctionMaker(object):
"""
An object with the ability to create functions with a given signature.
It has attributes name, doc, module, signature, defaults, dict and
methods update and make.
"""
# Atomic get-and-increment provided by the GIL
_compile_count = itertools.count()
# make pylint happy
args = varargs = varkw = defaults = kwonlyargs = kwonlydefaults = ()
def __init__(self, func=None, name=None, signature=None,
defaults=None, doc=None, module=None, funcdict=None):
self.shortsignature = signature
if func:
# func can be a class or a callable, but not an instance method
self.name = func.__name__
if self.name == '<lambda>': # small hack for lambda functions
self.name = '_lambda_'
self.doc = func.__doc__
self.module = func.__module__
if inspect.isfunction(func):
argspec = getfullargspec(func)
self.annotations = getattr(func, '__annotations__', {})
for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
'kwonlydefaults'):
setattr(self, a, getattr(argspec, a))
for i, arg in enumerate(self.args):
setattr(self, 'arg%d' % i, arg)
allargs = list(self.args)
allshortargs = list(self.args)
if self.varargs:
allargs.append('*' + self.varargs)
allshortargs.append('*' + self.varargs)
elif self.kwonlyargs:
allargs.append('*') # single star syntax
for a in self.kwonlyargs:
allargs.append('%s=None' % a)
allshortargs.append('%s=%s' % (a, a))
if self.varkw:
allargs.append('**' + self.varkw)
allshortargs.append('**' + self.varkw)
self.signature = ', '.join(allargs)
self.shortsignature = ', '.join(allshortargs)
self.dict = func.__dict__.copy()
# func=None happens when decorating a caller
if name:
self.name = name
if signature is not None:
self.signature = signature
if defaults:
self.defaults = defaults
if doc:
self.doc = doc
if module:
self.module = module
if funcdict:
self.dict = funcdict
# check existence required attributes
assert hasattr(self, 'name')
if not hasattr(self, 'signature'):
raise TypeError('You are decorating a non function: %s' % func)
def update(self, func, **kw):
"Update the signature of func with the data in self"
func.__name__ = self.name
func.__doc__ = getattr(self, 'doc', None)
func.__dict__ = getattr(self, 'dict', {})
func.__defaults__ = self.defaults
func.__kwdefaults__ = self.kwonlydefaults or None
func.__annotations__ = getattr(self, 'annotations', None)
try:
frame = sys._getframe(3)
except AttributeError: # for IronPython and similar implementations
callermodule = '?'
else:
callermodule = frame.f_globals.get('__name__', '?')
func.__module__ = getattr(self, 'module', callermodule)
func.__dict__.update(kw)
def make(self, src_templ, evaldict=None, addsource=False, **attrs):
"Make a new function from a given template and update the signature"
src = src_templ % vars(self) # expand name and signature
evaldict = evaldict or {}
mo = DEF.search(src)
if mo is None:
raise SyntaxError('not a valid function template\n%s' % src)
name = mo.group(1) # extract the function name
names = set([name] + [arg.strip(' *') for arg in
self.shortsignature.split(',')])
for n in names:
if n in ('_func_', '_call_'):
raise NameError('%s is overridden in\n%s' % (n, src))
if not src.endswith('\n'): # add a newline for old Pythons
src += '\n'
# Ensure each generated function has a unique filename for profilers
# (such as cProfile) that depend on the tuple of (<filename>,
# <definition line>, <function name>) being unique.
filename = '<%s:decorator-gen-%d>' % (
__file__, next(self._compile_count))
try:
code = compile(src, filename, 'single')
exec(code, evaldict)
except Exception:
print('Error in generated code:', file=sys.stderr)
print(src, file=sys.stderr)
raise
func = evaldict[name]
if addsource:
attrs['__source__'] = src
self.update(func, **attrs)
return func
@classmethod
def create(cls, obj, body, evaldict, defaults=None,
doc=None, module=None, addsource=True, **attrs):
"""
Create a function from the strings name, signature and body.
evaldict is the evaluation dictionary. If addsource is true an
attribute __source__ is added to the result. The attributes attrs
are added, if any.
"""
if isinstance(obj, str): # "name(signature)"
name, rest = obj.strip().split('(', 1)
signature = rest[:-1] # strip a right parens
func = None
else: # a function
name = None
signature = None
func = obj
self = cls(func, name, signature, defaults, doc, module)
ibody = '\n'.join(' ' + line for line in body.splitlines())
caller = evaldict.get('_call_') # when called from `decorate`
if caller and iscoroutinefunction(caller):
body = ('async def %(name)s(%(signature)s):\n' + ibody).replace(
'return', 'return await')
else:
body = 'def %(name)s(%(signature)s):\n' + ibody
return self.make(body, evaldict, addsource, **attrs)
def decorate(func, caller, extras=()):
"""
decorate(func, caller) decorates a function using a caller.
"""
evaldict = dict(_call_=caller, _func_=func)
es = ''
for i, extra in enumerate(extras):
ex = '_e%d_' % i
evaldict[ex] = extra
es += ex + ', '
fun = FunctionMaker.create(
func, "return _call_(_func_, %s%%(shortsignature)s)" % es,
evaldict, __wrapped__=func)
if hasattr(func, '__qualname__'):
fun.__qualname__ = func.__qualname__
return fun
def decorator(caller, _func=None):
"""decorator(caller) converts a caller function into a decorator"""
if _func is not None: # return a decorated function
# this is obsolete behavior; you should use decorate instead
return decorate(_func, caller)
# else return a decorator function
defaultargs, defaults = '', ()
if inspect.isclass(caller):
name = caller.__name__.lower()
doc = 'decorator(%s) converts functions/generators into ' \
'factories of %s objects' % (caller.__name__, caller.__name__)
elif inspect.isfunction(caller):
if caller.__name__ == '<lambda>':
name = '_lambda_'
else:
name = caller.__name__
doc = caller.__doc__
nargs = caller.__code__.co_argcount
ndefs = len(caller.__defaults__ or ())
defaultargs = ', '.join(caller.__code__.co_varnames[nargs-ndefs:nargs])
if defaultargs:
defaultargs += ','
defaults = caller.__defaults__
else: # assume caller is an object with a __call__ method
name = caller.__class__.__name__.lower()
doc = caller.__call__.__doc__
evaldict = dict(_call=caller, _decorate_=decorate)
dec = FunctionMaker.create(
'%s(%s func)' % (name, defaultargs),
'if func is None: return lambda func: _decorate_(func, _call, (%s))\n'
'return _decorate_(func, _call, (%s))' % (defaultargs, defaultargs),
evaldict, doc=doc, module=caller.__module__, __wrapped__=caller)
if defaults:
dec.__defaults__ = defaults + (None,)
return dec
# ####################### contextmanager ####################### #
try: # Python >= 3.2
from contextlib import _GeneratorContextManager
except ImportError: # Python >= 2.5
from contextlib import GeneratorContextManager as _GeneratorContextManager
class ContextManager(_GeneratorContextManager):
def __call__(self, func):
"""Context manager decorator"""
return FunctionMaker.create(
func, "with _self_: return _func_(%(shortsignature)s)",
dict(_self_=self, _func_=func), __wrapped__=func)
init = getfullargspec(_GeneratorContextManager.__init__)
n_args = len(init.args)
if n_args == 2 and not init.varargs: # (self, genobj) Python 2.7
def __init__(self, g, *a, **k):
return _GeneratorContextManager.__init__(self, g(*a, **k))
ContextManager.__init__ = __init__
elif n_args == 2 and init.varargs: # (self, gen, *a, **k) Python 3.4
pass
elif n_args == 4: # (self, gen, args, kwds) Python 3.5
def __init__(self, g, *a, **k):
return _GeneratorContextManager.__init__(self, g, a, k)
ContextManager.__init__ = __init__
_contextmanager = decorator(ContextManager)
def contextmanager(func):
# Enable Pylint config: contextmanager-decorators=decorator.contextmanager
return _contextmanager(func)
# ############################ dispatch_on ############################ #
def append(a, vancestors):
"""
Append ``a`` to the list of the virtual ancestors, unless it is already
included.
"""
add = True
for j, va in enumerate(vancestors):
if issubclass(va, a):
add = False
break
if issubclass(a, va):
vancestors[j] = a
add = False
if add:
vancestors.append(a)
# inspired from simplegeneric by P.J. Eby and functools.singledispatch
|
joerick/pyinstrument | pyinstrument/renderers/html.py | HTMLRenderer.open_in_browser | python | def open_in_browser(self, session, output_filename=None):
if output_filename is None:
output_file = tempfile.NamedTemporaryFile(suffix='.html', delete=False)
output_filename = output_file.name
with codecs.getwriter('utf-8')(output_file) as f:
f.write(self.render(session))
else:
with codecs.open(output_filename, 'w', 'utf-8') as f:
f.write(self.render(session))
from pyinstrument.vendor.six.moves import urllib
url = urllib.parse.urlunparse(('file', '', output_filename, '', '', ''))
webbrowser.open(url)
return output_filename | Open the rendered HTML in a webbrowser.
If output_filename=None (the default), a tempfile is used.
The filename of the HTML file is returned. | train | https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/renderers/html.py#L43-L64 | [
"def render(self, session):\n resources_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'html_resources/')\n\n if not os.path.exists(os.path.join(resources_dir, 'app.js')):\n raise RuntimeError(\"Could not find app.js. If you are running \"\n \"pyinstrument from... | class HTMLRenderer(Renderer):
def render(self, session):
resources_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'html_resources/')
if not os.path.exists(os.path.join(resources_dir, 'app.js')):
raise RuntimeError("Could not find app.js. If you are running "
"pyinstrument from a git checkout, run 'python "
"setup.py build' to compile the Javascript "
"(requires nodejs).")
with io.open(os.path.join(resources_dir, 'app.js'), encoding='utf-8') as f:
js = f.read()
session_json = self.render_json(session)
page = u'''<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
</head>
<body>
<div id="app"></div>
<script>
window.profileSession = {session_json}
</script>
<script>
{js}
</script>
</body>
</html>'''.format(js=js, session_json=session_json)
return page
def render_json(self, session):
json_renderer = JSONRenderer()
json_renderer.processors = self.processors
return json_renderer.render(session)
def default_processors(self):
return [
processors.remove_importlib,
processors.merge_consecutive_self_time,
processors.aggregate_repeated_calls,
processors.group_library_frames_processor,
processors.remove_unnecessary_self_time_nodes,
processors.remove_irrelevant_nodes,
]
|
joerick/pyinstrument | setup.py | BuildPyCommand.run | python | def run(self):
'''compile the JS, then run superclass implementation'''
if subprocess.call(['npm', '--version']) != 0:
raise RuntimeError('npm is required to build the HTML renderer.')
self.check_call(['npm', 'install'], cwd=HTML_RENDERER_DIR)
self.check_call(['npm', 'run', 'build'], cwd=HTML_RENDERER_DIR)
self.copy_file(HTML_RENDERER_DIR+'/dist/js/app.js', 'pyinstrument/renderers/html_resources/app.js')
setuptools.command.build_py.build_py.run(self) | compile the JS, then run superclass implementation | train | https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/setup.py#L19-L30 | null | class BuildPyCommand(setuptools.command.build_py.build_py, CommandUtilities):
"""Custom build command."""
|
joerick/pyinstrument | pyinstrument/util.py | deprecated | python | def deprecated(func, *args, **kwargs):
''' Marks a function as deprecated. '''
warnings.warn(
'{} is deprecated and should no longer be used.'.format(func),
DeprecationWarning,
stacklevel=3
)
return func(*args, **kwargs) | Marks a function as deprecated. | train | https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/util.py#L18-L25 | null | import importlib, warnings
from pyinstrument.vendor.decorator import decorator
def object_with_import_path(import_path):
if '.' not in import_path:
raise ValueError("Can't import '%s', it is not a valid import path" % import_path)
module_path, object_name = import_path.rsplit('.', 1)
module = importlib.import_module(module_path)
return getattr(module, object_name)
def truncate(string, max_length):
if len(string) > max_length:
return string[0:max_length-3]+'...'
return string
@decorator
def deprecated_option(option_name, message=''):
''' Marks an option as deprecated. '''
def caller(func, *args, **kwargs):
if option_name in kwargs:
warnings.warn(
'{} is deprecated. {}'.format(option_name, message),
DeprecationWarning,
stacklevel=3
)
return func(*args, **kwargs)
return decorator(caller)
|
joerick/pyinstrument | pyinstrument/util.py | deprecated_option | python | def deprecated_option(option_name, message=''):
''' Marks an option as deprecated. '''
def caller(func, *args, **kwargs):
if option_name in kwargs:
warnings.warn(
'{} is deprecated. {}'.format(option_name, message),
DeprecationWarning,
stacklevel=3
)
return func(*args, **kwargs)
return decorator(caller) | Marks an option as deprecated. | train | https://github.com/joerick/pyinstrument/blob/cc4f3f6fc1b493d7cd058ecf41ad012e0030a512/pyinstrument/util.py#L27-L38 | [
"def decorator(caller, _func=None):\n \"\"\"decorator(caller) converts a caller function into a decorator\"\"\"\n if _func is not None: # return a decorated function\n # this is obsolete behavior; you should use decorate instead\n return decorate(_func, caller)\n # else return a decorator fu... | import importlib, warnings
from pyinstrument.vendor.decorator import decorator
def object_with_import_path(import_path):
if '.' not in import_path:
raise ValueError("Can't import '%s', it is not a valid import path" % import_path)
module_path, object_name = import_path.rsplit('.', 1)
module = importlib.import_module(module_path)
return getattr(module, object_name)
def truncate(string, max_length):
if len(string) > max_length:
return string[0:max_length-3]+'...'
return string
@decorator
def deprecated(func, *args, **kwargs):
''' Marks a function as deprecated. '''
warnings.warn(
'{} is deprecated and should no longer be used.'.format(func),
DeprecationWarning,
stacklevel=3
)
return func(*args, **kwargs)
def deprecated_option(option_name, message=''):
''' Marks an option as deprecated. '''
def caller(func, *args, **kwargs):
if option_name in kwargs:
warnings.warn(
'{} is deprecated. {}'.format(option_name, message),
DeprecationWarning,
stacklevel=3
)
return func(*args, **kwargs)
return decorator(caller)
|
pachyderm/python-pachyderm | src/python_pachyderm/pfs_client.py | PfsClient.create_repo | python | def create_repo(self, repo_name, description=None):
req = proto.CreateRepoRequest(repo=proto.Repo(name=repo_name), description=description)
self.stub.CreateRepo(req, metadata=self.metadata) | Creates a new Repo object in PFS with the given name. Repos are the
top level data object in PFS and should be used to store data of a
similar type. For example rather than having a single Repo for an
entire project you might have separate Repos for logs, metrics,
database dumps etc.
Params:
* repo_name: Name of the repo.
* description: Repo description. | train | https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L45-L58 | null | class PfsClient(object):
def __init__(self, host=None, port=None, auth_token=None):
"""
Creates a client to connect to PFS.
Params:
* host: The pachd host. Default is 'localhost', which is used with
`pachctl port-forward`.
* port: The port to connect to. Default is 30650.
* auth_token: The authentication token; used if authentication is
enabled on the cluster. Default to `None`.
"""
address = get_address(host, port)
self.metadata = get_metadata(auth_token)
self.channel = grpc.grpc.insecure_channel(address)
self.stub = grpc.APIStub(self.channel)
def create_repo(self, repo_name, description=None):
"""
Creates a new Repo object in PFS with the given name. Repos are the
top level data object in PFS and should be used to store data of a
similar type. For example rather than having a single Repo for an
entire project you might have separate Repos for logs, metrics,
database dumps etc.
Params:
* repo_name: Name of the repo.
* description: Repo description.
"""
req = proto.CreateRepoRequest(repo=proto.Repo(name=repo_name), description=description)
self.stub.CreateRepo(req, metadata=self.metadata)
def inspect_repo(self, repo_name):
"""
Returns info about a specific Repo.
Params:
* repo_name: Name of the repo.
"""
req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
res = self.stub.InspectRepo(req, metadata=self.metadata)
return res
def list_repo(self):
"""
Returns info about all Repos.
"""
req = proto.ListRepoRequest()
res = self.stub.ListRepo(req, metadata=self.metadata)
if hasattr(res, 'repo_info'):
return res.repo_info
return []
def delete_repo(self, repo_name=None, force=False, all=False):
"""
Deletes a repo and reclaims the storage space it was using.
Params:
* repo_name: The name of the repo.
* force: If set to true, the repo will be removed regardless of
errors. This argument should be used with care.
* all: Delete all repos.
"""
if not all:
if repo_name:
req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Either a repo_name or all=True needs to be provided")
else:
if not repo_name:
req = proto.DeleteRepoRequest(force=force, all=all)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Cannot specify a repo_name if all=True")
def start_commit(self, repo_name, branch=None, parent=None, description=None):
"""
Begins the process of committing data to a Repo. Once started you can
write to the Commit with PutFile and when all the data has been
written you must finish the Commit with FinishCommit. NOTE, data is
not persisted until FinishCommit is called. A Commit object is
returned.
Params:
* repo_name: The name of the repo.
* branch: A more convenient way to build linear chains of commits.
When a commit is started with a non-empty branch the value of branch
becomes an alias for the created Commit. This enables a more intuitive
access pattern. When the commit is started on a branch the previous
head of the branch is used as the parent of the commit.
* parent: Specifies the parent Commit, upon creation the new Commit
will appear identical to the parent Commit, data can safely be added
to the new commit without affecting the contents of the parent Commit.
You may pass "" as parentCommit in which case the new Commit will have
no parent and will initially appear empty.
* description: (optional) explanation of the commit for clarity.
"""
req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch,
description=description)
res = self.stub.StartCommit(req, metadata=self.metadata)
return res
def finish_commit(self, commit):
"""
Ends the process of committing data to a Repo and persists the
Commit. Once a Commit is finished the data becomes immutable and
future attempts to write to it with PutFile will error.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.FinishCommitRequest(commit=commit_from(commit))
res = self.stub.FinishCommit(req, metadata=self.metadata)
return res
@contextmanager
def commit(self, repo_name, branch=None, parent=None, description=None):
"""A context manager for doing stuff inside a commit."""
commit = self.start_commit(repo_name, branch, parent, description)
try:
yield commit
except Exception as e:
print("An exception occurred during an open commit. "
"Trying to finish it (Currently a commit can't be cancelled)")
raise e
finally:
self.finish_commit(commit)
def inspect_commit(self, commit):
"""
Returns info about a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.InspectCommitRequest(commit=commit_from(commit))
return self.stub.InspectCommit(req, metadata=self.metadata)
def provenances_for_repo(self, repo_name):
provenances = {}
commits = self.list_commit(repo_name)
sorted_commits = [x[0] for x in
sorted([(c.commit.id, c.finished.seconds) for c in commits], key=lambda x: x[1])]
for c in sorted_commits:
for p in c.provenance:
provenances[p.id] = c.commit.id
return provenances
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
"""
Gets a list of CommitInfo objects.
Params:
* repo_name: If only `repo_name` is given, all commits in the repo are
returned.
* to_commit: Optional. Only the ancestors of `to`, including `to`
itself, are considered.
* from_commit: Optional. Only the descendants of `from`, including
`from` itself, are considered.
* number: Optional. Determines how many commits are returned. If
`number` is 0, all commits that match the aforementioned criteria are
returned.
"""
req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number)
if to_commit is not None:
req.to.CopyFrom(commit_from(to_commit))
if from_commit is not None:
getattr(req, 'from').CopyFrom(commit_from(from_commit))
res = self.stub.ListCommit(req, metadata=self.metadata)
if hasattr(res, 'commit_info'):
return res.commit_info
return []
def delete_commit(self, commit):
"""
Deletes a commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.DeleteCommitRequest(commit=commit_from(commit))
self.stub.DeleteCommit(req, metadata=self.metadata)
def flush_commit(self, commits, repos=tuple()):
"""
Blocks until all of the commits which have a set of commits as
provenance have finished. For commits to be considered they must have
all of the specified commits as provenance. This in effect waits for
all of the jobs that are triggered by a set of commits to complete.
It returns an error if any of the commits it's waiting on are
cancelled due to one of the jobs encountering an error during runtime.
Note that it's never necessary to call FlushCommit to run jobs,
they'll run no matter what, FlushCommit just allows you to wait for
them to complete and see their output once they do. This returns an
iterator of CommitInfo objects.
Params:
* commits: A commit or a list of commits to wait on.
* repos: Optional. Only the commits up to and including those repos.
will be considered, otherwise all repos are considered.
"""
req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits],
to_repo=[proto.Repo(name=r) for r in repos])
res = self.stub.FlushCommit(req, metadata=self.metadata)
return res
def subscribe_commit(self, repo_name, branch, from_commit_id=None):
"""
SubscribeCommit is like ListCommit but it keeps listening for commits as
they come in. This returns an iterator Commit objects.
Params:
* repo_name: Name of the repo.
* branch: Branch to subscribe to.
* from_commit_id: Optional. Only commits created since this commit
are returned.
"""
repo = proto.Repo(name=repo_name)
req = proto.SubscribeCommitRequest(repo=repo, branch=branch)
if from_commit_id is not None:
getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id))
res = self.stub.SubscribeCommit(req, metadata=self.metadata)
return res
def list_branch(self, repo_name):
"""
Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo.
"""
req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
res = self.stub.ListBranch(req, metadata=self.metadata)
if hasattr(res, 'branch_info'):
return res.branch_info
return []
def set_branch(self, commit, branch_name):
"""
Sets a commit and its ancestors as a branch.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* branch_name: The name for the branch to set.
"""
res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name)
self.stub.SetBranch(res, metadata=self.metadata)
def delete_branch(self, repo_name, branch_name):
"""
Deletes a branch, but leaves the commits themselves intact. In other
words, those commits can still be accessed via commit IDs and other
branches they happen to be on.
Params:
* repo_name: The name of the repo.
* branch_name: The name of the branch to delete.
"""
res = proto.DeleteBranchRequest(repo=Repo(name=repo_name), branch=branch_name)
self.stub.DeleteBranch(res, metadata=self.metadata)
def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
target_file_datums=0, target_file_bytes=0, overwrite_index=None):
"""
Uploads a binary bytes array as file(s) in a certain path.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path in the repo the file(s) will be written to.
* value: The file contents as bytes, represented as a file-like
object, bytestring, or iterator of bytestrings.
* delimiter: Optional. causes data to be broken up into separate files
with `path` as a prefix.
* target_file_datums: Optional. Specifies the target number of datums
in each written file. It may be lower if data does not split evenly,
but will never be higher, unless the value is 0.
* target_file_bytes: Specifies the target number of bytes in each
written file, files may have more or fewer bytes than the target.
"""
overwrite_index_proto = proto.OverwriteIndex(index=overwrite_index) if overwrite_index is not None else None
if hasattr(value, "read"):
def wrap(value):
for i in itertools.count():
chunk = value.read(BUFFER_SIZE)
if len(chunk) == 0:
return
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
elif isinstance(value, collections.Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
def wrap(value):
for i, chunk in enumerate(value):
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
else:
def wrap(value):
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=value[:BUFFER_SIZE],
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
yield proto.PutFileRequest(
value=value[i:i + BUFFER_SIZE],
overwrite_index=overwrite_index_proto
)
self.stub.PutFile(wrap(value), metadata=self.metadata)
def put_file_url(self, commit, path, url, recursive=False):
"""
Puts a file using the content found at a URL. The URL is sent to the
server which performs the request.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
* url: The url of the file to put.
* recursive: allow for recursive scraping of some types URLs for
example on s3:// urls.
"""
req = iter([
proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
url=url,
recursive=recursive
)
])
self.stub.PutFile(req, metadata=self.metadata)
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
"""
Returns an iterator of the contents contents of a file at a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path of the file.
* offset_bytes: Optional. specifies a number of bytes that should be
skipped in the beginning of the file.
* size_bytes: Optional. limits the total amount of data returned, note
you will get fewer bytes than size if you pass a value larger than the
size of the file. If size is set to 0 then all of the data will be
returned.
* extract_value: If True, then an ExtractValueIterator will be return,
which will iterate over the bytes of the file. If False, then the
protobuf response iterator will return.
"""
req = proto.GetFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
offset_bytes=offset_bytes,
size_bytes=size_bytes
)
res = self.stub.GetFile(req, metadata=self.metadata)
if extract_value:
return ExtractValueIterator(res)
return res
def get_files(self, commit, paths, recursive=False):
"""
Returns the contents of a list of files at a specific Commit as a
dictionary of file paths to data.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* paths: A list of paths to retrieve.
* recursive: If True, will go into each directory in the list
recursively.
"""
filtered_file_infos = []
for path in paths:
fi = self.inspect_file(commit, path)
if fi.file_type == proto.FILE:
filtered_file_infos.append(fi)
else:
filtered_file_infos += self.list_file(commit, path, recursive=recursive)
filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE]
return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
def inspect_file(self, commit, path):
"""
Returns info about a specific file.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path to file.
"""
req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path))
res = self.stub.InspectFile(req, metadata=self.metadata)
return res
def list_file(self, commit, path, recursive=False):
"""
Lists the files in a directory.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the directory.
* recursive: If True, continue listing the files for sub-directories.
"""
req = proto.ListFileRequest(
file=proto.File(commit=commit_from(commit), path=path)
)
res = self.stub.ListFile(req, metadata=self.metadata)
file_infos = res.file_info
if recursive:
dirs = [f for f in file_infos if f.file_type == proto.DIR]
files = [f for f in file_infos if f.file_type == proto.FILE]
return sum([self.list_file(commit, d.file.path, recursive) for d in dirs], files)
return list(file_infos)
def glob_file(self, commit, pattern):
req = proto.GlobFileRequest(commit=commit_from(commit), pattern=pattern)
res = self.stub.GlobFile(req, metadata=self.metadata)
if hasattr(res, 'file_info'):
return res.file_info
return []
def delete_file(self, commit, path):
"""
Deletes a file from a Commit. DeleteFile leaves a tombstone in the
Commit, assuming the file isn't written to later attempting to get the
file from the finished commit will result in not found error. The file
will of course remain intact in the Commit's parent.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
"""
req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path))
self.stub.DeleteFile(req, metadata=self.metadata)
def delete_all(self):
req = proto.google_dot_protobuf_dot_empty__pb2.Empty()
self.stub.DeleteAll(req, metadata=self.metadata)
|
pachyderm/python-pachyderm | src/python_pachyderm/pfs_client.py | PfsClient.inspect_repo | python | def inspect_repo(self, repo_name):
req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
res = self.stub.InspectRepo(req, metadata=self.metadata)
return res | Returns info about a specific Repo.
Params:
* repo_name: Name of the repo. | train | https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L60-L69 | null | class PfsClient(object):
def __init__(self, host=None, port=None, auth_token=None):
"""
Creates a client to connect to PFS.
Params:
* host: The pachd host. Default is 'localhost', which is used with
`pachctl port-forward`.
* port: The port to connect to. Default is 30650.
* auth_token: The authentication token; used if authentication is
enabled on the cluster. Default to `None`.
"""
address = get_address(host, port)
self.metadata = get_metadata(auth_token)
self.channel = grpc.grpc.insecure_channel(address)
self.stub = grpc.APIStub(self.channel)
def create_repo(self, repo_name, description=None):
"""
Creates a new Repo object in PFS with the given name. Repos are the
top level data object in PFS and should be used to store data of a
similar type. For example rather than having a single Repo for an
entire project you might have separate Repos for logs, metrics,
database dumps etc.
Params:
* repo_name: Name of the repo.
* description: Repo description.
"""
req = proto.CreateRepoRequest(repo=proto.Repo(name=repo_name), description=description)
self.stub.CreateRepo(req, metadata=self.metadata)
def inspect_repo(self, repo_name):
"""
Returns info about a specific Repo.
Params:
* repo_name: Name of the repo.
"""
req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
res = self.stub.InspectRepo(req, metadata=self.metadata)
return res
def list_repo(self):
"""
Returns info about all Repos.
"""
req = proto.ListRepoRequest()
res = self.stub.ListRepo(req, metadata=self.metadata)
if hasattr(res, 'repo_info'):
return res.repo_info
return []
def delete_repo(self, repo_name=None, force=False, all=False):
"""
Deletes a repo and reclaims the storage space it was using.
Params:
* repo_name: The name of the repo.
* force: If set to true, the repo will be removed regardless of
errors. This argument should be used with care.
* all: Delete all repos.
"""
if not all:
if repo_name:
req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Either a repo_name or all=True needs to be provided")
else:
if not repo_name:
req = proto.DeleteRepoRequest(force=force, all=all)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Cannot specify a repo_name if all=True")
def start_commit(self, repo_name, branch=None, parent=None, description=None):
"""
Begins the process of committing data to a Repo. Once started you can
write to the Commit with PutFile and when all the data has been
written you must finish the Commit with FinishCommit. NOTE, data is
not persisted until FinishCommit is called. A Commit object is
returned.
Params:
* repo_name: The name of the repo.
* branch: A more convenient way to build linear chains of commits.
When a commit is started with a non-empty branch the value of branch
becomes an alias for the created Commit. This enables a more intuitive
access pattern. When the commit is started on a branch the previous
head of the branch is used as the parent of the commit.
* parent: Specifies the parent Commit, upon creation the new Commit
will appear identical to the parent Commit, data can safely be added
to the new commit without affecting the contents of the parent Commit.
You may pass "" as parentCommit in which case the new Commit will have
no parent and will initially appear empty.
* description: (optional) explanation of the commit for clarity.
"""
req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch,
description=description)
res = self.stub.StartCommit(req, metadata=self.metadata)
return res
def finish_commit(self, commit):
"""
Ends the process of committing data to a Repo and persists the
Commit. Once a Commit is finished the data becomes immutable and
future attempts to write to it with PutFile will error.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.FinishCommitRequest(commit=commit_from(commit))
res = self.stub.FinishCommit(req, metadata=self.metadata)
return res
@contextmanager
def commit(self, repo_name, branch=None, parent=None, description=None):
"""A context manager for doing stuff inside a commit."""
commit = self.start_commit(repo_name, branch, parent, description)
try:
yield commit
except Exception as e:
print("An exception occurred during an open commit. "
"Trying to finish it (Currently a commit can't be cancelled)")
raise e
finally:
self.finish_commit(commit)
def inspect_commit(self, commit):
"""
Returns info about a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.InspectCommitRequest(commit=commit_from(commit))
return self.stub.InspectCommit(req, metadata=self.metadata)
def provenances_for_repo(self, repo_name):
provenances = {}
commits = self.list_commit(repo_name)
sorted_commits = [x[0] for x in
sorted([(c.commit.id, c.finished.seconds) for c in commits], key=lambda x: x[1])]
for c in sorted_commits:
for p in c.provenance:
provenances[p.id] = c.commit.id
return provenances
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
"""
Gets a list of CommitInfo objects.
Params:
* repo_name: If only `repo_name` is given, all commits in the repo are
returned.
* to_commit: Optional. Only the ancestors of `to`, including `to`
itself, are considered.
* from_commit: Optional. Only the descendants of `from`, including
`from` itself, are considered.
* number: Optional. Determines how many commits are returned. If
`number` is 0, all commits that match the aforementioned criteria are
returned.
"""
req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number)
if to_commit is not None:
req.to.CopyFrom(commit_from(to_commit))
if from_commit is not None:
getattr(req, 'from').CopyFrom(commit_from(from_commit))
res = self.stub.ListCommit(req, metadata=self.metadata)
if hasattr(res, 'commit_info'):
return res.commit_info
return []
def delete_commit(self, commit):
"""
Deletes a commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.DeleteCommitRequest(commit=commit_from(commit))
self.stub.DeleteCommit(req, metadata=self.metadata)
def flush_commit(self, commits, repos=tuple()):
"""
Blocks until all of the commits which have a set of commits as
provenance have finished. For commits to be considered they must have
all of the specified commits as provenance. This in effect waits for
all of the jobs that are triggered by a set of commits to complete.
It returns an error if any of the commits it's waiting on are
cancelled due to one of the jobs encountering an error during runtime.
Note that it's never necessary to call FlushCommit to run jobs,
they'll run no matter what, FlushCommit just allows you to wait for
them to complete and see their output once they do. This returns an
iterator of CommitInfo objects.
Params:
* commits: A commit or a list of commits to wait on.
* repos: Optional. Only the commits up to and including those repos.
will be considered, otherwise all repos are considered.
"""
req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits],
to_repo=[proto.Repo(name=r) for r in repos])
res = self.stub.FlushCommit(req, metadata=self.metadata)
return res
def subscribe_commit(self, repo_name, branch, from_commit_id=None):
"""
SubscribeCommit is like ListCommit but it keeps listening for commits as
they come in. This returns an iterator Commit objects.
Params:
* repo_name: Name of the repo.
* branch: Branch to subscribe to.
* from_commit_id: Optional. Only commits created since this commit
are returned.
"""
repo = proto.Repo(name=repo_name)
req = proto.SubscribeCommitRequest(repo=repo, branch=branch)
if from_commit_id is not None:
getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id))
res = self.stub.SubscribeCommit(req, metadata=self.metadata)
return res
def list_branch(self, repo_name):
"""
Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo.
"""
req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
res = self.stub.ListBranch(req, metadata=self.metadata)
if hasattr(res, 'branch_info'):
return res.branch_info
return []
def set_branch(self, commit, branch_name):
"""
Sets a commit and its ancestors as a branch.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* branch_name: The name for the branch to set.
"""
res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name)
self.stub.SetBranch(res, metadata=self.metadata)
def delete_branch(self, repo_name, branch_name):
"""
Deletes a branch, but leaves the commits themselves intact. In other
words, those commits can still be accessed via commit IDs and other
branches they happen to be on.
Params:
* repo_name: The name of the repo.
* branch_name: The name of the branch to delete.
"""
res = proto.DeleteBranchRequest(repo=Repo(name=repo_name), branch=branch_name)
self.stub.DeleteBranch(res, metadata=self.metadata)
def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
                   target_file_datums=0, target_file_bytes=0, overwrite_index=None):
    """
    Uploads a binary bytes array as file(s) in a certain path.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: Path in the repo the file(s) will be written to.
    * value: The file contents as bytes, represented as a file-like
      object, bytestring, or iterator of bytestrings.
    * delimiter: Optional. Causes data to be broken up into separate files
      with `path` as a prefix.
    * target_file_datums: Optional. Specifies the target number of datums
      in each written file. It may be lower if data does not split evenly,
      but will never be higher, unless the value is 0.
    * target_file_bytes: Specifies the target number of bytes in each
      written file; files may have more or fewer bytes than the target.
    * overwrite_index: Optional. Wrapped in an OverwriteIndex message and
      attached to the stream's header request when given.
    """
    # BUG FIX: `collections.Iterable` was removed in Python 3.10; the ABC
    # lives in collections.abc.  Imported locally to keep this fix
    # self-contained.
    from collections.abc import Iterable

    overwrite_index_proto = (proto.OverwriteIndex(index=overwrite_index)
                             if overwrite_index is not None else None)

    def header_request(chunk):
        # Only the first message of a PutFile stream carries the file
        # metadata; every later message is just a raw value chunk.
        return proto.PutFileRequest(
            file=proto.File(commit=commit_from(commit), path=path),
            value=chunk,
            delimiter=delimiter,
            target_file_datums=target_file_datums,
            target_file_bytes=target_file_bytes,
            overwrite_index=overwrite_index_proto,
        )

    if hasattr(value, "read"):
        # File-like object: stream it in BUFFER_SIZE chunks until EOF.
        def wrap(value):
            for i in itertools.count():
                chunk = value.read(BUFFER_SIZE)
                if len(chunk) == 0:
                    return
                if i == 0:
                    yield header_request(chunk)
                else:
                    yield proto.PutFileRequest(value=chunk)
    elif isinstance(value, Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
        # Iterator of byte chunks (a plain string/bytes value is handled
        # by the slicing branch below instead).
        def wrap(value):
            for i, chunk in enumerate(value):
                if i == 0:
                    yield header_request(chunk)
                else:
                    yield proto.PutFileRequest(value=chunk)
    else:
        # Single bytestring: slice it into BUFFER_SIZE pieces.
        def wrap(value):
            yield header_request(value[:BUFFER_SIZE])
            for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
                yield proto.PutFileRequest(
                    value=value[i:i + BUFFER_SIZE],
                    overwrite_index=overwrite_index_proto,
                )
    self.stub.PutFile(wrap(value), metadata=self.metadata)
def put_file_url(self, commit, path, url, recursive=False):
    """
    Puts a file using the content found at a URL. The URL is sent to the
    server, which performs the request.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: The path to the file.
    * url: The url of the file to put.
    * recursive: allow for recursive scraping of some types of URLs, for
      example on s3:// urls.
    """
    # PutFile is a client-streaming RPC, so even a single request is
    # sent as an iterator.
    request_iter = iter([
        proto.PutFileRequest(
            file=proto.File(commit=commit_from(commit), path=path),
            url=url,
            recursive=recursive,
        )
    ])
    self.stub.PutFile(request_iter, metadata=self.metadata)
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
    """
    Returns an iterator over the contents of a file at a specific Commit.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: The path of the file.
    * offset_bytes: Optional. Number of bytes to skip at the beginning of
      the file.
    * size_bytes: Optional. Limits the total amount of data returned; you
      may get fewer bytes than requested if the file is smaller.  0 means
      return all of the data.
    * extract_value: If True, return an ExtractValueIterator over the
      bytes of the file; if False, return the raw protobuf response
      iterator.
    """
    request = proto.GetFileRequest(
        file=proto.File(commit=commit_from(commit), path=path),
        offset_bytes=offset_bytes,
        size_bytes=size_bytes,
    )
    response = self.stub.GetFile(request, metadata=self.metadata)
    return ExtractValueIterator(response) if extract_value else response
def get_files(self, commit, paths, recursive=False):
    """
    Returns the contents of a list of files at a specific Commit as a
    dictionary mapping file paths to data.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * paths: A list of paths to retrieve.
    * recursive: If True, descend into each directory in the list
      recursively.
    """
    infos = []
    for requested in paths:
        info = self.inspect_file(commit, requested)
        if info.file_type == proto.FILE:
            infos.append(info)
        else:
            # Directory: expand it into the files it contains.
            infos += self.list_file(commit, requested, recursive=recursive)
    file_paths = [i.file.path for i in infos if i.file_type == proto.FILE]
    return {p: b''.join(self.get_file(commit, p)) for p in file_paths}
def inspect_file(self, commit, path):
    """
    Returns info about a specific file.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: Path to file.
    """
    request = proto.InspectFileRequest(
        file=proto.File(commit=commit_from(commit), path=path))
    return self.stub.InspectFile(request, metadata=self.metadata)
def list_file(self, commit, path, recursive=False):
    """
    Lists the files in a directory.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: The path to the directory.
    * recursive: If True, continue listing files in sub-directories.
    """
    request = proto.ListFileRequest(
        file=proto.File(commit=commit_from(commit), path=path)
    )
    response = self.stub.ListFile(request, metadata=self.metadata)
    infos = response.file_info
    if not recursive:
        return list(infos)
    subdirs = [f for f in infos if f.file_type == proto.DIR]
    plain_files = [f for f in infos if f.file_type == proto.FILE]
    # Flatten the recursive listings of every sub-directory onto the
    # files found at this level.
    return sum([self.list_file(commit, d.file.path, recursive) for d in subdirs],
               plain_files)
def glob_file(self, commit, pattern):
    """
    Returns the FileInfo entries matching a glob `pattern` at `commit`,
    or an empty list when the response carries none.
    """
    request = proto.GlobFileRequest(commit=commit_from(commit), pattern=pattern)
    response = self.stub.GlobFile(request, metadata=self.metadata)
    return response.file_info if hasattr(response, 'file_info') else []
def delete_file(self, commit, path):
    """
    Deletes a file from a Commit. DeleteFile leaves a tombstone in the
    Commit; assuming the file isn't written to later, attempting to get
    the file from the finished commit will result in a not-found error.
    The file will of course remain intact in the Commit's parent.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: The path to the file.
    """
    request = proto.DeleteFileRequest(
        file=proto.File(commit=commit_from(commit), path=path))
    self.stub.DeleteFile(request, metadata=self.metadata)
def delete_all(self):
    """Issues a DeleteAll request (empty message) to the PFS service."""
    empty = proto.google_dot_protobuf_dot_empty__pb2.Empty()
    self.stub.DeleteAll(empty, metadata=self.metadata)
|
pachyderm/python-pachyderm | src/python_pachyderm/pfs_client.py | PfsClient.list_repo | python | def list_repo(self):
req = proto.ListRepoRequest()
res = self.stub.ListRepo(req, metadata=self.metadata)
if hasattr(res, 'repo_info'):
return res.repo_info
return [] | Returns info about all Repos. | train | https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L71-L79 | null | class PfsClient(object):
def __init__(self, host=None, port=None, auth_token=None):
    """
    Creates a client to connect to PFS.

    Params:
    * host: The pachd host. Default is 'localhost', which is used with
      `pachctl port-forward`.
    * port: The port to connect to. Default is 30650.
    * auth_token: The authentication token; used if authentication is
      enabled on the cluster. Defaults to `None`.
    """
    target = get_address(host, port)
    self.metadata = get_metadata(auth_token)
    # NOTE(review): `grpc.grpc` reaches the grpc runtime through the
    # generated stub module's re-export -- presumably how this module
    # imports it; confirm against the file's import block.
    self.channel = grpc.grpc.insecure_channel(target)
    self.stub = grpc.APIStub(self.channel)
def create_repo(self, repo_name, description=None):
    """
    Creates a new Repo object in PFS with the given name.

    Repos are the top-level data object in PFS and should be used to
    store data of a similar type -- for example, separate Repos for
    logs, metrics, and database dumps rather than a single Repo for an
    entire project.

    Params:
    * repo_name: Name of the repo.
    * description: Repo description.
    """
    request = proto.CreateRepoRequest(
        repo=proto.Repo(name=repo_name),
        description=description,
    )
    self.stub.CreateRepo(request, metadata=self.metadata)
def inspect_repo(self, repo_name):
    """
    Returns info about a specific Repo.

    Params:
    * repo_name: Name of the repo.
    """
    request = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
    return self.stub.InspectRepo(request, metadata=self.metadata)
def delete_repo(self, repo_name=None, force=False, all=False):
    """
    Deletes a repo and reclaims the storage space it was using.

    Params:
    * repo_name: The name of the repo.
    * force: If set to true, the repo will be removed regardless of
      errors. This argument should be used with care.
    * all: Delete all repos.
    """
    # Guard clauses: exactly one of `repo_name` / `all` must be given.
    if all and repo_name:
        raise ValueError("Cannot specify a repo_name if all=True")
    if not all and not repo_name:
        raise ValueError("Either a repo_name or all=True needs to be provided")
    if all:
        request = proto.DeleteRepoRequest(force=force, all=all)
    else:
        request = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name),
                                          force=force)
    self.stub.DeleteRepo(request, metadata=self.metadata)
def start_commit(self, repo_name, branch=None, parent=None, description=None):
    """
    Begins the process of committing data to a Repo and returns the new
    Commit object.  Write to it with PutFile and persist it with
    FinishCommit.  NOTE: data is not persisted until FinishCommit is
    called.

    Params:
    * repo_name: The name of the repo.
    * branch: A more convenient way to build linear chains of commits.
      When a commit is started with a non-empty branch, the value of
      branch becomes an alias for the created Commit; the previous head
      of the branch is used as the parent of the commit.
    * parent: Specifies the parent Commit. On creation the new Commit
      appears identical to the parent, and data can safely be added
      without affecting the parent's contents. Pass "" for a commit with
      no parent that initially appears empty.
    * description: (optional) explanation of the commit for clarity.
    """
    parent_commit = proto.Commit(repo=proto.Repo(name=repo_name), id=parent)
    request = proto.StartCommitRequest(
        parent=parent_commit,
        branch=branch,
        description=description,
    )
    return self.stub.StartCommit(request, metadata=self.metadata)
def finish_commit(self, commit):
    """
    Ends the process of committing data to a Repo and persists the
    Commit. Once a Commit is finished the data becomes immutable and
    future attempts to write to it with PutFile will error.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    """
    request = proto.FinishCommitRequest(commit=commit_from(commit))
    return self.stub.FinishCommit(request, metadata=self.metadata)
@contextmanager
def commit(self, repo_name, branch=None, parent=None, description=None):
    """A context manager for doing stuff inside a commit."""
    open_commit = self.start_commit(repo_name, branch, parent, description)
    try:
        yield open_commit
    except Exception as e:
        # A commit cannot be cancelled, so report and still try to
        # finish it in the `finally` block below.
        print("An exception occurred during an open commit. "
              "Trying to finish it (Currently a commit can't be cancelled)")
        raise e
    finally:
        self.finish_commit(open_commit)
def inspect_commit(self, commit):
    """
    Returns info about a specific Commit.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    """
    request = proto.InspectCommitRequest(commit=commit_from(commit))
    return self.stub.InspectCommit(request, metadata=self.metadata)
def provenances_for_repo(self, repo_name):
    """
    Maps each provenance commit ID to the ID of the commit in this repo
    that lists it as provenance, walking the repo's commits in
    chronological (finish-time) order so later commits win.

    Params:
    * repo_name: The name of the repo.
    """
    provenances = {}
    commits = self.list_commit(repo_name)
    # BUG FIX: the original reduced each CommitInfo to its id string and
    # then accessed `.provenance` on that string, raising AttributeError.
    # Sort the CommitInfo objects themselves by finish time instead.
    for info in sorted(commits, key=lambda ci: ci.finished.seconds):
        for p in info.provenance:
            provenances[p.id] = info.commit.id
    return provenances
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
    """
    Gets a list of CommitInfo objects.

    Params:
    * repo_name: If only `repo_name` is given, all commits in the repo
      are returned.
    * to_commit: Optional. Only the ancestors of `to`, including `to`
      itself, are considered.
    * from_commit: Optional. Only the descendants of `from`, including
      `from` itself, are considered.
    * number: Optional. How many commits are returned; 0 means all
      commits matching the criteria above.
    """
    request = proto.ListCommitRequest(repo=proto.Repo(name=repo_name),
                                      number=number)
    if to_commit is not None:
        request.to.CopyFrom(commit_from(to_commit))
    if from_commit is not None:
        # `from` is a Python keyword, so the field is reached via getattr.
        getattr(request, 'from').CopyFrom(commit_from(from_commit))
    response = self.stub.ListCommit(request, metadata=self.metadata)
    return response.commit_info if hasattr(response, 'commit_info') else []
def delete_commit(self, commit):
    """
    Deletes a commit.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    """
    request = proto.DeleteCommitRequest(commit=commit_from(commit))
    self.stub.DeleteCommit(request, metadata=self.metadata)
def flush_commit(self, commits, repos=tuple()):
    """
    Blocks until all of the commits which have the given commits as
    provenance have finished; in effect, waits for all of the jobs
    triggered by a set of commits to complete.  Errors if any commit it
    waits on is cancelled because a job failed at runtime.  Calling
    FlushCommit is never required for jobs to run -- it only lets you
    wait for them and observe their output.  Returns an iterator of
    CommitInfo objects.

    Params:
    * commits: A list of commits to wait on.
    * repos: Optional. Only commits up to and including those repos are
      considered; otherwise all repos are considered.
    """
    request = proto.FlushCommitRequest(
        commit=[commit_from(c) for c in commits],
        to_repo=[proto.Repo(name=r) for r in repos],
    )
    return self.stub.FlushCommit(request, metadata=self.metadata)
def subscribe_commit(self, repo_name, branch, from_commit_id=None):
    """
    Like list_commit, but keeps listening for commits as they come in.
    Returns an iterator of Commit objects.

    Params:
    * repo_name: Name of the repo.
    * branch: Branch to subscribe to.
    * from_commit_id: Optional. Only commits created since this commit
      are returned.
    """
    repo = proto.Repo(name=repo_name)
    request = proto.SubscribeCommitRequest(repo=repo, branch=branch)
    if from_commit_id is not None:
        # `from` is a Python keyword, so the field is reached via getattr.
        getattr(request, 'from').CopyFrom(
            proto.Commit(repo=repo, id=from_commit_id))
    return self.stub.SubscribeCommit(request, metadata=self.metadata)
def list_branch(self, repo_name):
    """
    Lists the active Branch objects on a Repo.

    Params:
    * repo_name: The name of the repo.
    """
    request = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
    response = self.stub.ListBranch(request, metadata=self.metadata)
    return response.branch_info if hasattr(response, 'branch_info') else []
def set_branch(self, commit, branch_name):
"""
Sets a commit and its ancestors as a branch.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* branch_name: The name for the branch to set.
"""
res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name)
self.stub.SetBranch(res, metadata=self.metadata)
def delete_branch(self, repo_name, branch_name):
    """
    Deletes a branch, but leaves the commits themselves intact. In other
    words, those commits can still be accessed via commit IDs and other
    branches they happen to be on.

    Params:
    * repo_name: The name of the repo.
    * branch_name: The name of the branch to delete.
    """
    # BUG FIX: the request must be built with proto.Repo -- the bare name
    # `Repo` is not defined in this module, so the original raised a
    # NameError whenever this method was called.
    request = proto.DeleteBranchRequest(repo=proto.Repo(name=repo_name),
                                        branch=branch_name)
    self.stub.DeleteBranch(request, metadata=self.metadata)
def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
                   target_file_datums=0, target_file_bytes=0, overwrite_index=None):
    """
    Uploads a binary bytes array as file(s) in a certain path.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: Path in the repo the file(s) will be written to.
    * value: The file contents as bytes, represented as a file-like
      object, bytestring, or iterator of bytestrings.
    * delimiter: Optional. Causes data to be broken up into separate files
      with `path` as a prefix.
    * target_file_datums: Optional. Specifies the target number of datums
      in each written file. It may be lower if data does not split evenly,
      but will never be higher, unless the value is 0.
    * target_file_bytes: Specifies the target number of bytes in each
      written file; files may have more or fewer bytes than the target.
    * overwrite_index: Optional. Wrapped in an OverwriteIndex message and
      attached to the stream's header request when given.
    """
    # BUG FIX: `collections.Iterable` was removed in Python 3.10; the ABC
    # lives in collections.abc.  Imported locally to keep this fix
    # self-contained.
    from collections.abc import Iterable

    overwrite_index_proto = (proto.OverwriteIndex(index=overwrite_index)
                             if overwrite_index is not None else None)

    def header_request(chunk):
        # Only the first message of a PutFile stream carries the file
        # metadata; every later message is just a raw value chunk.
        return proto.PutFileRequest(
            file=proto.File(commit=commit_from(commit), path=path),
            value=chunk,
            delimiter=delimiter,
            target_file_datums=target_file_datums,
            target_file_bytes=target_file_bytes,
            overwrite_index=overwrite_index_proto,
        )

    if hasattr(value, "read"):
        # File-like object: stream it in BUFFER_SIZE chunks until EOF.
        def wrap(value):
            for i in itertools.count():
                chunk = value.read(BUFFER_SIZE)
                if len(chunk) == 0:
                    return
                if i == 0:
                    yield header_request(chunk)
                else:
                    yield proto.PutFileRequest(value=chunk)
    elif isinstance(value, Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
        # Iterator of byte chunks (a plain string/bytes value is handled
        # by the slicing branch below instead).
        def wrap(value):
            for i, chunk in enumerate(value):
                if i == 0:
                    yield header_request(chunk)
                else:
                    yield proto.PutFileRequest(value=chunk)
    else:
        # Single bytestring: slice it into BUFFER_SIZE pieces.
        def wrap(value):
            yield header_request(value[:BUFFER_SIZE])
            for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
                yield proto.PutFileRequest(
                    value=value[i:i + BUFFER_SIZE],
                    overwrite_index=overwrite_index_proto,
                )
    self.stub.PutFile(wrap(value), metadata=self.metadata)
def put_file_url(self, commit, path, url, recursive=False):
"""
Puts a file using the content found at a URL. The URL is sent to the
server which performs the request.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
* url: The url of the file to put.
* recursive: allow for recursive scraping of some types URLs for
example on s3:// urls.
"""
req = iter([
proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
url=url,
recursive=recursive
)
])
self.stub.PutFile(req, metadata=self.metadata)
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
"""
Returns an iterator of the contents contents of a file at a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path of the file.
* offset_bytes: Optional. specifies a number of bytes that should be
skipped in the beginning of the file.
* size_bytes: Optional. limits the total amount of data returned, note
you will get fewer bytes than size if you pass a value larger than the
size of the file. If size is set to 0 then all of the data will be
returned.
* extract_value: If True, then an ExtractValueIterator will be return,
which will iterate over the bytes of the file. If False, then the
protobuf response iterator will return.
"""
req = proto.GetFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
offset_bytes=offset_bytes,
size_bytes=size_bytes
)
res = self.stub.GetFile(req, metadata=self.metadata)
if extract_value:
return ExtractValueIterator(res)
return res
def get_files(self, commit, paths, recursive=False):
"""
Returns the contents of a list of files at a specific Commit as a
dictionary of file paths to data.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* paths: A list of paths to retrieve.
* recursive: If True, will go into each directory in the list
recursively.
"""
filtered_file_infos = []
for path in paths:
fi = self.inspect_file(commit, path)
if fi.file_type == proto.FILE:
filtered_file_infos.append(fi)
else:
filtered_file_infos += self.list_file(commit, path, recursive=recursive)
filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE]
return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
def inspect_file(self, commit, path):
"""
Returns info about a specific file.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path to file.
"""
req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path))
res = self.stub.InspectFile(req, metadata=self.metadata)
return res
def list_file(self, commit, path, recursive=False):
"""
Lists the files in a directory.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the directory.
* recursive: If True, continue listing the files for sub-directories.
"""
req = proto.ListFileRequest(
file=proto.File(commit=commit_from(commit), path=path)
)
res = self.stub.ListFile(req, metadata=self.metadata)
file_infos = res.file_info
if recursive:
dirs = [f for f in file_infos if f.file_type == proto.DIR]
files = [f for f in file_infos if f.file_type == proto.FILE]
return sum([self.list_file(commit, d.file.path, recursive) for d in dirs], files)
return list(file_infos)
def glob_file(self, commit, pattern):
req = proto.GlobFileRequest(commit=commit_from(commit), pattern=pattern)
res = self.stub.GlobFile(req, metadata=self.metadata)
if hasattr(res, 'file_info'):
return res.file_info
return []
def delete_file(self, commit, path):
"""
Deletes a file from a Commit. DeleteFile leaves a tombstone in the
Commit, assuming the file isn't written to later attempting to get the
file from the finished commit will result in not found error. The file
will of course remain intact in the Commit's parent.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
"""
req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path))
self.stub.DeleteFile(req, metadata=self.metadata)
def delete_all(self):
req = proto.google_dot_protobuf_dot_empty__pb2.Empty()
self.stub.DeleteAll(req, metadata=self.metadata)
|
pachyderm/python-pachyderm | src/python_pachyderm/pfs_client.py | PfsClient.delete_repo | python | def delete_repo(self, repo_name=None, force=False, all=False):
if not all:
if repo_name:
req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Either a repo_name or all=True needs to be provided")
else:
if not repo_name:
req = proto.DeleteRepoRequest(force=force, all=all)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Cannot specify a repo_name if all=True") | Deletes a repo and reclaims the storage space it was using.
Params:
* repo_name: The name of the repo.
* force: If set to true, the repo will be removed regardless of
errors. This argument should be used with care.
* all: Delete all repos. | train | https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L81-L102 | null | class PfsClient(object):
def __init__(self, host=None, port=None, auth_token=None):
"""
Creates a client to connect to PFS.
Params:
* host: The pachd host. Default is 'localhost', which is used with
`pachctl port-forward`.
* port: The port to connect to. Default is 30650.
* auth_token: The authentication token; used if authentication is
enabled on the cluster. Default to `None`.
"""
address = get_address(host, port)
self.metadata = get_metadata(auth_token)
self.channel = grpc.grpc.insecure_channel(address)
self.stub = grpc.APIStub(self.channel)
def create_repo(self, repo_name, description=None):
"""
Creates a new Repo object in PFS with the given name. Repos are the
top level data object in PFS and should be used to store data of a
similar type. For example rather than having a single Repo for an
entire project you might have separate Repos for logs, metrics,
database dumps etc.
Params:
* repo_name: Name of the repo.
* description: Repo description.
"""
req = proto.CreateRepoRequest(repo=proto.Repo(name=repo_name), description=description)
self.stub.CreateRepo(req, metadata=self.metadata)
def inspect_repo(self, repo_name):
"""
Returns info about a specific Repo.
Params:
* repo_name: Name of the repo.
"""
req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
res = self.stub.InspectRepo(req, metadata=self.metadata)
return res
def list_repo(self):
"""
Returns info about all Repos.
"""
req = proto.ListRepoRequest()
res = self.stub.ListRepo(req, metadata=self.metadata)
if hasattr(res, 'repo_info'):
return res.repo_info
return []
def start_commit(self, repo_name, branch=None, parent=None, description=None):
"""
Begins the process of committing data to a Repo. Once started you can
write to the Commit with PutFile and when all the data has been
written you must finish the Commit with FinishCommit. NOTE, data is
not persisted until FinishCommit is called. A Commit object is
returned.
Params:
* repo_name: The name of the repo.
* branch: A more convenient way to build linear chains of commits.
When a commit is started with a non-empty branch the value of branch
becomes an alias for the created Commit. This enables a more intuitive
access pattern. When the commit is started on a branch the previous
head of the branch is used as the parent of the commit.
* parent: Specifies the parent Commit, upon creation the new Commit
will appear identical to the parent Commit, data can safely be added
to the new commit without affecting the contents of the parent Commit.
You may pass "" as parentCommit in which case the new Commit will have
no parent and will initially appear empty.
* description: (optional) explanation of the commit for clarity.
"""
req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch,
description=description)
res = self.stub.StartCommit(req, metadata=self.metadata)
return res
def finish_commit(self, commit):
"""
Ends the process of committing data to a Repo and persists the
Commit. Once a Commit is finished the data becomes immutable and
future attempts to write to it with PutFile will error.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.FinishCommitRequest(commit=commit_from(commit))
res = self.stub.FinishCommit(req, metadata=self.metadata)
return res
@contextmanager
def commit(self, repo_name, branch=None, parent=None, description=None):
"""A context manager for doing stuff inside a commit."""
commit = self.start_commit(repo_name, branch, parent, description)
try:
yield commit
except Exception as e:
print("An exception occurred during an open commit. "
"Trying to finish it (Currently a commit can't be cancelled)")
raise e
finally:
self.finish_commit(commit)
def inspect_commit(self, commit):
"""
Returns info about a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.InspectCommitRequest(commit=commit_from(commit))
return self.stub.InspectCommit(req, metadata=self.metadata)
def provenances_for_repo(self, repo_name):
    """
    Maps each provenance commit ID to the ID of the commit in this repo
    that lists it as provenance, walking the repo's commits in
    chronological (finish-time) order so later commits win.

    Params:
    * repo_name: The name of the repo.
    """
    provenances = {}
    commits = self.list_commit(repo_name)
    # BUG FIX: the original reduced each CommitInfo to its id string and
    # then accessed `.provenance` on that string, raising AttributeError.
    # Sort the CommitInfo objects themselves by finish time instead.
    for info in sorted(commits, key=lambda ci: ci.finished.seconds):
        for p in info.provenance:
            provenances[p.id] = info.commit.id
    return provenances
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
"""
Gets a list of CommitInfo objects.
Params:
* repo_name: If only `repo_name` is given, all commits in the repo are
returned.
* to_commit: Optional. Only the ancestors of `to`, including `to`
itself, are considered.
* from_commit: Optional. Only the descendants of `from`, including
`from` itself, are considered.
* number: Optional. Determines how many commits are returned. If
`number` is 0, all commits that match the aforementioned criteria are
returned.
"""
req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number)
if to_commit is not None:
req.to.CopyFrom(commit_from(to_commit))
if from_commit is not None:
getattr(req, 'from').CopyFrom(commit_from(from_commit))
res = self.stub.ListCommit(req, metadata=self.metadata)
if hasattr(res, 'commit_info'):
return res.commit_info
return []
def delete_commit(self, commit):
"""
Deletes a commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.DeleteCommitRequest(commit=commit_from(commit))
self.stub.DeleteCommit(req, metadata=self.metadata)
def flush_commit(self, commits, repos=tuple()):
"""
Blocks until all of the commits which have a set of commits as
provenance have finished. For commits to be considered they must have
all of the specified commits as provenance. This in effect waits for
all of the jobs that are triggered by a set of commits to complete.
It returns an error if any of the commits it's waiting on are
cancelled due to one of the jobs encountering an error during runtime.
Note that it's never necessary to call FlushCommit to run jobs,
they'll run no matter what, FlushCommit just allows you to wait for
them to complete and see their output once they do. This returns an
iterator of CommitInfo objects.
Params:
* commits: A commit or a list of commits to wait on.
* repos: Optional. Only the commits up to and including those repos.
will be considered, otherwise all repos are considered.
"""
req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits],
to_repo=[proto.Repo(name=r) for r in repos])
res = self.stub.FlushCommit(req, metadata=self.metadata)
return res
def subscribe_commit(self, repo_name, branch, from_commit_id=None):
"""
SubscribeCommit is like ListCommit but it keeps listening for commits as
they come in. This returns an iterator Commit objects.
Params:
* repo_name: Name of the repo.
* branch: Branch to subscribe to.
* from_commit_id: Optional. Only commits created since this commit
are returned.
"""
repo = proto.Repo(name=repo_name)
req = proto.SubscribeCommitRequest(repo=repo, branch=branch)
if from_commit_id is not None:
getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id))
res = self.stub.SubscribeCommit(req, metadata=self.metadata)
return res
def list_branch(self, repo_name):
"""
Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo.
"""
req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
res = self.stub.ListBranch(req, metadata=self.metadata)
if hasattr(res, 'branch_info'):
return res.branch_info
return []
def set_branch(self, commit, branch_name):
"""
Sets a commit and its ancestors as a branch.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* branch_name: The name for the branch to set.
"""
res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name)
self.stub.SetBranch(res, metadata=self.metadata)
def delete_branch(self, repo_name, branch_name):
    """
    Deletes a branch, but leaves the commits themselves intact. In other
    words, those commits can still be accessed via commit IDs and other
    branches they happen to be on.

    Params:
    * repo_name: The name of the repo.
    * branch_name: The name of the branch to delete.
    """
    # BUG FIX: the request must be built with proto.Repo -- the bare name
    # `Repo` is not defined in this module, so the original raised a
    # NameError whenever this method was called.
    request = proto.DeleteBranchRequest(repo=proto.Repo(name=repo_name),
                                        branch=branch_name)
    self.stub.DeleteBranch(request, metadata=self.metadata)
def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
                   target_file_datums=0, target_file_bytes=0, overwrite_index=None):
    """
    Uploads a binary bytes array as file(s) in a certain path.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: Path in the repo the file(s) will be written to.
    * value: The file contents as bytes, represented as a file-like
      object, bytestring, or iterator of bytestrings.
    * delimiter: Optional. Causes data to be broken up into separate files
      with `path` as a prefix.
    * target_file_datums: Optional. Specifies the target number of datums
      in each written file. It may be lower if data does not split evenly,
      but will never be higher, unless the value is 0.
    * target_file_bytes: Specifies the target number of bytes in each
      written file; files may have more or fewer bytes than the target.
    * overwrite_index: Optional. Wrapped in an OverwriteIndex message and
      attached to the stream's header request when given.
    """
    # BUG FIX: `collections.Iterable` was removed in Python 3.10; the ABC
    # lives in collections.abc.  Imported locally to keep this fix
    # self-contained.
    from collections.abc import Iterable

    overwrite_index_proto = (proto.OverwriteIndex(index=overwrite_index)
                             if overwrite_index is not None else None)

    def header_request(chunk):
        # Only the first message of a PutFile stream carries the file
        # metadata; every later message is just a raw value chunk.
        return proto.PutFileRequest(
            file=proto.File(commit=commit_from(commit), path=path),
            value=chunk,
            delimiter=delimiter,
            target_file_datums=target_file_datums,
            target_file_bytes=target_file_bytes,
            overwrite_index=overwrite_index_proto,
        )

    if hasattr(value, "read"):
        # File-like object: stream it in BUFFER_SIZE chunks until EOF.
        def wrap(value):
            for i in itertools.count():
                chunk = value.read(BUFFER_SIZE)
                if len(chunk) == 0:
                    return
                if i == 0:
                    yield header_request(chunk)
                else:
                    yield proto.PutFileRequest(value=chunk)
    elif isinstance(value, Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
        # Iterator of byte chunks (a plain string/bytes value is handled
        # by the slicing branch below instead).
        def wrap(value):
            for i, chunk in enumerate(value):
                if i == 0:
                    yield header_request(chunk)
                else:
                    yield proto.PutFileRequest(value=chunk)
    else:
        # Single bytestring: slice it into BUFFER_SIZE pieces.
        def wrap(value):
            yield header_request(value[:BUFFER_SIZE])
            for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
                yield proto.PutFileRequest(
                    value=value[i:i + BUFFER_SIZE],
                    overwrite_index=overwrite_index_proto,
                )
    self.stub.PutFile(wrap(value), metadata=self.metadata)
def put_file_url(self, commit, path, url, recursive=False):
"""
Puts a file using the content found at a URL. The URL is sent to the
server which performs the request.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
* url: The url of the file to put.
* recursive: allow for recursive scraping of some types URLs for
example on s3:// urls.
"""
req = iter([
proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
url=url,
recursive=recursive
)
])
self.stub.PutFile(req, metadata=self.metadata)
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
"""
Returns an iterator of the contents contents of a file at a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path of the file.
* offset_bytes: Optional. specifies a number of bytes that should be
skipped in the beginning of the file.
* size_bytes: Optional. limits the total amount of data returned, note
you will get fewer bytes than size if you pass a value larger than the
size of the file. If size is set to 0 then all of the data will be
returned.
* extract_value: If True, then an ExtractValueIterator will be return,
which will iterate over the bytes of the file. If False, then the
protobuf response iterator will return.
"""
req = proto.GetFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
offset_bytes=offset_bytes,
size_bytes=size_bytes
)
res = self.stub.GetFile(req, metadata=self.metadata)
if extract_value:
return ExtractValueIterator(res)
return res
def get_files(self, commit, paths, recursive=False):
"""
Returns the contents of a list of files at a specific Commit as a
dictionary of file paths to data.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* paths: A list of paths to retrieve.
* recursive: If True, will go into each directory in the list
recursively.
"""
filtered_file_infos = []
for path in paths:
fi = self.inspect_file(commit, path)
if fi.file_type == proto.FILE:
filtered_file_infos.append(fi)
else:
filtered_file_infos += self.list_file(commit, path, recursive=recursive)
filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE]
return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
def inspect_file(self, commit, path):
"""
Returns info about a specific file.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path to file.
"""
req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path))
res = self.stub.InspectFile(req, metadata=self.metadata)
return res
def list_file(self, commit, path, recursive=False):
"""
Lists the files in a directory.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the directory.
* recursive: If True, continue listing the files for sub-directories.
"""
req = proto.ListFileRequest(
file=proto.File(commit=commit_from(commit), path=path)
)
res = self.stub.ListFile(req, metadata=self.metadata)
file_infos = res.file_info
if recursive:
dirs = [f for f in file_infos if f.file_type == proto.DIR]
files = [f for f in file_infos if f.file_type == proto.FILE]
return sum([self.list_file(commit, d.file.path, recursive) for d in dirs], files)
return list(file_infos)
    def glob_file(self, commit, pattern):
        """
        Returns the FileInfo entries whose paths match a glob pattern within
        a commit.
        Params:
        * commit: A tuple, string, or Commit object representing the commit.
        * pattern: The glob pattern to match file paths against.
        """
        req = proto.GlobFileRequest(commit=commit_from(commit), pattern=pattern)
        res = self.stub.GlobFile(req, metadata=self.metadata)
        # Defensive: an absent file_info field maps to an empty result.
        if hasattr(res, 'file_info'):
            return res.file_info
        return []
def delete_file(self, commit, path):
"""
Deletes a file from a Commit. DeleteFile leaves a tombstone in the
Commit, assuming the file isn't written to later attempting to get the
file from the finished commit will result in not found error. The file
will of course remain intact in the Commit's parent.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
"""
req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path))
self.stub.DeleteFile(req, metadata=self.metadata)
    def delete_all(self):
        """
        Sends an empty DeleteAll request to the server.
        NOTE(review): presumably wipes all PFS data on the cluster --
        destructive and irreversible; confirm semantics before calling.
        """
        req = proto.google_dot_protobuf_dot_empty__pb2.Empty()
        self.stub.DeleteAll(req, metadata=self.metadata)
|
pachyderm/python-pachyderm | src/python_pachyderm/pfs_client.py | PfsClient.start_commit | python | def start_commit(self, repo_name, branch=None, parent=None, description=None):
req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch,
description=description)
res = self.stub.StartCommit(req, metadata=self.metadata)
return res | Begins the process of committing data to a Repo. Once started you can
write to the Commit with PutFile and when all the data has been
written you must finish the Commit with FinishCommit. NOTE, data is
not persisted until FinishCommit is called. A Commit object is
returned.
Params:
* repo_name: The name of the repo.
* branch: A more convenient way to build linear chains of commits.
When a commit is started with a non-empty branch the value of branch
becomes an alias for the created Commit. This enables a more intuitive
access pattern. When the commit is started on a branch the previous
head of the branch is used as the parent of the commit.
* parent: Specifies the parent Commit, upon creation the new Commit
will appear identical to the parent Commit, data can safely be added
to the new commit without affecting the contents of the parent Commit.
You may pass "" as parentCommit in which case the new Commit will have
no parent and will initially appear empty.
* description: (optional) explanation of the commit for clarity. | train | https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L104-L129 | null | class PfsClient(object):
    def __init__(self, host=None, port=None, auth_token=None):
        """
        Creates a client to connect to PFS.
        Params:
        * host: The pachd host. Default is 'localhost', which is used with
        `pachctl port-forward`.
        * port: The port to connect to. Default is 30650.
        * auth_token: The authentication token; used if authentication is
        enabled on the cluster. Default to `None`.
        """
        address = get_address(host, port)
        # The auth token (if any) is attached to every RPC as call metadata.
        self.metadata = get_metadata(auth_token)
        # NOTE(review): `grpc.grpc.insecure_channel` implies `grpc` here is a
        # wrapper module re-exporting the real grpc package -- verify; plain
        # `grpc.insecure_channel` would be the conventional call.
        self.channel = grpc.grpc.insecure_channel(address)
        self.stub = grpc.APIStub(self.channel)
def create_repo(self, repo_name, description=None):
"""
Creates a new Repo object in PFS with the given name. Repos are the
top level data object in PFS and should be used to store data of a
similar type. For example rather than having a single Repo for an
entire project you might have separate Repos for logs, metrics,
database dumps etc.
Params:
* repo_name: Name of the repo.
* description: Repo description.
"""
req = proto.CreateRepoRequest(repo=proto.Repo(name=repo_name), description=description)
self.stub.CreateRepo(req, metadata=self.metadata)
def inspect_repo(self, repo_name):
"""
Returns info about a specific Repo.
Params:
* repo_name: Name of the repo.
"""
req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
res = self.stub.InspectRepo(req, metadata=self.metadata)
return res
def list_repo(self):
"""
Returns info about all Repos.
"""
req = proto.ListRepoRequest()
res = self.stub.ListRepo(req, metadata=self.metadata)
if hasattr(res, 'repo_info'):
return res.repo_info
return []
def delete_repo(self, repo_name=None, force=False, all=False):
"""
Deletes a repo and reclaims the storage space it was using.
Params:
* repo_name: The name of the repo.
* force: If set to true, the repo will be removed regardless of
errors. This argument should be used with care.
* all: Delete all repos.
"""
if not all:
if repo_name:
req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Either a repo_name or all=True needs to be provided")
else:
if not repo_name:
req = proto.DeleteRepoRequest(force=force, all=all)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Cannot specify a repo_name if all=True")
def finish_commit(self, commit):
"""
Ends the process of committing data to a Repo and persists the
Commit. Once a Commit is finished the data becomes immutable and
future attempts to write to it with PutFile will error.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.FinishCommitRequest(commit=commit_from(commit))
res = self.stub.FinishCommit(req, metadata=self.metadata)
return res
    @contextmanager
    def commit(self, repo_name, branch=None, parent=None, description=None):
        """A context manager that starts a commit, yields it to the caller,
        and always finishes it on exit (there is no cancel RPC, so even a
        failed block ends with FinishCommit; the exception is re-raised)."""
        commit = self.start_commit(repo_name, branch, parent, description)
        try:
            yield commit
        except Exception as e:
            print("An exception occurred during an open commit. "
                  "Trying to finish it (Currently a commit can't be cancelled)")
            raise e
        finally:
            # Runs on both success and failure paths.
            self.finish_commit(commit)
def inspect_commit(self, commit):
"""
Returns info about a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.InspectCommitRequest(commit=commit_from(commit))
return self.stub.InspectCommit(req, metadata=self.metadata)
def provenances_for_repo(self, repo_name):
provenances = {}
commits = self.list_commit(repo_name)
sorted_commits = [x[0] for x in
sorted([(c.commit.id, c.finished.seconds) for c in commits], key=lambda x: x[1])]
for c in sorted_commits:
for p in c.provenance:
provenances[p.id] = c.commit.id
return provenances
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
"""
Gets a list of CommitInfo objects.
Params:
* repo_name: If only `repo_name` is given, all commits in the repo are
returned.
* to_commit: Optional. Only the ancestors of `to`, including `to`
itself, are considered.
* from_commit: Optional. Only the descendants of `from`, including
`from` itself, are considered.
* number: Optional. Determines how many commits are returned. If
`number` is 0, all commits that match the aforementioned criteria are
returned.
"""
req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number)
if to_commit is not None:
req.to.CopyFrom(commit_from(to_commit))
if from_commit is not None:
getattr(req, 'from').CopyFrom(commit_from(from_commit))
res = self.stub.ListCommit(req, metadata=self.metadata)
if hasattr(res, 'commit_info'):
return res.commit_info
return []
def delete_commit(self, commit):
"""
Deletes a commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.DeleteCommitRequest(commit=commit_from(commit))
self.stub.DeleteCommit(req, metadata=self.metadata)
def flush_commit(self, commits, repos=tuple()):
"""
Blocks until all of the commits which have a set of commits as
provenance have finished. For commits to be considered they must have
all of the specified commits as provenance. This in effect waits for
all of the jobs that are triggered by a set of commits to complete.
It returns an error if any of the commits it's waiting on are
cancelled due to one of the jobs encountering an error during runtime.
Note that it's never necessary to call FlushCommit to run jobs,
they'll run no matter what, FlushCommit just allows you to wait for
them to complete and see their output once they do. This returns an
iterator of CommitInfo objects.
Params:
* commits: A commit or a list of commits to wait on.
* repos: Optional. Only the commits up to and including those repos.
will be considered, otherwise all repos are considered.
"""
req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits],
to_repo=[proto.Repo(name=r) for r in repos])
res = self.stub.FlushCommit(req, metadata=self.metadata)
return res
def subscribe_commit(self, repo_name, branch, from_commit_id=None):
"""
SubscribeCommit is like ListCommit but it keeps listening for commits as
they come in. This returns an iterator Commit objects.
Params:
* repo_name: Name of the repo.
* branch: Branch to subscribe to.
* from_commit_id: Optional. Only commits created since this commit
are returned.
"""
repo = proto.Repo(name=repo_name)
req = proto.SubscribeCommitRequest(repo=repo, branch=branch)
if from_commit_id is not None:
getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id))
res = self.stub.SubscribeCommit(req, metadata=self.metadata)
return res
def list_branch(self, repo_name):
"""
Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo.
"""
req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
res = self.stub.ListBranch(req, metadata=self.metadata)
if hasattr(res, 'branch_info'):
return res.branch_info
return []
def set_branch(self, commit, branch_name):
"""
Sets a commit and its ancestors as a branch.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* branch_name: The name for the branch to set.
"""
res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name)
self.stub.SetBranch(res, metadata=self.metadata)
def delete_branch(self, repo_name, branch_name):
"""
Deletes a branch, but leaves the commits themselves intact. In other
words, those commits can still be accessed via commit IDs and other
branches they happen to be on.
Params:
* repo_name: The name of the repo.
* branch_name: The name of the branch to delete.
"""
res = proto.DeleteBranchRequest(repo=Repo(name=repo_name), branch=branch_name)
self.stub.DeleteBranch(res, metadata=self.metadata)
    def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
                       target_file_datums=0, target_file_bytes=0, overwrite_index=None):
        """
        Uploads a binary bytes array as file(s) in a certain path.
        Params:
        * commit: A tuple, string, or Commit object representing the commit.
        * path: Path in the repo the file(s) will be written to.
        * value: The file contents as bytes, represented as a file-like
        object, bytestring, or iterator of bytestrings.
        * delimiter: Optional. causes data to be broken up into separate files
        with `path` as a prefix.
        * target_file_datums: Optional. Specifies the target number of datums
        in each written file. It may be lower if data does not split evenly,
        but will never be higher, unless the value is 0.
        * target_file_bytes: Specifies the target number of bytes in each
        written file, files may have more or fewer bytes than the target.
        """
        overwrite_index_proto = proto.OverwriteIndex(index=overwrite_index) if overwrite_index is not None else None
        # PutFile is a client-streaming RPC: only the FIRST request in the
        # stream carries the file metadata (path, delimiter, targets); all
        # subsequent requests carry only the next chunk of bytes.
        if hasattr(value, "read"):
            # File-like object: stream BUFFER_SIZE reads until EOF.
            def wrap(value):
                for i in itertools.count():
                    chunk = value.read(BUFFER_SIZE)
                    if len(chunk) == 0:
                        return
                    if i == 0:
                        yield proto.PutFileRequest(
                            file=proto.File(commit=commit_from(commit), path=path),
                            value=chunk,
                            delimiter=delimiter,
                            target_file_datums=target_file_datums,
                            target_file_bytes=target_file_bytes,
                            overwrite_index=overwrite_index_proto
                        )
                    else:
                        yield proto.PutFileRequest(value=chunk)
        elif isinstance(value, collections.Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
            # Iterator of byte chunks (a single string/bytes value is
            # excluded and falls through to the slicing branch below).
            # NOTE(review): collections.Iterable was removed in Python 3.10;
            # collections.abc.Iterable is the supported spelling -- confirm
            # the Python versions this must run on.
            def wrap(value):
                for i, chunk in enumerate(value):
                    if i == 0:
                        yield proto.PutFileRequest(
                            file=proto.File(commit=commit_from(commit), path=path),
                            value=chunk,
                            delimiter=delimiter,
                            target_file_datums=target_file_datums,
                            target_file_bytes=target_file_bytes,
                            overwrite_index=overwrite_index_proto
                        )
                    else:
                        yield proto.PutFileRequest(value=chunk)
        else:
            # Plain bytestring: slice it into BUFFER_SIZE chunks ourselves.
            def wrap(value):
                yield proto.PutFileRequest(
                    file=proto.File(commit=commit_from(commit), path=path),
                    value=value[:BUFFER_SIZE],
                    delimiter=delimiter,
                    target_file_datums=target_file_datums,
                    target_file_bytes=target_file_bytes,
                    overwrite_index=overwrite_index_proto
                )
                for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
                    yield proto.PutFileRequest(
                        value=value[i:i + BUFFER_SIZE],
                        overwrite_index=overwrite_index_proto
                    )
        self.stub.PutFile(wrap(value), metadata=self.metadata)
def put_file_url(self, commit, path, url, recursive=False):
"""
Puts a file using the content found at a URL. The URL is sent to the
server which performs the request.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
* url: The url of the file to put.
* recursive: allow for recursive scraping of some types URLs for
example on s3:// urls.
"""
req = iter([
proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
url=url,
recursive=recursive
)
])
self.stub.PutFile(req, metadata=self.metadata)
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
"""
Returns an iterator of the contents contents of a file at a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path of the file.
* offset_bytes: Optional. specifies a number of bytes that should be
skipped in the beginning of the file.
* size_bytes: Optional. limits the total amount of data returned, note
you will get fewer bytes than size if you pass a value larger than the
size of the file. If size is set to 0 then all of the data will be
returned.
* extract_value: If True, then an ExtractValueIterator will be return,
which will iterate over the bytes of the file. If False, then the
protobuf response iterator will return.
"""
req = proto.GetFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
offset_bytes=offset_bytes,
size_bytes=size_bytes
)
res = self.stub.GetFile(req, metadata=self.metadata)
if extract_value:
return ExtractValueIterator(res)
return res
def get_files(self, commit, paths, recursive=False):
"""
Returns the contents of a list of files at a specific Commit as a
dictionary of file paths to data.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* paths: A list of paths to retrieve.
* recursive: If True, will go into each directory in the list
recursively.
"""
filtered_file_infos = []
for path in paths:
fi = self.inspect_file(commit, path)
if fi.file_type == proto.FILE:
filtered_file_infos.append(fi)
else:
filtered_file_infos += self.list_file(commit, path, recursive=recursive)
filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE]
return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
def inspect_file(self, commit, path):
"""
Returns info about a specific file.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path to file.
"""
req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path))
res = self.stub.InspectFile(req, metadata=self.metadata)
return res
def list_file(self, commit, path, recursive=False):
"""
Lists the files in a directory.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the directory.
* recursive: If True, continue listing the files for sub-directories.
"""
req = proto.ListFileRequest(
file=proto.File(commit=commit_from(commit), path=path)
)
res = self.stub.ListFile(req, metadata=self.metadata)
file_infos = res.file_info
if recursive:
dirs = [f for f in file_infos if f.file_type == proto.DIR]
files = [f for f in file_infos if f.file_type == proto.FILE]
return sum([self.list_file(commit, d.file.path, recursive) for d in dirs], files)
return list(file_infos)
    def glob_file(self, commit, pattern):
        """
        Returns the FileInfo entries whose paths match a glob pattern within
        a commit.
        Params:
        * commit: A tuple, string, or Commit object representing the commit.
        * pattern: The glob pattern to match file paths against.
        """
        req = proto.GlobFileRequest(commit=commit_from(commit), pattern=pattern)
        res = self.stub.GlobFile(req, metadata=self.metadata)
        # Defensive: an absent file_info field maps to an empty result.
        if hasattr(res, 'file_info'):
            return res.file_info
        return []
def delete_file(self, commit, path):
"""
Deletes a file from a Commit. DeleteFile leaves a tombstone in the
Commit, assuming the file isn't written to later attempting to get the
file from the finished commit will result in not found error. The file
will of course remain intact in the Commit's parent.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
"""
req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path))
self.stub.DeleteFile(req, metadata=self.metadata)
    def delete_all(self):
        """
        Sends an empty DeleteAll request to the server.
        NOTE(review): presumably wipes all PFS data on the cluster --
        destructive and irreversible; confirm semantics before calling.
        """
        req = proto.google_dot_protobuf_dot_empty__pb2.Empty()
        self.stub.DeleteAll(req, metadata=self.metadata)
|
pachyderm/python-pachyderm | src/python_pachyderm/pfs_client.py | PfsClient.finish_commit | python | def finish_commit(self, commit):
req = proto.FinishCommitRequest(commit=commit_from(commit))
res = self.stub.FinishCommit(req, metadata=self.metadata)
return res | Ends the process of committing data to a Repo and persists the
Commit. Once a Commit is finished the data becomes immutable and
future attempts to write to it with PutFile will error.
Params:
* commit: A tuple, string, or Commit object representing the commit. | train | https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L131-L142 | [
"def commit_from(src, allow_just_repo=False):\n if isinstance(src, pfs_proto.Commit):\n return src\n elif isinstance(src, (tuple, list)) and len(src) == 2:\n return pfs_proto.Commit(repo=pfs_proto.Repo(name=src[0]), id=src[1])\n elif isinstance(src, six.string_types):\n repo_name, comm... | class PfsClient(object):
def __init__(self, host=None, port=None, auth_token=None):
"""
Creates a client to connect to PFS.
Params:
* host: The pachd host. Default is 'localhost', which is used with
`pachctl port-forward`.
* port: The port to connect to. Default is 30650.
* auth_token: The authentication token; used if authentication is
enabled on the cluster. Default to `None`.
"""
address = get_address(host, port)
self.metadata = get_metadata(auth_token)
self.channel = grpc.grpc.insecure_channel(address)
self.stub = grpc.APIStub(self.channel)
def create_repo(self, repo_name, description=None):
"""
Creates a new Repo object in PFS with the given name. Repos are the
top level data object in PFS and should be used to store data of a
similar type. For example rather than having a single Repo for an
entire project you might have separate Repos for logs, metrics,
database dumps etc.
Params:
* repo_name: Name of the repo.
* description: Repo description.
"""
req = proto.CreateRepoRequest(repo=proto.Repo(name=repo_name), description=description)
self.stub.CreateRepo(req, metadata=self.metadata)
def inspect_repo(self, repo_name):
"""
Returns info about a specific Repo.
Params:
* repo_name: Name of the repo.
"""
req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
res = self.stub.InspectRepo(req, metadata=self.metadata)
return res
def list_repo(self):
"""
Returns info about all Repos.
"""
req = proto.ListRepoRequest()
res = self.stub.ListRepo(req, metadata=self.metadata)
if hasattr(res, 'repo_info'):
return res.repo_info
return []
def delete_repo(self, repo_name=None, force=False, all=False):
"""
Deletes a repo and reclaims the storage space it was using.
Params:
* repo_name: The name of the repo.
* force: If set to true, the repo will be removed regardless of
errors. This argument should be used with care.
* all: Delete all repos.
"""
if not all:
if repo_name:
req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Either a repo_name or all=True needs to be provided")
else:
if not repo_name:
req = proto.DeleteRepoRequest(force=force, all=all)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Cannot specify a repo_name if all=True")
def start_commit(self, repo_name, branch=None, parent=None, description=None):
"""
Begins the process of committing data to a Repo. Once started you can
write to the Commit with PutFile and when all the data has been
written you must finish the Commit with FinishCommit. NOTE, data is
not persisted until FinishCommit is called. A Commit object is
returned.
Params:
* repo_name: The name of the repo.
* branch: A more convenient way to build linear chains of commits.
When a commit is started with a non-empty branch the value of branch
becomes an alias for the created Commit. This enables a more intuitive
access pattern. When the commit is started on a branch the previous
head of the branch is used as the parent of the commit.
* parent: Specifies the parent Commit, upon creation the new Commit
will appear identical to the parent Commit, data can safely be added
to the new commit without affecting the contents of the parent Commit.
You may pass "" as parentCommit in which case the new Commit will have
no parent and will initially appear empty.
* description: (optional) explanation of the commit for clarity.
"""
req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch,
description=description)
res = self.stub.StartCommit(req, metadata=self.metadata)
return res
@contextmanager
def commit(self, repo_name, branch=None, parent=None, description=None):
"""A context manager for doing stuff inside a commit."""
commit = self.start_commit(repo_name, branch, parent, description)
try:
yield commit
except Exception as e:
print("An exception occurred during an open commit. "
"Trying to finish it (Currently a commit can't be cancelled)")
raise e
finally:
self.finish_commit(commit)
def inspect_commit(self, commit):
"""
Returns info about a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.InspectCommitRequest(commit=commit_from(commit))
return self.stub.InspectCommit(req, metadata=self.metadata)
def provenances_for_repo(self, repo_name):
provenances = {}
commits = self.list_commit(repo_name)
sorted_commits = [x[0] for x in
sorted([(c.commit.id, c.finished.seconds) for c in commits], key=lambda x: x[1])]
for c in sorted_commits:
for p in c.provenance:
provenances[p.id] = c.commit.id
return provenances
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
"""
Gets a list of CommitInfo objects.
Params:
* repo_name: If only `repo_name` is given, all commits in the repo are
returned.
* to_commit: Optional. Only the ancestors of `to`, including `to`
itself, are considered.
* from_commit: Optional. Only the descendants of `from`, including
`from` itself, are considered.
* number: Optional. Determines how many commits are returned. If
`number` is 0, all commits that match the aforementioned criteria are
returned.
"""
req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number)
if to_commit is not None:
req.to.CopyFrom(commit_from(to_commit))
if from_commit is not None:
getattr(req, 'from').CopyFrom(commit_from(from_commit))
res = self.stub.ListCommit(req, metadata=self.metadata)
if hasattr(res, 'commit_info'):
return res.commit_info
return []
def delete_commit(self, commit):
"""
Deletes a commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.DeleteCommitRequest(commit=commit_from(commit))
self.stub.DeleteCommit(req, metadata=self.metadata)
def flush_commit(self, commits, repos=tuple()):
"""
Blocks until all of the commits which have a set of commits as
provenance have finished. For commits to be considered they must have
all of the specified commits as provenance. This in effect waits for
all of the jobs that are triggered by a set of commits to complete.
It returns an error if any of the commits it's waiting on are
cancelled due to one of the jobs encountering an error during runtime.
Note that it's never necessary to call FlushCommit to run jobs,
they'll run no matter what, FlushCommit just allows you to wait for
them to complete and see their output once they do. This returns an
iterator of CommitInfo objects.
Params:
* commits: A commit or a list of commits to wait on.
* repos: Optional. Only the commits up to and including those repos.
will be considered, otherwise all repos are considered.
"""
req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits],
to_repo=[proto.Repo(name=r) for r in repos])
res = self.stub.FlushCommit(req, metadata=self.metadata)
return res
def subscribe_commit(self, repo_name, branch, from_commit_id=None):
"""
SubscribeCommit is like ListCommit but it keeps listening for commits as
they come in. This returns an iterator Commit objects.
Params:
* repo_name: Name of the repo.
* branch: Branch to subscribe to.
* from_commit_id: Optional. Only commits created since this commit
are returned.
"""
repo = proto.Repo(name=repo_name)
req = proto.SubscribeCommitRequest(repo=repo, branch=branch)
if from_commit_id is not None:
getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id))
res = self.stub.SubscribeCommit(req, metadata=self.metadata)
return res
def list_branch(self, repo_name):
"""
Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo.
"""
req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
res = self.stub.ListBranch(req, metadata=self.metadata)
if hasattr(res, 'branch_info'):
return res.branch_info
return []
def set_branch(self, commit, branch_name):
"""
Sets a commit and its ancestors as a branch.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* branch_name: The name for the branch to set.
"""
res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name)
self.stub.SetBranch(res, metadata=self.metadata)
def delete_branch(self, repo_name, branch_name):
"""
Deletes a branch, but leaves the commits themselves intact. In other
words, those commits can still be accessed via commit IDs and other
branches they happen to be on.
Params:
* repo_name: The name of the repo.
* branch_name: The name of the branch to delete.
"""
res = proto.DeleteBranchRequest(repo=Repo(name=repo_name), branch=branch_name)
self.stub.DeleteBranch(res, metadata=self.metadata)
def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
target_file_datums=0, target_file_bytes=0, overwrite_index=None):
"""
Uploads a binary bytes array as file(s) in a certain path.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path in the repo the file(s) will be written to.
* value: The file contents as bytes, represented as a file-like
object, bytestring, or iterator of bytestrings.
* delimiter: Optional. causes data to be broken up into separate files
with `path` as a prefix.
* target_file_datums: Optional. Specifies the target number of datums
in each written file. It may be lower if data does not split evenly,
but will never be higher, unless the value is 0.
* target_file_bytes: Specifies the target number of bytes in each
written file, files may have more or fewer bytes than the target.
"""
overwrite_index_proto = proto.OverwriteIndex(index=overwrite_index) if overwrite_index is not None else None
if hasattr(value, "read"):
def wrap(value):
for i in itertools.count():
chunk = value.read(BUFFER_SIZE)
if len(chunk) == 0:
return
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
elif isinstance(value, collections.Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
def wrap(value):
for i, chunk in enumerate(value):
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
else:
def wrap(value):
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=value[:BUFFER_SIZE],
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
yield proto.PutFileRequest(
value=value[i:i + BUFFER_SIZE],
overwrite_index=overwrite_index_proto
)
self.stub.PutFile(wrap(value), metadata=self.metadata)
def put_file_url(self, commit, path, url, recursive=False):
"""
Puts a file using the content found at a URL. The URL is sent to the
server which performs the request.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
* url: The url of the file to put.
* recursive: allow for recursive scraping of some types URLs for
example on s3:// urls.
"""
req = iter([
proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
url=url,
recursive=recursive
)
])
self.stub.PutFile(req, metadata=self.metadata)
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
"""
Returns an iterator of the contents contents of a file at a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path of the file.
* offset_bytes: Optional. specifies a number of bytes that should be
skipped in the beginning of the file.
* size_bytes: Optional. limits the total amount of data returned, note
you will get fewer bytes than size if you pass a value larger than the
size of the file. If size is set to 0 then all of the data will be
returned.
* extract_value: If True, then an ExtractValueIterator will be return,
which will iterate over the bytes of the file. If False, then the
protobuf response iterator will return.
"""
req = proto.GetFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
offset_bytes=offset_bytes,
size_bytes=size_bytes
)
res = self.stub.GetFile(req, metadata=self.metadata)
if extract_value:
return ExtractValueIterator(res)
return res
def get_files(self, commit, paths, recursive=False):
"""
Returns the contents of a list of files at a specific Commit as a
dictionary of file paths to data.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* paths: A list of paths to retrieve.
* recursive: If True, will go into each directory in the list
recursively.
"""
filtered_file_infos = []
for path in paths:
fi = self.inspect_file(commit, path)
if fi.file_type == proto.FILE:
filtered_file_infos.append(fi)
else:
filtered_file_infos += self.list_file(commit, path, recursive=recursive)
filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE]
return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
def inspect_file(self, commit, path):
"""
Returns info about a specific file.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path to file.
"""
req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path))
res = self.stub.InspectFile(req, metadata=self.metadata)
return res
def list_file(self, commit, path, recursive=False):
"""
Lists the files in a directory.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the directory.
* recursive: If True, continue listing the files for sub-directories.
"""
req = proto.ListFileRequest(
file=proto.File(commit=commit_from(commit), path=path)
)
res = self.stub.ListFile(req, metadata=self.metadata)
file_infos = res.file_info
if recursive:
dirs = [f for f in file_infos if f.file_type == proto.DIR]
files = [f for f in file_infos if f.file_type == proto.FILE]
return sum([self.list_file(commit, d.file.path, recursive) for d in dirs], files)
return list(file_infos)
def glob_file(self, commit, pattern):
req = proto.GlobFileRequest(commit=commit_from(commit), pattern=pattern)
res = self.stub.GlobFile(req, metadata=self.metadata)
if hasattr(res, 'file_info'):
return res.file_info
return []
def delete_file(self, commit, path):
"""
Deletes a file from a Commit. DeleteFile leaves a tombstone in the
Commit, assuming the file isn't written to later attempting to get the
file from the finished commit will result in not found error. The file
will of course remain intact in the Commit's parent.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
"""
req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path))
self.stub.DeleteFile(req, metadata=self.metadata)
def delete_all(self):
req = proto.google_dot_protobuf_dot_empty__pb2.Empty()
self.stub.DeleteAll(req, metadata=self.metadata)
|
pachyderm/python-pachyderm | src/python_pachyderm/pfs_client.py | PfsClient.commit | python | def commit(self, repo_name, branch=None, parent=None, description=None):
commit = self.start_commit(repo_name, branch, parent, description)
try:
yield commit
except Exception as e:
print("An exception occurred during an open commit. "
"Trying to finish it (Currently a commit can't be cancelled)")
raise e
finally:
self.finish_commit(commit) | A context manager for doing stuff inside a commit. | train | https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L145-L155 | [
"def start_commit(self, repo_name, branch=None, parent=None, description=None):\n \"\"\"\n Begins the process of committing data to a Repo. Once started you can\n write to the Commit with PutFile and when all the data has been\n written you must finish the Commit with FinishCommit. NOTE, data is\n no... | class PfsClient(object):
def __init__(self, host=None, port=None, auth_token=None):
"""
Creates a client to connect to PFS.
Params:
* host: The pachd host. Default is 'localhost', which is used with
`pachctl port-forward`.
* port: The port to connect to. Default is 30650.
* auth_token: The authentication token; used if authentication is
enabled on the cluster. Default to `None`.
"""
address = get_address(host, port)
self.metadata = get_metadata(auth_token)
self.channel = grpc.grpc.insecure_channel(address)
self.stub = grpc.APIStub(self.channel)
def create_repo(self, repo_name, description=None):
"""
Creates a new Repo object in PFS with the given name. Repos are the
top level data object in PFS and should be used to store data of a
similar type. For example rather than having a single Repo for an
entire project you might have separate Repos for logs, metrics,
database dumps etc.
Params:
* repo_name: Name of the repo.
* description: Repo description.
"""
req = proto.CreateRepoRequest(repo=proto.Repo(name=repo_name), description=description)
self.stub.CreateRepo(req, metadata=self.metadata)
def inspect_repo(self, repo_name):
"""
Returns info about a specific Repo.
Params:
* repo_name: Name of the repo.
"""
req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
res = self.stub.InspectRepo(req, metadata=self.metadata)
return res
def list_repo(self):
"""
Returns info about all Repos.
"""
req = proto.ListRepoRequest()
res = self.stub.ListRepo(req, metadata=self.metadata)
if hasattr(res, 'repo_info'):
return res.repo_info
return []
def delete_repo(self, repo_name=None, force=False, all=False):
"""
Deletes a repo and reclaims the storage space it was using.
Params:
* repo_name: The name of the repo.
* force: If set to true, the repo will be removed regardless of
errors. This argument should be used with care.
* all: Delete all repos.
"""
if not all:
if repo_name:
req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Either a repo_name or all=True needs to be provided")
else:
if not repo_name:
req = proto.DeleteRepoRequest(force=force, all=all)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Cannot specify a repo_name if all=True")
def start_commit(self, repo_name, branch=None, parent=None, description=None):
"""
Begins the process of committing data to a Repo. Once started you can
write to the Commit with PutFile and when all the data has been
written you must finish the Commit with FinishCommit. NOTE, data is
not persisted until FinishCommit is called. A Commit object is
returned.
Params:
* repo_name: The name of the repo.
* branch: A more convenient way to build linear chains of commits.
When a commit is started with a non-empty branch the value of branch
becomes an alias for the created Commit. This enables a more intuitive
access pattern. When the commit is started on a branch the previous
head of the branch is used as the parent of the commit.
* parent: Specifies the parent Commit, upon creation the new Commit
will appear identical to the parent Commit, data can safely be added
to the new commit without affecting the contents of the parent Commit.
You may pass "" as parentCommit in which case the new Commit will have
no parent and will initially appear empty.
* description: (optional) explanation of the commit for clarity.
"""
req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch,
description=description)
res = self.stub.StartCommit(req, metadata=self.metadata)
return res
def finish_commit(self, commit):
"""
Ends the process of committing data to a Repo and persists the
Commit. Once a Commit is finished the data becomes immutable and
future attempts to write to it with PutFile will error.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.FinishCommitRequest(commit=commit_from(commit))
res = self.stub.FinishCommit(req, metadata=self.metadata)
return res
@contextmanager
def inspect_commit(self, commit):
"""
Returns info about a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.InspectCommitRequest(commit=commit_from(commit))
return self.stub.InspectCommit(req, metadata=self.metadata)
def provenances_for_repo(self, repo_name):
provenances = {}
commits = self.list_commit(repo_name)
sorted_commits = [x[0] for x in
sorted([(c.commit.id, c.finished.seconds) for c in commits], key=lambda x: x[1])]
for c in sorted_commits:
for p in c.provenance:
provenances[p.id] = c.commit.id
return provenances
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
"""
Gets a list of CommitInfo objects.
Params:
* repo_name: If only `repo_name` is given, all commits in the repo are
returned.
* to_commit: Optional. Only the ancestors of `to`, including `to`
itself, are considered.
* from_commit: Optional. Only the descendants of `from`, including
`from` itself, are considered.
* number: Optional. Determines how many commits are returned. If
`number` is 0, all commits that match the aforementioned criteria are
returned.
"""
req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number)
if to_commit is not None:
req.to.CopyFrom(commit_from(to_commit))
if from_commit is not None:
getattr(req, 'from').CopyFrom(commit_from(from_commit))
res = self.stub.ListCommit(req, metadata=self.metadata)
if hasattr(res, 'commit_info'):
return res.commit_info
return []
def delete_commit(self, commit):
"""
Deletes a commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.DeleteCommitRequest(commit=commit_from(commit))
self.stub.DeleteCommit(req, metadata=self.metadata)
def flush_commit(self, commits, repos=tuple()):
"""
Blocks until all of the commits which have a set of commits as
provenance have finished. For commits to be considered they must have
all of the specified commits as provenance. This in effect waits for
all of the jobs that are triggered by a set of commits to complete.
It returns an error if any of the commits it's waiting on are
cancelled due to one of the jobs encountering an error during runtime.
Note that it's never necessary to call FlushCommit to run jobs,
they'll run no matter what, FlushCommit just allows you to wait for
them to complete and see their output once they do. This returns an
iterator of CommitInfo objects.
Params:
* commits: A commit or a list of commits to wait on.
* repos: Optional. Only the commits up to and including those repos.
will be considered, otherwise all repos are considered.
"""
req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits],
to_repo=[proto.Repo(name=r) for r in repos])
res = self.stub.FlushCommit(req, metadata=self.metadata)
return res
def subscribe_commit(self, repo_name, branch, from_commit_id=None):
"""
SubscribeCommit is like ListCommit but it keeps listening for commits as
they come in. This returns an iterator Commit objects.
Params:
* repo_name: Name of the repo.
* branch: Branch to subscribe to.
* from_commit_id: Optional. Only commits created since this commit
are returned.
"""
repo = proto.Repo(name=repo_name)
req = proto.SubscribeCommitRequest(repo=repo, branch=branch)
if from_commit_id is not None:
getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id))
res = self.stub.SubscribeCommit(req, metadata=self.metadata)
return res
def list_branch(self, repo_name):
"""
Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo.
"""
req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
res = self.stub.ListBranch(req, metadata=self.metadata)
if hasattr(res, 'branch_info'):
return res.branch_info
return []
def set_branch(self, commit, branch_name):
"""
Sets a commit and its ancestors as a branch.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* branch_name: The name for the branch to set.
"""
res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name)
self.stub.SetBranch(res, metadata=self.metadata)
def delete_branch(self, repo_name, branch_name):
"""
Deletes a branch, but leaves the commits themselves intact. In other
words, those commits can still be accessed via commit IDs and other
branches they happen to be on.
Params:
* repo_name: The name of the repo.
* branch_name: The name of the branch to delete.
"""
res = proto.DeleteBranchRequest(repo=Repo(name=repo_name), branch=branch_name)
self.stub.DeleteBranch(res, metadata=self.metadata)
def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
target_file_datums=0, target_file_bytes=0, overwrite_index=None):
"""
Uploads a binary bytes array as file(s) in a certain path.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path in the repo the file(s) will be written to.
* value: The file contents as bytes, represented as a file-like
object, bytestring, or iterator of bytestrings.
* delimiter: Optional. causes data to be broken up into separate files
with `path` as a prefix.
* target_file_datums: Optional. Specifies the target number of datums
in each written file. It may be lower if data does not split evenly,
but will never be higher, unless the value is 0.
* target_file_bytes: Specifies the target number of bytes in each
written file, files may have more or fewer bytes than the target.
"""
overwrite_index_proto = proto.OverwriteIndex(index=overwrite_index) if overwrite_index is not None else None
if hasattr(value, "read"):
def wrap(value):
for i in itertools.count():
chunk = value.read(BUFFER_SIZE)
if len(chunk) == 0:
return
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
elif isinstance(value, collections.Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
def wrap(value):
for i, chunk in enumerate(value):
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
else:
def wrap(value):
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=value[:BUFFER_SIZE],
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
yield proto.PutFileRequest(
value=value[i:i + BUFFER_SIZE],
overwrite_index=overwrite_index_proto
)
self.stub.PutFile(wrap(value), metadata=self.metadata)
def put_file_url(self, commit, path, url, recursive=False):
"""
Puts a file using the content found at a URL. The URL is sent to the
server which performs the request.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
* url: The url of the file to put.
* recursive: allow for recursive scraping of some types URLs for
example on s3:// urls.
"""
req = iter([
proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
url=url,
recursive=recursive
)
])
self.stub.PutFile(req, metadata=self.metadata)
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
"""
Returns an iterator of the contents contents of a file at a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path of the file.
* offset_bytes: Optional. specifies a number of bytes that should be
skipped in the beginning of the file.
* size_bytes: Optional. limits the total amount of data returned, note
you will get fewer bytes than size if you pass a value larger than the
size of the file. If size is set to 0 then all of the data will be
returned.
* extract_value: If True, then an ExtractValueIterator will be return,
which will iterate over the bytes of the file. If False, then the
protobuf response iterator will return.
"""
req = proto.GetFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
offset_bytes=offset_bytes,
size_bytes=size_bytes
)
res = self.stub.GetFile(req, metadata=self.metadata)
if extract_value:
return ExtractValueIterator(res)
return res
def get_files(self, commit, paths, recursive=False):
"""
Returns the contents of a list of files at a specific Commit as a
dictionary of file paths to data.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* paths: A list of paths to retrieve.
* recursive: If True, will go into each directory in the list
recursively.
"""
filtered_file_infos = []
for path in paths:
fi = self.inspect_file(commit, path)
if fi.file_type == proto.FILE:
filtered_file_infos.append(fi)
else:
filtered_file_infos += self.list_file(commit, path, recursive=recursive)
filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE]
return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
def inspect_file(self, commit, path):
"""
Returns info about a specific file.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path to file.
"""
req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path))
res = self.stub.InspectFile(req, metadata=self.metadata)
return res
def list_file(self, commit, path, recursive=False):
"""
Lists the files in a directory.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the directory.
* recursive: If True, continue listing the files for sub-directories.
"""
req = proto.ListFileRequest(
file=proto.File(commit=commit_from(commit), path=path)
)
res = self.stub.ListFile(req, metadata=self.metadata)
file_infos = res.file_info
if recursive:
dirs = [f for f in file_infos if f.file_type == proto.DIR]
files = [f for f in file_infos if f.file_type == proto.FILE]
return sum([self.list_file(commit, d.file.path, recursive) for d in dirs], files)
return list(file_infos)
def glob_file(self, commit, pattern):
req = proto.GlobFileRequest(commit=commit_from(commit), pattern=pattern)
res = self.stub.GlobFile(req, metadata=self.metadata)
if hasattr(res, 'file_info'):
return res.file_info
return []
def delete_file(self, commit, path):
"""
Deletes a file from a Commit. DeleteFile leaves a tombstone in the
Commit, assuming the file isn't written to later attempting to get the
file from the finished commit will result in not found error. The file
will of course remain intact in the Commit's parent.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
"""
req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path))
self.stub.DeleteFile(req, metadata=self.metadata)
def delete_all(self):
req = proto.google_dot_protobuf_dot_empty__pb2.Empty()
self.stub.DeleteAll(req, metadata=self.metadata)
|
pachyderm/python-pachyderm | src/python_pachyderm/pfs_client.py | PfsClient.inspect_commit | python | def inspect_commit(self, commit):
req = proto.InspectCommitRequest(commit=commit_from(commit))
return self.stub.InspectCommit(req, metadata=self.metadata) | Returns info about a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit. | train | https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L157-L165 | [
"def commit_from(src, allow_just_repo=False):\n if isinstance(src, pfs_proto.Commit):\n return src\n elif isinstance(src, (tuple, list)) and len(src) == 2:\n return pfs_proto.Commit(repo=pfs_proto.Repo(name=src[0]), id=src[1])\n elif isinstance(src, six.string_types):\n repo_name, comm... | class PfsClient(object):
def __init__(self, host=None, port=None, auth_token=None):
"""
Creates a client to connect to PFS.
Params:
* host: The pachd host. Default is 'localhost', which is used with
`pachctl port-forward`.
* port: The port to connect to. Default is 30650.
* auth_token: The authentication token; used if authentication is
enabled on the cluster. Default to `None`.
"""
address = get_address(host, port)
self.metadata = get_metadata(auth_token)
self.channel = grpc.grpc.insecure_channel(address)
self.stub = grpc.APIStub(self.channel)
def create_repo(self, repo_name, description=None):
"""
Creates a new Repo object in PFS with the given name. Repos are the
top level data object in PFS and should be used to store data of a
similar type. For example rather than having a single Repo for an
entire project you might have separate Repos for logs, metrics,
database dumps etc.
Params:
* repo_name: Name of the repo.
* description: Repo description.
"""
req = proto.CreateRepoRequest(repo=proto.Repo(name=repo_name), description=description)
self.stub.CreateRepo(req, metadata=self.metadata)
def inspect_repo(self, repo_name):
"""
Returns info about a specific Repo.
Params:
* repo_name: Name of the repo.
"""
req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
res = self.stub.InspectRepo(req, metadata=self.metadata)
return res
def list_repo(self):
"""
Returns info about all Repos.
"""
req = proto.ListRepoRequest()
res = self.stub.ListRepo(req, metadata=self.metadata)
if hasattr(res, 'repo_info'):
return res.repo_info
return []
def delete_repo(self, repo_name=None, force=False, all=False):
"""
Deletes a repo and reclaims the storage space it was using.
Params:
* repo_name: The name of the repo.
* force: If set to true, the repo will be removed regardless of
errors. This argument should be used with care.
* all: Delete all repos.
"""
if not all:
if repo_name:
req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Either a repo_name or all=True needs to be provided")
else:
if not repo_name:
req = proto.DeleteRepoRequest(force=force, all=all)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Cannot specify a repo_name if all=True")
def start_commit(self, repo_name, branch=None, parent=None, description=None):
"""
Begins the process of committing data to a Repo. Once started you can
write to the Commit with PutFile and when all the data has been
written you must finish the Commit with FinishCommit. NOTE, data is
not persisted until FinishCommit is called. A Commit object is
returned.
Params:
* repo_name: The name of the repo.
* branch: A more convenient way to build linear chains of commits.
When a commit is started with a non-empty branch the value of branch
becomes an alias for the created Commit. This enables a more intuitive
access pattern. When the commit is started on a branch the previous
head of the branch is used as the parent of the commit.
* parent: Specifies the parent Commit, upon creation the new Commit
will appear identical to the parent Commit, data can safely be added
to the new commit without affecting the contents of the parent Commit.
You may pass "" as parentCommit in which case the new Commit will have
no parent and will initially appear empty.
* description: (optional) explanation of the commit for clarity.
"""
req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch,
description=description)
res = self.stub.StartCommit(req, metadata=self.metadata)
return res
def finish_commit(self, commit):
"""
Ends the process of committing data to a Repo and persists the
Commit. Once a Commit is finished the data becomes immutable and
future attempts to write to it with PutFile will error.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.FinishCommitRequest(commit=commit_from(commit))
res = self.stub.FinishCommit(req, metadata=self.metadata)
return res
@contextmanager
def commit(self, repo_name, branch=None, parent=None, description=None):
"""A context manager for doing stuff inside a commit."""
commit = self.start_commit(repo_name, branch, parent, description)
try:
yield commit
except Exception as e:
print("An exception occurred during an open commit. "
"Trying to finish it (Currently a commit can't be cancelled)")
raise e
finally:
self.finish_commit(commit)
def provenances_for_repo(self, repo_name):
provenances = {}
commits = self.list_commit(repo_name)
sorted_commits = [x[0] for x in
sorted([(c.commit.id, c.finished.seconds) for c in commits], key=lambda x: x[1])]
for c in sorted_commits:
for p in c.provenance:
provenances[p.id] = c.commit.id
return provenances
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
"""
Gets a list of CommitInfo objects.
Params:
* repo_name: If only `repo_name` is given, all commits in the repo are
returned.
* to_commit: Optional. Only the ancestors of `to`, including `to`
itself, are considered.
* from_commit: Optional. Only the descendants of `from`, including
`from` itself, are considered.
* number: Optional. Determines how many commits are returned. If
`number` is 0, all commits that match the aforementioned criteria are
returned.
"""
req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number)
if to_commit is not None:
req.to.CopyFrom(commit_from(to_commit))
if from_commit is not None:
getattr(req, 'from').CopyFrom(commit_from(from_commit))
res = self.stub.ListCommit(req, metadata=self.metadata)
if hasattr(res, 'commit_info'):
return res.commit_info
return []
def delete_commit(self, commit):
"""
Deletes a commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.DeleteCommitRequest(commit=commit_from(commit))
self.stub.DeleteCommit(req, metadata=self.metadata)
def flush_commit(self, commits, repos=tuple()):
"""
Blocks until all of the commits which have a set of commits as
provenance have finished. For commits to be considered they must have
all of the specified commits as provenance. This in effect waits for
all of the jobs that are triggered by a set of commits to complete.
It returns an error if any of the commits it's waiting on are
cancelled due to one of the jobs encountering an error during runtime.
Note that it's never necessary to call FlushCommit to run jobs,
they'll run no matter what, FlushCommit just allows you to wait for
them to complete and see their output once they do. This returns an
iterator of CommitInfo objects.
Params:
* commits: A commit or a list of commits to wait on.
* repos: Optional. Only the commits up to and including those repos.
will be considered, otherwise all repos are considered.
"""
req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits],
to_repo=[proto.Repo(name=r) for r in repos])
res = self.stub.FlushCommit(req, metadata=self.metadata)
return res
def subscribe_commit(self, repo_name, branch, from_commit_id=None):
"""
SubscribeCommit is like ListCommit but it keeps listening for commits as
they come in. This returns an iterator Commit objects.
Params:
* repo_name: Name of the repo.
* branch: Branch to subscribe to.
* from_commit_id: Optional. Only commits created since this commit
are returned.
"""
repo = proto.Repo(name=repo_name)
req = proto.SubscribeCommitRequest(repo=repo, branch=branch)
if from_commit_id is not None:
getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id))
res = self.stub.SubscribeCommit(req, metadata=self.metadata)
return res
def list_branch(self, repo_name):
"""
Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo.
"""
req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
res = self.stub.ListBranch(req, metadata=self.metadata)
if hasattr(res, 'branch_info'):
return res.branch_info
return []
def set_branch(self, commit, branch_name):
"""
Sets a commit and its ancestors as a branch.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* branch_name: The name for the branch to set.
"""
res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name)
self.stub.SetBranch(res, metadata=self.metadata)
def delete_branch(self, repo_name, branch_name):
"""
Deletes a branch, but leaves the commits themselves intact. In other
words, those commits can still be accessed via commit IDs and other
branches they happen to be on.
Params:
* repo_name: The name of the repo.
* branch_name: The name of the branch to delete.
"""
res = proto.DeleteBranchRequest(repo=Repo(name=repo_name), branch=branch_name)
self.stub.DeleteBranch(res, metadata=self.metadata)
def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
                   target_file_datums=0, target_file_bytes=0, overwrite_index=None):
    """
    Uploads a binary bytes array as file(s) in a certain path.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: Path in the repo the file(s) will be written to.
    * value: The file contents as bytes, represented as a file-like
      object, bytestring, or iterator of bytestrings.
    * delimiter: Optional. Causes data to be broken up into separate files
      with `path` as a prefix.
    * target_file_datums: Optional. Specifies the target number of datums
      in each written file. It may be lower if data does not split evenly,
      but will never be higher, unless the value is 0.
    * target_file_bytes: Optional. Specifies the target number of bytes in
      each written file; files may have more or fewer bytes than the target.
    * overwrite_index: Optional. Wrapped in an OverwriteIndex message when
      given; presumably the index from which existing file content is
      overwritten — confirm against the pachd API docs.
    """
    overwrite_index_proto = proto.OverwriteIndex(index=overwrite_index) if overwrite_index is not None else None

    # Bug fix: `collections.Iterable` was removed in Python 3.10 — the
    # ABCs have lived in `collections.abc` since 3.3.  Fall back for
    # Python 2, which this file still supports via `six`.
    try:
        from collections.abc import Iterable
    except ImportError:  # pragma: no cover - Python 2
        from collections import Iterable

    def first_request(chunk):
        # Only the first request of the client stream carries the file
        # metadata; later requests carry raw chunk data.
        return proto.PutFileRequest(
            file=proto.File(commit=commit_from(commit), path=path),
            value=chunk,
            delimiter=delimiter,
            target_file_datums=target_file_datums,
            target_file_bytes=target_file_bytes,
            overwrite_index=overwrite_index_proto
        )

    if hasattr(value, "read"):
        # File-like object: stream it in BUFFER_SIZE reads until EOF.
        def wrap(value):
            for i in itertools.count():
                chunk = value.read(BUFFER_SIZE)
                if len(chunk) == 0:
                    return
                if i == 0:
                    yield first_request(chunk)
                else:
                    yield proto.PutFileRequest(value=chunk)
    elif isinstance(value, Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
        # Iterator of bytestrings (excluding bytestrings themselves, which
        # are also iterable): forward each chunk as-is.
        def wrap(value):
            for i, chunk in enumerate(value):
                if i == 0:
                    yield first_request(chunk)
                else:
                    yield proto.PutFileRequest(value=chunk)
    else:
        # A single bytestring: slice it into BUFFER_SIZE chunks.
        # NOTE(review): unlike the other branches, follow-up requests here
        # historically carried overwrite_index; preserved as-is.
        def wrap(value):
            yield first_request(value[:BUFFER_SIZE])
            for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
                yield proto.PutFileRequest(
                    value=value[i:i + BUFFER_SIZE],
                    overwrite_index=overwrite_index_proto
                )
    self.stub.PutFile(wrap(value), metadata=self.metadata)
def put_file_url(self, commit, path, url, recursive=False):
    """
    Puts a file using the content found at a URL. The URL is sent to the
    server which performs the request.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: The path to the file.
    * url: The url of the file to put.
    * recursive: allow for recursive scraping of some types URLs for
      example on s3:// urls.
    """
    request = proto.PutFileRequest(
        file=proto.File(commit=commit_from(commit), path=path),
        url=url,
        recursive=recursive
    )
    # PutFile is a client-streaming RPC, so the stub expects an iterator
    # of requests even when there is only one message to send.
    self.stub.PutFile(iter([request]), metadata=self.metadata)
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
    """
    Returns an iterator over the contents of a file at a specific Commit.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: The path of the file.
    * offset_bytes: Optional. A number of bytes to skip at the beginning
      of the file.
    * size_bytes: Optional. Caps the total amount of data returned; you
      may get fewer bytes than `size_bytes` if the file is smaller. A
      value of 0 returns all of the data.
    * extract_value: If True, an ExtractValueIterator over the raw file
      bytes is returned; if False, the protobuf response iterator itself.
    """
    target = proto.File(commit=commit_from(commit), path=path)
    request = proto.GetFileRequest(
        file=target,
        offset_bytes=offset_bytes,
        size_bytes=size_bytes
    )
    response = self.stub.GetFile(request, metadata=self.metadata)
    if not extract_value:
        return response
    return ExtractValueIterator(response)
def get_files(self, commit, paths, recursive=False):
    """
    Returns the contents of a list of files at a specific Commit as a
    dictionary of file paths to data.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * paths: A list of paths to retrieve.
    * recursive: If True, will go into each directory in the list
      recursively.
    """
    # Expand each requested path: plain files are kept directly,
    # directories are expanded via list_file.
    file_infos = []
    for requested_path in paths:
        info = self.inspect_file(commit, requested_path)
        if info.file_type == proto.FILE:
            file_infos.append(info)
        else:
            file_infos.extend(self.list_file(commit, requested_path, recursive=recursive))
    # Fetch only actual files (directory entries carry no byte content).
    return {
        info.file.path: b''.join(self.get_file(commit, info.file.path))
        for info in file_infos
        if info.file_type == proto.FILE
    }
def inspect_file(self, commit, path):
    """
    Returns info about a specific file.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: Path to file.
    """
    target = proto.File(commit=commit_from(commit), path=path)
    return self.stub.InspectFile(
        proto.InspectFileRequest(file=target),
        metadata=self.metadata
    )
def list_file(self, commit, path, recursive=False):
    """
    Lists the files in a directory.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: The path to the directory.
    * recursive: If True, continue listing the files for sub-directories.
    """
    request = proto.ListFileRequest(
        file=proto.File(commit=commit_from(commit), path=path)
    )
    response = self.stub.ListFile(request, metadata=self.metadata)
    entries = response.file_info
    if not recursive:
        return list(entries)
    # Recursive listing is done client-side: plain files at this level
    # come first, then the contents of each sub-directory in order.
    collected = [entry for entry in entries if entry.file_type == proto.FILE]
    for entry in entries:
        if entry.file_type == proto.DIR:
            collected.extend(self.list_file(commit, entry.file.path, recursive))
    return collected
def glob_file(self, commit, pattern):
    """
    Returns the FileInfo entries for files matching a glob pattern at a
    specific Commit.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * pattern: The glob pattern to match against file paths.
    """
    request = proto.GlobFileRequest(commit=commit_from(commit), pattern=pattern)
    response = self.stub.GlobFile(request, metadata=self.metadata)
    # Defensive check mirroring list_repo/list_branch: fall back to an
    # empty list if the response lacks the `file_info` field.
    if not hasattr(response, 'file_info'):
        return []
    return response.file_info
def delete_file(self, commit, path):
    """
    Deletes a file from a Commit. DeleteFile leaves a tombstone in the
    Commit; assuming the file isn't written to later, attempting to get
    the file from the finished commit will result in a not-found error.
    The file will of course remain intact in the Commit's parent.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: The path to the file.
    """
    target = proto.File(commit=commit_from(commit), path=path)
    self.stub.DeleteFile(
        proto.DeleteFileRequest(file=target),
        metadata=self.metadata
    )
def delete_all(self):
    """
    Sends a DeleteAll request to the server.

    NOTE(review): presumably this removes all PFS state server-side —
    confirm against the pachd API documentation before relying on it.
    """
    empty_request = proto.google_dot_protobuf_dot_empty__pb2.Empty()
    self.stub.DeleteAll(empty_request, metadata=self.metadata)
|
pachyderm/python-pachyderm | src/python_pachyderm/pfs_client.py | PfsClient.list_commit | python | def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number)
if to_commit is not None:
req.to.CopyFrom(commit_from(to_commit))
if from_commit is not None:
getattr(req, 'from').CopyFrom(commit_from(from_commit))
res = self.stub.ListCommit(req, metadata=self.metadata)
if hasattr(res, 'commit_info'):
return res.commit_info
return [] | Gets a list of CommitInfo objects.
Params:
* repo_name: If only `repo_name` is given, all commits in the repo are
returned.
* to_commit: Optional. Only the ancestors of `to`, including `to`
itself, are considered.
* from_commit: Optional. Only the descendants of `from`, including
`from` itself, are considered.
* number: Optional. Determines how many commits are returned. If
`number` is 0, all commits that match the aforementioned criteria are
returned. | train | https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L177-L200 | [
"def commit_from(src, allow_just_repo=False):\n if isinstance(src, pfs_proto.Commit):\n return src\n elif isinstance(src, (tuple, list)) and len(src) == 2:\n return pfs_proto.Commit(repo=pfs_proto.Repo(name=src[0]), id=src[1])\n elif isinstance(src, six.string_types):\n repo_name, comm... | class PfsClient(object):
def __init__(self, host=None, port=None, auth_token=None):
"""
Creates a client to connect to PFS.
Params:
* host: The pachd host. Default is 'localhost', which is used with
`pachctl port-forward`.
* port: The port to connect to. Default is 30650.
* auth_token: The authentication token; used if authentication is
enabled on the cluster. Default to `None`.
"""
address = get_address(host, port)
self.metadata = get_metadata(auth_token)
self.channel = grpc.grpc.insecure_channel(address)
self.stub = grpc.APIStub(self.channel)
def create_repo(self, repo_name, description=None):
"""
Creates a new Repo object in PFS with the given name. Repos are the
top level data object in PFS and should be used to store data of a
similar type. For example rather than having a single Repo for an
entire project you might have separate Repos for logs, metrics,
database dumps etc.
Params:
* repo_name: Name of the repo.
* description: Repo description.
"""
req = proto.CreateRepoRequest(repo=proto.Repo(name=repo_name), description=description)
self.stub.CreateRepo(req, metadata=self.metadata)
def inspect_repo(self, repo_name):
"""
Returns info about a specific Repo.
Params:
* repo_name: Name of the repo.
"""
req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
res = self.stub.InspectRepo(req, metadata=self.metadata)
return res
def list_repo(self):
"""
Returns info about all Repos.
"""
req = proto.ListRepoRequest()
res = self.stub.ListRepo(req, metadata=self.metadata)
if hasattr(res, 'repo_info'):
return res.repo_info
return []
def delete_repo(self, repo_name=None, force=False, all=False):
"""
Deletes a repo and reclaims the storage space it was using.
Params:
* repo_name: The name of the repo.
* force: If set to true, the repo will be removed regardless of
errors. This argument should be used with care.
* all: Delete all repos.
"""
if not all:
if repo_name:
req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Either a repo_name or all=True needs to be provided")
else:
if not repo_name:
req = proto.DeleteRepoRequest(force=force, all=all)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Cannot specify a repo_name if all=True")
def start_commit(self, repo_name, branch=None, parent=None, description=None):
"""
Begins the process of committing data to a Repo. Once started you can
write to the Commit with PutFile and when all the data has been
written you must finish the Commit with FinishCommit. NOTE, data is
not persisted until FinishCommit is called. A Commit object is
returned.
Params:
* repo_name: The name of the repo.
* branch: A more convenient way to build linear chains of commits.
When a commit is started with a non-empty branch the value of branch
becomes an alias for the created Commit. This enables a more intuitive
access pattern. When the commit is started on a branch the previous
head of the branch is used as the parent of the commit.
* parent: Specifies the parent Commit, upon creation the new Commit
will appear identical to the parent Commit, data can safely be added
to the new commit without affecting the contents of the parent Commit.
You may pass "" as parentCommit in which case the new Commit will have
no parent and will initially appear empty.
* description: (optional) explanation of the commit for clarity.
"""
req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch,
description=description)
res = self.stub.StartCommit(req, metadata=self.metadata)
return res
def finish_commit(self, commit):
"""
Ends the process of committing data to a Repo and persists the
Commit. Once a Commit is finished the data becomes immutable and
future attempts to write to it with PutFile will error.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.FinishCommitRequest(commit=commit_from(commit))
res = self.stub.FinishCommit(req, metadata=self.metadata)
return res
@contextmanager
def commit(self, repo_name, branch=None, parent=None, description=None):
"""A context manager for doing stuff inside a commit."""
commit = self.start_commit(repo_name, branch, parent, description)
try:
yield commit
except Exception as e:
print("An exception occurred during an open commit. "
"Trying to finish it (Currently a commit can't be cancelled)")
raise e
finally:
self.finish_commit(commit)
def inspect_commit(self, commit):
"""
Returns info about a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.InspectCommitRequest(commit=commit_from(commit))
return self.stub.InspectCommit(req, metadata=self.metadata)
def provenances_for_repo(self, repo_name):
provenances = {}
commits = self.list_commit(repo_name)
sorted_commits = [x[0] for x in
sorted([(c.commit.id, c.finished.seconds) for c in commits], key=lambda x: x[1])]
for c in sorted_commits:
for p in c.provenance:
provenances[p.id] = c.commit.id
return provenances
def delete_commit(self, commit):
"""
Deletes a commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.DeleteCommitRequest(commit=commit_from(commit))
self.stub.DeleteCommit(req, metadata=self.metadata)
def flush_commit(self, commits, repos=tuple()):
"""
Blocks until all of the commits which have a set of commits as
provenance have finished. For commits to be considered they must have
all of the specified commits as provenance. This in effect waits for
all of the jobs that are triggered by a set of commits to complete.
It returns an error if any of the commits it's waiting on are
cancelled due to one of the jobs encountering an error during runtime.
Note that it's never necessary to call FlushCommit to run jobs,
they'll run no matter what, FlushCommit just allows you to wait for
them to complete and see their output once they do. This returns an
iterator of CommitInfo objects.
Params:
* commits: A commit or a list of commits to wait on.
* repos: Optional. Only the commits up to and including those repos.
will be considered, otherwise all repos are considered.
"""
req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits],
to_repo=[proto.Repo(name=r) for r in repos])
res = self.stub.FlushCommit(req, metadata=self.metadata)
return res
def subscribe_commit(self, repo_name, branch, from_commit_id=None):
"""
SubscribeCommit is like ListCommit but it keeps listening for commits as
they come in. This returns an iterator Commit objects.
Params:
* repo_name: Name of the repo.
* branch: Branch to subscribe to.
* from_commit_id: Optional. Only commits created since this commit
are returned.
"""
repo = proto.Repo(name=repo_name)
req = proto.SubscribeCommitRequest(repo=repo, branch=branch)
if from_commit_id is not None:
getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id))
res = self.stub.SubscribeCommit(req, metadata=self.metadata)
return res
def list_branch(self, repo_name):
"""
Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo.
"""
req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
res = self.stub.ListBranch(req, metadata=self.metadata)
if hasattr(res, 'branch_info'):
return res.branch_info
return []
def set_branch(self, commit, branch_name):
"""
Sets a commit and its ancestors as a branch.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* branch_name: The name for the branch to set.
"""
res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name)
self.stub.SetBranch(res, metadata=self.metadata)
def delete_branch(self, repo_name, branch_name):
"""
Deletes a branch, but leaves the commits themselves intact. In other
words, those commits can still be accessed via commit IDs and other
branches they happen to be on.
Params:
* repo_name: The name of the repo.
* branch_name: The name of the branch to delete.
"""
res = proto.DeleteBranchRequest(repo=Repo(name=repo_name), branch=branch_name)
self.stub.DeleteBranch(res, metadata=self.metadata)
def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
target_file_datums=0, target_file_bytes=0, overwrite_index=None):
"""
Uploads a binary bytes array as file(s) in a certain path.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path in the repo the file(s) will be written to.
* value: The file contents as bytes, represented as a file-like
object, bytestring, or iterator of bytestrings.
* delimiter: Optional. causes data to be broken up into separate files
with `path` as a prefix.
* target_file_datums: Optional. Specifies the target number of datums
in each written file. It may be lower if data does not split evenly,
but will never be higher, unless the value is 0.
* target_file_bytes: Specifies the target number of bytes in each
written file, files may have more or fewer bytes than the target.
"""
overwrite_index_proto = proto.OverwriteIndex(index=overwrite_index) if overwrite_index is not None else None
if hasattr(value, "read"):
def wrap(value):
for i in itertools.count():
chunk = value.read(BUFFER_SIZE)
if len(chunk) == 0:
return
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
elif isinstance(value, collections.Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
def wrap(value):
for i, chunk in enumerate(value):
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
else:
def wrap(value):
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=value[:BUFFER_SIZE],
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
yield proto.PutFileRequest(
value=value[i:i + BUFFER_SIZE],
overwrite_index=overwrite_index_proto
)
self.stub.PutFile(wrap(value), metadata=self.metadata)
def put_file_url(self, commit, path, url, recursive=False):
"""
Puts a file using the content found at a URL. The URL is sent to the
server which performs the request.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
* url: The url of the file to put.
* recursive: allow for recursive scraping of some types URLs for
example on s3:// urls.
"""
req = iter([
proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
url=url,
recursive=recursive
)
])
self.stub.PutFile(req, metadata=self.metadata)
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
"""
Returns an iterator of the contents contents of a file at a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path of the file.
* offset_bytes: Optional. specifies a number of bytes that should be
skipped in the beginning of the file.
* size_bytes: Optional. limits the total amount of data returned, note
you will get fewer bytes than size if you pass a value larger than the
size of the file. If size is set to 0 then all of the data will be
returned.
* extract_value: If True, then an ExtractValueIterator will be return,
which will iterate over the bytes of the file. If False, then the
protobuf response iterator will return.
"""
req = proto.GetFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
offset_bytes=offset_bytes,
size_bytes=size_bytes
)
res = self.stub.GetFile(req, metadata=self.metadata)
if extract_value:
return ExtractValueIterator(res)
return res
def get_files(self, commit, paths, recursive=False):
"""
Returns the contents of a list of files at a specific Commit as a
dictionary of file paths to data.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* paths: A list of paths to retrieve.
* recursive: If True, will go into each directory in the list
recursively.
"""
filtered_file_infos = []
for path in paths:
fi = self.inspect_file(commit, path)
if fi.file_type == proto.FILE:
filtered_file_infos.append(fi)
else:
filtered_file_infos += self.list_file(commit, path, recursive=recursive)
filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE]
return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
def inspect_file(self, commit, path):
"""
Returns info about a specific file.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path to file.
"""
req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path))
res = self.stub.InspectFile(req, metadata=self.metadata)
return res
def list_file(self, commit, path, recursive=False):
"""
Lists the files in a directory.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the directory.
* recursive: If True, continue listing the files for sub-directories.
"""
req = proto.ListFileRequest(
file=proto.File(commit=commit_from(commit), path=path)
)
res = self.stub.ListFile(req, metadata=self.metadata)
file_infos = res.file_info
if recursive:
dirs = [f for f in file_infos if f.file_type == proto.DIR]
files = [f for f in file_infos if f.file_type == proto.FILE]
return sum([self.list_file(commit, d.file.path, recursive) for d in dirs], files)
return list(file_infos)
def glob_file(self, commit, pattern):
req = proto.GlobFileRequest(commit=commit_from(commit), pattern=pattern)
res = self.stub.GlobFile(req, metadata=self.metadata)
if hasattr(res, 'file_info'):
return res.file_info
return []
def delete_file(self, commit, path):
"""
Deletes a file from a Commit. DeleteFile leaves a tombstone in the
Commit, assuming the file isn't written to later attempting to get the
file from the finished commit will result in not found error. The file
will of course remain intact in the Commit's parent.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
"""
req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path))
self.stub.DeleteFile(req, metadata=self.metadata)
def delete_all(self):
req = proto.google_dot_protobuf_dot_empty__pb2.Empty()
self.stub.DeleteAll(req, metadata=self.metadata)
|
pachyderm/python-pachyderm | src/python_pachyderm/pfs_client.py | PfsClient.delete_commit | python | def delete_commit(self, commit):
req = proto.DeleteCommitRequest(commit=commit_from(commit))
self.stub.DeleteCommit(req, metadata=self.metadata) | Deletes a commit.
Params:
* commit: A tuple, string, or Commit object representing the commit. | train | https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L202-L210 | [
"def commit_from(src, allow_just_repo=False):\n if isinstance(src, pfs_proto.Commit):\n return src\n elif isinstance(src, (tuple, list)) and len(src) == 2:\n return pfs_proto.Commit(repo=pfs_proto.Repo(name=src[0]), id=src[1])\n elif isinstance(src, six.string_types):\n repo_name, comm... | class PfsClient(object):
def __init__(self, host=None, port=None, auth_token=None):
"""
Creates a client to connect to PFS.
Params:
* host: The pachd host. Default is 'localhost', which is used with
`pachctl port-forward`.
* port: The port to connect to. Default is 30650.
* auth_token: The authentication token; used if authentication is
enabled on the cluster. Default to `None`.
"""
address = get_address(host, port)
self.metadata = get_metadata(auth_token)
self.channel = grpc.grpc.insecure_channel(address)
self.stub = grpc.APIStub(self.channel)
def create_repo(self, repo_name, description=None):
"""
Creates a new Repo object in PFS with the given name. Repos are the
top level data object in PFS and should be used to store data of a
similar type. For example rather than having a single Repo for an
entire project you might have separate Repos for logs, metrics,
database dumps etc.
Params:
* repo_name: Name of the repo.
* description: Repo description.
"""
req = proto.CreateRepoRequest(repo=proto.Repo(name=repo_name), description=description)
self.stub.CreateRepo(req, metadata=self.metadata)
def inspect_repo(self, repo_name):
"""
Returns info about a specific Repo.
Params:
* repo_name: Name of the repo.
"""
req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
res = self.stub.InspectRepo(req, metadata=self.metadata)
return res
def list_repo(self):
"""
Returns info about all Repos.
"""
req = proto.ListRepoRequest()
res = self.stub.ListRepo(req, metadata=self.metadata)
if hasattr(res, 'repo_info'):
return res.repo_info
return []
def delete_repo(self, repo_name=None, force=False, all=False):
"""
Deletes a repo and reclaims the storage space it was using.
Params:
* repo_name: The name of the repo.
* force: If set to true, the repo will be removed regardless of
errors. This argument should be used with care.
* all: Delete all repos.
"""
if not all:
if repo_name:
req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Either a repo_name or all=True needs to be provided")
else:
if not repo_name:
req = proto.DeleteRepoRequest(force=force, all=all)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Cannot specify a repo_name if all=True")
def start_commit(self, repo_name, branch=None, parent=None, description=None):
"""
Begins the process of committing data to a Repo. Once started you can
write to the Commit with PutFile and when all the data has been
written you must finish the Commit with FinishCommit. NOTE, data is
not persisted until FinishCommit is called. A Commit object is
returned.
Params:
* repo_name: The name of the repo.
* branch: A more convenient way to build linear chains of commits.
When a commit is started with a non-empty branch the value of branch
becomes an alias for the created Commit. This enables a more intuitive
access pattern. When the commit is started on a branch the previous
head of the branch is used as the parent of the commit.
* parent: Specifies the parent Commit, upon creation the new Commit
will appear identical to the parent Commit, data can safely be added
to the new commit without affecting the contents of the parent Commit.
You may pass "" as parentCommit in which case the new Commit will have
no parent and will initially appear empty.
* description: (optional) explanation of the commit for clarity.
"""
req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch,
description=description)
res = self.stub.StartCommit(req, metadata=self.metadata)
return res
def finish_commit(self, commit):
"""
Ends the process of committing data to a Repo and persists the
Commit. Once a Commit is finished the data becomes immutable and
future attempts to write to it with PutFile will error.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.FinishCommitRequest(commit=commit_from(commit))
res = self.stub.FinishCommit(req, metadata=self.metadata)
return res
@contextmanager
def commit(self, repo_name, branch=None, parent=None, description=None):
"""A context manager for doing stuff inside a commit."""
commit = self.start_commit(repo_name, branch, parent, description)
try:
yield commit
except Exception as e:
print("An exception occurred during an open commit. "
"Trying to finish it (Currently a commit can't be cancelled)")
raise e
finally:
self.finish_commit(commit)
def inspect_commit(self, commit):
"""
Returns info about a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.InspectCommitRequest(commit=commit_from(commit))
return self.stub.InspectCommit(req, metadata=self.metadata)
def provenances_for_repo(self, repo_name):
provenances = {}
commits = self.list_commit(repo_name)
sorted_commits = [x[0] for x in
sorted([(c.commit.id, c.finished.seconds) for c in commits], key=lambda x: x[1])]
for c in sorted_commits:
for p in c.provenance:
provenances[p.id] = c.commit.id
return provenances
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
"""
Gets a list of CommitInfo objects.
Params:
* repo_name: If only `repo_name` is given, all commits in the repo are
returned.
* to_commit: Optional. Only the ancestors of `to`, including `to`
itself, are considered.
* from_commit: Optional. Only the descendants of `from`, including
`from` itself, are considered.
* number: Optional. Determines how many commits are returned. If
`number` is 0, all commits that match the aforementioned criteria are
returned.
"""
req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number)
if to_commit is not None:
req.to.CopyFrom(commit_from(to_commit))
if from_commit is not None:
getattr(req, 'from').CopyFrom(commit_from(from_commit))
res = self.stub.ListCommit(req, metadata=self.metadata)
if hasattr(res, 'commit_info'):
return res.commit_info
return []
def flush_commit(self, commits, repos=tuple()):
"""
Blocks until all of the commits which have a set of commits as
provenance have finished. For commits to be considered they must have
all of the specified commits as provenance. This in effect waits for
all of the jobs that are triggered by a set of commits to complete.
It returns an error if any of the commits it's waiting on are
cancelled due to one of the jobs encountering an error during runtime.
Note that it's never necessary to call FlushCommit to run jobs,
they'll run no matter what, FlushCommit just allows you to wait for
them to complete and see their output once they do. This returns an
iterator of CommitInfo objects.
Params:
* commits: A commit or a list of commits to wait on.
* repos: Optional. Only the commits up to and including those repos.
will be considered, otherwise all repos are considered.
"""
req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits],
to_repo=[proto.Repo(name=r) for r in repos])
res = self.stub.FlushCommit(req, metadata=self.metadata)
return res
def subscribe_commit(self, repo_name, branch, from_commit_id=None):
"""
SubscribeCommit is like ListCommit but it keeps listening for commits as
they come in. This returns an iterator Commit objects.
Params:
* repo_name: Name of the repo.
* branch: Branch to subscribe to.
* from_commit_id: Optional. Only commits created since this commit
are returned.
"""
repo = proto.Repo(name=repo_name)
req = proto.SubscribeCommitRequest(repo=repo, branch=branch)
if from_commit_id is not None:
getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id))
res = self.stub.SubscribeCommit(req, metadata=self.metadata)
return res
def list_branch(self, repo_name):
"""
Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo.
"""
req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
res = self.stub.ListBranch(req, metadata=self.metadata)
if hasattr(res, 'branch_info'):
return res.branch_info
return []
def set_branch(self, commit, branch_name):
"""
Sets a commit and its ancestors as a branch.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* branch_name: The name for the branch to set.
"""
res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name)
self.stub.SetBranch(res, metadata=self.metadata)
def delete_branch(self, repo_name, branch_name):
"""
Deletes a branch, but leaves the commits themselves intact. In other
words, those commits can still be accessed via commit IDs and other
branches they happen to be on.
Params:
* repo_name: The name of the repo.
* branch_name: The name of the branch to delete.
"""
res = proto.DeleteBranchRequest(repo=Repo(name=repo_name), branch=branch_name)
self.stub.DeleteBranch(res, metadata=self.metadata)
    def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
                       target_file_datums=0, target_file_bytes=0, overwrite_index=None):
        """
        Uploads a binary bytes array as file(s) in a certain path.
        Params:
        * commit: A tuple, string, or Commit object representing the commit.
        * path: Path in the repo the file(s) will be written to.
        * value: The file contents as bytes, represented as a file-like
        object, bytestring, or iterator of bytestrings.
        * delimiter: Optional. causes data to be broken up into separate files
        with `path` as a prefix.
        * target_file_datums: Optional. Specifies the target number of datums
        in each written file. It may be lower if data does not split evenly,
        but will never be higher, unless the value is 0.
        * target_file_bytes: Specifies the target number of bytes in each
        written file, files may have more or fewer bytes than the target.
        * overwrite_index: Optional. When given, wrapped in an OverwriteIndex
        message and attached to the request(s).
        """
        # Only build the OverwriteIndex message when the caller supplied one.
        overwrite_index_proto = proto.OverwriteIndex(index=overwrite_index) if overwrite_index is not None else None
        # PutFile is a client-streaming RPC: only the FIRST request of the
        # stream names the target file and carries the split options; later
        # requests are continuation chunks of bytes.
        if hasattr(value, "read"):
            # File-like object: stream it in BUFFER_SIZE reads until EOF.
            def wrap(value):
                for i in itertools.count():
                    chunk = value.read(BUFFER_SIZE)
                    if len(chunk) == 0:
                        return
                    if i == 0:
                        yield proto.PutFileRequest(
                            file=proto.File(commit=commit_from(commit), path=path),
                            value=chunk,
                            delimiter=delimiter,
                            target_file_datums=target_file_datums,
                            target_file_bytes=target_file_bytes,
                            overwrite_index=overwrite_index_proto
                        )
                    else:
                        yield proto.PutFileRequest(value=chunk)
        elif isinstance(value, collections.Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
            # Iterator of byte chunks (strings/bytes are excluded: they are
            # iterable but must be sliced, handled in the else branch).
            # NOTE(review): `collections.Iterable` was removed in Python 3.10
            # (`collections.abc.Iterable` is the replacement); left as-is for
            # the py2/py3 compatibility this file targets via `six`.
            def wrap(value):
                for i, chunk in enumerate(value):
                    if i == 0:
                        yield proto.PutFileRequest(
                            file=proto.File(commit=commit_from(commit), path=path),
                            value=chunk,
                            delimiter=delimiter,
                            target_file_datums=target_file_datums,
                            target_file_bytes=target_file_bytes,
                            overwrite_index=overwrite_index_proto
                        )
                    else:
                        yield proto.PutFileRequest(value=chunk)
        else:
            # Bytestring: slice it into BUFFER_SIZE chunks ourselves.
            def wrap(value):
                yield proto.PutFileRequest(
                    file=proto.File(commit=commit_from(commit), path=path),
                    value=value[:BUFFER_SIZE],
                    delimiter=delimiter,
                    target_file_datums=target_file_datums,
                    target_file_bytes=target_file_bytes,
                    overwrite_index=overwrite_index_proto
                )
                for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
                    yield proto.PutFileRequest(
                        value=value[i:i + BUFFER_SIZE],
                        overwrite_index=overwrite_index_proto
                    )
        self.stub.PutFile(wrap(value), metadata=self.metadata)
def put_file_url(self, commit, path, url, recursive=False):
"""
Puts a file using the content found at a URL. The URL is sent to the
server which performs the request.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
* url: The url of the file to put.
* recursive: allow for recursive scraping of some types URLs for
example on s3:// urls.
"""
req = iter([
proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
url=url,
recursive=recursive
)
])
self.stub.PutFile(req, metadata=self.metadata)
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
"""
Returns an iterator of the contents contents of a file at a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path of the file.
* offset_bytes: Optional. specifies a number of bytes that should be
skipped in the beginning of the file.
* size_bytes: Optional. limits the total amount of data returned, note
you will get fewer bytes than size if you pass a value larger than the
size of the file. If size is set to 0 then all of the data will be
returned.
* extract_value: If True, then an ExtractValueIterator will be return,
which will iterate over the bytes of the file. If False, then the
protobuf response iterator will return.
"""
req = proto.GetFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
offset_bytes=offset_bytes,
size_bytes=size_bytes
)
res = self.stub.GetFile(req, metadata=self.metadata)
if extract_value:
return ExtractValueIterator(res)
return res
def get_files(self, commit, paths, recursive=False):
"""
Returns the contents of a list of files at a specific Commit as a
dictionary of file paths to data.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* paths: A list of paths to retrieve.
* recursive: If True, will go into each directory in the list
recursively.
"""
filtered_file_infos = []
for path in paths:
fi = self.inspect_file(commit, path)
if fi.file_type == proto.FILE:
filtered_file_infos.append(fi)
else:
filtered_file_infos += self.list_file(commit, path, recursive=recursive)
filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE]
return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
def inspect_file(self, commit, path):
"""
Returns info about a specific file.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path to file.
"""
req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path))
res = self.stub.InspectFile(req, metadata=self.metadata)
return res
def list_file(self, commit, path, recursive=False):
"""
Lists the files in a directory.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the directory.
* recursive: If True, continue listing the files for sub-directories.
"""
req = proto.ListFileRequest(
file=proto.File(commit=commit_from(commit), path=path)
)
res = self.stub.ListFile(req, metadata=self.metadata)
file_infos = res.file_info
if recursive:
dirs = [f for f in file_infos if f.file_type == proto.DIR]
files = [f for f in file_infos if f.file_type == proto.FILE]
return sum([self.list_file(commit, d.file.path, recursive) for d in dirs], files)
return list(file_infos)
def glob_file(self, commit, pattern):
req = proto.GlobFileRequest(commit=commit_from(commit), pattern=pattern)
res = self.stub.GlobFile(req, metadata=self.metadata)
if hasattr(res, 'file_info'):
return res.file_info
return []
def delete_file(self, commit, path):
"""
Deletes a file from a Commit. DeleteFile leaves a tombstone in the
Commit, assuming the file isn't written to later attempting to get the
file from the finished commit will result in not found error. The file
will of course remain intact in the Commit's parent.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
"""
req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path))
self.stub.DeleteFile(req, metadata=self.metadata)
def delete_all(self):
req = proto.google_dot_protobuf_dot_empty__pb2.Empty()
self.stub.DeleteAll(req, metadata=self.metadata)
|
pachyderm/python-pachyderm | src/python_pachyderm/pfs_client.py | PfsClient.flush_commit | python | def flush_commit(self, commits, repos=tuple()):
req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits],
to_repo=[proto.Repo(name=r) for r in repos])
res = self.stub.FlushCommit(req, metadata=self.metadata)
return res | Blocks until all of the commits which have a set of commits as
provenance have finished. For commits to be considered they must have
all of the specified commits as provenance. This in effect waits for
all of the jobs that are triggered by a set of commits to complete.
It returns an error if any of the commits it's waiting on are
cancelled due to one of the jobs encountering an error during runtime.
Note that it's never necessary to call FlushCommit to run jobs,
they'll run no matter what, FlushCommit just allows you to wait for
them to complete and see their output once they do. This returns an
iterator of CommitInfo objects.
Params:
* commits: A commit or a list of commits to wait on.
* repos: Optional. Only the commits up to and including those repos.
will be considered, otherwise all repos are considered. | train | https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L212-L233 | null | class PfsClient(object):
    def __init__(self, host=None, port=None, auth_token=None):
        """
        Creates a client to connect to PFS.
        Params:
        * host: The pachd host. Default is 'localhost', which is used with
        `pachctl port-forward`.
        * port: The port to connect to. Default is 30650.
        * auth_token: The authentication token; used if authentication is
        enabled on the cluster. Default to `None`.
        """
        address = get_address(host, port)
        # Metadata is attached to every RPC; it carries the auth token when
        # one is configured.
        self.metadata = get_metadata(auth_token)
        # NOTE(review): `grpc.grpc` suggests the generated pfs module
        # re-exports the grpcio package under that name -- confirm against
        # the module's imports.
        self.channel = grpc.grpc.insecure_channel(address)
        self.stub = grpc.APIStub(self.channel)
def create_repo(self, repo_name, description=None):
"""
Creates a new Repo object in PFS with the given name. Repos are the
top level data object in PFS and should be used to store data of a
similar type. For example rather than having a single Repo for an
entire project you might have separate Repos for logs, metrics,
database dumps etc.
Params:
* repo_name: Name of the repo.
* description: Repo description.
"""
req = proto.CreateRepoRequest(repo=proto.Repo(name=repo_name), description=description)
self.stub.CreateRepo(req, metadata=self.metadata)
def inspect_repo(self, repo_name):
"""
Returns info about a specific Repo.
Params:
* repo_name: Name of the repo.
"""
req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
res = self.stub.InspectRepo(req, metadata=self.metadata)
return res
def list_repo(self):
"""
Returns info about all Repos.
"""
req = proto.ListRepoRequest()
res = self.stub.ListRepo(req, metadata=self.metadata)
if hasattr(res, 'repo_info'):
return res.repo_info
return []
def delete_repo(self, repo_name=None, force=False, all=False):
"""
Deletes a repo and reclaims the storage space it was using.
Params:
* repo_name: The name of the repo.
* force: If set to true, the repo will be removed regardless of
errors. This argument should be used with care.
* all: Delete all repos.
"""
if not all:
if repo_name:
req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Either a repo_name or all=True needs to be provided")
else:
if not repo_name:
req = proto.DeleteRepoRequest(force=force, all=all)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Cannot specify a repo_name if all=True")
def start_commit(self, repo_name, branch=None, parent=None, description=None):
"""
Begins the process of committing data to a Repo. Once started you can
write to the Commit with PutFile and when all the data has been
written you must finish the Commit with FinishCommit. NOTE, data is
not persisted until FinishCommit is called. A Commit object is
returned.
Params:
* repo_name: The name of the repo.
* branch: A more convenient way to build linear chains of commits.
When a commit is started with a non-empty branch the value of branch
becomes an alias for the created Commit. This enables a more intuitive
access pattern. When the commit is started on a branch the previous
head of the branch is used as the parent of the commit.
* parent: Specifies the parent Commit, upon creation the new Commit
will appear identical to the parent Commit, data can safely be added
to the new commit without affecting the contents of the parent Commit.
You may pass "" as parentCommit in which case the new Commit will have
no parent and will initially appear empty.
* description: (optional) explanation of the commit for clarity.
"""
req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch,
description=description)
res = self.stub.StartCommit(req, metadata=self.metadata)
return res
def finish_commit(self, commit):
"""
Ends the process of committing data to a Repo and persists the
Commit. Once a Commit is finished the data becomes immutable and
future attempts to write to it with PutFile will error.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.FinishCommitRequest(commit=commit_from(commit))
res = self.stub.FinishCommit(req, metadata=self.metadata)
return res
    @contextmanager
    def commit(self, repo_name, branch=None, parent=None, description=None):
        """A context manager for doing stuff inside a commit.

        Starts a commit on entry and always finishes it on exit -- even when
        the body raises, since PFS has no way to cancel an open commit.
        """
        commit = self.start_commit(repo_name, branch, parent, description)
        try:
            yield commit
        except Exception as e:
            # Warn before re-raising; the `finally` clause still finishes the
            # commit because cancellation is not supported.
            print("An exception occurred during an open commit. "
                  "Trying to finish it (Currently a commit can't be cancelled)")
            raise e
        finally:
            self.finish_commit(commit)
def inspect_commit(self, commit):
"""
Returns info about a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.InspectCommitRequest(commit=commit_from(commit))
return self.stub.InspectCommit(req, metadata=self.metadata)
def provenances_for_repo(self, repo_name):
provenances = {}
commits = self.list_commit(repo_name)
sorted_commits = [x[0] for x in
sorted([(c.commit.id, c.finished.seconds) for c in commits], key=lambda x: x[1])]
for c in sorted_commits:
for p in c.provenance:
provenances[p.id] = c.commit.id
return provenances
    def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
        """
        Gets a list of CommitInfo objects.
        Params:
        * repo_name: If only `repo_name` is given, all commits in the repo are
        returned.
        * to_commit: Optional. Only the ancestors of `to`, including `to`
        itself, are considered.
        * from_commit: Optional. Only the descendants of `from`, including
        `from` itself, are considered.
        * number: Optional. Determines how many commits are returned. If
        `number` is 0, all commits that match the aforementioned criteria are
        returned.
        """
        req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number)
        if to_commit is not None:
            req.to.CopyFrom(commit_from(to_commit))
        if from_commit is not None:
            # `from` is a Python keyword, so the generated protobuf field
            # cannot be set with normal attribute syntax; getattr() is the
            # standard workaround.
            getattr(req, 'from').CopyFrom(commit_from(from_commit))
        res = self.stub.ListCommit(req, metadata=self.metadata)
        # Fall back to an empty list if the repeated field is absent.
        if hasattr(res, 'commit_info'):
            return res.commit_info
        return []
def delete_commit(self, commit):
"""
Deletes a commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.DeleteCommitRequest(commit=commit_from(commit))
self.stub.DeleteCommit(req, metadata=self.metadata)
def subscribe_commit(self, repo_name, branch, from_commit_id=None):
"""
SubscribeCommit is like ListCommit but it keeps listening for commits as
they come in. This returns an iterator Commit objects.
Params:
* repo_name: Name of the repo.
* branch: Branch to subscribe to.
* from_commit_id: Optional. Only commits created since this commit
are returned.
"""
repo = proto.Repo(name=repo_name)
req = proto.SubscribeCommitRequest(repo=repo, branch=branch)
if from_commit_id is not None:
getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id))
res = self.stub.SubscribeCommit(req, metadata=self.metadata)
return res
def list_branch(self, repo_name):
"""
Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo.
"""
req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
res = self.stub.ListBranch(req, metadata=self.metadata)
if hasattr(res, 'branch_info'):
return res.branch_info
return []
def set_branch(self, commit, branch_name):
"""
Sets a commit and its ancestors as a branch.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* branch_name: The name for the branch to set.
"""
res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name)
self.stub.SetBranch(res, metadata=self.metadata)
def delete_branch(self, repo_name, branch_name):
"""
Deletes a branch, but leaves the commits themselves intact. In other
words, those commits can still be accessed via commit IDs and other
branches they happen to be on.
Params:
* repo_name: The name of the repo.
* branch_name: The name of the branch to delete.
"""
res = proto.DeleteBranchRequest(repo=Repo(name=repo_name), branch=branch_name)
self.stub.DeleteBranch(res, metadata=self.metadata)
    def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
                       target_file_datums=0, target_file_bytes=0, overwrite_index=None):
        """
        Uploads a binary bytes array as file(s) in a certain path.
        Params:
        * commit: A tuple, string, or Commit object representing the commit.
        * path: Path in the repo the file(s) will be written to.
        * value: The file contents as bytes, represented as a file-like
        object, bytestring, or iterator of bytestrings.
        * delimiter: Optional. causes data to be broken up into separate files
        with `path` as a prefix.
        * target_file_datums: Optional. Specifies the target number of datums
        in each written file. It may be lower if data does not split evenly,
        but will never be higher, unless the value is 0.
        * target_file_bytes: Specifies the target number of bytes in each
        written file, files may have more or fewer bytes than the target.
        * overwrite_index: Optional. When given, wrapped in an OverwriteIndex
        message and attached to the request(s).
        """
        # Only build the OverwriteIndex message when the caller supplied one.
        overwrite_index_proto = proto.OverwriteIndex(index=overwrite_index) if overwrite_index is not None else None
        # PutFile is a client-streaming RPC: only the FIRST request of the
        # stream names the target file and carries the split options; later
        # requests are continuation chunks of bytes.
        if hasattr(value, "read"):
            # File-like object: stream it in BUFFER_SIZE reads until EOF.
            def wrap(value):
                for i in itertools.count():
                    chunk = value.read(BUFFER_SIZE)
                    if len(chunk) == 0:
                        return
                    if i == 0:
                        yield proto.PutFileRequest(
                            file=proto.File(commit=commit_from(commit), path=path),
                            value=chunk,
                            delimiter=delimiter,
                            target_file_datums=target_file_datums,
                            target_file_bytes=target_file_bytes,
                            overwrite_index=overwrite_index_proto
                        )
                    else:
                        yield proto.PutFileRequest(value=chunk)
        elif isinstance(value, collections.Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
            # Iterator of byte chunks (strings/bytes are excluded: they are
            # iterable but must be sliced, handled in the else branch).
            # NOTE(review): `collections.Iterable` was removed in Python 3.10
            # (`collections.abc.Iterable` is the replacement); left as-is for
            # the py2/py3 compatibility this file targets via `six`.
            def wrap(value):
                for i, chunk in enumerate(value):
                    if i == 0:
                        yield proto.PutFileRequest(
                            file=proto.File(commit=commit_from(commit), path=path),
                            value=chunk,
                            delimiter=delimiter,
                            target_file_datums=target_file_datums,
                            target_file_bytes=target_file_bytes,
                            overwrite_index=overwrite_index_proto
                        )
                    else:
                        yield proto.PutFileRequest(value=chunk)
        else:
            # Bytestring: slice it into BUFFER_SIZE chunks ourselves.
            def wrap(value):
                yield proto.PutFileRequest(
                    file=proto.File(commit=commit_from(commit), path=path),
                    value=value[:BUFFER_SIZE],
                    delimiter=delimiter,
                    target_file_datums=target_file_datums,
                    target_file_bytes=target_file_bytes,
                    overwrite_index=overwrite_index_proto
                )
                for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
                    yield proto.PutFileRequest(
                        value=value[i:i + BUFFER_SIZE],
                        overwrite_index=overwrite_index_proto
                    )
        self.stub.PutFile(wrap(value), metadata=self.metadata)
def put_file_url(self, commit, path, url, recursive=False):
"""
Puts a file using the content found at a URL. The URL is sent to the
server which performs the request.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
* url: The url of the file to put.
* recursive: allow for recursive scraping of some types URLs for
example on s3:// urls.
"""
req = iter([
proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
url=url,
recursive=recursive
)
])
self.stub.PutFile(req, metadata=self.metadata)
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
"""
Returns an iterator of the contents contents of a file at a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path of the file.
* offset_bytes: Optional. specifies a number of bytes that should be
skipped in the beginning of the file.
* size_bytes: Optional. limits the total amount of data returned, note
you will get fewer bytes than size if you pass a value larger than the
size of the file. If size is set to 0 then all of the data will be
returned.
* extract_value: If True, then an ExtractValueIterator will be return,
which will iterate over the bytes of the file. If False, then the
protobuf response iterator will return.
"""
req = proto.GetFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
offset_bytes=offset_bytes,
size_bytes=size_bytes
)
res = self.stub.GetFile(req, metadata=self.metadata)
if extract_value:
return ExtractValueIterator(res)
return res
def get_files(self, commit, paths, recursive=False):
"""
Returns the contents of a list of files at a specific Commit as a
dictionary of file paths to data.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* paths: A list of paths to retrieve.
* recursive: If True, will go into each directory in the list
recursively.
"""
filtered_file_infos = []
for path in paths:
fi = self.inspect_file(commit, path)
if fi.file_type == proto.FILE:
filtered_file_infos.append(fi)
else:
filtered_file_infos += self.list_file(commit, path, recursive=recursive)
filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE]
return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
def inspect_file(self, commit, path):
"""
Returns info about a specific file.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path to file.
"""
req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path))
res = self.stub.InspectFile(req, metadata=self.metadata)
return res
def list_file(self, commit, path, recursive=False):
"""
Lists the files in a directory.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the directory.
* recursive: If True, continue listing the files for sub-directories.
"""
req = proto.ListFileRequest(
file=proto.File(commit=commit_from(commit), path=path)
)
res = self.stub.ListFile(req, metadata=self.metadata)
file_infos = res.file_info
if recursive:
dirs = [f for f in file_infos if f.file_type == proto.DIR]
files = [f for f in file_infos if f.file_type == proto.FILE]
return sum([self.list_file(commit, d.file.path, recursive) for d in dirs], files)
return list(file_infos)
def glob_file(self, commit, pattern):
req = proto.GlobFileRequest(commit=commit_from(commit), pattern=pattern)
res = self.stub.GlobFile(req, metadata=self.metadata)
if hasattr(res, 'file_info'):
return res.file_info
return []
def delete_file(self, commit, path):
"""
Deletes a file from a Commit. DeleteFile leaves a tombstone in the
Commit, assuming the file isn't written to later attempting to get the
file from the finished commit will result in not found error. The file
will of course remain intact in the Commit's parent.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
"""
req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path))
self.stub.DeleteFile(req, metadata=self.metadata)
def delete_all(self):
req = proto.google_dot_protobuf_dot_empty__pb2.Empty()
self.stub.DeleteAll(req, metadata=self.metadata)
|
pachyderm/python-pachyderm | src/python_pachyderm/pfs_client.py | PfsClient.subscribe_commit | python | def subscribe_commit(self, repo_name, branch, from_commit_id=None):
repo = proto.Repo(name=repo_name)
req = proto.SubscribeCommitRequest(repo=repo, branch=branch)
if from_commit_id is not None:
getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id))
res = self.stub.SubscribeCommit(req, metadata=self.metadata)
return res | SubscribeCommit is like ListCommit but it keeps listening for commits as
they come in. This returns an iterator Commit objects.
Params:
* repo_name: Name of the repo.
* branch: Branch to subscribe to.
* from_commit_id: Optional. Only commits created since this commit
are returned. | train | https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L235-L251 | null | class PfsClient(object):
    def __init__(self, host=None, port=None, auth_token=None):
        """
        Creates a client to connect to PFS.
        Params:
        * host: The pachd host. Default is 'localhost', which is used with
        `pachctl port-forward`.
        * port: The port to connect to. Default is 30650.
        * auth_token: The authentication token; used if authentication is
        enabled on the cluster. Default to `None`.
        """
        address = get_address(host, port)
        # Metadata is attached to every RPC; it carries the auth token when
        # one is configured.
        self.metadata = get_metadata(auth_token)
        # NOTE(review): `grpc.grpc` suggests the generated pfs module
        # re-exports the grpcio package under that name -- confirm against
        # the module's imports.
        self.channel = grpc.grpc.insecure_channel(address)
        self.stub = grpc.APIStub(self.channel)
def create_repo(self, repo_name, description=None):
"""
Creates a new Repo object in PFS with the given name. Repos are the
top level data object in PFS and should be used to store data of a
similar type. For example rather than having a single Repo for an
entire project you might have separate Repos for logs, metrics,
database dumps etc.
Params:
* repo_name: Name of the repo.
* description: Repo description.
"""
req = proto.CreateRepoRequest(repo=proto.Repo(name=repo_name), description=description)
self.stub.CreateRepo(req, metadata=self.metadata)
def inspect_repo(self, repo_name):
"""
Returns info about a specific Repo.
Params:
* repo_name: Name of the repo.
"""
req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
res = self.stub.InspectRepo(req, metadata=self.metadata)
return res
def list_repo(self):
"""
Returns info about all Repos.
"""
req = proto.ListRepoRequest()
res = self.stub.ListRepo(req, metadata=self.metadata)
if hasattr(res, 'repo_info'):
return res.repo_info
return []
def delete_repo(self, repo_name=None, force=False, all=False):
"""
Deletes a repo and reclaims the storage space it was using.
Params:
* repo_name: The name of the repo.
* force: If set to true, the repo will be removed regardless of
errors. This argument should be used with care.
* all: Delete all repos.
"""
if not all:
if repo_name:
req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Either a repo_name or all=True needs to be provided")
else:
if not repo_name:
req = proto.DeleteRepoRequest(force=force, all=all)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Cannot specify a repo_name if all=True")
def start_commit(self, repo_name, branch=None, parent=None, description=None):
"""
Begins the process of committing data to a Repo. Once started you can
write to the Commit with PutFile and when all the data has been
written you must finish the Commit with FinishCommit. NOTE, data is
not persisted until FinishCommit is called. A Commit object is
returned.
Params:
* repo_name: The name of the repo.
* branch: A more convenient way to build linear chains of commits.
When a commit is started with a non-empty branch the value of branch
becomes an alias for the created Commit. This enables a more intuitive
access pattern. When the commit is started on a branch the previous
head of the branch is used as the parent of the commit.
* parent: Specifies the parent Commit, upon creation the new Commit
will appear identical to the parent Commit, data can safely be added
to the new commit without affecting the contents of the parent Commit.
You may pass "" as parentCommit in which case the new Commit will have
no parent and will initially appear empty.
* description: (optional) explanation of the commit for clarity.
"""
req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch,
description=description)
res = self.stub.StartCommit(req, metadata=self.metadata)
return res
def finish_commit(self, commit):
"""
Ends the process of committing data to a Repo and persists the
Commit. Once a Commit is finished the data becomes immutable and
future attempts to write to it with PutFile will error.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.FinishCommitRequest(commit=commit_from(commit))
res = self.stub.FinishCommit(req, metadata=self.metadata)
return res
    @contextmanager
    def commit(self, repo_name, branch=None, parent=None, description=None):
        """A context manager for doing stuff inside a commit.

        Starts a commit on entry and always finishes it on exit -- even when
        the body raises, since PFS has no way to cancel an open commit.
        """
        commit = self.start_commit(repo_name, branch, parent, description)
        try:
            yield commit
        except Exception as e:
            # Warn before re-raising; the `finally` clause still finishes the
            # commit because cancellation is not supported.
            print("An exception occurred during an open commit. "
                  "Trying to finish it (Currently a commit can't be cancelled)")
            raise e
        finally:
            self.finish_commit(commit)
def inspect_commit(self, commit):
"""
Returns info about a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.InspectCommitRequest(commit=commit_from(commit))
return self.stub.InspectCommit(req, metadata=self.metadata)
def provenances_for_repo(self, repo_name):
provenances = {}
commits = self.list_commit(repo_name)
sorted_commits = [x[0] for x in
sorted([(c.commit.id, c.finished.seconds) for c in commits], key=lambda x: x[1])]
for c in sorted_commits:
for p in c.provenance:
provenances[p.id] = c.commit.id
return provenances
    def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
        """
        Gets a list of CommitInfo objects.
        Params:
        * repo_name: If only `repo_name` is given, all commits in the repo are
        returned.
        * to_commit: Optional. Only the ancestors of `to`, including `to`
        itself, are considered.
        * from_commit: Optional. Only the descendants of `from`, including
        `from` itself, are considered.
        * number: Optional. Determines how many commits are returned. If
        `number` is 0, all commits that match the aforementioned criteria are
        returned.
        """
        req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number)
        if to_commit is not None:
            req.to.CopyFrom(commit_from(to_commit))
        if from_commit is not None:
            # `from` is a Python keyword, so the generated protobuf field
            # cannot be set with normal attribute syntax; getattr() is the
            # standard workaround.
            getattr(req, 'from').CopyFrom(commit_from(from_commit))
        res = self.stub.ListCommit(req, metadata=self.metadata)
        # Fall back to an empty list if the repeated field is absent.
        if hasattr(res, 'commit_info'):
            return res.commit_info
        return []
def delete_commit(self, commit):
"""
Deletes a commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.DeleteCommitRequest(commit=commit_from(commit))
self.stub.DeleteCommit(req, metadata=self.metadata)
def flush_commit(self, commits, repos=tuple()):
"""
Blocks until all of the commits which have a set of commits as
provenance have finished. For commits to be considered they must have
all of the specified commits as provenance. This in effect waits for
all of the jobs that are triggered by a set of commits to complete.
It returns an error if any of the commits it's waiting on are
cancelled due to one of the jobs encountering an error during runtime.
Note that it's never necessary to call FlushCommit to run jobs,
they'll run no matter what, FlushCommit just allows you to wait for
them to complete and see their output once they do. This returns an
iterator of CommitInfo objects.
Params:
* commits: A commit or a list of commits to wait on.
* repos: Optional. Only the commits up to and including those repos.
will be considered, otherwise all repos are considered.
"""
req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits],
to_repo=[proto.Repo(name=r) for r in repos])
res = self.stub.FlushCommit(req, metadata=self.metadata)
return res
def list_branch(self, repo_name):
"""
Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo.
"""
req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
res = self.stub.ListBranch(req, metadata=self.metadata)
if hasattr(res, 'branch_info'):
return res.branch_info
return []
def set_branch(self, commit, branch_name):
"""
Sets a commit and its ancestors as a branch.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* branch_name: The name for the branch to set.
"""
res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name)
self.stub.SetBranch(res, metadata=self.metadata)
def delete_branch(self, repo_name, branch_name):
"""
Deletes a branch, but leaves the commits themselves intact. In other
words, those commits can still be accessed via commit IDs and other
branches they happen to be on.
Params:
* repo_name: The name of the repo.
* branch_name: The name of the branch to delete.
"""
res = proto.DeleteBranchRequest(repo=Repo(name=repo_name), branch=branch_name)
self.stub.DeleteBranch(res, metadata=self.metadata)
    def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
                       target_file_datums=0, target_file_bytes=0, overwrite_index=None):
        """
        Uploads a binary bytes array as file(s) in a certain path.
        Params:
        * commit: A tuple, string, or Commit object representing the commit.
        * path: Path in the repo the file(s) will be written to.
        * value: The file contents as bytes, represented as a file-like
        object, bytestring, or iterator of bytestrings.
        * delimiter: Optional. causes data to be broken up into separate files
        with `path` as a prefix.
        * target_file_datums: Optional. Specifies the target number of datums
        in each written file. It may be lower if data does not split evenly,
        but will never be higher, unless the value is 0.
        * target_file_bytes: Specifies the target number of bytes in each
        written file, files may have more or fewer bytes than the target.
        * overwrite_index: Optional. When given, wrapped in an OverwriteIndex
        message and attached to the request(s).
        """
        # Only build the OverwriteIndex message when the caller supplied one.
        overwrite_index_proto = proto.OverwriteIndex(index=overwrite_index) if overwrite_index is not None else None
        # PutFile is a client-streaming RPC: only the FIRST request of the
        # stream names the target file and carries the split options; later
        # requests are continuation chunks of bytes.
        if hasattr(value, "read"):
            # File-like object: stream it in BUFFER_SIZE reads until EOF.
            def wrap(value):
                for i in itertools.count():
                    chunk = value.read(BUFFER_SIZE)
                    if len(chunk) == 0:
                        return
                    if i == 0:
                        yield proto.PutFileRequest(
                            file=proto.File(commit=commit_from(commit), path=path),
                            value=chunk,
                            delimiter=delimiter,
                            target_file_datums=target_file_datums,
                            target_file_bytes=target_file_bytes,
                            overwrite_index=overwrite_index_proto
                        )
                    else:
                        yield proto.PutFileRequest(value=chunk)
        elif isinstance(value, collections.Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
            # Iterator of byte chunks (strings/bytes are excluded: they are
            # iterable but must be sliced, handled in the else branch).
            # NOTE(review): `collections.Iterable` was removed in Python 3.10
            # (`collections.abc.Iterable` is the replacement); left as-is for
            # the py2/py3 compatibility this file targets via `six`.
            def wrap(value):
                for i, chunk in enumerate(value):
                    if i == 0:
                        yield proto.PutFileRequest(
                            file=proto.File(commit=commit_from(commit), path=path),
                            value=chunk,
                            delimiter=delimiter,
                            target_file_datums=target_file_datums,
                            target_file_bytes=target_file_bytes,
                            overwrite_index=overwrite_index_proto
                        )
                    else:
                        yield proto.PutFileRequest(value=chunk)
        else:
            # Bytestring: slice it into BUFFER_SIZE chunks ourselves.
            def wrap(value):
                yield proto.PutFileRequest(
                    file=proto.File(commit=commit_from(commit), path=path),
                    value=value[:BUFFER_SIZE],
                    delimiter=delimiter,
                    target_file_datums=target_file_datums,
                    target_file_bytes=target_file_bytes,
                    overwrite_index=overwrite_index_proto
                )
                for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
                    yield proto.PutFileRequest(
                        value=value[i:i + BUFFER_SIZE],
                        overwrite_index=overwrite_index_proto
                    )
        self.stub.PutFile(wrap(value), metadata=self.metadata)
def put_file_url(self, commit, path, url, recursive=False):
    """
    Puts a file using the content found at a URL. The URL is sent to the
    server which performs the request.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: The path to the file.
    * url: The url of the file to put.
    * recursive: allow for recursive scraping of some types URLs for
    example on s3:// urls.
    """
    def single_request():
        # PutFile is a client-streaming RPC, so even a single request
        # must be supplied as an iterator.
        yield proto.PutFileRequest(
            file=proto.File(commit=commit_from(commit), path=path),
            url=url,
            recursive=recursive
        )
    self.stub.PutFile(single_request(), metadata=self.metadata)
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
    """
    Returns an iterator over the contents of a file at a specific Commit.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: The path of the file.
    * offset_bytes: Optional. Specifies a number of bytes that should be
    skipped in the beginning of the file.
    * size_bytes: Optional. Limits the total amount of data returned; note
    you will get fewer bytes than size if you pass a value larger than the
    size of the file. If size is set to 0 then all of the data will be
    returned.
    * extract_value: If True, then an ExtractValueIterator will be
    returned, which will iterate over the bytes of the file. If False,
    the raw protobuf response iterator will be returned.
    """
    req = proto.GetFileRequest(
        file=proto.File(commit=commit_from(commit), path=path),
        offset_bytes=offset_bytes,
        size_bytes=size_bytes
    )
    # GetFile is a server-streaming RPC; `res` is an iterator of responses.
    res = self.stub.GetFile(req, metadata=self.metadata)
    if extract_value:
        return ExtractValueIterator(res)
    return res
def get_files(self, commit, paths, recursive=False):
    """
    Returns the contents of a list of files at a specific Commit as a
    dictionary of file paths to data.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * paths: A list of paths to retrieve.
    * recursive: If True, will go into each directory in the list
    recursively.
    """
    # Expand directories into the regular files they contain.
    file_infos = []
    for requested in paths:
        info = self.inspect_file(commit, requested)
        if info.file_type == proto.FILE:
            file_infos.append(info)
        else:
            file_infos.extend(self.list_file(commit, requested, recursive=recursive))
    regular_paths = [info.file.path for info in file_infos if info.file_type == proto.FILE]
    return {p: b''.join(self.get_file(commit, p)) for p in regular_paths}
def inspect_file(self, commit, path):
    """
    Returns info about a specific file.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: Path to file.
    """
    request = proto.InspectFileRequest(
        file=proto.File(commit=commit_from(commit), path=path)
    )
    return self.stub.InspectFile(request, metadata=self.metadata)
def list_file(self, commit, path, recursive=False):
    """
    Lists the files in a directory.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: The path to the directory.
    * recursive: If True, continue listing the files for sub-directories.
    """
    request = proto.ListFileRequest(
        file=proto.File(commit=commit_from(commit), path=path)
    )
    response = self.stub.ListFile(request, metadata=self.metadata)
    infos = response.file_info
    if not recursive:
        return list(infos)
    # Collect the regular files at this level, then descend into each
    # sub-directory, appending its (recursive) contents in order.
    collected = [entry for entry in infos if entry.file_type == proto.FILE]
    for entry in infos:
        if entry.file_type == proto.DIR:
            collected += self.list_file(commit, entry.file.path, recursive)
    return collected
def glob_file(self, commit, pattern):
    """
    Returns the FileInfo objects for files matching a glob pattern.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * pattern: The glob pattern to match file paths against.
    """
    req = proto.GlobFileRequest(commit=commit_from(commit), pattern=pattern)
    res = self.stub.GlobFile(req, metadata=self.metadata)
    # NOTE(review): a generated response message normally always has its
    # declared fields, so this hasattr guard presumably targets older
    # server/proto versions -- confirm.
    if hasattr(res, 'file_info'):
        return res.file_info
    return []
def delete_file(self, commit, path):
    """
    Deletes a file from a Commit. DeleteFile leaves a tombstone in the
    Commit, assuming the file isn't written to later attempting to get the
    file from the finished commit will result in not found error. The file
    will of course remain intact in the Commit's parent.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: The path to the file.
    """
    target = proto.File(commit=commit_from(commit), path=path)
    self.stub.DeleteFile(proto.DeleteFileRequest(file=target), metadata=self.metadata)
def delete_all(self):
    """
    Issues the PFS DeleteAll RPC with an empty request.

    NOTE(review): presumably this deletes all repos, commits, and files
    on the cluster -- confirm against the server documentation before use.
    """
    req = proto.google_dot_protobuf_dot_empty__pb2.Empty()
    self.stub.DeleteAll(req, metadata=self.metadata)
|
pachyderm/python-pachyderm | src/python_pachyderm/pfs_client.py | PfsClient.list_branch | python | def list_branch(self, repo_name):
req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
res = self.stub.ListBranch(req, metadata=self.metadata)
if hasattr(res, 'branch_info'):
return res.branch_info
return [] | Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo. | train | https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L253-L264 | null | class PfsClient(object):
def __init__(self, host=None, port=None, auth_token=None):
    """
    Creates a client to connect to PFS.

    Params:
    * host: The pachd host. Default is 'localhost', which is used with
    `pachctl port-forward`.
    * port: The port to connect to. Default is 30650.
    * auth_token: The authentication token; used if authentication is
    enabled on the cluster. Default to `None`.
    """
    address = get_address(host, port)
    # Per-call gRPC metadata (e.g. the auth token header), attached to
    # every RPC issued by this client.
    self.metadata = get_metadata(auth_token)
    # NOTE(review): `grpc.grpc.insecure_channel` / `grpc.APIStub` imply the
    # local module named `grpc` wraps the real grpc package -- confirm.
    self.channel = grpc.grpc.insecure_channel(address)
    self.stub = grpc.APIStub(self.channel)
def create_repo(self, repo_name, description=None):
    """
    Creates a new Repo object in PFS with the given name. Repos are the
    top level data object in PFS and should be used to store data of a
    similar type. For example rather than having a single Repo for an
    entire project you might have separate Repos for logs, metrics,
    database dumps etc.

    Params:
    * repo_name: Name of the repo.
    * description: Repo description.
    """
    request = proto.CreateRepoRequest(
        repo=proto.Repo(name=repo_name),
        description=description,
    )
    self.stub.CreateRepo(request, metadata=self.metadata)
def inspect_repo(self, repo_name):
    """
    Returns info about a specific Repo.

    Params:
    * repo_name: Name of the repo.
    """
    request = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
    return self.stub.InspectRepo(request, metadata=self.metadata)
def list_repo(self):
    """
    Returns info about all Repos.
    """
    response = self.stub.ListRepo(proto.ListRepoRequest(), metadata=self.metadata)
    return response.repo_info if hasattr(response, 'repo_info') else []
def delete_repo(self, repo_name=None, force=False, all=False):
    """
    Deletes a repo and reclaims the storage space it was using.

    Params:
    * repo_name: The name of the repo.
    * force: If set to true, the repo will be removed regardless of
    errors. This argument should be used with care.
    * all: Delete all repos.
    """
    # `repo_name` and `all` are mutually exclusive: exactly one must be given.
    if all:
        if repo_name:
            raise ValueError("Cannot specify a repo_name if all=True")
        req = proto.DeleteRepoRequest(force=force, all=all)
    else:
        if not repo_name:
            raise ValueError("Either a repo_name or all=True needs to be provided")
        req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force)
    self.stub.DeleteRepo(req, metadata=self.metadata)
def start_commit(self, repo_name, branch=None, parent=None, description=None):
    """
    Begins the process of committing data to a Repo. Once started you can
    write to the Commit with PutFile and when all the data has been
    written you must finish the Commit with FinishCommit. NOTE, data is
    not persisted until FinishCommit is called. A Commit object is
    returned.

    Params:
    * repo_name: The name of the repo.
    * branch: A more convenient way to build linear chains of commits.
    When a commit is started with a non-empty branch the value of branch
    becomes an alias for the created Commit. This enables a more intuitive
    access pattern. When the commit is started on a branch the previous
    head of the branch is used as the parent of the commit.
    * parent: Specifies the parent Commit, upon creation the new Commit
    will appear identical to the parent Commit, data can safely be added
    to the new commit without affecting the contents of the parent Commit.
    You may pass "" as parentCommit in which case the new Commit will have
    no parent and will initially appear empty.
    * description: (optional) explanation of the commit for clarity.
    """
    req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch,
                                   description=description)
    res = self.stub.StartCommit(req, metadata=self.metadata)
    return res
def finish_commit(self, commit):
    """
    Ends the process of committing data to a Repo and persists the
    Commit. Once a Commit is finished the data becomes immutable and
    future attempts to write to it with PutFile will error.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    """
    request = proto.FinishCommitRequest(commit=commit_from(commit))
    return self.stub.FinishCommit(request, metadata=self.metadata)
@contextmanager
def commit(self, repo_name, branch=None, parent=None, description=None):
    """
    A context manager for doing stuff inside a commit.

    Starts a commit on entry and yields it; the commit is always finished
    on exit -- even when the body raises -- because a commit cannot be
    cancelled.
    """
    commit = self.start_commit(repo_name, branch, parent, description)
    try:
        yield commit
    except Exception as e:
        # Warn that the open commit will be finished despite the error,
        # then propagate the original exception to the caller.
        print("An exception occurred during an open commit. "
              "Trying to finish it (Currently a commit can't be cancelled)")
        raise e
    finally:
        self.finish_commit(commit)
def inspect_commit(self, commit):
    """
    Returns info about a specific Commit.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    """
    request = proto.InspectCommitRequest(commit=commit_from(commit))
    return self.stub.InspectCommit(request, metadata=self.metadata)
def provenances_for_repo(self, repo_name):
    """
    Maps each provenance commit id to the id of the commit in this repo
    it gave rise to, processing commits oldest-first (by finish time) so
    later commits win on duplicate provenance ids.

    Params:
    * repo_name: Name of the repo.
    """
    provenances = {}
    commits = self.list_commit(repo_name)
    # Bug fix: the previous code sorted the commit *id strings* and then
    # read `.provenance` / `.commit.id` off them, which raised
    # AttributeError. Sort the CommitInfo objects themselves instead.
    for info in sorted(commits, key=lambda ci: ci.finished.seconds):
        for p in info.provenance:
            provenances[p.id] = info.commit.id
    return provenances
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
    """
    Gets a list of CommitInfo objects.

    Params:
    * repo_name: If only `repo_name` is given, all commits in the repo are
    returned.
    * to_commit: Optional. Only the ancestors of `to`, including `to`
    itself, are considered.
    * from_commit: Optional. Only the descendants of `from`, including
    `from` itself, are considered.
    * number: Optional. Determines how many commits are returned. If
    `number` is 0, all commits that match the aforementioned criteria are
    returned.
    """
    req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number)
    if to_commit is not None:
        req.to.CopyFrom(commit_from(to_commit))
    if from_commit is not None:
        # 'from' is a Python keyword, so the proto field must be reached
        # via getattr instead of attribute syntax.
        getattr(req, 'from').CopyFrom(commit_from(from_commit))
    res = self.stub.ListCommit(req, metadata=self.metadata)
    if hasattr(res, 'commit_info'):
        return res.commit_info
    return []
def delete_commit(self, commit):
    """
    Deletes a commit.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    """
    request = proto.DeleteCommitRequest(commit=commit_from(commit))
    self.stub.DeleteCommit(request, metadata=self.metadata)
def flush_commit(self, commits, repos=tuple()):
    """
    Blocks until all of the commits which have a set of commits as
    provenance have finished. For commits to be considered they must have
    all of the specified commits as provenance. This in effect waits for
    all of the jobs that are triggered by a set of commits to complete.
    It returns an error if any of the commits it's waiting on are
    cancelled due to one of the jobs encountering an error during runtime.
    Note that it's never necessary to call FlushCommit to run jobs,
    they'll run no matter what, FlushCommit just allows you to wait for
    them to complete and see their output once they do. This returns an
    iterator of CommitInfo objects.

    Params:
    * commits: An iterable of commits to wait on. NOTE(review): a single
    bare (repo, id) tuple would be iterated element-wise here -- callers
    must wrap single commits in a list; confirm intended API.
    * repos: Optional. Only the commits up to and including those repos
    will be considered, otherwise all repos are considered.
    """
    req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits],
                                   to_repo=[proto.Repo(name=r) for r in repos])
    res = self.stub.FlushCommit(req, metadata=self.metadata)
    return res
def subscribe_commit(self, repo_name, branch, from_commit_id=None):
    """
    SubscribeCommit is like ListCommit but it keeps listening for commits as
    they come in. This returns an iterator Commit objects.

    Params:
    * repo_name: Name of the repo.
    * branch: Branch to subscribe to.
    * from_commit_id: Optional. Only commits created since this commit
    are returned.
    """
    repo = proto.Repo(name=repo_name)
    request = proto.SubscribeCommitRequest(repo=repo, branch=branch)
    if from_commit_id is not None:
        # 'from' is a Python keyword, so the proto field is set via getattr.
        start = proto.Commit(repo=repo, id=from_commit_id)
        getattr(request, 'from').CopyFrom(start)
    return self.stub.SubscribeCommit(request, metadata=self.metadata)
def set_branch(self, commit, branch_name):
    """
    Sets a commit and its ancestors as a branch.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * branch_name: The name for the branch to set.
    """
    request = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name)
    self.stub.SetBranch(request, metadata=self.metadata)
def delete_branch(self, repo_name, branch_name):
    """
    Deletes a branch, but leaves the commits themselves intact. In other
    words, those commits can still be accessed via commit IDs and other
    branches they happen to be on.

    Params:
    * repo_name: The name of the repo.
    * branch_name: The name of the branch to delete.
    """
    # Bug fix: `Repo` was referenced unqualified (NameError at runtime);
    # the message class lives on the `proto` module like everywhere else.
    req = proto.DeleteBranchRequest(repo=proto.Repo(name=repo_name), branch=branch_name)
    self.stub.DeleteBranch(req, metadata=self.metadata)
def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
                   target_file_datums=0, target_file_bytes=0, overwrite_index=None):
    """
    Uploads a binary bytes array as file(s) in a certain path.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: Path in the repo the file(s) will be written to.
    * value: The file contents as bytes, represented as a file-like
    object, bytestring, or iterator of bytestrings.
    * delimiter: Optional. Causes data to be broken up into separate files
    with `path` as a prefix.
    * target_file_datums: Optional. Specifies the target number of datums
    in each written file. It may be lower if data does not split evenly,
    but will never be higher, unless the value is 0.
    * target_file_bytes: Specifies the target number of bytes in each
    written file, files may have more or fewer bytes than the target.
    * overwrite_index: Optional. Forwarded as an OverwriteIndex message;
    presumably the file index to start overwriting from -- confirm against
    the PFS proto docs.
    """
    # Bug fix: `collections.Iterable` was deprecated in Python 3.3 and
    # removed in 3.10; import from `collections.abc` with a Python 2 fallback.
    try:
        from collections.abc import Iterable
    except ImportError:  # Python 2
        from collections import Iterable

    overwrite_index_proto = proto.OverwriteIndex(index=overwrite_index) if overwrite_index is not None else None

    def _head_request(chunk):
        # Only the first request of the client stream carries the file
        # metadata; subsequent requests carry bare value chunks.
        return proto.PutFileRequest(
            file=proto.File(commit=commit_from(commit), path=path),
            value=chunk,
            delimiter=delimiter,
            target_file_datums=target_file_datums,
            target_file_bytes=target_file_bytes,
            overwrite_index=overwrite_index_proto
        )

    if hasattr(value, "read"):
        # File-like object: stream it in BUFFER_SIZE chunks.
        def wrap(value):
            for i in itertools.count():
                chunk = value.read(BUFFER_SIZE)
                if len(chunk) == 0:
                    return
                if i == 0:
                    yield _head_request(chunk)
                else:
                    yield proto.PutFileRequest(value=chunk)
    elif isinstance(value, Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
        # Iterator of bytestrings: forward each chunk as-is.
        def wrap(value):
            for i, chunk in enumerate(value):
                if i == 0:
                    yield _head_request(chunk)
                else:
                    yield proto.PutFileRequest(value=chunk)
    else:
        # Raw bytestring: slice it into BUFFER_SIZE chunks ourselves.
        def wrap(value):
            yield _head_request(value[:BUFFER_SIZE])
            for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
                yield proto.PutFileRequest(
                    value=value[i:i + BUFFER_SIZE],
                    overwrite_index=overwrite_index_proto
                )
    self.stub.PutFile(wrap(value), metadata=self.metadata)
def put_file_url(self, commit, path, url, recursive=False):
    """
    Puts a file using the content found at a URL. The URL is sent to the
    server which performs the request.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: The path to the file.
    * url: The url of the file to put.
    * recursive: allow for recursive scraping of some types URLs for
    example on s3:// urls.
    """
    def single_request():
        # PutFile is a client-streaming RPC, so even a single request
        # must be supplied as an iterator.
        yield proto.PutFileRequest(
            file=proto.File(commit=commit_from(commit), path=path),
            url=url,
            recursive=recursive
        )
    self.stub.PutFile(single_request(), metadata=self.metadata)
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
    """
    Returns an iterator over the contents of a file at a specific Commit.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: The path of the file.
    * offset_bytes: Optional. Specifies a number of bytes that should be
    skipped in the beginning of the file.
    * size_bytes: Optional. Limits the total amount of data returned; note
    you will get fewer bytes than size if you pass a value larger than the
    size of the file. If size is set to 0 then all of the data will be
    returned.
    * extract_value: If True, then an ExtractValueIterator will be
    returned, which will iterate over the bytes of the file. If False,
    the raw protobuf response iterator will be returned.
    """
    req = proto.GetFileRequest(
        file=proto.File(commit=commit_from(commit), path=path),
        offset_bytes=offset_bytes,
        size_bytes=size_bytes
    )
    # GetFile is a server-streaming RPC; `res` is an iterator of responses.
    res = self.stub.GetFile(req, metadata=self.metadata)
    if extract_value:
        return ExtractValueIterator(res)
    return res
def get_files(self, commit, paths, recursive=False):
    """
    Returns the contents of a list of files at a specific Commit as a
    dictionary of file paths to data.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * paths: A list of paths to retrieve.
    * recursive: If True, will go into each directory in the list
    recursively.
    """
    # Expand directories into the regular files they contain.
    file_infos = []
    for requested in paths:
        info = self.inspect_file(commit, requested)
        if info.file_type == proto.FILE:
            file_infos.append(info)
        else:
            file_infos.extend(self.list_file(commit, requested, recursive=recursive))
    regular_paths = [info.file.path for info in file_infos if info.file_type == proto.FILE]
    return {p: b''.join(self.get_file(commit, p)) for p in regular_paths}
def inspect_file(self, commit, path):
    """
    Returns info about a specific file.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: Path to file.
    """
    request = proto.InspectFileRequest(
        file=proto.File(commit=commit_from(commit), path=path)
    )
    return self.stub.InspectFile(request, metadata=self.metadata)
def list_file(self, commit, path, recursive=False):
    """
    Lists the files in a directory.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: The path to the directory.
    * recursive: If True, continue listing the files for sub-directories.
    """
    request = proto.ListFileRequest(
        file=proto.File(commit=commit_from(commit), path=path)
    )
    response = self.stub.ListFile(request, metadata=self.metadata)
    infos = response.file_info
    if not recursive:
        return list(infos)
    # Collect the regular files at this level, then descend into each
    # sub-directory, appending its (recursive) contents in order.
    collected = [entry for entry in infos if entry.file_type == proto.FILE]
    for entry in infos:
        if entry.file_type == proto.DIR:
            collected += self.list_file(commit, entry.file.path, recursive)
    return collected
def glob_file(self, commit, pattern):
    """
    Returns the FileInfo objects for files matching a glob pattern.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * pattern: The glob pattern to match file paths against.
    """
    req = proto.GlobFileRequest(commit=commit_from(commit), pattern=pattern)
    res = self.stub.GlobFile(req, metadata=self.metadata)
    # NOTE(review): a generated response message normally always has its
    # declared fields, so this hasattr guard presumably targets older
    # server/proto versions -- confirm.
    if hasattr(res, 'file_info'):
        return res.file_info
    return []
def delete_file(self, commit, path):
    """
    Deletes a file from a Commit. DeleteFile leaves a tombstone in the
    Commit, assuming the file isn't written to later attempting to get the
    file from the finished commit will result in not found error. The file
    will of course remain intact in the Commit's parent.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: The path to the file.
    """
    target = proto.File(commit=commit_from(commit), path=path)
    self.stub.DeleteFile(proto.DeleteFileRequest(file=target), metadata=self.metadata)
def delete_all(self):
    """
    Issues the PFS DeleteAll RPC with an empty request.

    NOTE(review): presumably this deletes all repos, commits, and files
    on the cluster -- confirm against the server documentation before use.
    """
    req = proto.google_dot_protobuf_dot_empty__pb2.Empty()
    self.stub.DeleteAll(req, metadata=self.metadata)
|
pachyderm/python-pachyderm | src/python_pachyderm/pfs_client.py | PfsClient.set_branch | python | def set_branch(self, commit, branch_name):
res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name)
self.stub.SetBranch(res, metadata=self.metadata) | Sets a commit and its ancestors as a branch.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* branch_name: The name for the branch to set. | train | https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L266-L275 | [
"def commit_from(src, allow_just_repo=False):\n if isinstance(src, pfs_proto.Commit):\n return src\n elif isinstance(src, (tuple, list)) and len(src) == 2:\n return pfs_proto.Commit(repo=pfs_proto.Repo(name=src[0]), id=src[1])\n elif isinstance(src, six.string_types):\n repo_name, comm... | class PfsClient(object):
def __init__(self, host=None, port=None, auth_token=None):
"""
Creates a client to connect to PFS.
Params:
* host: The pachd host. Default is 'localhost', which is used with
`pachctl port-forward`.
* port: The port to connect to. Default is 30650.
* auth_token: The authentication token; used if authentication is
enabled on the cluster. Default to `None`.
"""
address = get_address(host, port)
self.metadata = get_metadata(auth_token)
self.channel = grpc.grpc.insecure_channel(address)
self.stub = grpc.APIStub(self.channel)
def create_repo(self, repo_name, description=None):
"""
Creates a new Repo object in PFS with the given name. Repos are the
top level data object in PFS and should be used to store data of a
similar type. For example rather than having a single Repo for an
entire project you might have separate Repos for logs, metrics,
database dumps etc.
Params:
* repo_name: Name of the repo.
* description: Repo description.
"""
req = proto.CreateRepoRequest(repo=proto.Repo(name=repo_name), description=description)
self.stub.CreateRepo(req, metadata=self.metadata)
def inspect_repo(self, repo_name):
"""
Returns info about a specific Repo.
Params:
* repo_name: Name of the repo.
"""
req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
res = self.stub.InspectRepo(req, metadata=self.metadata)
return res
def list_repo(self):
"""
Returns info about all Repos.
"""
req = proto.ListRepoRequest()
res = self.stub.ListRepo(req, metadata=self.metadata)
if hasattr(res, 'repo_info'):
return res.repo_info
return []
def delete_repo(self, repo_name=None, force=False, all=False):
"""
Deletes a repo and reclaims the storage space it was using.
Params:
* repo_name: The name of the repo.
* force: If set to true, the repo will be removed regardless of
errors. This argument should be used with care.
* all: Delete all repos.
"""
if not all:
if repo_name:
req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Either a repo_name or all=True needs to be provided")
else:
if not repo_name:
req = proto.DeleteRepoRequest(force=force, all=all)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Cannot specify a repo_name if all=True")
def start_commit(self, repo_name, branch=None, parent=None, description=None):
"""
Begins the process of committing data to a Repo. Once started you can
write to the Commit with PutFile and when all the data has been
written you must finish the Commit with FinishCommit. NOTE, data is
not persisted until FinishCommit is called. A Commit object is
returned.
Params:
* repo_name: The name of the repo.
* branch: A more convenient way to build linear chains of commits.
When a commit is started with a non-empty branch the value of branch
becomes an alias for the created Commit. This enables a more intuitive
access pattern. When the commit is started on a branch the previous
head of the branch is used as the parent of the commit.
* parent: Specifies the parent Commit, upon creation the new Commit
will appear identical to the parent Commit, data can safely be added
to the new commit without affecting the contents of the parent Commit.
You may pass "" as parentCommit in which case the new Commit will have
no parent and will initially appear empty.
* description: (optional) explanation of the commit for clarity.
"""
req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch,
description=description)
res = self.stub.StartCommit(req, metadata=self.metadata)
return res
def finish_commit(self, commit):
"""
Ends the process of committing data to a Repo and persists the
Commit. Once a Commit is finished the data becomes immutable and
future attempts to write to it with PutFile will error.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.FinishCommitRequest(commit=commit_from(commit))
res = self.stub.FinishCommit(req, metadata=self.metadata)
return res
@contextmanager
def commit(self, repo_name, branch=None, parent=None, description=None):
"""A context manager for doing stuff inside a commit."""
commit = self.start_commit(repo_name, branch, parent, description)
try:
yield commit
except Exception as e:
print("An exception occurred during an open commit. "
"Trying to finish it (Currently a commit can't be cancelled)")
raise e
finally:
self.finish_commit(commit)
def inspect_commit(self, commit):
"""
Returns info about a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.InspectCommitRequest(commit=commit_from(commit))
return self.stub.InspectCommit(req, metadata=self.metadata)
def provenances_for_repo(self, repo_name):
    """
    Maps each provenance commit id to the id of the commit in this repo
    it gave rise to, processing commits oldest-first (by finish time) so
    later commits win on duplicate provenance ids.

    Params:
    * repo_name: Name of the repo.
    """
    provenances = {}
    commits = self.list_commit(repo_name)
    # Bug fix: the previous code sorted the commit *id strings* and then
    # read `.provenance` / `.commit.id` off them, which raised
    # AttributeError. Sort the CommitInfo objects themselves instead.
    for info in sorted(commits, key=lambda ci: ci.finished.seconds):
        for p in info.provenance:
            provenances[p.id] = info.commit.id
    return provenances
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
"""
Gets a list of CommitInfo objects.
Params:
* repo_name: If only `repo_name` is given, all commits in the repo are
returned.
* to_commit: Optional. Only the ancestors of `to`, including `to`
itself, are considered.
* from_commit: Optional. Only the descendants of `from`, including
`from` itself, are considered.
* number: Optional. Determines how many commits are returned. If
`number` is 0, all commits that match the aforementioned criteria are
returned.
"""
req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number)
if to_commit is not None:
req.to.CopyFrom(commit_from(to_commit))
if from_commit is not None:
getattr(req, 'from').CopyFrom(commit_from(from_commit))
res = self.stub.ListCommit(req, metadata=self.metadata)
if hasattr(res, 'commit_info'):
return res.commit_info
return []
def delete_commit(self, commit):
"""
Deletes a commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.DeleteCommitRequest(commit=commit_from(commit))
self.stub.DeleteCommit(req, metadata=self.metadata)
def flush_commit(self, commits, repos=tuple()):
"""
Blocks until all of the commits which have a set of commits as
provenance have finished. For commits to be considered they must have
all of the specified commits as provenance. This in effect waits for
all of the jobs that are triggered by a set of commits to complete.
It returns an error if any of the commits it's waiting on are
cancelled due to one of the jobs encountering an error during runtime.
Note that it's never necessary to call FlushCommit to run jobs,
they'll run no matter what, FlushCommit just allows you to wait for
them to complete and see their output once they do. This returns an
iterator of CommitInfo objects.
Params:
* commits: A commit or a list of commits to wait on.
* repos: Optional. Only the commits up to and including those repos.
will be considered, otherwise all repos are considered.
"""
req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits],
to_repo=[proto.Repo(name=r) for r in repos])
res = self.stub.FlushCommit(req, metadata=self.metadata)
return res
def subscribe_commit(self, repo_name, branch, from_commit_id=None):
"""
SubscribeCommit is like ListCommit but it keeps listening for commits as
they come in. This returns an iterator Commit objects.
Params:
* repo_name: Name of the repo.
* branch: Branch to subscribe to.
* from_commit_id: Optional. Only commits created since this commit
are returned.
"""
repo = proto.Repo(name=repo_name)
req = proto.SubscribeCommitRequest(repo=repo, branch=branch)
if from_commit_id is not None:
getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id))
res = self.stub.SubscribeCommit(req, metadata=self.metadata)
return res
def list_branch(self, repo_name):
"""
Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo.
"""
req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
res = self.stub.ListBranch(req, metadata=self.metadata)
if hasattr(res, 'branch_info'):
return res.branch_info
return []
def delete_branch(self, repo_name, branch_name):
    """
    Deletes a branch, but leaves the commits themselves intact. In other
    words, those commits can still be accessed via commit IDs and other
    branches they happen to be on.

    Params:
    * repo_name: The name of the repo.
    * branch_name: The name of the branch to delete.
    """
    # Bug fix: `Repo` was referenced unqualified (NameError at runtime);
    # the message class lives on the `proto` module like everywhere else.
    req = proto.DeleteBranchRequest(repo=proto.Repo(name=repo_name), branch=branch_name)
    self.stub.DeleteBranch(req, metadata=self.metadata)
def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
                   target_file_datums=0, target_file_bytes=0, overwrite_index=None):
    """
    Uploads a binary bytes array as file(s) in a certain path.

    Params:
    * commit: A tuple, string, or Commit object representing the commit.
    * path: Path in the repo the file(s) will be written to.
    * value: The file contents as bytes, represented as a file-like
    object, bytestring, or iterator of bytestrings.
    * delimiter: Optional. Causes data to be broken up into separate files
    with `path` as a prefix.
    * target_file_datums: Optional. Specifies the target number of datums
    in each written file. It may be lower if data does not split evenly,
    but will never be higher, unless the value is 0.
    * target_file_bytes: Specifies the target number of bytes in each
    written file, files may have more or fewer bytes than the target.
    * overwrite_index: Optional. Forwarded as an OverwriteIndex message;
    presumably the file index to start overwriting from -- confirm against
    the PFS proto docs.
    """
    # Bug fix: `collections.Iterable` was deprecated in Python 3.3 and
    # removed in 3.10; import from `collections.abc` with a Python 2 fallback.
    try:
        from collections.abc import Iterable
    except ImportError:  # Python 2
        from collections import Iterable

    overwrite_index_proto = proto.OverwriteIndex(index=overwrite_index) if overwrite_index is not None else None

    def _head_request(chunk):
        # Only the first request of the client stream carries the file
        # metadata; subsequent requests carry bare value chunks.
        return proto.PutFileRequest(
            file=proto.File(commit=commit_from(commit), path=path),
            value=chunk,
            delimiter=delimiter,
            target_file_datums=target_file_datums,
            target_file_bytes=target_file_bytes,
            overwrite_index=overwrite_index_proto
        )

    if hasattr(value, "read"):
        # File-like object: stream it in BUFFER_SIZE chunks.
        def wrap(value):
            for i in itertools.count():
                chunk = value.read(BUFFER_SIZE)
                if len(chunk) == 0:
                    return
                if i == 0:
                    yield _head_request(chunk)
                else:
                    yield proto.PutFileRequest(value=chunk)
    elif isinstance(value, Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
        # Iterator of bytestrings: forward each chunk as-is.
        def wrap(value):
            for i, chunk in enumerate(value):
                if i == 0:
                    yield _head_request(chunk)
                else:
                    yield proto.PutFileRequest(value=chunk)
    else:
        # Raw bytestring: slice it into BUFFER_SIZE chunks ourselves.
        def wrap(value):
            yield _head_request(value[:BUFFER_SIZE])
            for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
                yield proto.PutFileRequest(
                    value=value[i:i + BUFFER_SIZE],
                    overwrite_index=overwrite_index_proto
                )
    self.stub.PutFile(wrap(value), metadata=self.metadata)
def put_file_url(self, commit, path, url, recursive=False):
"""
Puts a file using the content found at a URL. The URL is sent to the
server which performs the request.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
* url: The url of the file to put.
* recursive: allow for recursive scraping of some types URLs for
example on s3:// urls.
"""
req = iter([
proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
url=url,
recursive=recursive
)
])
self.stub.PutFile(req, metadata=self.metadata)
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
"""
Returns an iterator of the contents contents of a file at a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path of the file.
* offset_bytes: Optional. specifies a number of bytes that should be
skipped in the beginning of the file.
* size_bytes: Optional. limits the total amount of data returned, note
you will get fewer bytes than size if you pass a value larger than the
size of the file. If size is set to 0 then all of the data will be
returned.
* extract_value: If True, then an ExtractValueIterator will be return,
which will iterate over the bytes of the file. If False, then the
protobuf response iterator will return.
"""
req = proto.GetFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
offset_bytes=offset_bytes,
size_bytes=size_bytes
)
res = self.stub.GetFile(req, metadata=self.metadata)
if extract_value:
return ExtractValueIterator(res)
return res
def get_files(self, commit, paths, recursive=False):
"""
Returns the contents of a list of files at a specific Commit as a
dictionary of file paths to data.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* paths: A list of paths to retrieve.
* recursive: If True, will go into each directory in the list
recursively.
"""
filtered_file_infos = []
for path in paths:
fi = self.inspect_file(commit, path)
if fi.file_type == proto.FILE:
filtered_file_infos.append(fi)
else:
filtered_file_infos += self.list_file(commit, path, recursive=recursive)
filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE]
return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
def inspect_file(self, commit, path):
"""
Returns info about a specific file.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path to file.
"""
req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path))
res = self.stub.InspectFile(req, metadata=self.metadata)
return res
def list_file(self, commit, path, recursive=False):
"""
Lists the files in a directory.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the directory.
* recursive: If True, continue listing the files for sub-directories.
"""
req = proto.ListFileRequest(
file=proto.File(commit=commit_from(commit), path=path)
)
res = self.stub.ListFile(req, metadata=self.metadata)
file_infos = res.file_info
if recursive:
dirs = [f for f in file_infos if f.file_type == proto.DIR]
files = [f for f in file_infos if f.file_type == proto.FILE]
return sum([self.list_file(commit, d.file.path, recursive) for d in dirs], files)
return list(file_infos)
def glob_file(self, commit, pattern):
req = proto.GlobFileRequest(commit=commit_from(commit), pattern=pattern)
res = self.stub.GlobFile(req, metadata=self.metadata)
if hasattr(res, 'file_info'):
return res.file_info
return []
def delete_file(self, commit, path):
"""
Deletes a file from a Commit. DeleteFile leaves a tombstone in the
Commit, assuming the file isn't written to later attempting to get the
file from the finished commit will result in not found error. The file
will of course remain intact in the Commit's parent.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
"""
req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path))
self.stub.DeleteFile(req, metadata=self.metadata)
def delete_all(self):
req = proto.google_dot_protobuf_dot_empty__pb2.Empty()
self.stub.DeleteAll(req, metadata=self.metadata)
|
pachyderm/python-pachyderm | src/python_pachyderm/pfs_client.py | PfsClient.delete_branch | python | def delete_branch(self, repo_name, branch_name):
res = proto.DeleteBranchRequest(repo=Repo(name=repo_name), branch=branch_name)
self.stub.DeleteBranch(res, metadata=self.metadata) | Deletes a branch, but leaves the commits themselves intact. In other
words, those commits can still be accessed via commit IDs and other
branches they happen to be on.
Params:
* repo_name: The name of the repo.
* branch_name: The name of the branch to delete. | train | https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L277-L288 | null | class PfsClient(object):
def __init__(self, host=None, port=None, auth_token=None):
"""
Creates a client to connect to PFS.
Params:
* host: The pachd host. Default is 'localhost', which is used with
`pachctl port-forward`.
* port: The port to connect to. Default is 30650.
* auth_token: The authentication token; used if authentication is
enabled on the cluster. Default to `None`.
"""
address = get_address(host, port)
self.metadata = get_metadata(auth_token)
self.channel = grpc.grpc.insecure_channel(address)
self.stub = grpc.APIStub(self.channel)
def create_repo(self, repo_name, description=None):
"""
Creates a new Repo object in PFS with the given name. Repos are the
top level data object in PFS and should be used to store data of a
similar type. For example rather than having a single Repo for an
entire project you might have separate Repos for logs, metrics,
database dumps etc.
Params:
* repo_name: Name of the repo.
* description: Repo description.
"""
req = proto.CreateRepoRequest(repo=proto.Repo(name=repo_name), description=description)
self.stub.CreateRepo(req, metadata=self.metadata)
def inspect_repo(self, repo_name):
"""
Returns info about a specific Repo.
Params:
* repo_name: Name of the repo.
"""
req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
res = self.stub.InspectRepo(req, metadata=self.metadata)
return res
def list_repo(self):
"""
Returns info about all Repos.
"""
req = proto.ListRepoRequest()
res = self.stub.ListRepo(req, metadata=self.metadata)
if hasattr(res, 'repo_info'):
return res.repo_info
return []
def delete_repo(self, repo_name=None, force=False, all=False):
"""
Deletes a repo and reclaims the storage space it was using.
Params:
* repo_name: The name of the repo.
* force: If set to true, the repo will be removed regardless of
errors. This argument should be used with care.
* all: Delete all repos.
"""
if not all:
if repo_name:
req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Either a repo_name or all=True needs to be provided")
else:
if not repo_name:
req = proto.DeleteRepoRequest(force=force, all=all)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Cannot specify a repo_name if all=True")
def start_commit(self, repo_name, branch=None, parent=None, description=None):
"""
Begins the process of committing data to a Repo. Once started you can
write to the Commit with PutFile and when all the data has been
written you must finish the Commit with FinishCommit. NOTE, data is
not persisted until FinishCommit is called. A Commit object is
returned.
Params:
* repo_name: The name of the repo.
* branch: A more convenient way to build linear chains of commits.
When a commit is started with a non-empty branch the value of branch
becomes an alias for the created Commit. This enables a more intuitive
access pattern. When the commit is started on a branch the previous
head of the branch is used as the parent of the commit.
* parent: Specifies the parent Commit, upon creation the new Commit
will appear identical to the parent Commit, data can safely be added
to the new commit without affecting the contents of the parent Commit.
You may pass "" as parentCommit in which case the new Commit will have
no parent and will initially appear empty.
* description: (optional) explanation of the commit for clarity.
"""
req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch,
description=description)
res = self.stub.StartCommit(req, metadata=self.metadata)
return res
def finish_commit(self, commit):
"""
Ends the process of committing data to a Repo and persists the
Commit. Once a Commit is finished the data becomes immutable and
future attempts to write to it with PutFile will error.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.FinishCommitRequest(commit=commit_from(commit))
res = self.stub.FinishCommit(req, metadata=self.metadata)
return res
@contextmanager
def commit(self, repo_name, branch=None, parent=None, description=None):
"""A context manager for doing stuff inside a commit."""
commit = self.start_commit(repo_name, branch, parent, description)
try:
yield commit
except Exception as e:
print("An exception occurred during an open commit. "
"Trying to finish it (Currently a commit can't be cancelled)")
raise e
finally:
self.finish_commit(commit)
def inspect_commit(self, commit):
"""
Returns info about a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.InspectCommitRequest(commit=commit_from(commit))
return self.stub.InspectCommit(req, metadata=self.metadata)
def provenances_for_repo(self, repo_name):
provenances = {}
commits = self.list_commit(repo_name)
sorted_commits = [x[0] for x in
sorted([(c.commit.id, c.finished.seconds) for c in commits], key=lambda x: x[1])]
for c in sorted_commits:
for p in c.provenance:
provenances[p.id] = c.commit.id
return provenances
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
"""
Gets a list of CommitInfo objects.
Params:
* repo_name: If only `repo_name` is given, all commits in the repo are
returned.
* to_commit: Optional. Only the ancestors of `to`, including `to`
itself, are considered.
* from_commit: Optional. Only the descendants of `from`, including
`from` itself, are considered.
* number: Optional. Determines how many commits are returned. If
`number` is 0, all commits that match the aforementioned criteria are
returned.
"""
req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number)
if to_commit is not None:
req.to.CopyFrom(commit_from(to_commit))
if from_commit is not None:
getattr(req, 'from').CopyFrom(commit_from(from_commit))
res = self.stub.ListCommit(req, metadata=self.metadata)
if hasattr(res, 'commit_info'):
return res.commit_info
return []
def delete_commit(self, commit):
"""
Deletes a commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.DeleteCommitRequest(commit=commit_from(commit))
self.stub.DeleteCommit(req, metadata=self.metadata)
def flush_commit(self, commits, repos=tuple()):
"""
Blocks until all of the commits which have a set of commits as
provenance have finished. For commits to be considered they must have
all of the specified commits as provenance. This in effect waits for
all of the jobs that are triggered by a set of commits to complete.
It returns an error if any of the commits it's waiting on are
cancelled due to one of the jobs encountering an error during runtime.
Note that it's never necessary to call FlushCommit to run jobs,
they'll run no matter what, FlushCommit just allows you to wait for
them to complete and see their output once they do. This returns an
iterator of CommitInfo objects.
Params:
* commits: A commit or a list of commits to wait on.
* repos: Optional. Only the commits up to and including those repos.
will be considered, otherwise all repos are considered.
"""
req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits],
to_repo=[proto.Repo(name=r) for r in repos])
res = self.stub.FlushCommit(req, metadata=self.metadata)
return res
def subscribe_commit(self, repo_name, branch, from_commit_id=None):
"""
SubscribeCommit is like ListCommit but it keeps listening for commits as
they come in. This returns an iterator Commit objects.
Params:
* repo_name: Name of the repo.
* branch: Branch to subscribe to.
* from_commit_id: Optional. Only commits created since this commit
are returned.
"""
repo = proto.Repo(name=repo_name)
req = proto.SubscribeCommitRequest(repo=repo, branch=branch)
if from_commit_id is not None:
getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id))
res = self.stub.SubscribeCommit(req, metadata=self.metadata)
return res
def list_branch(self, repo_name):
"""
Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo.
"""
req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
res = self.stub.ListBranch(req, metadata=self.metadata)
if hasattr(res, 'branch_info'):
return res.branch_info
return []
def set_branch(self, commit, branch_name):
"""
Sets a commit and its ancestors as a branch.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* branch_name: The name for the branch to set.
"""
res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name)
self.stub.SetBranch(res, metadata=self.metadata)
def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
target_file_datums=0, target_file_bytes=0, overwrite_index=None):
"""
Uploads a binary bytes array as file(s) in a certain path.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path in the repo the file(s) will be written to.
* value: The file contents as bytes, represented as a file-like
object, bytestring, or iterator of bytestrings.
* delimiter: Optional. causes data to be broken up into separate files
with `path` as a prefix.
* target_file_datums: Optional. Specifies the target number of datums
in each written file. It may be lower if data does not split evenly,
but will never be higher, unless the value is 0.
* target_file_bytes: Specifies the target number of bytes in each
written file, files may have more or fewer bytes than the target.
"""
overwrite_index_proto = proto.OverwriteIndex(index=overwrite_index) if overwrite_index is not None else None
if hasattr(value, "read"):
def wrap(value):
for i in itertools.count():
chunk = value.read(BUFFER_SIZE)
if len(chunk) == 0:
return
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
elif isinstance(value, collections.Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
def wrap(value):
for i, chunk in enumerate(value):
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
else:
def wrap(value):
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=value[:BUFFER_SIZE],
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
yield proto.PutFileRequest(
value=value[i:i + BUFFER_SIZE],
overwrite_index=overwrite_index_proto
)
self.stub.PutFile(wrap(value), metadata=self.metadata)
def put_file_url(self, commit, path, url, recursive=False):
"""
Puts a file using the content found at a URL. The URL is sent to the
server which performs the request.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
* url: The url of the file to put.
* recursive: allow for recursive scraping of some types URLs for
example on s3:// urls.
"""
req = iter([
proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
url=url,
recursive=recursive
)
])
self.stub.PutFile(req, metadata=self.metadata)
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
"""
Returns an iterator of the contents contents of a file at a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path of the file.
* offset_bytes: Optional. specifies a number of bytes that should be
skipped in the beginning of the file.
* size_bytes: Optional. limits the total amount of data returned, note
you will get fewer bytes than size if you pass a value larger than the
size of the file. If size is set to 0 then all of the data will be
returned.
* extract_value: If True, then an ExtractValueIterator will be return,
which will iterate over the bytes of the file. If False, then the
protobuf response iterator will return.
"""
req = proto.GetFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
offset_bytes=offset_bytes,
size_bytes=size_bytes
)
res = self.stub.GetFile(req, metadata=self.metadata)
if extract_value:
return ExtractValueIterator(res)
return res
def get_files(self, commit, paths, recursive=False):
"""
Returns the contents of a list of files at a specific Commit as a
dictionary of file paths to data.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* paths: A list of paths to retrieve.
* recursive: If True, will go into each directory in the list
recursively.
"""
filtered_file_infos = []
for path in paths:
fi = self.inspect_file(commit, path)
if fi.file_type == proto.FILE:
filtered_file_infos.append(fi)
else:
filtered_file_infos += self.list_file(commit, path, recursive=recursive)
filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE]
return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
def inspect_file(self, commit, path):
"""
Returns info about a specific file.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path to file.
"""
req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path))
res = self.stub.InspectFile(req, metadata=self.metadata)
return res
def list_file(self, commit, path, recursive=False):
"""
Lists the files in a directory.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the directory.
* recursive: If True, continue listing the files for sub-directories.
"""
req = proto.ListFileRequest(
file=proto.File(commit=commit_from(commit), path=path)
)
res = self.stub.ListFile(req, metadata=self.metadata)
file_infos = res.file_info
if recursive:
dirs = [f for f in file_infos if f.file_type == proto.DIR]
files = [f for f in file_infos if f.file_type == proto.FILE]
return sum([self.list_file(commit, d.file.path, recursive) for d in dirs], files)
return list(file_infos)
def glob_file(self, commit, pattern):
req = proto.GlobFileRequest(commit=commit_from(commit), pattern=pattern)
res = self.stub.GlobFile(req, metadata=self.metadata)
if hasattr(res, 'file_info'):
return res.file_info
return []
def delete_file(self, commit, path):
"""
Deletes a file from a Commit. DeleteFile leaves a tombstone in the
Commit, assuming the file isn't written to later attempting to get the
file from the finished commit will result in not found error. The file
will of course remain intact in the Commit's parent.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
"""
req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path))
self.stub.DeleteFile(req, metadata=self.metadata)
def delete_all(self):
req = proto.google_dot_protobuf_dot_empty__pb2.Empty()
self.stub.DeleteAll(req, metadata=self.metadata)
|
pachyderm/python-pachyderm | src/python_pachyderm/pfs_client.py | PfsClient.put_file_bytes | python | def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
target_file_datums=0, target_file_bytes=0, overwrite_index=None):
overwrite_index_proto = proto.OverwriteIndex(index=overwrite_index) if overwrite_index is not None else None
if hasattr(value, "read"):
def wrap(value):
for i in itertools.count():
chunk = value.read(BUFFER_SIZE)
if len(chunk) == 0:
return
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
elif isinstance(value, collections.Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
def wrap(value):
for i, chunk in enumerate(value):
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
else:
def wrap(value):
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=value[:BUFFER_SIZE],
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
yield proto.PutFileRequest(
value=value[i:i + BUFFER_SIZE],
overwrite_index=overwrite_index_proto
)
self.stub.PutFile(wrap(value), metadata=self.metadata) | Uploads a binary bytes array as file(s) in a certain path.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path in the repo the file(s) will be written to.
* value: The file contents as bytes, represented as a file-like
object, bytestring, or iterator of bytestrings.
* delimiter: Optional. causes data to be broken up into separate files
with `path` as a prefix.
* target_file_datums: Optional. Specifies the target number of datums
in each written file. It may be lower if data does not split evenly,
but will never be higher, unless the value is 0.
* target_file_bytes: Specifies the target number of bytes in each
written file, files may have more or fewer bytes than the target. | train | https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L290-L361 | [
"def wrap(value):\n for i in itertools.count():\n chunk = value.read(BUFFER_SIZE)\n\n if len(chunk) == 0:\n return\n\n if i == 0:\n yield proto.PutFileRequest(\n file=proto.File(commit=commit_from(commit), path=path),\n value=chunk,\n ... | class PfsClient(object):
def __init__(self, host=None, port=None, auth_token=None):
"""
Creates a client to connect to PFS.
Params:
* host: The pachd host. Default is 'localhost', which is used with
`pachctl port-forward`.
* port: The port to connect to. Default is 30650.
* auth_token: The authentication token; used if authentication is
enabled on the cluster. Default to `None`.
"""
address = get_address(host, port)
self.metadata = get_metadata(auth_token)
self.channel = grpc.grpc.insecure_channel(address)
self.stub = grpc.APIStub(self.channel)
def create_repo(self, repo_name, description=None):
"""
Creates a new Repo object in PFS with the given name. Repos are the
top level data object in PFS and should be used to store data of a
similar type. For example rather than having a single Repo for an
entire project you might have separate Repos for logs, metrics,
database dumps etc.
Params:
* repo_name: Name of the repo.
* description: Repo description.
"""
req = proto.CreateRepoRequest(repo=proto.Repo(name=repo_name), description=description)
self.stub.CreateRepo(req, metadata=self.metadata)
def inspect_repo(self, repo_name):
"""
Returns info about a specific Repo.
Params:
* repo_name: Name of the repo.
"""
req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
res = self.stub.InspectRepo(req, metadata=self.metadata)
return res
def list_repo(self):
"""
Returns info about all Repos.
"""
req = proto.ListRepoRequest()
res = self.stub.ListRepo(req, metadata=self.metadata)
if hasattr(res, 'repo_info'):
return res.repo_info
return []
def delete_repo(self, repo_name=None, force=False, all=False):
"""
Deletes a repo and reclaims the storage space it was using.
Params:
* repo_name: The name of the repo.
* force: If set to true, the repo will be removed regardless of
errors. This argument should be used with care.
* all: Delete all repos.
"""
if not all:
if repo_name:
req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Either a repo_name or all=True needs to be provided")
else:
if not repo_name:
req = proto.DeleteRepoRequest(force=force, all=all)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Cannot specify a repo_name if all=True")
def start_commit(self, repo_name, branch=None, parent=None, description=None):
"""
Begins the process of committing data to a Repo. Once started you can
write to the Commit with PutFile and when all the data has been
written you must finish the Commit with FinishCommit. NOTE, data is
not persisted until FinishCommit is called. A Commit object is
returned.
Params:
* repo_name: The name of the repo.
* branch: A more convenient way to build linear chains of commits.
When a commit is started with a non-empty branch the value of branch
becomes an alias for the created Commit. This enables a more intuitive
access pattern. When the commit is started on a branch the previous
head of the branch is used as the parent of the commit.
* parent: Specifies the parent Commit, upon creation the new Commit
will appear identical to the parent Commit, data can safely be added
to the new commit without affecting the contents of the parent Commit.
You may pass "" as parentCommit in which case the new Commit will have
no parent and will initially appear empty.
* description: (optional) explanation of the commit for clarity.
"""
req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch,
description=description)
res = self.stub.StartCommit(req, metadata=self.metadata)
return res
def finish_commit(self, commit):
"""
Ends the process of committing data to a Repo and persists the
Commit. Once a Commit is finished the data becomes immutable and
future attempts to write to it with PutFile will error.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.FinishCommitRequest(commit=commit_from(commit))
res = self.stub.FinishCommit(req, metadata=self.metadata)
return res
@contextmanager
def commit(self, repo_name, branch=None, parent=None, description=None):
"""A context manager for doing stuff inside a commit."""
commit = self.start_commit(repo_name, branch, parent, description)
try:
yield commit
except Exception as e:
print("An exception occurred during an open commit. "
"Trying to finish it (Currently a commit can't be cancelled)")
raise e
finally:
self.finish_commit(commit)
def inspect_commit(self, commit):
"""
Returns info about a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.InspectCommitRequest(commit=commit_from(commit))
return self.stub.InspectCommit(req, metadata=self.metadata)
def provenances_for_repo(self, repo_name):
provenances = {}
commits = self.list_commit(repo_name)
sorted_commits = [x[0] for x in
sorted([(c.commit.id, c.finished.seconds) for c in commits], key=lambda x: x[1])]
for c in sorted_commits:
for p in c.provenance:
provenances[p.id] = c.commit.id
return provenances
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
"""
Gets a list of CommitInfo objects.
Params:
* repo_name: If only `repo_name` is given, all commits in the repo are
returned.
* to_commit: Optional. Only the ancestors of `to`, including `to`
itself, are considered.
* from_commit: Optional. Only the descendants of `from`, including
`from` itself, are considered.
* number: Optional. Determines how many commits are returned. If
`number` is 0, all commits that match the aforementioned criteria are
returned.
"""
req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number)
if to_commit is not None:
req.to.CopyFrom(commit_from(to_commit))
if from_commit is not None:
getattr(req, 'from').CopyFrom(commit_from(from_commit))
res = self.stub.ListCommit(req, metadata=self.metadata)
if hasattr(res, 'commit_info'):
return res.commit_info
return []
def delete_commit(self, commit):
"""
Deletes a commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.DeleteCommitRequest(commit=commit_from(commit))
self.stub.DeleteCommit(req, metadata=self.metadata)
def flush_commit(self, commits, repos=tuple()):
"""
Blocks until all of the commits which have a set of commits as
provenance have finished. For commits to be considered they must have
all of the specified commits as provenance. This in effect waits for
all of the jobs that are triggered by a set of commits to complete.
It returns an error if any of the commits it's waiting on are
cancelled due to one of the jobs encountering an error during runtime.
Note that it's never necessary to call FlushCommit to run jobs,
they'll run no matter what, FlushCommit just allows you to wait for
them to complete and see their output once they do. This returns an
iterator of CommitInfo objects.
Params:
* commits: A commit or a list of commits to wait on.
* repos: Optional. Only the commits up to and including those repos.
will be considered, otherwise all repos are considered.
"""
req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits],
to_repo=[proto.Repo(name=r) for r in repos])
res = self.stub.FlushCommit(req, metadata=self.metadata)
return res
def subscribe_commit(self, repo_name, branch, from_commit_id=None):
"""
SubscribeCommit is like ListCommit but it keeps listening for commits as
they come in. This returns an iterator Commit objects.
Params:
* repo_name: Name of the repo.
* branch: Branch to subscribe to.
* from_commit_id: Optional. Only commits created since this commit
are returned.
"""
repo = proto.Repo(name=repo_name)
req = proto.SubscribeCommitRequest(repo=repo, branch=branch)
if from_commit_id is not None:
getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id))
res = self.stub.SubscribeCommit(req, metadata=self.metadata)
return res
def list_branch(self, repo_name):
"""
Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo.
"""
req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
res = self.stub.ListBranch(req, metadata=self.metadata)
if hasattr(res, 'branch_info'):
return res.branch_info
return []
def set_branch(self, commit, branch_name):
"""
Sets a commit and its ancestors as a branch.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* branch_name: The name for the branch to set.
"""
res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name)
self.stub.SetBranch(res, metadata=self.metadata)
def delete_branch(self, repo_name, branch_name):
"""
Deletes a branch, but leaves the commits themselves intact. In other
words, those commits can still be accessed via commit IDs and other
branches they happen to be on.
Params:
* repo_name: The name of the repo.
* branch_name: The name of the branch to delete.
"""
res = proto.DeleteBranchRequest(repo=Repo(name=repo_name), branch=branch_name)
self.stub.DeleteBranch(res, metadata=self.metadata)
def put_file_url(self, commit, path, url, recursive=False):
"""
Puts a file using the content found at a URL. The URL is sent to the
server which performs the request.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
* url: The url of the file to put.
* recursive: allow for recursive scraping of some types URLs for
example on s3:// urls.
"""
req = iter([
proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
url=url,
recursive=recursive
)
])
self.stub.PutFile(req, metadata=self.metadata)
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
"""
Returns an iterator of the contents contents of a file at a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path of the file.
* offset_bytes: Optional. specifies a number of bytes that should be
skipped in the beginning of the file.
* size_bytes: Optional. limits the total amount of data returned, note
you will get fewer bytes than size if you pass a value larger than the
size of the file. If size is set to 0 then all of the data will be
returned.
* extract_value: If True, then an ExtractValueIterator will be return,
which will iterate over the bytes of the file. If False, then the
protobuf response iterator will return.
"""
req = proto.GetFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
offset_bytes=offset_bytes,
size_bytes=size_bytes
)
res = self.stub.GetFile(req, metadata=self.metadata)
if extract_value:
return ExtractValueIterator(res)
return res
def get_files(self, commit, paths, recursive=False):
"""
Returns the contents of a list of files at a specific Commit as a
dictionary of file paths to data.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* paths: A list of paths to retrieve.
* recursive: If True, will go into each directory in the list
recursively.
"""
filtered_file_infos = []
for path in paths:
fi = self.inspect_file(commit, path)
if fi.file_type == proto.FILE:
filtered_file_infos.append(fi)
else:
filtered_file_infos += self.list_file(commit, path, recursive=recursive)
filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE]
return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
def inspect_file(self, commit, path):
"""
Returns info about a specific file.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path to file.
"""
req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path))
res = self.stub.InspectFile(req, metadata=self.metadata)
return res
def list_file(self, commit, path, recursive=False):
"""
Lists the files in a directory.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the directory.
* recursive: If True, continue listing the files for sub-directories.
"""
req = proto.ListFileRequest(
file=proto.File(commit=commit_from(commit), path=path)
)
res = self.stub.ListFile(req, metadata=self.metadata)
file_infos = res.file_info
if recursive:
dirs = [f for f in file_infos if f.file_type == proto.DIR]
files = [f for f in file_infos if f.file_type == proto.FILE]
return sum([self.list_file(commit, d.file.path, recursive) for d in dirs], files)
return list(file_infos)
def glob_file(self, commit, pattern):
req = proto.GlobFileRequest(commit=commit_from(commit), pattern=pattern)
res = self.stub.GlobFile(req, metadata=self.metadata)
if hasattr(res, 'file_info'):
return res.file_info
return []
def delete_file(self, commit, path):
"""
Deletes a file from a Commit. DeleteFile leaves a tombstone in the
Commit, assuming the file isn't written to later attempting to get the
file from the finished commit will result in not found error. The file
will of course remain intact in the Commit's parent.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
"""
req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path))
self.stub.DeleteFile(req, metadata=self.metadata)
def delete_all(self):
req = proto.google_dot_protobuf_dot_empty__pb2.Empty()
self.stub.DeleteAll(req, metadata=self.metadata)
|
pachyderm/python-pachyderm | src/python_pachyderm/pfs_client.py | PfsClient.put_file_url | python | def put_file_url(self, commit, path, url, recursive=False):
req = iter([
proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
url=url,
recursive=recursive
)
])
self.stub.PutFile(req, metadata=self.metadata) | Puts a file using the content found at a URL. The URL is sent to the
server which performs the request.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
* url: The url of the file to put.
* recursive: allow for recursive scraping of some types URLs for
example on s3:// urls. | train | https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L363-L382 | [
"def commit_from(src, allow_just_repo=False):\n if isinstance(src, pfs_proto.Commit):\n return src\n elif isinstance(src, (tuple, list)) and len(src) == 2:\n return pfs_proto.Commit(repo=pfs_proto.Repo(name=src[0]), id=src[1])\n elif isinstance(src, six.string_types):\n repo_name, comm... | class PfsClient(object):
def __init__(self, host=None, port=None, auth_token=None):
"""
Creates a client to connect to PFS.
Params:
* host: The pachd host. Default is 'localhost', which is used with
`pachctl port-forward`.
* port: The port to connect to. Default is 30650.
* auth_token: The authentication token; used if authentication is
enabled on the cluster. Default to `None`.
"""
address = get_address(host, port)
self.metadata = get_metadata(auth_token)
self.channel = grpc.grpc.insecure_channel(address)
self.stub = grpc.APIStub(self.channel)
def create_repo(self, repo_name, description=None):
"""
Creates a new Repo object in PFS with the given name. Repos are the
top level data object in PFS and should be used to store data of a
similar type. For example rather than having a single Repo for an
entire project you might have separate Repos for logs, metrics,
database dumps etc.
Params:
* repo_name: Name of the repo.
* description: Repo description.
"""
req = proto.CreateRepoRequest(repo=proto.Repo(name=repo_name), description=description)
self.stub.CreateRepo(req, metadata=self.metadata)
def inspect_repo(self, repo_name):
"""
Returns info about a specific Repo.
Params:
* repo_name: Name of the repo.
"""
req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
res = self.stub.InspectRepo(req, metadata=self.metadata)
return res
def list_repo(self):
"""
Returns info about all Repos.
"""
req = proto.ListRepoRequest()
res = self.stub.ListRepo(req, metadata=self.metadata)
if hasattr(res, 'repo_info'):
return res.repo_info
return []
def delete_repo(self, repo_name=None, force=False, all=False):
"""
Deletes a repo and reclaims the storage space it was using.
Params:
* repo_name: The name of the repo.
* force: If set to true, the repo will be removed regardless of
errors. This argument should be used with care.
* all: Delete all repos.
"""
if not all:
if repo_name:
req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Either a repo_name or all=True needs to be provided")
else:
if not repo_name:
req = proto.DeleteRepoRequest(force=force, all=all)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Cannot specify a repo_name if all=True")
def start_commit(self, repo_name, branch=None, parent=None, description=None):
"""
Begins the process of committing data to a Repo. Once started you can
write to the Commit with PutFile and when all the data has been
written you must finish the Commit with FinishCommit. NOTE, data is
not persisted until FinishCommit is called. A Commit object is
returned.
Params:
* repo_name: The name of the repo.
* branch: A more convenient way to build linear chains of commits.
When a commit is started with a non-empty branch the value of branch
becomes an alias for the created Commit. This enables a more intuitive
access pattern. When the commit is started on a branch the previous
head of the branch is used as the parent of the commit.
* parent: Specifies the parent Commit, upon creation the new Commit
will appear identical to the parent Commit, data can safely be added
to the new commit without affecting the contents of the parent Commit.
You may pass "" as parentCommit in which case the new Commit will have
no parent and will initially appear empty.
* description: (optional) explanation of the commit for clarity.
"""
req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch,
description=description)
res = self.stub.StartCommit(req, metadata=self.metadata)
return res
def finish_commit(self, commit):
"""
Ends the process of committing data to a Repo and persists the
Commit. Once a Commit is finished the data becomes immutable and
future attempts to write to it with PutFile will error.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.FinishCommitRequest(commit=commit_from(commit))
res = self.stub.FinishCommit(req, metadata=self.metadata)
return res
@contextmanager
def commit(self, repo_name, branch=None, parent=None, description=None):
"""A context manager for doing stuff inside a commit."""
commit = self.start_commit(repo_name, branch, parent, description)
try:
yield commit
except Exception as e:
print("An exception occurred during an open commit. "
"Trying to finish it (Currently a commit can't be cancelled)")
raise e
finally:
self.finish_commit(commit)
def inspect_commit(self, commit):
"""
Returns info about a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.InspectCommitRequest(commit=commit_from(commit))
return self.stub.InspectCommit(req, metadata=self.metadata)
def provenances_for_repo(self, repo_name):
provenances = {}
commits = self.list_commit(repo_name)
sorted_commits = [x[0] for x in
sorted([(c.commit.id, c.finished.seconds) for c in commits], key=lambda x: x[1])]
for c in sorted_commits:
for p in c.provenance:
provenances[p.id] = c.commit.id
return provenances
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
"""
Gets a list of CommitInfo objects.
Params:
* repo_name: If only `repo_name` is given, all commits in the repo are
returned.
* to_commit: Optional. Only the ancestors of `to`, including `to`
itself, are considered.
* from_commit: Optional. Only the descendants of `from`, including
`from` itself, are considered.
* number: Optional. Determines how many commits are returned. If
`number` is 0, all commits that match the aforementioned criteria are
returned.
"""
req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number)
if to_commit is not None:
req.to.CopyFrom(commit_from(to_commit))
if from_commit is not None:
getattr(req, 'from').CopyFrom(commit_from(from_commit))
res = self.stub.ListCommit(req, metadata=self.metadata)
if hasattr(res, 'commit_info'):
return res.commit_info
return []
def delete_commit(self, commit):
"""
Deletes a commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.DeleteCommitRequest(commit=commit_from(commit))
self.stub.DeleteCommit(req, metadata=self.metadata)
def flush_commit(self, commits, repos=tuple()):
"""
Blocks until all of the commits which have a set of commits as
provenance have finished. For commits to be considered they must have
all of the specified commits as provenance. This in effect waits for
all of the jobs that are triggered by a set of commits to complete.
It returns an error if any of the commits it's waiting on are
cancelled due to one of the jobs encountering an error during runtime.
Note that it's never necessary to call FlushCommit to run jobs,
they'll run no matter what, FlushCommit just allows you to wait for
them to complete and see their output once they do. This returns an
iterator of CommitInfo objects.
Params:
* commits: A commit or a list of commits to wait on.
* repos: Optional. Only the commits up to and including those repos.
will be considered, otherwise all repos are considered.
"""
req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits],
to_repo=[proto.Repo(name=r) for r in repos])
res = self.stub.FlushCommit(req, metadata=self.metadata)
return res
def subscribe_commit(self, repo_name, branch, from_commit_id=None):
"""
SubscribeCommit is like ListCommit but it keeps listening for commits as
they come in. This returns an iterator Commit objects.
Params:
* repo_name: Name of the repo.
* branch: Branch to subscribe to.
* from_commit_id: Optional. Only commits created since this commit
are returned.
"""
repo = proto.Repo(name=repo_name)
req = proto.SubscribeCommitRequest(repo=repo, branch=branch)
if from_commit_id is not None:
getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id))
res = self.stub.SubscribeCommit(req, metadata=self.metadata)
return res
def list_branch(self, repo_name):
"""
Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo.
"""
req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
res = self.stub.ListBranch(req, metadata=self.metadata)
if hasattr(res, 'branch_info'):
return res.branch_info
return []
def set_branch(self, commit, branch_name):
"""
Sets a commit and its ancestors as a branch.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* branch_name: The name for the branch to set.
"""
res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name)
self.stub.SetBranch(res, metadata=self.metadata)
def delete_branch(self, repo_name, branch_name):
"""
Deletes a branch, but leaves the commits themselves intact. In other
words, those commits can still be accessed via commit IDs and other
branches they happen to be on.
Params:
* repo_name: The name of the repo.
* branch_name: The name of the branch to delete.
"""
res = proto.DeleteBranchRequest(repo=Repo(name=repo_name), branch=branch_name)
self.stub.DeleteBranch(res, metadata=self.metadata)
def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
target_file_datums=0, target_file_bytes=0, overwrite_index=None):
"""
Uploads a binary bytes array as file(s) in a certain path.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path in the repo the file(s) will be written to.
* value: The file contents as bytes, represented as a file-like
object, bytestring, or iterator of bytestrings.
* delimiter: Optional. causes data to be broken up into separate files
with `path` as a prefix.
* target_file_datums: Optional. Specifies the target number of datums
in each written file. It may be lower if data does not split evenly,
but will never be higher, unless the value is 0.
* target_file_bytes: Specifies the target number of bytes in each
written file, files may have more or fewer bytes than the target.
"""
overwrite_index_proto = proto.OverwriteIndex(index=overwrite_index) if overwrite_index is not None else None
if hasattr(value, "read"):
def wrap(value):
for i in itertools.count():
chunk = value.read(BUFFER_SIZE)
if len(chunk) == 0:
return
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
elif isinstance(value, collections.Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
def wrap(value):
for i, chunk in enumerate(value):
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
else:
def wrap(value):
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=value[:BUFFER_SIZE],
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
yield proto.PutFileRequest(
value=value[i:i + BUFFER_SIZE],
overwrite_index=overwrite_index_proto
)
self.stub.PutFile(wrap(value), metadata=self.metadata)
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
"""
Returns an iterator of the contents contents of a file at a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path of the file.
* offset_bytes: Optional. specifies a number of bytes that should be
skipped in the beginning of the file.
* size_bytes: Optional. limits the total amount of data returned, note
you will get fewer bytes than size if you pass a value larger than the
size of the file. If size is set to 0 then all of the data will be
returned.
* extract_value: If True, then an ExtractValueIterator will be return,
which will iterate over the bytes of the file. If False, then the
protobuf response iterator will return.
"""
req = proto.GetFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
offset_bytes=offset_bytes,
size_bytes=size_bytes
)
res = self.stub.GetFile(req, metadata=self.metadata)
if extract_value:
return ExtractValueIterator(res)
return res
def get_files(self, commit, paths, recursive=False):
"""
Returns the contents of a list of files at a specific Commit as a
dictionary of file paths to data.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* paths: A list of paths to retrieve.
* recursive: If True, will go into each directory in the list
recursively.
"""
filtered_file_infos = []
for path in paths:
fi = self.inspect_file(commit, path)
if fi.file_type == proto.FILE:
filtered_file_infos.append(fi)
else:
filtered_file_infos += self.list_file(commit, path, recursive=recursive)
filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE]
return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
def inspect_file(self, commit, path):
"""
Returns info about a specific file.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path to file.
"""
req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path))
res = self.stub.InspectFile(req, metadata=self.metadata)
return res
def list_file(self, commit, path, recursive=False):
"""
Lists the files in a directory.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the directory.
* recursive: If True, continue listing the files for sub-directories.
"""
req = proto.ListFileRequest(
file=proto.File(commit=commit_from(commit), path=path)
)
res = self.stub.ListFile(req, metadata=self.metadata)
file_infos = res.file_info
if recursive:
dirs = [f for f in file_infos if f.file_type == proto.DIR]
files = [f for f in file_infos if f.file_type == proto.FILE]
return sum([self.list_file(commit, d.file.path, recursive) for d in dirs], files)
return list(file_infos)
def glob_file(self, commit, pattern):
req = proto.GlobFileRequest(commit=commit_from(commit), pattern=pattern)
res = self.stub.GlobFile(req, metadata=self.metadata)
if hasattr(res, 'file_info'):
return res.file_info
return []
def delete_file(self, commit, path):
"""
Deletes a file from a Commit. DeleteFile leaves a tombstone in the
Commit, assuming the file isn't written to later attempting to get the
file from the finished commit will result in not found error. The file
will of course remain intact in the Commit's parent.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
"""
req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path))
self.stub.DeleteFile(req, metadata=self.metadata)
def delete_all(self):
req = proto.google_dot_protobuf_dot_empty__pb2.Empty()
self.stub.DeleteAll(req, metadata=self.metadata)
|
pachyderm/python-pachyderm | src/python_pachyderm/pfs_client.py | PfsClient.get_file | python | def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
req = proto.GetFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
offset_bytes=offset_bytes,
size_bytes=size_bytes
)
res = self.stub.GetFile(req, metadata=self.metadata)
if extract_value:
return ExtractValueIterator(res)
return res | Returns an iterator of the contents contents of a file at a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path of the file.
* offset_bytes: Optional. specifies a number of bytes that should be
skipped in the beginning of the file.
* size_bytes: Optional. limits the total amount of data returned, note
you will get fewer bytes than size if you pass a value larger than the
size of the file. If size is set to 0 then all of the data will be
returned.
* extract_value: If True, then an ExtractValueIterator will be return,
which will iterate over the bytes of the file. If False, then the
protobuf response iterator will return. | train | https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L384-L409 | [
"def commit_from(src, allow_just_repo=False):\n if isinstance(src, pfs_proto.Commit):\n return src\n elif isinstance(src, (tuple, list)) and len(src) == 2:\n return pfs_proto.Commit(repo=pfs_proto.Repo(name=src[0]), id=src[1])\n elif isinstance(src, six.string_types):\n repo_name, comm... | class PfsClient(object):
def __init__(self, host=None, port=None, auth_token=None):
"""
Creates a client to connect to PFS.
Params:
* host: The pachd host. Default is 'localhost', which is used with
`pachctl port-forward`.
* port: The port to connect to. Default is 30650.
* auth_token: The authentication token; used if authentication is
enabled on the cluster. Default to `None`.
"""
address = get_address(host, port)
self.metadata = get_metadata(auth_token)
self.channel = grpc.grpc.insecure_channel(address)
self.stub = grpc.APIStub(self.channel)
def create_repo(self, repo_name, description=None):
"""
Creates a new Repo object in PFS with the given name. Repos are the
top level data object in PFS and should be used to store data of a
similar type. For example rather than having a single Repo for an
entire project you might have separate Repos for logs, metrics,
database dumps etc.
Params:
* repo_name: Name of the repo.
* description: Repo description.
"""
req = proto.CreateRepoRequest(repo=proto.Repo(name=repo_name), description=description)
self.stub.CreateRepo(req, metadata=self.metadata)
def inspect_repo(self, repo_name):
"""
Returns info about a specific Repo.
Params:
* repo_name: Name of the repo.
"""
req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
res = self.stub.InspectRepo(req, metadata=self.metadata)
return res
def list_repo(self):
"""
Returns info about all Repos.
"""
req = proto.ListRepoRequest()
res = self.stub.ListRepo(req, metadata=self.metadata)
if hasattr(res, 'repo_info'):
return res.repo_info
return []
def delete_repo(self, repo_name=None, force=False, all=False):
"""
Deletes a repo and reclaims the storage space it was using.
Params:
* repo_name: The name of the repo.
* force: If set to true, the repo will be removed regardless of
errors. This argument should be used with care.
* all: Delete all repos.
"""
if not all:
if repo_name:
req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Either a repo_name or all=True needs to be provided")
else:
if not repo_name:
req = proto.DeleteRepoRequest(force=force, all=all)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Cannot specify a repo_name if all=True")
def start_commit(self, repo_name, branch=None, parent=None, description=None):
"""
Begins the process of committing data to a Repo. Once started you can
write to the Commit with PutFile and when all the data has been
written you must finish the Commit with FinishCommit. NOTE, data is
not persisted until FinishCommit is called. A Commit object is
returned.
Params:
* repo_name: The name of the repo.
* branch: A more convenient way to build linear chains of commits.
When a commit is started with a non-empty branch the value of branch
becomes an alias for the created Commit. This enables a more intuitive
access pattern. When the commit is started on a branch the previous
head of the branch is used as the parent of the commit.
* parent: Specifies the parent Commit, upon creation the new Commit
will appear identical to the parent Commit, data can safely be added
to the new commit without affecting the contents of the parent Commit.
You may pass "" as parentCommit in which case the new Commit will have
no parent and will initially appear empty.
* description: (optional) explanation of the commit for clarity.
"""
req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch,
description=description)
res = self.stub.StartCommit(req, metadata=self.metadata)
return res
def finish_commit(self, commit):
"""
Ends the process of committing data to a Repo and persists the
Commit. Once a Commit is finished the data becomes immutable and
future attempts to write to it with PutFile will error.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.FinishCommitRequest(commit=commit_from(commit))
res = self.stub.FinishCommit(req, metadata=self.metadata)
return res
@contextmanager
def commit(self, repo_name, branch=None, parent=None, description=None):
"""A context manager for doing stuff inside a commit."""
commit = self.start_commit(repo_name, branch, parent, description)
try:
yield commit
except Exception as e:
print("An exception occurred during an open commit. "
"Trying to finish it (Currently a commit can't be cancelled)")
raise e
finally:
self.finish_commit(commit)
def inspect_commit(self, commit):
"""
Returns info about a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.InspectCommitRequest(commit=commit_from(commit))
return self.stub.InspectCommit(req, metadata=self.metadata)
def provenances_for_repo(self, repo_name):
provenances = {}
commits = self.list_commit(repo_name)
sorted_commits = [x[0] for x in
sorted([(c.commit.id, c.finished.seconds) for c in commits], key=lambda x: x[1])]
for c in sorted_commits:
for p in c.provenance:
provenances[p.id] = c.commit.id
return provenances
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
"""
Gets a list of CommitInfo objects.
Params:
* repo_name: If only `repo_name` is given, all commits in the repo are
returned.
* to_commit: Optional. Only the ancestors of `to`, including `to`
itself, are considered.
* from_commit: Optional. Only the descendants of `from`, including
`from` itself, are considered.
* number: Optional. Determines how many commits are returned. If
`number` is 0, all commits that match the aforementioned criteria are
returned.
"""
req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number)
if to_commit is not None:
req.to.CopyFrom(commit_from(to_commit))
if from_commit is not None:
getattr(req, 'from').CopyFrom(commit_from(from_commit))
res = self.stub.ListCommit(req, metadata=self.metadata)
if hasattr(res, 'commit_info'):
return res.commit_info
return []
def delete_commit(self, commit):
"""
Deletes a commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.DeleteCommitRequest(commit=commit_from(commit))
self.stub.DeleteCommit(req, metadata=self.metadata)
def flush_commit(self, commits, repos=tuple()):
"""
Blocks until all of the commits which have a set of commits as
provenance have finished. For commits to be considered they must have
all of the specified commits as provenance. This in effect waits for
all of the jobs that are triggered by a set of commits to complete.
It returns an error if any of the commits it's waiting on are
cancelled due to one of the jobs encountering an error during runtime.
Note that it's never necessary to call FlushCommit to run jobs,
they'll run no matter what, FlushCommit just allows you to wait for
them to complete and see their output once they do. This returns an
iterator of CommitInfo objects.
Params:
* commits: A commit or a list of commits to wait on.
* repos: Optional. Only the commits up to and including those repos.
will be considered, otherwise all repos are considered.
"""
req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits],
to_repo=[proto.Repo(name=r) for r in repos])
res = self.stub.FlushCommit(req, metadata=self.metadata)
return res
def subscribe_commit(self, repo_name, branch, from_commit_id=None):
"""
SubscribeCommit is like ListCommit but it keeps listening for commits as
they come in. This returns an iterator Commit objects.
Params:
* repo_name: Name of the repo.
* branch: Branch to subscribe to.
* from_commit_id: Optional. Only commits created since this commit
are returned.
"""
repo = proto.Repo(name=repo_name)
req = proto.SubscribeCommitRequest(repo=repo, branch=branch)
if from_commit_id is not None:
getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id))
res = self.stub.SubscribeCommit(req, metadata=self.metadata)
return res
def list_branch(self, repo_name):
"""
Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo.
"""
req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
res = self.stub.ListBranch(req, metadata=self.metadata)
if hasattr(res, 'branch_info'):
return res.branch_info
return []
def set_branch(self, commit, branch_name):
"""
Sets a commit and its ancestors as a branch.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* branch_name: The name for the branch to set.
"""
res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name)
self.stub.SetBranch(res, metadata=self.metadata)
def delete_branch(self, repo_name, branch_name):
"""
Deletes a branch, but leaves the commits themselves intact. In other
words, those commits can still be accessed via commit IDs and other
branches they happen to be on.
Params:
* repo_name: The name of the repo.
* branch_name: The name of the branch to delete.
"""
res = proto.DeleteBranchRequest(repo=Repo(name=repo_name), branch=branch_name)
self.stub.DeleteBranch(res, metadata=self.metadata)
def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
target_file_datums=0, target_file_bytes=0, overwrite_index=None):
"""
Uploads a binary bytes array as file(s) in a certain path.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path in the repo the file(s) will be written to.
* value: The file contents as bytes, represented as a file-like
object, bytestring, or iterator of bytestrings.
* delimiter: Optional. causes data to be broken up into separate files
with `path` as a prefix.
* target_file_datums: Optional. Specifies the target number of datums
in each written file. It may be lower if data does not split evenly,
but will never be higher, unless the value is 0.
* target_file_bytes: Specifies the target number of bytes in each
written file, files may have more or fewer bytes than the target.
"""
overwrite_index_proto = proto.OverwriteIndex(index=overwrite_index) if overwrite_index is not None else None
if hasattr(value, "read"):
def wrap(value):
for i in itertools.count():
chunk = value.read(BUFFER_SIZE)
if len(chunk) == 0:
return
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
elif isinstance(value, collections.Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
def wrap(value):
for i, chunk in enumerate(value):
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
else:
def wrap(value):
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=value[:BUFFER_SIZE],
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
yield proto.PutFileRequest(
value=value[i:i + BUFFER_SIZE],
overwrite_index=overwrite_index_proto
)
self.stub.PutFile(wrap(value), metadata=self.metadata)
def put_file_url(self, commit, path, url, recursive=False):
"""
Puts a file using the content found at a URL. The URL is sent to the
server which performs the request.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
* url: The url of the file to put.
* recursive: allow for recursive scraping of some types URLs for
example on s3:// urls.
"""
req = iter([
proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
url=url,
recursive=recursive
)
])
self.stub.PutFile(req, metadata=self.metadata)
def get_files(self, commit, paths, recursive=False):
"""
Returns the contents of a list of files at a specific Commit as a
dictionary of file paths to data.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* paths: A list of paths to retrieve.
* recursive: If True, will go into each directory in the list
recursively.
"""
filtered_file_infos = []
for path in paths:
fi = self.inspect_file(commit, path)
if fi.file_type == proto.FILE:
filtered_file_infos.append(fi)
else:
filtered_file_infos += self.list_file(commit, path, recursive=recursive)
filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE]
return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
def inspect_file(self, commit, path):
"""
Returns info about a specific file.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path to file.
"""
req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path))
res = self.stub.InspectFile(req, metadata=self.metadata)
return res
def list_file(self, commit, path, recursive=False):
"""
Lists the files in a directory.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the directory.
* recursive: If True, continue listing the files for sub-directories.
"""
req = proto.ListFileRequest(
file=proto.File(commit=commit_from(commit), path=path)
)
res = self.stub.ListFile(req, metadata=self.metadata)
file_infos = res.file_info
if recursive:
dirs = [f for f in file_infos if f.file_type == proto.DIR]
files = [f for f in file_infos if f.file_type == proto.FILE]
return sum([self.list_file(commit, d.file.path, recursive) for d in dirs], files)
return list(file_infos)
    def glob_file(self, commit, pattern):
        """
        Returns FileInfo objects for files whose paths match a glob pattern
        at the given commit; returns [] when the response carries no
        file_info field.

        Params:
        * commit: A tuple, string, or Commit object representing the commit.
        * pattern: Glob pattern to match file paths against (pattern syntax
        is interpreted server-side — see the pachd docs).
        """
        req = proto.GlobFileRequest(commit=commit_from(commit), pattern=pattern)
        res = self.stub.GlobFile(req, metadata=self.metadata)
        if hasattr(res, 'file_info'):
            return res.file_info
        return []
def delete_file(self, commit, path):
"""
Deletes a file from a Commit. DeleteFile leaves a tombstone in the
Commit, assuming the file isn't written to later attempting to get the
file from the finished commit will result in not found error. The file
will of course remain intact in the Commit's parent.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
"""
req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path))
self.stub.DeleteFile(req, metadata=self.metadata)
    def delete_all(self):
        """
        Issues the DeleteAll RPC with an empty request.

        NOTE(review): on the PFS API this presumably wipes all repos/commits
        server-side — confirm against the pachd docs before exposing widely.
        """
        req = proto.google_dot_protobuf_dot_empty__pb2.Empty()
        self.stub.DeleteAll(req, metadata=self.metadata)
|
pachyderm/python-pachyderm | src/python_pachyderm/pfs_client.py | PfsClient.get_files | python | def get_files(self, commit, paths, recursive=False):
filtered_file_infos = []
for path in paths:
fi = self.inspect_file(commit, path)
if fi.file_type == proto.FILE:
filtered_file_infos.append(fi)
else:
filtered_file_infos += self.list_file(commit, path, recursive=recursive)
filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE]
return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths} | Returns the contents of a list of files at a specific Commit as a
dictionary of file paths to data.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* paths: A list of paths to retrieve.
* recursive: If True, will go into each directory in the list
recursively. | train | https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L411-L432 | [
"def inspect_file(self, commit, path):\n \"\"\"\n Returns info about a specific file.\n\n Params:\n * commit: A tuple, string, or Commit object representing the commit.\n * path: Path to file.\n \"\"\"\n req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path))\n ... | class PfsClient(object):
def __init__(self, host=None, port=None, auth_token=None):
"""
Creates a client to connect to PFS.
Params:
* host: The pachd host. Default is 'localhost', which is used with
`pachctl port-forward`.
* port: The port to connect to. Default is 30650.
* auth_token: The authentication token; used if authentication is
enabled on the cluster. Default to `None`.
"""
address = get_address(host, port)
self.metadata = get_metadata(auth_token)
self.channel = grpc.grpc.insecure_channel(address)
self.stub = grpc.APIStub(self.channel)
def create_repo(self, repo_name, description=None):
"""
Creates a new Repo object in PFS with the given name. Repos are the
top level data object in PFS and should be used to store data of a
similar type. For example rather than having a single Repo for an
entire project you might have separate Repos for logs, metrics,
database dumps etc.
Params:
* repo_name: Name of the repo.
* description: Repo description.
"""
req = proto.CreateRepoRequest(repo=proto.Repo(name=repo_name), description=description)
self.stub.CreateRepo(req, metadata=self.metadata)
def inspect_repo(self, repo_name):
"""
Returns info about a specific Repo.
Params:
* repo_name: Name of the repo.
"""
req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
res = self.stub.InspectRepo(req, metadata=self.metadata)
return res
def list_repo(self):
"""
Returns info about all Repos.
"""
req = proto.ListRepoRequest()
res = self.stub.ListRepo(req, metadata=self.metadata)
if hasattr(res, 'repo_info'):
return res.repo_info
return []
def delete_repo(self, repo_name=None, force=False, all=False):
"""
Deletes a repo and reclaims the storage space it was using.
Params:
* repo_name: The name of the repo.
* force: If set to true, the repo will be removed regardless of
errors. This argument should be used with care.
* all: Delete all repos.
"""
if not all:
if repo_name:
req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Either a repo_name or all=True needs to be provided")
else:
if not repo_name:
req = proto.DeleteRepoRequest(force=force, all=all)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Cannot specify a repo_name if all=True")
def start_commit(self, repo_name, branch=None, parent=None, description=None):
"""
Begins the process of committing data to a Repo. Once started you can
write to the Commit with PutFile and when all the data has been
written you must finish the Commit with FinishCommit. NOTE, data is
not persisted until FinishCommit is called. A Commit object is
returned.
Params:
* repo_name: The name of the repo.
* branch: A more convenient way to build linear chains of commits.
When a commit is started with a non-empty branch the value of branch
becomes an alias for the created Commit. This enables a more intuitive
access pattern. When the commit is started on a branch the previous
head of the branch is used as the parent of the commit.
* parent: Specifies the parent Commit, upon creation the new Commit
will appear identical to the parent Commit, data can safely be added
to the new commit without affecting the contents of the parent Commit.
You may pass "" as parentCommit in which case the new Commit will have
no parent and will initially appear empty.
* description: (optional) explanation of the commit for clarity.
"""
req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch,
description=description)
res = self.stub.StartCommit(req, metadata=self.metadata)
return res
def finish_commit(self, commit):
"""
Ends the process of committing data to a Repo and persists the
Commit. Once a Commit is finished the data becomes immutable and
future attempts to write to it with PutFile will error.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.FinishCommitRequest(commit=commit_from(commit))
res = self.stub.FinishCommit(req, metadata=self.metadata)
return res
@contextmanager
def commit(self, repo_name, branch=None, parent=None, description=None):
"""A context manager for doing stuff inside a commit."""
commit = self.start_commit(repo_name, branch, parent, description)
try:
yield commit
except Exception as e:
print("An exception occurred during an open commit. "
"Trying to finish it (Currently a commit can't be cancelled)")
raise e
finally:
self.finish_commit(commit)
def inspect_commit(self, commit):
"""
Returns info about a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.InspectCommitRequest(commit=commit_from(commit))
return self.stub.InspectCommit(req, metadata=self.metadata)
def provenances_for_repo(self, repo_name):
provenances = {}
commits = self.list_commit(repo_name)
sorted_commits = [x[0] for x in
sorted([(c.commit.id, c.finished.seconds) for c in commits], key=lambda x: x[1])]
for c in sorted_commits:
for p in c.provenance:
provenances[p.id] = c.commit.id
return provenances
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
"""
Gets a list of CommitInfo objects.
Params:
* repo_name: If only `repo_name` is given, all commits in the repo are
returned.
* to_commit: Optional. Only the ancestors of `to`, including `to`
itself, are considered.
* from_commit: Optional. Only the descendants of `from`, including
`from` itself, are considered.
* number: Optional. Determines how many commits are returned. If
`number` is 0, all commits that match the aforementioned criteria are
returned.
"""
req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number)
if to_commit is not None:
req.to.CopyFrom(commit_from(to_commit))
if from_commit is not None:
getattr(req, 'from').CopyFrom(commit_from(from_commit))
res = self.stub.ListCommit(req, metadata=self.metadata)
if hasattr(res, 'commit_info'):
return res.commit_info
return []
def delete_commit(self, commit):
"""
Deletes a commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.DeleteCommitRequest(commit=commit_from(commit))
self.stub.DeleteCommit(req, metadata=self.metadata)
def flush_commit(self, commits, repos=tuple()):
"""
Blocks until all of the commits which have a set of commits as
provenance have finished. For commits to be considered they must have
all of the specified commits as provenance. This in effect waits for
all of the jobs that are triggered by a set of commits to complete.
It returns an error if any of the commits it's waiting on are
cancelled due to one of the jobs encountering an error during runtime.
Note that it's never necessary to call FlushCommit to run jobs,
they'll run no matter what, FlushCommit just allows you to wait for
them to complete and see their output once they do. This returns an
iterator of CommitInfo objects.
Params:
* commits: A commit or a list of commits to wait on.
* repos: Optional. Only the commits up to and including those repos.
will be considered, otherwise all repos are considered.
"""
req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits],
to_repo=[proto.Repo(name=r) for r in repos])
res = self.stub.FlushCommit(req, metadata=self.metadata)
return res
def subscribe_commit(self, repo_name, branch, from_commit_id=None):
"""
SubscribeCommit is like ListCommit but it keeps listening for commits as
they come in. This returns an iterator Commit objects.
Params:
* repo_name: Name of the repo.
* branch: Branch to subscribe to.
* from_commit_id: Optional. Only commits created since this commit
are returned.
"""
repo = proto.Repo(name=repo_name)
req = proto.SubscribeCommitRequest(repo=repo, branch=branch)
if from_commit_id is not None:
getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id))
res = self.stub.SubscribeCommit(req, metadata=self.metadata)
return res
def list_branch(self, repo_name):
"""
Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo.
"""
req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
res = self.stub.ListBranch(req, metadata=self.metadata)
if hasattr(res, 'branch_info'):
return res.branch_info
return []
def set_branch(self, commit, branch_name):
"""
Sets a commit and its ancestors as a branch.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* branch_name: The name for the branch to set.
"""
res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name)
self.stub.SetBranch(res, metadata=self.metadata)
def delete_branch(self, repo_name, branch_name):
"""
Deletes a branch, but leaves the commits themselves intact. In other
words, those commits can still be accessed via commit IDs and other
branches they happen to be on.
Params:
* repo_name: The name of the repo.
* branch_name: The name of the branch to delete.
"""
res = proto.DeleteBranchRequest(repo=Repo(name=repo_name), branch=branch_name)
self.stub.DeleteBranch(res, metadata=self.metadata)
def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
target_file_datums=0, target_file_bytes=0, overwrite_index=None):
"""
Uploads a binary bytes array as file(s) in a certain path.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path in the repo the file(s) will be written to.
* value: The file contents as bytes, represented as a file-like
object, bytestring, or iterator of bytestrings.
* delimiter: Optional. causes data to be broken up into separate files
with `path` as a prefix.
* target_file_datums: Optional. Specifies the target number of datums
in each written file. It may be lower if data does not split evenly,
but will never be higher, unless the value is 0.
* target_file_bytes: Specifies the target number of bytes in each
written file, files may have more or fewer bytes than the target.
"""
overwrite_index_proto = proto.OverwriteIndex(index=overwrite_index) if overwrite_index is not None else None
if hasattr(value, "read"):
def wrap(value):
for i in itertools.count():
chunk = value.read(BUFFER_SIZE)
if len(chunk) == 0:
return
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
elif isinstance(value, collections.Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
def wrap(value):
for i, chunk in enumerate(value):
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
else:
def wrap(value):
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=value[:BUFFER_SIZE],
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
yield proto.PutFileRequest(
value=value[i:i + BUFFER_SIZE],
overwrite_index=overwrite_index_proto
)
self.stub.PutFile(wrap(value), metadata=self.metadata)
def put_file_url(self, commit, path, url, recursive=False):
"""
Puts a file using the content found at a URL. The URL is sent to the
server which performs the request.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
* url: The url of the file to put.
* recursive: allow for recursive scraping of some types URLs for
example on s3:// urls.
"""
req = iter([
proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
url=url,
recursive=recursive
)
])
self.stub.PutFile(req, metadata=self.metadata)
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
"""
Returns an iterator of the contents contents of a file at a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path of the file.
* offset_bytes: Optional. specifies a number of bytes that should be
skipped in the beginning of the file.
* size_bytes: Optional. limits the total amount of data returned, note
you will get fewer bytes than size if you pass a value larger than the
size of the file. If size is set to 0 then all of the data will be
returned.
* extract_value: If True, then an ExtractValueIterator will be return,
which will iterate over the bytes of the file. If False, then the
protobuf response iterator will return.
"""
req = proto.GetFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
offset_bytes=offset_bytes,
size_bytes=size_bytes
)
res = self.stub.GetFile(req, metadata=self.metadata)
if extract_value:
return ExtractValueIterator(res)
return res
def inspect_file(self, commit, path):
"""
Returns info about a specific file.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path to file.
"""
req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path))
res = self.stub.InspectFile(req, metadata=self.metadata)
return res
def list_file(self, commit, path, recursive=False):
"""
Lists the files in a directory.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the directory.
* recursive: If True, continue listing the files for sub-directories.
"""
req = proto.ListFileRequest(
file=proto.File(commit=commit_from(commit), path=path)
)
res = self.stub.ListFile(req, metadata=self.metadata)
file_infos = res.file_info
if recursive:
dirs = [f for f in file_infos if f.file_type == proto.DIR]
files = [f for f in file_infos if f.file_type == proto.FILE]
return sum([self.list_file(commit, d.file.path, recursive) for d in dirs], files)
return list(file_infos)
def glob_file(self, commit, pattern):
req = proto.GlobFileRequest(commit=commit_from(commit), pattern=pattern)
res = self.stub.GlobFile(req, metadata=self.metadata)
if hasattr(res, 'file_info'):
return res.file_info
return []
def delete_file(self, commit, path):
"""
Deletes a file from a Commit. DeleteFile leaves a tombstone in the
Commit, assuming the file isn't written to later attempting to get the
file from the finished commit will result in not found error. The file
will of course remain intact in the Commit's parent.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
"""
req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path))
self.stub.DeleteFile(req, metadata=self.metadata)
def delete_all(self):
req = proto.google_dot_protobuf_dot_empty__pb2.Empty()
self.stub.DeleteAll(req, metadata=self.metadata)
|
pachyderm/python-pachyderm | src/python_pachyderm/pfs_client.py | PfsClient.inspect_file | python | def inspect_file(self, commit, path):
req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path))
res = self.stub.InspectFile(req, metadata=self.metadata)
return res | Returns info about a specific file.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path to file. | train | https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L434-L444 | [
"def commit_from(src, allow_just_repo=False):\n if isinstance(src, pfs_proto.Commit):\n return src\n elif isinstance(src, (tuple, list)) and len(src) == 2:\n return pfs_proto.Commit(repo=pfs_proto.Repo(name=src[0]), id=src[1])\n elif isinstance(src, six.string_types):\n repo_name, comm... | class PfsClient(object):
def __init__(self, host=None, port=None, auth_token=None):
"""
Creates a client to connect to PFS.
Params:
* host: The pachd host. Default is 'localhost', which is used with
`pachctl port-forward`.
* port: The port to connect to. Default is 30650.
* auth_token: The authentication token; used if authentication is
enabled on the cluster. Default to `None`.
"""
address = get_address(host, port)
self.metadata = get_metadata(auth_token)
self.channel = grpc.grpc.insecure_channel(address)
self.stub = grpc.APIStub(self.channel)
def create_repo(self, repo_name, description=None):
"""
Creates a new Repo object in PFS with the given name. Repos are the
top level data object in PFS and should be used to store data of a
similar type. For example rather than having a single Repo for an
entire project you might have separate Repos for logs, metrics,
database dumps etc.
Params:
* repo_name: Name of the repo.
* description: Repo description.
"""
req = proto.CreateRepoRequest(repo=proto.Repo(name=repo_name), description=description)
self.stub.CreateRepo(req, metadata=self.metadata)
def inspect_repo(self, repo_name):
"""
Returns info about a specific Repo.
Params:
* repo_name: Name of the repo.
"""
req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
res = self.stub.InspectRepo(req, metadata=self.metadata)
return res
def list_repo(self):
"""
Returns info about all Repos.
"""
req = proto.ListRepoRequest()
res = self.stub.ListRepo(req, metadata=self.metadata)
if hasattr(res, 'repo_info'):
return res.repo_info
return []
def delete_repo(self, repo_name=None, force=False, all=False):
"""
Deletes a repo and reclaims the storage space it was using.
Params:
* repo_name: The name of the repo.
* force: If set to true, the repo will be removed regardless of
errors. This argument should be used with care.
* all: Delete all repos.
"""
if not all:
if repo_name:
req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Either a repo_name or all=True needs to be provided")
else:
if not repo_name:
req = proto.DeleteRepoRequest(force=force, all=all)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Cannot specify a repo_name if all=True")
def start_commit(self, repo_name, branch=None, parent=None, description=None):
"""
Begins the process of committing data to a Repo. Once started you can
write to the Commit with PutFile and when all the data has been
written you must finish the Commit with FinishCommit. NOTE, data is
not persisted until FinishCommit is called. A Commit object is
returned.
Params:
* repo_name: The name of the repo.
* branch: A more convenient way to build linear chains of commits.
When a commit is started with a non-empty branch the value of branch
becomes an alias for the created Commit. This enables a more intuitive
access pattern. When the commit is started on a branch the previous
head of the branch is used as the parent of the commit.
* parent: Specifies the parent Commit, upon creation the new Commit
will appear identical to the parent Commit, data can safely be added
to the new commit without affecting the contents of the parent Commit.
You may pass "" as parentCommit in which case the new Commit will have
no parent and will initially appear empty.
* description: (optional) explanation of the commit for clarity.
"""
req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch,
description=description)
res = self.stub.StartCommit(req, metadata=self.metadata)
return res
def finish_commit(self, commit):
"""
Ends the process of committing data to a Repo and persists the
Commit. Once a Commit is finished the data becomes immutable and
future attempts to write to it with PutFile will error.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.FinishCommitRequest(commit=commit_from(commit))
res = self.stub.FinishCommit(req, metadata=self.metadata)
return res
@contextmanager
def commit(self, repo_name, branch=None, parent=None, description=None):
"""A context manager for doing stuff inside a commit."""
commit = self.start_commit(repo_name, branch, parent, description)
try:
yield commit
except Exception as e:
print("An exception occurred during an open commit. "
"Trying to finish it (Currently a commit can't be cancelled)")
raise e
finally:
self.finish_commit(commit)
def inspect_commit(self, commit):
"""
Returns info about a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.InspectCommitRequest(commit=commit_from(commit))
return self.stub.InspectCommit(req, metadata=self.metadata)
def provenances_for_repo(self, repo_name):
provenances = {}
commits = self.list_commit(repo_name)
sorted_commits = [x[0] for x in
sorted([(c.commit.id, c.finished.seconds) for c in commits], key=lambda x: x[1])]
for c in sorted_commits:
for p in c.provenance:
provenances[p.id] = c.commit.id
return provenances
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
"""
Gets a list of CommitInfo objects.
Params:
* repo_name: If only `repo_name` is given, all commits in the repo are
returned.
* to_commit: Optional. Only the ancestors of `to`, including `to`
itself, are considered.
* from_commit: Optional. Only the descendants of `from`, including
`from` itself, are considered.
* number: Optional. Determines how many commits are returned. If
`number` is 0, all commits that match the aforementioned criteria are
returned.
"""
req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number)
if to_commit is not None:
req.to.CopyFrom(commit_from(to_commit))
if from_commit is not None:
getattr(req, 'from').CopyFrom(commit_from(from_commit))
res = self.stub.ListCommit(req, metadata=self.metadata)
if hasattr(res, 'commit_info'):
return res.commit_info
return []
def delete_commit(self, commit):
"""
Deletes a commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.DeleteCommitRequest(commit=commit_from(commit))
self.stub.DeleteCommit(req, metadata=self.metadata)
def flush_commit(self, commits, repos=tuple()):
"""
Blocks until all of the commits which have a set of commits as
provenance have finished. For commits to be considered they must have
all of the specified commits as provenance. This in effect waits for
all of the jobs that are triggered by a set of commits to complete.
It returns an error if any of the commits it's waiting on are
cancelled due to one of the jobs encountering an error during runtime.
Note that it's never necessary to call FlushCommit to run jobs,
they'll run no matter what, FlushCommit just allows you to wait for
them to complete and see their output once they do. This returns an
iterator of CommitInfo objects.
Params:
* commits: A commit or a list of commits to wait on.
* repos: Optional. Only the commits up to and including those repos.
will be considered, otherwise all repos are considered.
"""
req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits],
to_repo=[proto.Repo(name=r) for r in repos])
res = self.stub.FlushCommit(req, metadata=self.metadata)
return res
def subscribe_commit(self, repo_name, branch, from_commit_id=None):
"""
SubscribeCommit is like ListCommit but it keeps listening for commits as
they come in. This returns an iterator Commit objects.
Params:
* repo_name: Name of the repo.
* branch: Branch to subscribe to.
* from_commit_id: Optional. Only commits created since this commit
are returned.
"""
repo = proto.Repo(name=repo_name)
req = proto.SubscribeCommitRequest(repo=repo, branch=branch)
if from_commit_id is not None:
getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id))
res = self.stub.SubscribeCommit(req, metadata=self.metadata)
return res
def list_branch(self, repo_name):
"""
Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo.
"""
req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
res = self.stub.ListBranch(req, metadata=self.metadata)
if hasattr(res, 'branch_info'):
return res.branch_info
return []
def set_branch(self, commit, branch_name):
"""
Sets a commit and its ancestors as a branch.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* branch_name: The name for the branch to set.
"""
res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name)
self.stub.SetBranch(res, metadata=self.metadata)
def delete_branch(self, repo_name, branch_name):
"""
Deletes a branch, but leaves the commits themselves intact. In other
words, those commits can still be accessed via commit IDs and other
branches they happen to be on.
Params:
* repo_name: The name of the repo.
* branch_name: The name of the branch to delete.
"""
res = proto.DeleteBranchRequest(repo=Repo(name=repo_name), branch=branch_name)
self.stub.DeleteBranch(res, metadata=self.metadata)
def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
target_file_datums=0, target_file_bytes=0, overwrite_index=None):
"""
Uploads a binary bytes array as file(s) in a certain path.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path in the repo the file(s) will be written to.
* value: The file contents as bytes, represented as a file-like
object, bytestring, or iterator of bytestrings.
* delimiter: Optional. causes data to be broken up into separate files
with `path` as a prefix.
* target_file_datums: Optional. Specifies the target number of datums
in each written file. It may be lower if data does not split evenly,
but will never be higher, unless the value is 0.
* target_file_bytes: Specifies the target number of bytes in each
written file, files may have more or fewer bytes than the target.
"""
overwrite_index_proto = proto.OverwriteIndex(index=overwrite_index) if overwrite_index is not None else None
if hasattr(value, "read"):
def wrap(value):
for i in itertools.count():
chunk = value.read(BUFFER_SIZE)
if len(chunk) == 0:
return
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
elif isinstance(value, collections.Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
def wrap(value):
for i, chunk in enumerate(value):
if i == 0:
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=chunk,
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
else:
yield proto.PutFileRequest(value=chunk)
else:
def wrap(value):
yield proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
value=value[:BUFFER_SIZE],
delimiter=delimiter,
target_file_datums=target_file_datums,
target_file_bytes=target_file_bytes,
overwrite_index=overwrite_index_proto
)
for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
yield proto.PutFileRequest(
value=value[i:i + BUFFER_SIZE],
overwrite_index=overwrite_index_proto
)
self.stub.PutFile(wrap(value), metadata=self.metadata)
def put_file_url(self, commit, path, url, recursive=False):
"""
Puts a file using the content found at a URL. The URL is sent to the
server which performs the request.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
* url: The url of the file to put.
* recursive: allow for recursive scraping of some types URLs for
example on s3:// urls.
"""
req = iter([
proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
url=url,
recursive=recursive
)
])
self.stub.PutFile(req, metadata=self.metadata)
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
"""
Returns an iterator of the contents contents of a file at a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path of the file.
* offset_bytes: Optional. specifies a number of bytes that should be
skipped in the beginning of the file.
* size_bytes: Optional. limits the total amount of data returned, note
you will get fewer bytes than size if you pass a value larger than the
size of the file. If size is set to 0 then all of the data will be
returned.
* extract_value: If True, then an ExtractValueIterator will be return,
which will iterate over the bytes of the file. If False, then the
protobuf response iterator will return.
"""
req = proto.GetFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
offset_bytes=offset_bytes,
size_bytes=size_bytes
)
res = self.stub.GetFile(req, metadata=self.metadata)
if extract_value:
return ExtractValueIterator(res)
return res
def get_files(self, commit, paths, recursive=False):
"""
Returns the contents of a list of files at a specific Commit as a
dictionary of file paths to data.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* paths: A list of paths to retrieve.
* recursive: If True, will go into each directory in the list
recursively.
"""
filtered_file_infos = []
for path in paths:
fi = self.inspect_file(commit, path)
if fi.file_type == proto.FILE:
filtered_file_infos.append(fi)
else:
filtered_file_infos += self.list_file(commit, path, recursive=recursive)
filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE]
return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
def list_file(self, commit, path, recursive=False):
"""
Lists the files in a directory.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the directory.
* recursive: If True, continue listing the files for sub-directories.
"""
req = proto.ListFileRequest(
file=proto.File(commit=commit_from(commit), path=path)
)
res = self.stub.ListFile(req, metadata=self.metadata)
file_infos = res.file_info
if recursive:
dirs = [f for f in file_infos if f.file_type == proto.DIR]
files = [f for f in file_infos if f.file_type == proto.FILE]
return sum([self.list_file(commit, d.file.path, recursive) for d in dirs], files)
return list(file_infos)
def glob_file(self, commit, pattern):
req = proto.GlobFileRequest(commit=commit_from(commit), pattern=pattern)
res = self.stub.GlobFile(req, metadata=self.metadata)
if hasattr(res, 'file_info'):
return res.file_info
return []
def delete_file(self, commit, path):
"""
Deletes a file from a Commit. DeleteFile leaves a tombstone in the
Commit, assuming the file isn't written to later attempting to get the
file from the finished commit will result in not found error. The file
will of course remain intact in the Commit's parent.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
"""
req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path))
self.stub.DeleteFile(req, metadata=self.metadata)
def delete_all(self):
req = proto.google_dot_protobuf_dot_empty__pb2.Empty()
self.stub.DeleteAll(req, metadata=self.metadata)
|
pachyderm/python-pachyderm | src/python_pachyderm/pfs_client.py | PfsClient.list_file | python | def list_file(self, commit, path, recursive=False):
req = proto.ListFileRequest(
file=proto.File(commit=commit_from(commit), path=path)
)
res = self.stub.ListFile(req, metadata=self.metadata)
file_infos = res.file_info
if recursive:
dirs = [f for f in file_infos if f.file_type == proto.DIR]
files = [f for f in file_infos if f.file_type == proto.FILE]
return sum([self.list_file(commit, d.file.path, recursive) for d in dirs], files)
return list(file_infos) | Lists the files in a directory.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the directory.
* recursive: If True, continue listing the files for sub-directories. | train | https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L446-L466 | [
"def commit_from(src, allow_just_repo=False):\n if isinstance(src, pfs_proto.Commit):\n return src\n elif isinstance(src, (tuple, list)) and len(src) == 2:\n return pfs_proto.Commit(repo=pfs_proto.Repo(name=src[0]), id=src[1])\n elif isinstance(src, six.string_types):\n repo_name, comm... | class PfsClient(object):
    def __init__(self, host=None, port=None, auth_token=None):
        """
        Creates a client to connect to PFS.

        Params:
        * host: The pachd host. Default is 'localhost', which is used with
        `pachctl port-forward`.
        * port: The port to connect to. Default is 30650.
        * auth_token: The authentication token; used if authentication is
        enabled on the cluster. Default to `None`.
        """
        address = get_address(host, port)
        # Metadata is attached to every RPC; it carries the auth token
        # (empty when no token is configured).
        self.metadata = get_metadata(auth_token)
        # NOTE(review): `grpc.grpc` suggests the local `grpc` name is a
        # generated wrapper module re-exporting the real grpc package --
        # confirm against the module's imports.
        self.channel = grpc.grpc.insecure_channel(address)
        self.stub = grpc.APIStub(self.channel)
def create_repo(self, repo_name, description=None):
"""
Creates a new Repo object in PFS with the given name. Repos are the
top level data object in PFS and should be used to store data of a
similar type. For example rather than having a single Repo for an
entire project you might have separate Repos for logs, metrics,
database dumps etc.
Params:
* repo_name: Name of the repo.
* description: Repo description.
"""
req = proto.CreateRepoRequest(repo=proto.Repo(name=repo_name), description=description)
self.stub.CreateRepo(req, metadata=self.metadata)
def inspect_repo(self, repo_name):
"""
Returns info about a specific Repo.
Params:
* repo_name: Name of the repo.
"""
req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
res = self.stub.InspectRepo(req, metadata=self.metadata)
return res
def list_repo(self):
"""
Returns info about all Repos.
"""
req = proto.ListRepoRequest()
res = self.stub.ListRepo(req, metadata=self.metadata)
if hasattr(res, 'repo_info'):
return res.repo_info
return []
def delete_repo(self, repo_name=None, force=False, all=False):
"""
Deletes a repo and reclaims the storage space it was using.
Params:
* repo_name: The name of the repo.
* force: If set to true, the repo will be removed regardless of
errors. This argument should be used with care.
* all: Delete all repos.
"""
if not all:
if repo_name:
req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Either a repo_name or all=True needs to be provided")
else:
if not repo_name:
req = proto.DeleteRepoRequest(force=force, all=all)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Cannot specify a repo_name if all=True")
    def start_commit(self, repo_name, branch=None, parent=None, description=None):
        """
        Begins the process of committing data to a Repo. Once started you can
        write to the Commit with PutFile and when all the data has been
        written you must finish the Commit with FinishCommit. NOTE, data is
        not persisted until FinishCommit is called. A Commit object is
        returned.

        Params:
        * repo_name: The name of the repo.
        * branch: A more convenient way to build linear chains of commits.
        When a commit is started with a non-empty branch the value of branch
        becomes an alias for the created Commit. This enables a more intuitive
        access pattern. When the commit is started on a branch the previous
        head of the branch is used as the parent of the commit.
        * parent: Specifies the parent Commit, upon creation the new Commit
        will appear identical to the parent Commit, data can safely be added
        to the new commit without affecting the contents of the parent Commit.
        You may pass "" as parentCommit in which case the new Commit will have
        no parent and will initially appear empty.
        * description: (optional) explanation of the commit for clarity.
        """
        # `parent` rides inside a Commit message keyed by the repo; an empty
        # id means "no parent" (see docstring above).
        req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch,
                                       description=description)
        res = self.stub.StartCommit(req, metadata=self.metadata)
        return res
def finish_commit(self, commit):
"""
Ends the process of committing data to a Repo and persists the
Commit. Once a Commit is finished the data becomes immutable and
future attempts to write to it with PutFile will error.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.FinishCommitRequest(commit=commit_from(commit))
res = self.stub.FinishCommit(req, metadata=self.metadata)
return res
    @contextmanager
    def commit(self, repo_name, branch=None, parent=None, description=None):
        """
        A context manager for doing stuff inside a commit.

        Starts a commit on entry and yields it; the commit is always
        finished on exit, even when the body raises (currently a commit
        cannot be cancelled).
        """
        commit = self.start_commit(repo_name, branch, parent, description)
        try:
            yield commit
        except Exception as e:
            print("An exception occurred during an open commit. "
                  "Trying to finish it (Currently a commit can't be cancelled)")
            raise e
        finally:
            # Finish unconditionally -- the except branch above only logs
            # and re-raises; the finally still runs afterwards.
            self.finish_commit(commit)
def inspect_commit(self, commit):
"""
Returns info about a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.InspectCommitRequest(commit=commit_from(commit))
return self.stub.InspectCommit(req, metadata=self.metadata)
def provenances_for_repo(self, repo_name):
provenances = {}
commits = self.list_commit(repo_name)
sorted_commits = [x[0] for x in
sorted([(c.commit.id, c.finished.seconds) for c in commits], key=lambda x: x[1])]
for c in sorted_commits:
for p in c.provenance:
provenances[p.id] = c.commit.id
return provenances
    def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
        """
        Gets a list of CommitInfo objects.

        Params:
        * repo_name: If only `repo_name` is given, all commits in the repo are
        returned.
        * to_commit: Optional. Only the ancestors of `to`, including `to`
        itself, are considered.
        * from_commit: Optional. Only the descendants of `from`, including
        `from` itself, are considered.
        * number: Optional. Determines how many commits are returned. If
        `number` is 0, all commits that match the aforementioned criteria are
        returned.
        """
        req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number)
        if to_commit is not None:
            req.to.CopyFrom(commit_from(to_commit))
        if from_commit is not None:
            # `from` is a Python keyword, so the protobuf field must be
            # reached via getattr instead of attribute syntax.
            getattr(req, 'from').CopyFrom(commit_from(from_commit))
        res = self.stub.ListCommit(req, metadata=self.metadata)
        if hasattr(res, 'commit_info'):
            return res.commit_info
        return []
def delete_commit(self, commit):
"""
Deletes a commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.DeleteCommitRequest(commit=commit_from(commit))
self.stub.DeleteCommit(req, metadata=self.metadata)
    def flush_commit(self, commits, repos=tuple()):
        """
        Blocks until all of the commits which have a set of commits as
        provenance have finished. For commits to be considered they must have
        all of the specified commits as provenance. This in effect waits for
        all of the jobs that are triggered by a set of commits to complete.
        It returns an error if any of the commits it's waiting on are
        cancelled due to one of the jobs encountering an error during runtime.
        Note that it's never necessary to call FlushCommit to run jobs,
        they'll run no matter what, FlushCommit just allows you to wait for
        them to complete and see their output once they do. This returns an
        iterator of CommitInfo objects.

        Params:
        * commits: A commit or a list of commits to wait on.
        * repos: Optional. Only the commits up to and including those repos.
        will be considered, otherwise all repos are considered.
        """
        # Each element of `commits` may be a tuple/string/Commit;
        # commit_from normalizes them all to Commit messages.
        req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits],
                                       to_repo=[proto.Repo(name=r) for r in repos])
        res = self.stub.FlushCommit(req, metadata=self.metadata)
        return res
    def subscribe_commit(self, repo_name, branch, from_commit_id=None):
        """
        SubscribeCommit is like ListCommit but it keeps listening for commits
        as they come in. This returns an iterator of Commit objects.

        Params:
        * repo_name: Name of the repo.
        * branch: Branch to subscribe to.
        * from_commit_id: Optional. Only commits created since this commit
        are returned.
        """
        repo = proto.Repo(name=repo_name)
        req = proto.SubscribeCommitRequest(repo=repo, branch=branch)
        if from_commit_id is not None:
            # `from` is a Python keyword, so the protobuf field must be
            # reached via getattr instead of attribute syntax.
            getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id))
        res = self.stub.SubscribeCommit(req, metadata=self.metadata)
        return res
def list_branch(self, repo_name):
"""
Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo.
"""
req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
res = self.stub.ListBranch(req, metadata=self.metadata)
if hasattr(res, 'branch_info'):
return res.branch_info
return []
def set_branch(self, commit, branch_name):
"""
Sets a commit and its ancestors as a branch.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* branch_name: The name for the branch to set.
"""
res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name)
self.stub.SetBranch(res, metadata=self.metadata)
def delete_branch(self, repo_name, branch_name):
"""
Deletes a branch, but leaves the commits themselves intact. In other
words, those commits can still be accessed via commit IDs and other
branches they happen to be on.
Params:
* repo_name: The name of the repo.
* branch_name: The name of the branch to delete.
"""
res = proto.DeleteBranchRequest(repo=Repo(name=repo_name), branch=branch_name)
self.stub.DeleteBranch(res, metadata=self.metadata)
    def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
                       target_file_datums=0, target_file_bytes=0, overwrite_index=None):
        """
        Uploads a binary bytes array as file(s) in a certain path.

        Params:
        * commit: A tuple, string, or Commit object representing the commit.
        * path: Path in the repo the file(s) will be written to.
        * value: The file contents as bytes, represented as a file-like
        object, bytestring, or iterator of bytestrings.
        * delimiter: Optional. causes data to be broken up into separate files
        with `path` as a prefix.
        * target_file_datums: Optional. Specifies the target number of datums
        in each written file. It may be lower if data does not split evenly,
        but will never be higher, unless the value is 0.
        * target_file_bytes: Specifies the target number of bytes in each
        written file, files may have more or fewer bytes than the target.
        * overwrite_index: Optional. Wrapped into an OverwriteIndex message
        when given; None means no overwrite index is sent.
        """
        overwrite_index_proto = proto.OverwriteIndex(index=overwrite_index) if overwrite_index is not None else None
        # PutFile is a client-streaming RPC: only the first request in the
        # stream carries the file metadata (path, delimiter, targets);
        # subsequent requests carry only further chunks of data.
        if hasattr(value, "read"):
            # File-like object: read BUFFER_SIZE chunks until EOF.
            def wrap(value):
                for i in itertools.count():
                    chunk = value.read(BUFFER_SIZE)
                    if len(chunk) == 0:
                        return
                    if i == 0:
                        yield proto.PutFileRequest(
                            file=proto.File(commit=commit_from(commit), path=path),
                            value=chunk,
                            delimiter=delimiter,
                            target_file_datums=target_file_datums,
                            target_file_bytes=target_file_bytes,
                            overwrite_index=overwrite_index_proto
                        )
                    else:
                        yield proto.PutFileRequest(value=chunk)
        elif isinstance(value, collections.Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
            # Non-string iterable of byte chunks: forward each chunk as-is.
            # NOTE(review): `collections.Iterable` was removed in Python
            # 3.10 (use `collections.abc.Iterable`); confirm the supported
            # Python versions before relying on this branch.
            def wrap(value):
                for i, chunk in enumerate(value):
                    if i == 0:
                        yield proto.PutFileRequest(
                            file=proto.File(commit=commit_from(commit), path=path),
                            value=chunk,
                            delimiter=delimiter,
                            target_file_datums=target_file_datums,
                            target_file_bytes=target_file_bytes,
                            overwrite_index=overwrite_index_proto
                        )
                    else:
                        yield proto.PutFileRequest(value=chunk)
        else:
            # Plain bytestring: slice it into BUFFER_SIZE-sized chunks.
            def wrap(value):
                yield proto.PutFileRequest(
                    file=proto.File(commit=commit_from(commit), path=path),
                    value=value[:BUFFER_SIZE],
                    delimiter=delimiter,
                    target_file_datums=target_file_datums,
                    target_file_bytes=target_file_bytes,
                    overwrite_index=overwrite_index_proto
                )
                for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
                    yield proto.PutFileRequest(
                        value=value[i:i + BUFFER_SIZE],
                        overwrite_index=overwrite_index_proto
                    )
        self.stub.PutFile(wrap(value), metadata=self.metadata)
def put_file_url(self, commit, path, url, recursive=False):
"""
Puts a file using the content found at a URL. The URL is sent to the
server which performs the request.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
* url: The url of the file to put.
* recursive: allow for recursive scraping of some types URLs for
example on s3:// urls.
"""
req = iter([
proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
url=url,
recursive=recursive
)
])
self.stub.PutFile(req, metadata=self.metadata)
    def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
        """
        Returns an iterator over the contents of a file at a specific Commit.

        Params:
        * commit: A tuple, string, or Commit object representing the commit.
        * path: The path of the file.
        * offset_bytes: Optional. specifies a number of bytes that should be
        skipped in the beginning of the file.
        * size_bytes: Optional. limits the total amount of data returned, note
        you will get fewer bytes than size if you pass a value larger than the
        size of the file. If size is set to 0 then all of the data will be
        returned.
        * extract_value: If True, then an ExtractValueIterator will be
        returned, which will iterate over the bytes of the file. If False,
        then the protobuf response iterator will be returned.
        """
        req = proto.GetFileRequest(
            file=proto.File(commit=commit_from(commit), path=path),
            offset_bytes=offset_bytes,
            size_bytes=size_bytes
        )
        res = self.stub.GetFile(req, metadata=self.metadata)
        if extract_value:
            # Unwrap each streamed response message down to its raw bytes.
            return ExtractValueIterator(res)
        return res
    def get_files(self, commit, paths, recursive=False):
        """
        Returns the contents of a list of files at a specific Commit as a
        dictionary of file paths to data.

        Params:
        * commit: A tuple, string, or Commit object representing the commit.
        * paths: A list of paths to retrieve.
        * recursive: If True, will go into each directory in the list
        recursively.
        """
        # Expand the requested paths: regular files are kept as-is,
        # directories are expanded via list_file (recursively if asked).
        filtered_file_infos = []
        for path in paths:
            fi = self.inspect_file(commit, path)
            if fi.file_type == proto.FILE:
                filtered_file_infos.append(fi)
            else:
                filtered_file_infos += self.list_file(commit, path, recursive=recursive)
        # Only regular files are fetched; directory entries are dropped.
        filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE]
        return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
def inspect_file(self, commit, path):
"""
Returns info about a specific file.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path to file.
"""
req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path))
res = self.stub.InspectFile(req, metadata=self.metadata)
return res
    def glob_file(self, commit, pattern):
        """
        Returns the FileInfo entries matching `pattern` at the given commit,
        or an empty list when the response carries no `file_info` field.

        Params:
        * commit: A tuple, string, or Commit object representing the commit.
        * pattern: Glob pattern to match file paths against.
        """
        req = proto.GlobFileRequest(commit=commit_from(commit), pattern=pattern)
        res = self.stub.GlobFile(req, metadata=self.metadata)
        if hasattr(res, 'file_info'):
            return res.file_info
        return []
def delete_file(self, commit, path):
"""
Deletes a file from a Commit. DeleteFile leaves a tombstone in the
Commit, assuming the file isn't written to later attempting to get the
file from the finished commit will result in not found error. The file
will of course remain intact in the Commit's parent.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
"""
req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path))
self.stub.DeleteFile(req, metadata=self.metadata)
    def delete_all(self):
        """
        Issues the DeleteAll RPC with an empty request.

        NOTE(review): presumably this deletes all repos/commits/files on the
        cluster -- confirm against the pachd API before calling casually.
        """
        req = proto.google_dot_protobuf_dot_empty__pb2.Empty()
        self.stub.DeleteAll(req, metadata=self.metadata)
|
pachyderm/python-pachyderm | src/python_pachyderm/pfs_client.py | PfsClient.delete_file | python | def delete_file(self, commit, path):
req = proto.DeleteFileRequest(file=proto.File(commit=commit_from(commit), path=path))
self.stub.DeleteFile(req, metadata=self.metadata) | Deletes a file from a Commit. DeleteFile leaves a tombstone in the
Commit, assuming the file isn't written to later attempting to get the
file from the finished commit will result in not found error. The file
will of course remain intact in the Commit's parent.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file. | train | https://github.com/pachyderm/python-pachyderm/blob/1c58cf91d30e03716a4f45213989e890f7b8a78c/src/python_pachyderm/pfs_client.py#L475-L487 | [
"def commit_from(src, allow_just_repo=False):\n if isinstance(src, pfs_proto.Commit):\n return src\n elif isinstance(src, (tuple, list)) and len(src) == 2:\n return pfs_proto.Commit(repo=pfs_proto.Repo(name=src[0]), id=src[1])\n elif isinstance(src, six.string_types):\n repo_name, comm... | class PfsClient(object):
def __init__(self, host=None, port=None, auth_token=None):
"""
Creates a client to connect to PFS.
Params:
* host: The pachd host. Default is 'localhost', which is used with
`pachctl port-forward`.
* port: The port to connect to. Default is 30650.
* auth_token: The authentication token; used if authentication is
enabled on the cluster. Default to `None`.
"""
address = get_address(host, port)
self.metadata = get_metadata(auth_token)
self.channel = grpc.grpc.insecure_channel(address)
self.stub = grpc.APIStub(self.channel)
def create_repo(self, repo_name, description=None):
"""
Creates a new Repo object in PFS with the given name. Repos are the
top level data object in PFS and should be used to store data of a
similar type. For example rather than having a single Repo for an
entire project you might have separate Repos for logs, metrics,
database dumps etc.
Params:
* repo_name: Name of the repo.
* description: Repo description.
"""
req = proto.CreateRepoRequest(repo=proto.Repo(name=repo_name), description=description)
self.stub.CreateRepo(req, metadata=self.metadata)
def inspect_repo(self, repo_name):
"""
Returns info about a specific Repo.
Params:
* repo_name: Name of the repo.
"""
req = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
res = self.stub.InspectRepo(req, metadata=self.metadata)
return res
def list_repo(self):
"""
Returns info about all Repos.
"""
req = proto.ListRepoRequest()
res = self.stub.ListRepo(req, metadata=self.metadata)
if hasattr(res, 'repo_info'):
return res.repo_info
return []
def delete_repo(self, repo_name=None, force=False, all=False):
"""
Deletes a repo and reclaims the storage space it was using.
Params:
* repo_name: The name of the repo.
* force: If set to true, the repo will be removed regardless of
errors. This argument should be used with care.
* all: Delete all repos.
"""
if not all:
if repo_name:
req = proto.DeleteRepoRequest(repo=proto.Repo(name=repo_name), force=force)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Either a repo_name or all=True needs to be provided")
else:
if not repo_name:
req = proto.DeleteRepoRequest(force=force, all=all)
self.stub.DeleteRepo(req, metadata=self.metadata)
else:
raise ValueError("Cannot specify a repo_name if all=True")
def start_commit(self, repo_name, branch=None, parent=None, description=None):
"""
Begins the process of committing data to a Repo. Once started you can
write to the Commit with PutFile and when all the data has been
written you must finish the Commit with FinishCommit. NOTE, data is
not persisted until FinishCommit is called. A Commit object is
returned.
Params:
* repo_name: The name of the repo.
* branch: A more convenient way to build linear chains of commits.
When a commit is started with a non-empty branch the value of branch
becomes an alias for the created Commit. This enables a more intuitive
access pattern. When the commit is started on a branch the previous
head of the branch is used as the parent of the commit.
* parent: Specifies the parent Commit, upon creation the new Commit
will appear identical to the parent Commit, data can safely be added
to the new commit without affecting the contents of the parent Commit.
You may pass "" as parentCommit in which case the new Commit will have
no parent and will initially appear empty.
* description: (optional) explanation of the commit for clarity.
"""
req = proto.StartCommitRequest(parent=proto.Commit(repo=proto.Repo(name=repo_name), id=parent), branch=branch,
description=description)
res = self.stub.StartCommit(req, metadata=self.metadata)
return res
def finish_commit(self, commit):
"""
Ends the process of committing data to a Repo and persists the
Commit. Once a Commit is finished the data becomes immutable and
future attempts to write to it with PutFile will error.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.FinishCommitRequest(commit=commit_from(commit))
res = self.stub.FinishCommit(req, metadata=self.metadata)
return res
@contextmanager
def commit(self, repo_name, branch=None, parent=None, description=None):
"""A context manager for doing stuff inside a commit."""
commit = self.start_commit(repo_name, branch, parent, description)
try:
yield commit
except Exception as e:
print("An exception occurred during an open commit. "
"Trying to finish it (Currently a commit can't be cancelled)")
raise e
finally:
self.finish_commit(commit)
def inspect_commit(self, commit):
"""
Returns info about a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.InspectCommitRequest(commit=commit_from(commit))
return self.stub.InspectCommit(req, metadata=self.metadata)
def provenances_for_repo(self, repo_name):
provenances = {}
commits = self.list_commit(repo_name)
sorted_commits = [x[0] for x in
sorted([(c.commit.id, c.finished.seconds) for c in commits], key=lambda x: x[1])]
for c in sorted_commits:
for p in c.provenance:
provenances[p.id] = c.commit.id
return provenances
def list_commit(self, repo_name, to_commit=None, from_commit=None, number=0):
"""
Gets a list of CommitInfo objects.
Params:
* repo_name: If only `repo_name` is given, all commits in the repo are
returned.
* to_commit: Optional. Only the ancestors of `to`, including `to`
itself, are considered.
* from_commit: Optional. Only the descendants of `from`, including
`from` itself, are considered.
* number: Optional. Determines how many commits are returned. If
`number` is 0, all commits that match the aforementioned criteria are
returned.
"""
req = proto.ListCommitRequest(repo=proto.Repo(name=repo_name), number=number)
if to_commit is not None:
req.to.CopyFrom(commit_from(to_commit))
if from_commit is not None:
getattr(req, 'from').CopyFrom(commit_from(from_commit))
res = self.stub.ListCommit(req, metadata=self.metadata)
if hasattr(res, 'commit_info'):
return res.commit_info
return []
def delete_commit(self, commit):
"""
Deletes a commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
"""
req = proto.DeleteCommitRequest(commit=commit_from(commit))
self.stub.DeleteCommit(req, metadata=self.metadata)
def flush_commit(self, commits, repos=tuple()):
"""
Blocks until all of the commits which have a set of commits as
provenance have finished. For commits to be considered they must have
all of the specified commits as provenance. This in effect waits for
all of the jobs that are triggered by a set of commits to complete.
It returns an error if any of the commits it's waiting on are
cancelled due to one of the jobs encountering an error during runtime.
Note that it's never necessary to call FlushCommit to run jobs,
they'll run no matter what, FlushCommit just allows you to wait for
them to complete and see their output once they do. This returns an
iterator of CommitInfo objects.
Params:
* commits: A commit or a list of commits to wait on.
* repos: Optional. Only the commits up to and including those repos.
will be considered, otherwise all repos are considered.
"""
req = proto.FlushCommitRequest(commit=[commit_from(c) for c in commits],
to_repo=[proto.Repo(name=r) for r in repos])
res = self.stub.FlushCommit(req, metadata=self.metadata)
return res
def subscribe_commit(self, repo_name, branch, from_commit_id=None):
"""
SubscribeCommit is like ListCommit but it keeps listening for commits as
they come in. This returns an iterator Commit objects.
Params:
* repo_name: Name of the repo.
* branch: Branch to subscribe to.
* from_commit_id: Optional. Only commits created since this commit
are returned.
"""
repo = proto.Repo(name=repo_name)
req = proto.SubscribeCommitRequest(repo=repo, branch=branch)
if from_commit_id is not None:
getattr(req, 'from').CopyFrom(proto.Commit(repo=repo, id=from_commit_id))
res = self.stub.SubscribeCommit(req, metadata=self.metadata)
return res
def list_branch(self, repo_name):
"""
Lists the active Branch objects on a Repo.
Params:
* repo_name: The name of the repo.
"""
req = proto.ListBranchRequest(repo=proto.Repo(name=repo_name))
res = self.stub.ListBranch(req, metadata=self.metadata)
if hasattr(res, 'branch_info'):
return res.branch_info
return []
def set_branch(self, commit, branch_name):
"""
Sets a commit and its ancestors as a branch.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* branch_name: The name for the branch to set.
"""
res = proto.SetBranchRequest(commit=commit_from(commit), branch=branch_name)
self.stub.SetBranch(res, metadata=self.metadata)
def delete_branch(self, repo_name, branch_name):
"""
Deletes a branch, but leaves the commits themselves intact. In other
words, those commits can still be accessed via commit IDs and other
branches they happen to be on.
Params:
* repo_name: The name of the repo.
* branch_name: The name of the branch to delete.
"""
res = proto.DeleteBranchRequest(repo=Repo(name=repo_name), branch=branch_name)
self.stub.DeleteBranch(res, metadata=self.metadata)
    def put_file_bytes(self, commit, path, value, delimiter=proto.NONE,
                       target_file_datums=0, target_file_bytes=0, overwrite_index=None):
        """
        Uploads a binary bytes array as file(s) in a certain path.

        Params:

        * commit: A tuple, string, or Commit object representing the commit.
        * path: Path in the repo the file(s) will be written to.
        * value: The file contents as bytes, represented as a file-like
        object, bytestring, or iterator of bytestrings.
        * delimiter: Optional. causes data to be broken up into separate files
        with `path` as a prefix.
        * target_file_datums: Optional. Specifies the target number of datums
        in each written file. It may be lower if data does not split evenly,
        but will never be higher, unless the value is 0.
        * target_file_bytes: Specifies the target number of bytes in each
        written file, files may have more or fewer bytes than the target.
        * overwrite_index: Optional. Wrapped in a proto.OverwriteIndex message
        and attached to the requests -- presumably selects which existing
        datums are overwritten; verify against the PFS API docs.
        """
        # PutFile is a client-streaming RPC: the first request carries the
        # file metadata plus the first chunk, later requests carry only data.
        overwrite_index_proto = proto.OverwriteIndex(index=overwrite_index) if overwrite_index is not None else None
        if hasattr(value, "read"):
            # File-like object: stream it in BUFFER_SIZE chunks until EOF.
            def wrap(value):
                for i in itertools.count():
                    chunk = value.read(BUFFER_SIZE)
                    if len(chunk) == 0:
                        return
                    if i == 0:
                        yield proto.PutFileRequest(
                            file=proto.File(commit=commit_from(commit), path=path),
                            value=chunk,
                            delimiter=delimiter,
                            target_file_datums=target_file_datums,
                            target_file_bytes=target_file_bytes,
                            overwrite_index=overwrite_index_proto
                        )
                    else:
                        yield proto.PutFileRequest(value=chunk)
        elif isinstance(value, collections.Iterable) and not isinstance(value, (six.string_types, six.binary_type)):
            # Iterator of byte chunks; strings/bytes are excluded so they fall
            # through to the in-memory branch below.
            # NOTE(review): collections.Iterable was removed in Python 3.10;
            # collections.abc.Iterable is the modern spelling.
            def wrap(value):
                for i, chunk in enumerate(value):
                    if i == 0:
                        yield proto.PutFileRequest(
                            file=proto.File(commit=commit_from(commit), path=path),
                            value=chunk,
                            delimiter=delimiter,
                            target_file_datums=target_file_datums,
                            target_file_bytes=target_file_bytes,
                            overwrite_index=overwrite_index_proto
                        )
                    else:
                        yield proto.PutFileRequest(value=chunk)
        else:
            # In-memory bytestring: slice it into BUFFER_SIZE chunks.
            # NOTE(review): unlike the two branches above, the follow-up
            # requests here also carry overwrite_index -- confirm whether
            # that asymmetry is intentional.
            def wrap(value):
                yield proto.PutFileRequest(
                    file=proto.File(commit=commit_from(commit), path=path),
                    value=value[:BUFFER_SIZE],
                    delimiter=delimiter,
                    target_file_datums=target_file_datums,
                    target_file_bytes=target_file_bytes,
                    overwrite_index=overwrite_index_proto
                )
                for i in range(BUFFER_SIZE, len(value), BUFFER_SIZE):
                    yield proto.PutFileRequest(
                        value=value[i:i + BUFFER_SIZE],
                        overwrite_index=overwrite_index_proto
                    )
        self.stub.PutFile(wrap(value), metadata=self.metadata)
def put_file_url(self, commit, path, url, recursive=False):
"""
Puts a file using the content found at a URL. The URL is sent to the
server which performs the request.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the file.
* url: The url of the file to put.
* recursive: allow for recursive scraping of some types URLs for
example on s3:// urls.
"""
req = iter([
proto.PutFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
url=url,
recursive=recursive
)
])
self.stub.PutFile(req, metadata=self.metadata)
def get_file(self, commit, path, offset_bytes=0, size_bytes=0, extract_value=True):
"""
Returns an iterator of the contents contents of a file at a specific Commit.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path of the file.
* offset_bytes: Optional. specifies a number of bytes that should be
skipped in the beginning of the file.
* size_bytes: Optional. limits the total amount of data returned, note
you will get fewer bytes than size if you pass a value larger than the
size of the file. If size is set to 0 then all of the data will be
returned.
* extract_value: If True, then an ExtractValueIterator will be return,
which will iterate over the bytes of the file. If False, then the
protobuf response iterator will return.
"""
req = proto.GetFileRequest(
file=proto.File(commit=commit_from(commit), path=path),
offset_bytes=offset_bytes,
size_bytes=size_bytes
)
res = self.stub.GetFile(req, metadata=self.metadata)
if extract_value:
return ExtractValueIterator(res)
return res
def get_files(self, commit, paths, recursive=False):
"""
Returns the contents of a list of files at a specific Commit as a
dictionary of file paths to data.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* paths: A list of paths to retrieve.
* recursive: If True, will go into each directory in the list
recursively.
"""
filtered_file_infos = []
for path in paths:
fi = self.inspect_file(commit, path)
if fi.file_type == proto.FILE:
filtered_file_infos.append(fi)
else:
filtered_file_infos += self.list_file(commit, path, recursive=recursive)
filtered_paths = [fi.file.path for fi in filtered_file_infos if fi.file_type == proto.FILE]
return {path: b''.join(self.get_file(commit, path)) for path in filtered_paths}
def inspect_file(self, commit, path):
"""
Returns info about a specific file.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: Path to file.
"""
req = proto.InspectFileRequest(file=proto.File(commit=commit_from(commit), path=path))
res = self.stub.InspectFile(req, metadata=self.metadata)
return res
def list_file(self, commit, path, recursive=False):
"""
Lists the files in a directory.
Params:
* commit: A tuple, string, or Commit object representing the commit.
* path: The path to the directory.
* recursive: If True, continue listing the files for sub-directories.
"""
req = proto.ListFileRequest(
file=proto.File(commit=commit_from(commit), path=path)
)
res = self.stub.ListFile(req, metadata=self.metadata)
file_infos = res.file_info
if recursive:
dirs = [f for f in file_infos if f.file_type == proto.DIR]
files = [f for f in file_infos if f.file_type == proto.FILE]
return sum([self.list_file(commit, d.file.path, recursive) for d in dirs], files)
return list(file_infos)
def glob_file(self, commit, pattern):
req = proto.GlobFileRequest(commit=commit_from(commit), pattern=pattern)
res = self.stub.GlobFile(req, metadata=self.metadata)
if hasattr(res, 'file_info'):
return res.file_info
return []
def delete_all(self):
req = proto.google_dot_protobuf_dot_empty__pb2.Empty()
self.stub.DeleteAll(req, metadata=self.metadata)
|
antiboredom/videogrep | videogrep/vtt.py | parse_auto_sub | python | def parse_auto_sub(text):
'''
Parses webvtt and returns timestamps for words and lines
Tested on automatically generated subtitles from YouTube
'''
pat = r'<(\d\d:\d\d:\d\d(\.\d+)?)>'
out = []
lines = []
data = text.split('\n')
data = [d for d in data if re.search(r'\d\d:\d\d:\d\d', d) is not None]
for i, d in enumerate(data):
if re.search(pat, d):
lines.append((data[i-1], d))
if len(lines) > 0:
out = parse_cued(lines)
else:
out = parse_uncued(text)
return out | Parses webvtt and returns timestamps for words and lines
Tested on automatically generated subtitles from YouTube | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/vtt.py#L92-L113 | [
"def parse_cued(data):\n out = []\n pat = r'<(\\d\\d:\\d\\d:\\d\\d(\\.\\d+)?)>'\n\n for lines in data:\n meta, content = lines\n start, end = meta.split(' --> ')\n end = end.split(' ')[0]\n start = timestamp_to_secs(start)\n end = timestamp_to_secs(end)\n text = Be... | # encoding=utf8
from __future__ import print_function
from __future__ import unicode_literals
import re
from bs4 import BeautifulSoup
def timestamp_to_secs(ts):
    """Convert an ``HH:MM:SS[.fff]`` timestamp string into seconds (float)."""
    hours, minutes, seconds = (float(part) for part in ts.split(':'))
    return (hours * 60 + minutes) * 60 + seconds
def secs_to_timestamp(secs):
    """Convert a number of seconds into an ``HH:MM:SS.mmm`` timestamp string.

    Params:

    * secs: seconds as an int or float.

    Fix: the previous format used ``"%02f"`` for the seconds field; a width
    of 2 is a no-op for floats (they always render wider), so seconds came
    out unpadded, e.g. ``00:00:5.000000``.  ``%06.3f`` zero-pads to two
    integer digits and keeps millisecond precision.
    """
    minutes, seconds = divmod(secs, 60)
    hours, minutes = divmod(minutes, 60)
    return "%02d:%02d:%06.3f" % (hours, minutes, seconds)
def parse_cued(data):
    """Build per-word timings from cue lines that carry inline
    <HH:MM:SS.fff> tags (YouTube auto-subtitle style).

    ``data`` is a list of (timing_line, content_line) pairs.  Returns a list
    of sentence dicts: {'text', 'words': [{'word', 'start', 'end'}, ...],
    'start', 'end'} with times in seconds.
    """
    out = []
    # Matches an inline cue tag like <00:00:01.280>; group 1 is the timestamp.
    pat = r'<(\d\d:\d\d:\d\d(\.\d+)?)>'
    for lines in data:
        meta, content = lines
        start, end = meta.split(' --> ')
        # Drop any cue settings that follow the end timestamp.
        end = end.split(' ')[0]
        start = timestamp_to_secs(start)
        end = timestamp_to_secs(end)
        # NOTE(review): html.parser appears to strip real markup (<c> spans)
        # while leaving the numeric pseudo-tags in the text (digits are not
        # valid tag names) -- the re.split below depends on that; confirm.
        text = BeautifulSoup(content, 'html.parser').text
        words = text.split(' ')
        sentence = {'text': '', 'words': []}
        for word in words:
            item = {}
            item['start'] = start
            item['end'] = end
            # When the word carries an inline end-time tag, re.split yields
            # [word, timestamp, fractional-group, ''].
            word_parts = re.split(pat, word)
            item['word'] = word_parts[0]
            if len(word_parts) > 1:
                item['end'] = timestamp_to_secs(word_parts[1])
            sentence['words'].append(item)
            # The next word starts where this one ended.
            start = item['end']
        sentence['text'] = ' '.join([w['word'] for w in sentence['words']])
        out.append(sentence)
    for index, sentence in enumerate(out):
        if index == 0:
            sentence['start'] = sentence['words'][0]['start']
            sentence['end'] = sentence['words'][-1]['end']
            continue
        # Clamp the previous sentence's last word so word ranges never
        # overlap across sentence boundaries.
        first_word = sentence['words'][0]
        last_word = out[index-1]['words'][-1]
        if last_word['end'] > first_word['start']:
            last_word['end'] = first_word['start']
        sentence['start'] = sentence['words'][0]['start']
        sentence['end'] = sentence['words'][-1]['end']
    return out
def parse_uncued(data):
    """Parse WebVTT text that has no inline per-word cue tags.

    Returns a list of dicts with 'text', 'start' and 'end' keys (times in
    seconds), one per cue block.
    """
    lines = [line.strip() for line in data.split('\n') if line.strip() != '']
    cues = [{'text': '', 'start': None, 'end': None}]
    for line in lines:
        if ' --> ' in line:
            start_ts, end_ts = line.split(' --> ')
            start = timestamp_to_secs(start_ts)
            # Drop any cue settings after the end timestamp.
            end = timestamp_to_secs(end_ts.split(' ')[0])
            if cues[-1]['start'] is None:
                cues[-1]['start'] = start
                cues[-1]['end'] = end
            else:
                cues.append({'text': '', 'start': start, 'end': end})
        elif cues[-1]['start'] is not None:
            # Text lines before the first timing line are ignored.
            cues[-1]['text'] += ' ' + line
    for cue in cues:
        cue['text'] = cue['text'].strip()
    return cues
def convert_to_srt(sentence):
    """Render parsed sentences as SubRip (.srt) subtitle text.

    Params:

    * sentence: list of sentence dicts (each with 'text' and a 'words' list
      whose items carry 'start'/'end' in seconds), as produced by the
      parse_* functions.

    Fixes:

    * SRT cue numbering is 1-based per the SubRip format; previously the
      0-based enumerate index was emitted.
    * The loop variable no longer shadows the ``sentence`` parameter.
    """
    out = []
    for i, item in enumerate(sentence):
        out.append(str(i + 1))  # SRT cue indices start at 1
        start = secs_to_timestamp(item['words'][0]['start'])
        end = secs_to_timestamp(item['words'][-1]['end'])
        out.append('{} --> {}'.format(start, end))
        out.append(item['text'])
        out.append('')  # blank line terminates each cue
    return '\n'.join(out)
def convert_to_sphinx(sentences):
    """Render parsed sentences in CMU Sphinx word-alignment format."""
    lines = []
    for sentence in sentences:
        words = sentence['words']
        sentence_start = words[0]['start']
        sentence_end = words[-1]['end']
        lines.append(sentence['text'])
        lines.append('<s> {} {} .9'.format(sentence_start, sentence_start))
        for word in words:
            lines.append('{} {} {} {}'.format(
                word['word'], word['start'], word['end'], '.999'))
        lines.append('</s> {} {} .9'.format(sentence_end, sentence_end))
    return '\n'.join(lines)
if __name__ == '__main__':
    # CLI entry point: read the WebVTT file named by the first argument and
    # print it converted to SRT.
    import sys
    with open(sys.argv[1]) as infile:
        text = infile.read()
    sentences = parse_auto_sub(text)
    print(convert_to_srt(sentences))
|
antiboredom/videogrep | videogrep/timecode.py | Timecode.framerate | python | def framerate(self, framerate):
# set the int_frame_rate
if framerate == '29.97':
self._int_framerate = 30
self.drop_frame = True
elif framerate == '59.94':
self._int_framerate = 60
self.drop_frame = True
elif framerate == '23.98':
self._int_framerate = 24
elif framerate == 'ms':
self._int_framerate = 1000
framerate = 1000
elif framerate == 'frames':
self._int_framerate = 1
else:
self._int_framerate = int(framerate)
self._framerate = framerate | setter for the framerate attribute
:param framerate:
:return: | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/timecode.py#L80-L102 | null | class Timecode(object):
    def __init__(self, framerate, start_timecode=None, start_seconds=None,
                 frames=None):
        """The main timecode class.

        Does all the calculation over frames, so the main data it holds is
        frames, then when required it converts the frames to a timecode by
        using the frame rate setting.

        :param str framerate: The frame rate of the Timecode instance. It
          should be one of ['23.98', '24', '25', '29.97', '30', '50', '59.94',
          '60', 'ms'] where "ms" equals to 1000 fps. Can not be skipped.
          Setting the framerate will automatically set the :attr:`.drop_frame`
          attribute to correct value.
        :param start_timecode: The start timecode. Use this to be able to
          set the timecode of this Timecode instance. It can be skipped and
          then the frames attribute will define the timecode, and if it is also
          skipped then the start_second attribute will define the start
          timecode, and if start_seconds is also skipped then the default value
          of '00:00:00:00' will be used.
        :type start_timecode: str or None
        :param start_seconds: A float or integer value showing the seconds.
        :param int frames: Timecode objects can be initialized with an
          integer number showing the total frames.
        """
        self.drop_frame = False
        self._int_framerate = None
        self._framerate = None
        # Assigning through the ``framerate`` property also derives
        # _int_framerate and drop_frame.
        self.framerate = framerate
        self.frames = None
        # attribute override order
        # start_timecode > frames > start_seconds
        if start_timecode:
            self.frames = self.tc_to_frames(start_timecode)
        else:
            if frames is not None:  # because 0==False, and frames can be 0
                self.frames = frames
            elif start_seconds is not None:
                # NOTE(review): float_to_tc returns a frame count despite
                # its name suggesting a timecode.
                self.frames = self.float_to_tc(start_seconds)
            else:
                # use default value of 00:00:00:00
                self.frames = self.tc_to_frames('00:00:00:00')
@property
def framerate(self):
"""getter for _framerate attribute
"""
return self._framerate
@framerate.setter
def set_timecode(self, timecode):
"""Sets the frames by using the given timecode
"""
self.frames = self.tc_to_frames(timecode)
def float_to_tc(self, seconds):
"""set the frames by using the given seconds
"""
return int(seconds * self._int_framerate)
    def tc_to_frames(self, timecode):
        """Converts the given timecode to frames

        Returns a 1-based frame count: '00:00:00:00' maps to frame 1.
        """
        hours, minutes, seconds, frames = map(int, timecode.split(':'))
        ffps = float(self._framerate)
        if self.drop_frame:
            # Number of drop frames is 6% of framerate rounded to nearest
            # integer
            drop_frames = int(round(ffps * .066666))
        else:
            drop_frames = 0
        # We don't need the exact framerate anymore, we just need it rounded to
        # nearest integer
        ifps = self._int_framerate
        # Number of frames per hour (non-drop)
        hour_frames = ifps * 60 * 60
        # Number of frames per minute (non-drop)
        minute_frames = ifps * 60
        # Total number of minutes
        total_minutes = (60 * hours) + minutes
        # Drop-frame correction: drop_frames frames are skipped every minute
        # except every tenth minute.
        frame_number = \
            ((hour_frames * hours) + (minute_frames * minutes) +
             (ifps * seconds) + frames) - \
            (drop_frames * (total_minutes - (total_minutes // 10)))
        frames = frame_number + 1
        return frames
    def frames_to_tc(self, frames):
        """Converts frames back to timecode

        :returns tuple: (hrs, mins, secs, frs) as ints.  (The previous
          docstring claimed a string; __repr__ does the formatting.)
        """
        if frames == 0:
            return 0, 0, 0, 0
        ffps = float(self._framerate)
        if self.drop_frame:
            # Number of frames to drop on the minute marks is the nearest
            # integer to 6% of the framerate
            drop_frames = int(round(ffps * .066666))
        else:
            drop_frames = 0
        # Number of frames in an hour
        frames_per_hour = int(round(ffps * 60 * 60))
        # Number of frames in a day - timecode rolls over after 24 hours
        frames_per_24_hours = frames_per_hour * 24
        # Number of frames per ten minutes
        frames_per_10_minutes = int(round(ffps * 60 * 10))
        # Number of frames per minute is the round of the framerate * 60 minus
        # the number of dropped frames
        frames_per_minute = int(round(ffps)*60) - drop_frames
        # frames is 1-based (see tc_to_frames); frame_number is 0-based.
        frame_number = frames - 1
        if frame_number < 0:
            # Negative time. Add 24 hours.
            frame_number += frames_per_24_hours
        # If frame_number is greater than 24 hrs, next operation will rollover
        # clock
        frame_number %= frames_per_24_hours
        if self.drop_frame:
            # Re-insert the dropped frames so the modular arithmetic below
            # yields wall-clock digits.
            d = frame_number // frames_per_10_minutes
            m = frame_number % frames_per_10_minutes
            if m > drop_frames:
                frame_number += (drop_frames * 9 * d) + \
                    drop_frames * ((m - drop_frames) // frames_per_minute)
            else:
                frame_number += drop_frames * 9 * d
        ifps = self._int_framerate
        frs = frame_number % ifps
        secs = (frame_number // ifps) % 60
        mins = ((frame_number // ifps) // 60) % 60
        hrs = (((frame_number // ifps) // 60) // 60)
        return hrs, mins, secs, frs
@classmethod
def parse_timecode(cls, timecode):
"""parses timecode string frames '00:00:00:00' or '00:00:00;00' or
milliseconds '00:00:00:000'
"""
bfr = timecode.replace(';', ':').replace('.', ':').split(':')
hrs = int(bfr[0])
mins = int(bfr[1])
secs = int(bfr[2])
frs = int(bfr[3])
return hrs, mins, secs, frs
    def __iter__(self):
        # The instance is its own iterator; ``next`` advances one frame.
        # NOTE(review): Python 3 iteration requires __next__; this ``next``
        # only satisfies Python 2's iterator protocol.
        return self

    def next(self):
        # Advance one frame and return self (fluent / py2-iterator style).
        self.add_frames(1)
        return self

    def back(self):
        # Step one frame backwards; returns self for chaining.
        self.sub_frames(1)
        return self

    def add_frames(self, frames):
        """adds or subtracts frames number of frames
        """
        self.frames += frames

    def sub_frames(self, frames):
        """adds or subtracts frames number of frames
        """
        self.add_frames(-frames)

    def mult_frames(self, frames):
        """multiply frames
        """
        self.frames *= frames

    def div_frames(self, frames):
        """divide the frame count by ``frames`` in place
        """
        # NOTE(review): under Python 3 ``/`` is true division, leaving a
        # float frame count -- confirm whether ``//`` was intended.
        self.frames = self.frames / frames
    def __eq__(self, other):
        """the overridden equality operator

        Accepts another Timecode (compares framerate and frames), a timecode
        string (parsed at this instance's framerate), or an int frame count.
        """
        # NOTE(review): falls through to an implicit None (falsy) for other
        # types instead of returning NotImplemented, and no __hash__ is
        # defined alongside __eq__ (instances become unhashable on Python 3).
        if isinstance(other, Timecode):
            return self._framerate == other._framerate and \
                self.frames == other.frames
        elif isinstance(other, str):
            new_tc = Timecode(self._framerate, other)
            return self.__eq__(new_tc)
        elif isinstance(other, int):
            return self.frames == other

    def __add__(self, other):
        """returns new Timecode instance with the given timecode or frames
        added to this one
        """
        # duplicate current one
        tc = Timecode(self._framerate, frames=self.frames)
        if isinstance(other, Timecode):
            tc.add_frames(other.frames)
        elif isinstance(other, int):
            tc.add_frames(other)
        else:
            # TimecodeError is defined elsewhere in this module.
            raise TimecodeError(
                'Type %s not supported for arithmetic.' %
                other.__class__.__name__
            )
        return tc

    def __sub__(self, other):
        """returns new Timecode object with the other's frames subtracted"""
        if isinstance(other, Timecode):
            subtracted_frames = self.frames - other.frames
        elif isinstance(other, int):
            subtracted_frames = self.frames - other
        else:
            raise TimecodeError(
                'Type %s not supported for arithmetic.' %
                other.__class__.__name__
            )
        return Timecode(self._framerate, start_timecode=None,
                        frames=subtracted_frames)

    def __mul__(self, other):
        """returns new Timecode object with the frame counts multiplied"""
        if isinstance(other, Timecode):
            multiplied_frames = self.frames * other.frames
        elif isinstance(other, int):
            multiplied_frames = self.frames * other
        else:
            raise TimecodeError(
                'Type %s not supported for arithmetic.' %
                other.__class__.__name__
            )
        return Timecode(self._framerate, start_timecode=None,
                        frames=multiplied_frames)

    def __div__(self, other):
        """returns new Timecode object with the frame counts divided"""
        # NOTE(review): Python 3 dispatches ``/`` to __truediv__; this
        # __div__ hook is only invoked under Python 2.
        if isinstance(other, Timecode):
            div_frames = self.frames / other.frames
        elif isinstance(other, int):
            div_frames = self.frames / other
        else:
            raise TimecodeError(
                'Type %s not supported for arithmetic.' %
                other.__class__.__name__
            )
        return Timecode(self._framerate, start_timecode=None,
                        frames=div_frames)

    def __repr__(self):
        # Render the current frame count as HH:MM:SS:FF.
        return "%02d:%02d:%02d:%02d" % \
            self.frames_to_tc(self.frames)
@property
def hrs(self):
hrs, mins, secs, frs = self.frames_to_tc(self.frames)
return hrs
@property
def mins(self):
hrs, mins, secs, frs = self.frames_to_tc(self.frames)
return mins
@property
def secs(self):
hrs, mins, secs, frs = self.frames_to_tc(self.frames)
return secs
@property
def frs(self):
hrs, mins, secs, frs = self.frames_to_tc(self.frames)
return frs
@property
def frame_number(self):
"""returns the 0 based frame number of the current timecode instance
"""
return self.frames - 1
|
antiboredom/videogrep | videogrep/timecode.py | Timecode.tc_to_frames | python | def tc_to_frames(self, timecode):
hours, minutes, seconds, frames = map(int, timecode.split(':'))
ffps = float(self._framerate)
if self.drop_frame:
# Number of drop frames is 6% of framerate rounded to nearest
# integer
drop_frames = int(round(ffps * .066666))
else:
drop_frames = 0
# We don't need the exact framerate anymore, we just need it rounded to
# nearest integer
ifps = self._int_framerate
# Number of frames per hour (non-drop)
hour_frames = ifps * 60 * 60
# Number of frames per minute (non-drop)
minute_frames = ifps * 60
# Total number of minutes
total_minutes = (60 * hours) + minutes
frame_number = \
((hour_frames * hours) + (minute_frames * minutes) +
(ifps * seconds) + frames) - \
(drop_frames * (total_minutes - (total_minutes // 10)))
frames = frame_number + 1
return frames | Converts the given timecode to frames | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/timecode.py#L114-L148 | null | class Timecode(object):
def __init__(self, framerate, start_timecode=None, start_seconds=None,
frames=None):
"""The main timecode class.
Does all the calculation over frames, so the main data it holds is
frames, then when required it converts the frames to a timecode by
using the frame rate setting.
:param str framerate: The frame rate of the Timecode instance. It
should be one of ['23.98', '24', '25', '29.97', '30', '50', '59.94',
'60', 'ms'] where "ms" equals to 1000 fps. Can not be skipped.
Setting the framerate will automatically set the :attr:`.drop_frame`
attribute to correct value.
:param start_timecode: The start timecode. Use this to be able to
set the timecode of this Timecode instance. It can be skipped and
then the frames attribute will define the timecode, and if it is also
skipped then the start_second attribute will define the start
timecode, and if start_seconds is also skipped then the default value
of '00:00:00:00' will be used.
:type start_timecode: str or None
:param start_seconds: A float or integer value showing the seconds.
:param int frames: Timecode objects can be initialized with an
integer number showing the total frames.
"""
self.drop_frame = False
self._int_framerate = None
self._framerate = None
self.framerate = framerate
self.frames = None
# attribute override order
# start_timecode > frames > start_seconds
if start_timecode:
self.frames = self.tc_to_frames(start_timecode)
else:
if frames is not None: # because 0==False, and frames can be 0
self.frames = frames
elif start_seconds is not None:
self.frames = self.float_to_tc(start_seconds)
else:
# use default value of 00:00:00:00
self.frames = self.tc_to_frames('00:00:00:00')
@property
def framerate(self):
"""getter for _framerate attribute
"""
return self._framerate
@framerate.setter
def framerate(self, framerate):
"""setter for the framerate attribute
:param framerate:
:return:
"""
# set the int_frame_rate
if framerate == '29.97':
self._int_framerate = 30
self.drop_frame = True
elif framerate == '59.94':
self._int_framerate = 60
self.drop_frame = True
elif framerate == '23.98':
self._int_framerate = 24
elif framerate == 'ms':
self._int_framerate = 1000
framerate = 1000
elif framerate == 'frames':
self._int_framerate = 1
else:
self._int_framerate = int(framerate)
self._framerate = framerate
def set_timecode(self, timecode):
"""Sets the frames by using the given timecode
"""
self.frames = self.tc_to_frames(timecode)
def float_to_tc(self, seconds):
"""set the frames by using the given seconds
"""
return int(seconds * self._int_framerate)
def frames_to_tc(self, frames):
"""Converts frames back to timecode
:returns str: the string representation of the current time code
"""
if frames == 0:
return 0, 0, 0, 0
ffps = float(self._framerate)
if self.drop_frame:
# Number of frames to drop on the minute marks is the nearest
# integer to 6% of the framerate
drop_frames = int(round(ffps * .066666))
else:
drop_frames = 0
# Number of frames in an hour
frames_per_hour = int(round(ffps * 60 * 60))
# Number of frames in a day - timecode rolls over after 24 hours
frames_per_24_hours = frames_per_hour * 24
# Number of frames per ten minutes
frames_per_10_minutes = int(round(ffps * 60 * 10))
# Number of frames per minute is the round of the framerate * 60 minus
# the number of dropped frames
frames_per_minute = int(round(ffps)*60) - drop_frames
frame_number = frames - 1
if frame_number < 0:
# Negative time. Add 24 hours.
frame_number += frames_per_24_hours
# If frame_number is greater than 24 hrs, next operation will rollover
# clock
frame_number %= frames_per_24_hours
if self.drop_frame:
d = frame_number // frames_per_10_minutes
m = frame_number % frames_per_10_minutes
if m > drop_frames:
frame_number += (drop_frames * 9 * d) + \
drop_frames * ((m - drop_frames) // frames_per_minute)
else:
frame_number += drop_frames * 9 * d
ifps = self._int_framerate
frs = frame_number % ifps
secs = (frame_number // ifps) % 60
mins = ((frame_number // ifps) // 60) % 60
hrs = (((frame_number // ifps) // 60) // 60)
return hrs, mins, secs, frs
@classmethod
def parse_timecode(cls, timecode):
"""parses timecode string frames '00:00:00:00' or '00:00:00;00' or
milliseconds '00:00:00:000'
"""
bfr = timecode.replace(';', ':').replace('.', ':').split(':')
hrs = int(bfr[0])
mins = int(bfr[1])
secs = int(bfr[2])
frs = int(bfr[3])
return hrs, mins, secs, frs
def __iter__(self):
return self
def next(self):
self.add_frames(1)
return self
def back(self):
self.sub_frames(1)
return self
def add_frames(self, frames):
"""adds or subtracts frames number of frames
"""
self.frames += frames
def sub_frames(self, frames):
"""adds or subtracts frames number of frames
"""
self.add_frames(-frames)
def mult_frames(self, frames):
"""multiply frames
"""
self.frames *= frames
def div_frames(self, frames):
"""adds or subtracts frames number of frames"""
self.frames = self.frames / frames
def __eq__(self, other):
"""the overridden equality operator
"""
if isinstance(other, Timecode):
return self._framerate == other._framerate and \
self.frames == other.frames
elif isinstance(other, str):
new_tc = Timecode(self._framerate, other)
return self.__eq__(new_tc)
elif isinstance(other, int):
return self.frames == other
def __add__(self, other):
"""returns new Timecode instance with the given timecode or frames
added to this one
"""
# duplicate current one
tc = Timecode(self._framerate, frames=self.frames)
if isinstance(other, Timecode):
tc.add_frames(other.frames)
elif isinstance(other, int):
tc.add_frames(other)
else:
raise TimecodeError(
'Type %s not supported for arithmetic.' %
other.__class__.__name__
)
return tc
def __sub__(self, other):
"""returns new Timecode object with added timecodes"""
if isinstance(other, Timecode):
subtracted_frames = self.frames - other.frames
elif isinstance(other, int):
subtracted_frames = self.frames - other
else:
raise TimecodeError(
'Type %s not supported for arithmetic.' %
other.__class__.__name__
)
return Timecode(self._framerate, start_timecode=None,
frames=subtracted_frames)
def __mul__(self, other):
"""returns new Timecode object with added timecodes"""
if isinstance(other, Timecode):
multiplied_frames = self.frames * other.frames
elif isinstance(other, int):
multiplied_frames = self.frames * other
else:
raise TimecodeError(
'Type %s not supported for arithmetic.' %
other.__class__.__name__
)
return Timecode(self._framerate, start_timecode=None,
frames=multiplied_frames)
def __div__(self, other):
"""returns new Timecode object with added timecodes"""
if isinstance(other, Timecode):
div_frames = self.frames / other.frames
elif isinstance(other, int):
div_frames = self.frames / other
else:
raise TimecodeError(
'Type %s not supported for arithmetic.' %
other.__class__.__name__
)
return Timecode(self._framerate, start_timecode=None,
frames=div_frames)
def __repr__(self):
return "%02d:%02d:%02d:%02d" % \
self.frames_to_tc(self.frames)
@property
def hrs(self):
hrs, mins, secs, frs = self.frames_to_tc(self.frames)
return hrs
@property
def mins(self):
hrs, mins, secs, frs = self.frames_to_tc(self.frames)
return mins
@property
def secs(self):
hrs, mins, secs, frs = self.frames_to_tc(self.frames)
return secs
@property
def frs(self):
hrs, mins, secs, frs = self.frames_to_tc(self.frames)
return frs
@property
def frame_number(self):
"""returns the 0 based frame number of the current timecode instance
"""
return self.frames - 1
|
antiboredom/videogrep | videogrep/timecode.py | Timecode.frames_to_tc | python | def frames_to_tc(self, frames):
if frames == 0:
return 0, 0, 0, 0
ffps = float(self._framerate)
if self.drop_frame:
# Number of frames to drop on the minute marks is the nearest
# integer to 6% of the framerate
drop_frames = int(round(ffps * .066666))
else:
drop_frames = 0
# Number of frames in an hour
frames_per_hour = int(round(ffps * 60 * 60))
# Number of frames in a day - timecode rolls over after 24 hours
frames_per_24_hours = frames_per_hour * 24
# Number of frames per ten minutes
frames_per_10_minutes = int(round(ffps * 60 * 10))
# Number of frames per minute is the round of the framerate * 60 minus
# the number of dropped frames
frames_per_minute = int(round(ffps)*60) - drop_frames
frame_number = frames - 1
if frame_number < 0:
# Negative time. Add 24 hours.
frame_number += frames_per_24_hours
# If frame_number is greater than 24 hrs, next operation will rollover
# clock
frame_number %= frames_per_24_hours
if self.drop_frame:
d = frame_number // frames_per_10_minutes
m = frame_number % frames_per_10_minutes
if m > drop_frames:
frame_number += (drop_frames * 9 * d) + \
drop_frames * ((m - drop_frames) // frames_per_minute)
else:
frame_number += drop_frames * 9 * d
ifps = self._int_framerate
frs = frame_number % ifps
secs = (frame_number // ifps) % 60
mins = ((frame_number // ifps) // 60) % 60
hrs = (((frame_number // ifps) // 60) // 60)
return hrs, mins, secs, frs | Converts frames back to timecode
:returns str: the string representation of the current time code | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/timecode.py#L150-L203 | null | class Timecode(object):
def __init__(self, framerate, start_timecode=None, start_seconds=None,
frames=None):
"""The main timecode class.
Does all the calculation over frames, so the main data it holds is
frames, then when required it converts the frames to a timecode by
using the frame rate setting.
:param str framerate: The frame rate of the Timecode instance. It
should be one of ['23.98', '24', '25', '29.97', '30', '50', '59.94',
'60', 'ms'] where "ms" equals to 1000 fps. Can not be skipped.
Setting the framerate will automatically set the :attr:`.drop_frame`
attribute to correct value.
:param start_timecode: The start timecode. Use this to be able to
set the timecode of this Timecode instance. It can be skipped and
then the frames attribute will define the timecode, and if it is also
skipped then the start_second attribute will define the start
timecode, and if start_seconds is also skipped then the default value
of '00:00:00:00' will be used.
:type start_timecode: str or None
:param start_seconds: A float or integer value showing the seconds.
:param int frames: Timecode objects can be initialized with an
integer number showing the total frames.
"""
self.drop_frame = False
self._int_framerate = None
self._framerate = None
self.framerate = framerate
self.frames = None
# attribute override order
# start_timecode > frames > start_seconds
if start_timecode:
self.frames = self.tc_to_frames(start_timecode)
else:
if frames is not None: # because 0==False, and frames can be 0
self.frames = frames
elif start_seconds is not None:
self.frames = self.float_to_tc(start_seconds)
else:
# use default value of 00:00:00:00
self.frames = self.tc_to_frames('00:00:00:00')
@property
def framerate(self):
"""getter for _framerate attribute
"""
return self._framerate
@framerate.setter
def framerate(self, framerate):
"""setter for the framerate attribute
:param framerate:
:return:
"""
# set the int_frame_rate
if framerate == '29.97':
self._int_framerate = 30
self.drop_frame = True
elif framerate == '59.94':
self._int_framerate = 60
self.drop_frame = True
elif framerate == '23.98':
self._int_framerate = 24
elif framerate == 'ms':
self._int_framerate = 1000
framerate = 1000
elif framerate == 'frames':
self._int_framerate = 1
else:
self._int_framerate = int(framerate)
self._framerate = framerate
def set_timecode(self, timecode):
"""Sets the frames by using the given timecode
"""
self.frames = self.tc_to_frames(timecode)
def float_to_tc(self, seconds):
"""set the frames by using the given seconds
"""
return int(seconds * self._int_framerate)
def tc_to_frames(self, timecode):
"""Converts the given timecode to frames
"""
hours, minutes, seconds, frames = map(int, timecode.split(':'))
ffps = float(self._framerate)
if self.drop_frame:
# Number of drop frames is 6% of framerate rounded to nearest
# integer
drop_frames = int(round(ffps * .066666))
else:
drop_frames = 0
# We don't need the exact framerate anymore, we just need it rounded to
# nearest integer
ifps = self._int_framerate
# Number of frames per hour (non-drop)
hour_frames = ifps * 60 * 60
# Number of frames per minute (non-drop)
minute_frames = ifps * 60
# Total number of minutes
total_minutes = (60 * hours) + minutes
frame_number = \
((hour_frames * hours) + (minute_frames * minutes) +
(ifps * seconds) + frames) - \
(drop_frames * (total_minutes - (total_minutes // 10)))
frames = frame_number + 1
return frames
@classmethod
def parse_timecode(cls, timecode):
"""parses timecode string frames '00:00:00:00' or '00:00:00;00' or
milliseconds '00:00:00:000'
"""
bfr = timecode.replace(';', ':').replace('.', ':').split(':')
hrs = int(bfr[0])
mins = int(bfr[1])
secs = int(bfr[2])
frs = int(bfr[3])
return hrs, mins, secs, frs
def __iter__(self):
return self
def next(self):
self.add_frames(1)
return self
def back(self):
self.sub_frames(1)
return self
def add_frames(self, frames):
"""adds or subtracts frames number of frames
"""
self.frames += frames
def sub_frames(self, frames):
"""adds or subtracts frames number of frames
"""
self.add_frames(-frames)
def mult_frames(self, frames):
"""multiply frames
"""
self.frames *= frames
def div_frames(self, frames):
"""adds or subtracts frames number of frames"""
self.frames = self.frames / frames
def __eq__(self, other):
    """Compare against another Timecode, a timecode string, or an int.

    Two Timecodes are equal only when both framerate and frame count
    match.  A string is parsed as a Timecode at this instance's
    framerate; an int is compared against the raw frame count.
    """
    if isinstance(other, Timecode):
        return self._framerate == other._framerate and \
            self.frames == other.frames
    elif isinstance(other, str):
        new_tc = Timecode(self._framerate, other)
        return self.__eq__(new_tc)
    elif isinstance(other, int):
        return self.frames == other
    # Previously this fell through and implicitly returned None; signal
    # an unsupported operand type so Python can fall back properly.
    return NotImplemented
def __add__(self, other):
    """Return a new Timecode offset forward by ``other``.

    ``other`` may be another Timecode (its frame count is added) or a
    plain int number of frames; anything else raises TimecodeError.
    """
    # duplicate current one
    result = Timecode(self._framerate, frames=self.frames)
    if isinstance(other, Timecode):
        offset = other.frames
    elif isinstance(other, int):
        offset = other
    else:
        raise TimecodeError(
            'Type %s not supported for arithmetic.' %
            other.__class__.__name__
        )
    result.add_frames(offset)
    return result
def __sub__(self, other):
    """Return a new Timecode offset backward by ``other`` (Timecode or int)."""
    if isinstance(other, Timecode):
        delta = other.frames
    elif isinstance(other, int):
        delta = other
    else:
        raise TimecodeError(
            'Type %s not supported for arithmetic.' %
            other.__class__.__name__
        )
    return Timecode(self._framerate, start_timecode=None,
                    frames=self.frames - delta)
def __mul__(self, other):
    """Return a new Timecode whose frame count is multiplied by ``other``."""
    if isinstance(other, Timecode):
        factor = other.frames
    elif isinstance(other, int):
        factor = other
    else:
        raise TimecodeError(
            'Type %s not supported for arithmetic.' %
            other.__class__.__name__
        )
    return Timecode(self._framerate, start_timecode=None,
                    frames=self.frames * factor)
def __div__(self, other):
    """Return a new Timecode whose frame count is divided by ``other``.

    ``other`` may be a Timecode (divide by its frame count) or an int.
    NOTE(review): this uses true division under Python 3, so the
    resulting frame count can be a float -- confirm that is intended.
    """
    if isinstance(other, Timecode):
        div_frames = self.frames / other.frames
    elif isinstance(other, int):
        div_frames = self.frames / other
    else:
        raise TimecodeError(
            'Type %s not supported for arithmetic.' %
            other.__class__.__name__
        )
    return Timecode(self._framerate, start_timecode=None,
                    frames=div_frames)

# Python 3 invokes __truediv__ for the / operator; without this alias the
# division operator only worked under Python 2.
__truediv__ = __div__
def __repr__(self):
    """Render the current frame count as an 'HH:MM:SS:FF' string."""
    hrs, mins, secs, frs = self.frames_to_tc(self.frames)
    return "%02d:%02d:%02d:%02d" % (hrs, mins, secs, frs)
# Read-only views of the current timecode components.  Each property
# re-runs the full frames -> timecode conversion and discards the other
# three fields.
@property
def hrs(self):
    """Hours component of the current timecode (computed on access)."""
    hrs, mins, secs, frs = self.frames_to_tc(self.frames)
    return hrs
@property
def mins(self):
    """Minutes component of the current timecode."""
    hrs, mins, secs, frs = self.frames_to_tc(self.frames)
    return mins
@property
def secs(self):
    """Seconds component of the current timecode."""
    hrs, mins, secs, frs = self.frames_to_tc(self.frames)
    return secs
@property
def frs(self):
    """Frames component of the current timecode."""
    hrs, mins, secs, frs = self.frames_to_tc(self.frames)
    return frs
@property
def frame_number(self):
    """Return the 0-based frame number (``frames`` itself is 1-based)."""
    return self.frames - 1
|
antiboredom/videogrep | videogrep/timecode.py | Timecode.parse_timecode | python | def parse_timecode(cls, timecode):
bfr = timecode.replace(';', ':').replace('.', ':').split(':')
hrs = int(bfr[0])
mins = int(bfr[1])
secs = int(bfr[2])
frs = int(bfr[3])
return hrs, mins, secs, frs | parses timecode string frames '00:00:00:00' or '00:00:00;00' or
milliseconds '00:00:00:000' | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/timecode.py#L206-L215 | null | class Timecode(object):
    def __init__(self, framerate, start_timecode=None, start_seconds=None,
                 frames=None):
        """The main timecode class.
        Does all the calculation over frames, so the main data it holds is
        frames, then when required it converts the frames to a timecode by
        using the frame rate setting.
        :param str framerate: The frame rate of the Timecode instance. It
        should be one of ['23.98', '24', '25', '29.97', '30', '50', '59.94',
        '60', 'ms'] where "ms" equals to 1000 fps. Can not be skipped.
        Setting the framerate will automatically set the :attr:`.drop_frame`
        attribute to correct value.
        :param start_timecode: The start timecode. Use this to be able to
        set the timecode of this Timecode instance. It can be skipped and
        then the frames attribute will define the timecode, and if it is also
        skipped then the start_seconds attribute will define the start
        timecode, and if start_seconds is also skipped then the default value
        of '00:00:00:00' will be used.
        :type start_timecode: str or None
        :param start_seconds: A float or integer value showing the seconds.
        :param int frames: Timecode objects can be initialized with an
        integer number showing the total frames.
        """
        self.drop_frame = False
        self._int_framerate = None
        self._framerate = None
        # Property setter: also fills in _int_framerate and drop_frame.
        self.framerate = framerate
        self.frames = None
        # attribute override order
        # start_timecode > frames > start_seconds
        if start_timecode:
            self.frames = self.tc_to_frames(start_timecode)
        else:
            if frames is not None:  # because 0==False, and frames can be 0
                self.frames = frames
            elif start_seconds is not None:
                self.frames = self.float_to_tc(start_seconds)
            else:
                # use default value of 00:00:00:00
                self.frames = self.tc_to_frames('00:00:00:00')
    @property
    def framerate(self):
        """The framerate value this instance was configured with."""
        return self._framerate
    @framerate.setter
    def framerate(self, framerate):
        """Set the framerate and derive the integer framerate.

        '29.97' and '59.94' additionally enable SMPTE drop-frame
        counting; 'ms' is stored as the int 1000; 'frames' means one
        frame per "second".  Anything else must be int()-convertible.
        """
        # set the int_frame_rate
        if framerate == '29.97':
            self._int_framerate = 30
            self.drop_frame = True
        elif framerate == '59.94':
            self._int_framerate = 60
            self.drop_frame = True
        elif framerate == '23.98':
            self._int_framerate = 24
        elif framerate == 'ms':
            self._int_framerate = 1000
            # Stored numerically so float(self._framerate) works later.
            framerate = 1000
        elif framerate == 'frames':
            self._int_framerate = 1
        else:
            self._int_framerate = int(framerate)
        self._framerate = framerate
def set_timecode(self, timecode):
"""Sets the frames by using the given timecode
"""
self.frames = self.tc_to_frames(timecode)
def float_to_tc(self, seconds):
"""set the frames by using the given seconds
"""
return int(seconds * self._int_framerate)
def tc_to_frames(self, timecode):
"""Converts the given timecode to frames
"""
hours, minutes, seconds, frames = map(int, timecode.split(':'))
ffps = float(self._framerate)
if self.drop_frame:
# Number of drop frames is 6% of framerate rounded to nearest
# integer
drop_frames = int(round(ffps * .066666))
else:
drop_frames = 0
# We don't need the exact framerate anymore, we just need it rounded to
# nearest integer
ifps = self._int_framerate
# Number of frames per hour (non-drop)
hour_frames = ifps * 60 * 60
# Number of frames per minute (non-drop)
minute_frames = ifps * 60
# Total number of minutes
total_minutes = (60 * hours) + minutes
frame_number = \
((hour_frames * hours) + (minute_frames * minutes) +
(ifps * seconds) + frames) - \
(drop_frames * (total_minutes - (total_minutes // 10)))
frames = frame_number + 1
return frames
    def frames_to_tc(self, frames):
        """Convert a 1-based frame count back into (hrs, mins, secs, frs).

        Returns a 4-tuple of ints, not a string (``__repr__`` does the
        string formatting).  ``frames == 0`` yields (0, 0, 0, 0);
        negative values and values past 24 hours wrap around the clock.
        With ``drop_frame`` set, the skipped frame numbers are
        re-inserted per the SMPTE drop-frame rules.
        """
        if frames == 0:
            return 0, 0, 0, 0
        ffps = float(self._framerate)
        if self.drop_frame:
            # Number of frames to drop on the minute marks is the nearest
            # integer to 6% of the framerate
            drop_frames = int(round(ffps * .066666))
        else:
            drop_frames = 0
        # Number of frames in an hour
        frames_per_hour = int(round(ffps * 60 * 60))
        # Number of frames in a day - timecode rolls over after 24 hours
        frames_per_24_hours = frames_per_hour * 24
        # Number of frames per ten minutes
        frames_per_10_minutes = int(round(ffps * 60 * 10))
        # Number of frames per minute is the round of the framerate * 60 minus
        # the number of dropped frames
        frames_per_minute = int(round(ffps)*60) - drop_frames
        # Work with the 0-based frame number from here on.
        frame_number = frames - 1
        if frame_number < 0:
            # Negative time. Add 24 hours.
            frame_number += frames_per_24_hours
        # If frame_number is greater than 24 hrs, next operation will rollover
        # clock
        frame_number %= frames_per_24_hours
        if self.drop_frame:
            # d = whole ten-minute chunks, m = remainder within the chunk.
            d = frame_number // frames_per_10_minutes
            m = frame_number % frames_per_10_minutes
            if m > drop_frames:
                frame_number += (drop_frames * 9 * d) + \
                    drop_frames * ((m - drop_frames) // frames_per_minute)
            else:
                frame_number += drop_frames * 9 * d
        ifps = self._int_framerate
        frs = frame_number % ifps
        secs = (frame_number // ifps) % 60
        mins = ((frame_number // ifps) // 60) % 60
        hrs = (((frame_number // ifps) // 60) // 60)
        return hrs, mins, secs, frs
@classmethod
def __iter__(self):
return self
def next(self):
self.add_frames(1)
return self
def back(self):
self.sub_frames(1)
return self
def add_frames(self, frames):
"""adds or subtracts frames number of frames
"""
self.frames += frames
def sub_frames(self, frames):
"""adds or subtracts frames number of frames
"""
self.add_frames(-frames)
def mult_frames(self, frames):
"""multiply frames
"""
self.frames *= frames
def div_frames(self, frames):
"""adds or subtracts frames number of frames"""
self.frames = self.frames / frames
def __eq__(self, other):
"""the overridden equality operator
"""
if isinstance(other, Timecode):
return self._framerate == other._framerate and \
self.frames == other.frames
elif isinstance(other, str):
new_tc = Timecode(self._framerate, other)
return self.__eq__(new_tc)
elif isinstance(other, int):
return self.frames == other
def __add__(self, other):
"""returns new Timecode instance with the given timecode or frames
added to this one
"""
# duplicate current one
tc = Timecode(self._framerate, frames=self.frames)
if isinstance(other, Timecode):
tc.add_frames(other.frames)
elif isinstance(other, int):
tc.add_frames(other)
else:
raise TimecodeError(
'Type %s not supported for arithmetic.' %
other.__class__.__name__
)
return tc
def __sub__(self, other):
"""returns new Timecode object with added timecodes"""
if isinstance(other, Timecode):
subtracted_frames = self.frames - other.frames
elif isinstance(other, int):
subtracted_frames = self.frames - other
else:
raise TimecodeError(
'Type %s not supported for arithmetic.' %
other.__class__.__name__
)
return Timecode(self._framerate, start_timecode=None,
frames=subtracted_frames)
def __mul__(self, other):
"""returns new Timecode object with added timecodes"""
if isinstance(other, Timecode):
multiplied_frames = self.frames * other.frames
elif isinstance(other, int):
multiplied_frames = self.frames * other
else:
raise TimecodeError(
'Type %s not supported for arithmetic.' %
other.__class__.__name__
)
return Timecode(self._framerate, start_timecode=None,
frames=multiplied_frames)
def __div__(self, other):
"""returns new Timecode object with added timecodes"""
if isinstance(other, Timecode):
div_frames = self.frames / other.frames
elif isinstance(other, int):
div_frames = self.frames / other
else:
raise TimecodeError(
'Type %s not supported for arithmetic.' %
other.__class__.__name__
)
return Timecode(self._framerate, start_timecode=None,
frames=div_frames)
def __repr__(self):
return "%02d:%02d:%02d:%02d" % \
self.frames_to_tc(self.frames)
@property
def hrs(self):
hrs, mins, secs, frs = self.frames_to_tc(self.frames)
return hrs
@property
def mins(self):
hrs, mins, secs, frs = self.frames_to_tc(self.frames)
return mins
@property
def secs(self):
hrs, mins, secs, frs = self.frames_to_tc(self.frames)
return secs
@property
def frs(self):
hrs, mins, secs, frs = self.frames_to_tc(self.frames)
return frs
@property
def frame_number(self):
"""returns the 0 based frame number of the current timecode instance
"""
return self.frames - 1
|
antiboredom/videogrep | videogrep/videogrep.py | get_ngrams | python | def get_ngrams(inputfile, n=1, use_transcript=False, use_vtt=False):
'''
Get ngrams from a text
Sourced from:
https://gist.github.com/dannguyen/93c2c43f4e65328b85af
'''
words = []
if use_transcript:
for s in audiogrep.convert_timestamps(inputfile):
for w in s['words']:
words.append(w[0])
elif use_vtt:
vtts = get_vtt_files(inputfile)
for vtt in vtts:
with open(vtt['vtt'], 'r') as infile:
sentences = parse_auto_sub(infile.read())
for s in sentences:
for w in s['words']:
words.append(w['word'])
else:
text = ''
srts = get_subtitle_files(inputfile)
for srt in srts:
lines = clean_srt(srt)
if lines:
for timespan in lines.keys():
line = lines[timespan].strip()
text += line + ' '
words = re.split(r'[.?!,:\"]+\s*|\s+', text)
ngrams = zip(*[words[i:] for i in range(n)])
return ngrams | Get ngrams from a text
Sourced from:
https://gist.github.com/dannguyen/93c2c43f4e65328b85af | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/videogrep.py#L33-L66 | [
"def clean_srt(srt):\n \"\"\"Remove damaging line breaks and numbers from srt files and return a\n dictionary.\n \"\"\"\n with open(srt, 'r') as f:\n text = f.read()\n text = re.sub(r'^\\d+[\\n\\r]', '', text, flags=re.MULTILINE)\n lines = text.splitlines()\n output = OrderedDict()\n ... | from __future__ import print_function
import os
import re
import random
import gc
import subprocess
from glob import glob
from collections import OrderedDict
from moviepy.editor import VideoFileClip, concatenate
import audiogrep
from .vtt import parse_auto_sub
from .timecode import Timecode
from . import searcher
usable_extensions = ['mp4', 'avi', 'mov', 'mkv', 'm4v']
BATCH_SIZE = 20
def get_fps(filename):
    """Return the frame rate of a video as an int, scraped from ffmpeg.

    Runs ``ffmpeg -i`` and extracts the first '<n> fps' field from its
    combined output.  Falls back to 25 when the output cannot be parsed.
    """
    process = subprocess.Popen(['ffmpeg', '-i', filename],
                               stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # communicate() avoids the wait()+read() pipe-deadlock pattern.
    output, _ = process.communicate()
    # ffmpeg output arrives as bytes under Python 3; the regex needs text.
    # (Matching a str pattern against bytes raised TypeError, which the
    # old bare except silently turned into a constant 25.)
    if isinstance(output, bytes):
        output = output.decode('utf-8', errors='replace')
    fps = re.findall(r'\d+ fps', output, flags=re.MULTILINE)
    try:
        return int(fps[0].split(' ')[0])
    except (IndexError, ValueError):
        return 25
def make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=25):
    """Format a single CMX3600-style EDL event for one clip.

    ``time_in``/``time_out`` are source times, ``rec_in``/``rec_out``
    record (timeline) times, all in seconds; each is rendered as a
    timecode at ``fps`` via Timecode's __repr__.  Returns the event
    string including trailing blank line.
    """
    # EDL reel names are length-limited, so truncate to 7 characters.
    reel = full_name
    if len(full_name) > 7:
        reel = full_name[0:7]
    template = '{} {} AA/V C {} {} {} {}\n* FROM CLIP NAME: {}\n* COMMENT: \n FINAL CUT PRO REEL: {} REPLACED BY: {}\n\n'
    out = template.format(
        n,
        full_name,
        Timecode(fps, start_seconds=time_in),
        Timecode(fps, start_seconds=time_out),
        Timecode(fps, start_seconds=rec_in),
        Timecode(fps, start_seconds=rec_out),
        filename,
        full_name,
        reel
    )
    return out
def make_edl(timestamps, name):
    """Write an EDL file called ``name`` covering ``timestamps`` in order.

    Each entry needs 'file', 'start' and 'end' keys; the framerate of
    each source file is probed once via ``get_fps`` and cached.
    """
    fps_cache = {}
    body = "TITLE: {}\nFCM: NON-DROP FRAME\n\n".format(name)
    record_in = 0
    for idx, ts in enumerate(timestamps):
        source = ts['file']
        if source not in fps_cache:
            fps_cache[source] = get_fps(source)
        event = str(idx + 1).zfill(4)
        clip_start = ts['start']
        clip_end = ts['end']
        # Record times advance by each clip's duration.
        record_out = record_in + (clip_end - clip_start)
        body += make_edl_segment(event, clip_start, clip_end,
                                 record_in, record_out,
                                 'reel_{}'.format(event), source,
                                 fps=fps_cache[source])
        record_in = record_out
    with open(name, 'w') as outfile:
        outfile.write(body)
def create_timestamps(inputfiles):
    """Transcribe ``inputfiles`` via audiogrep (pocketsphinx).

    Converts each input to wav first, then transcribes; works purely by
    side effect (transcription files written next to the sources).
    """
    files = audiogrep.convert_to_wav(inputfiles)
    audiogrep.transcribe(files)
def convert_timespan(timespan):
    """Convert an srt timespan ('start --> end') into (start, end) seconds."""
    start_raw, end_raw = timespan.split('-->')
    return convert_timestamp(start_raw), convert_timestamp(end_raw)
def convert_timestamp(timestamp):
    """Convert an srt timestamp ('HH:MM:SS,mmm') into float seconds."""
    hms, millis = timestamp.strip().split(',')
    hours, minutes, seconds = (int(part) for part in hms.split(':'))
    return hours * 60 * 60 + minutes * 60 + seconds + float(millis) / 1000
def clean_srt(srt):
    """Parse an srt file into an OrderedDict of {timespan: caption text}.

    Sequence-number lines are stripped first.  Each line containing
    '-->' becomes a key; subsequent lines are appended to its value
    with a trailing space each (so values keep trailing whitespace).
    """
    with open(srt, 'r') as f:
        raw = f.read()
    # Drop the numeric index line that precedes each caption.
    raw = re.sub(r'^\d+[\n\r]', '', raw, flags=re.MULTILINE)
    captions = OrderedDict()
    current = ''
    for line in raw.splitlines():
        line = line.strip()
        if '-->' in line:
            current = line
            captions[current] = ''
        elif current != '':
            captions[current] += line + ' '
    return captions
def cleanup_log_files(outputfile):
    """Remove moviepy/ffmpeg '*.ogg.log' temp files beside ``outputfile``.

    os.listdir returns bare basenames, so each one must be re-joined
    with the directory before removal -- the original passed the bare
    name to os.remove, which targeted the current working directory
    instead of the output directory.
    """
    d = os.path.dirname(os.path.abspath(outputfile))
    logfiles = [f for f in os.listdir(d) if f.endswith('ogg.log')]
    for f in logfiles:
        os.remove(os.path.join(d, f))
def demo_supercut(composition, padding):
    """Print each clip's start/end times and caption without cutting video.

    When consecutive clips come from the same file and overlap, the
    later clip's printed start time is shifted forward by ``padding``.
    """
    previous = None
    for clip in composition:
        start = clip['start']
        end = clip['end']
        if previous is not None and previous['file'] == clip['file'] \
                and start < previous['end']:
            start = start + padding
        print("{1} to {2}:\t{0}".format(clip['line'], start, end))
        previous = clip
def create_supercut(composition, outputfile, padding):
    """Concatenate video clips together and output finished video file to the
    output directory.

    ``composition`` is a list of dicts with 'file', 'start', 'end' and
    'line' keys.  NOTE: the dicts are mutated in place (overlapping
    consecutive clips from the same file get ``padding`` added to their
    start time).
    """
    print("[+] Creating clips.")
    demo_supercut(composition, padding)
    # add padding when necessary
    for (clip, nextclip) in zip(composition, composition[1:]):
        if ((nextclip['file'] == clip['file']) and (nextclip['start'] < clip['end'])):
            nextclip['start'] += padding
    # put all clips together:
    # Open each source video once, then cut all subclips from the cache.
    all_filenames = set([c['file'] for c in composition])
    videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
    cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
    print("[+] Concatenating clips.")
    final_clip = concatenate(cut_clips)
    print("[+] Writing ouput file.")
    final_clip.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def create_supercut_in_batches(composition, outputfile, padding):
    """Create & concatenate video clips in groups of size BATCH_SIZE and output
    finished video file to output directory.

    Each batch is rendered to a temporary '<outputfile>.tmp<i>.mp4';
    the partial files are then concatenated into ``outputfile`` and
    deleted.  A batch that fails to render is reported and skipped
    rather than aborting the whole job.
    """
    total_clips = len(composition)
    batch_comp = []
    # range() replaces the hand-rolled while loop that incremented the
    # indices in both the try and except branches (and contained a
    # no-op bare ``next`` statement).
    for start_index in range(0, total_clips, BATCH_SIZE):
        end_index = start_index + BATCH_SIZE
        filename = outputfile + '.tmp' + str(start_index) + '.mp4'
        try:
            create_supercut(composition[start_index:end_index], filename, padding)
            batch_comp.append(filename)
            gc.collect()
        except Exception as e:
            # The original bare ``except:`` silently swallowed every
            # error, including KeyboardInterrupt; report and move on.
            print("[!] Skipping failed batch at clip {}: {}".format(start_index, e))
    clips = [VideoFileClip(filename) for filename in batch_comp]
    video = concatenate(clips)
    video.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
    # remove partial video files
    for filename in batch_comp:
        os.remove(filename)
    cleanup_log_files(outputfile)
def split_clips(composition, outputfile):
    """Write each clip in ``composition`` to its own numbered video file.

    Output names are derived from ``outputfile``:
    '<basename>_00000<ext>', '<basename>_00001<ext>', ...
    """
    # Open each source video once, then cut all subclips from the cache.
    all_filenames = set([c['file'] for c in composition])
    videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
    cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
    basename, ext = os.path.splitext(outputfile)
    print("[+] Writing ouput files.")
    for i, clip in enumerate(cut_clips):
        clipfilename = basename + '_' + str(i).zfill(5) + ext
        clip.to_videofile(clipfilename, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def search_line(line, search, searchtype):
    """Return a truthy match when ``search`` is found in ``line``, else falsy.

    're' and 'word' both do a plain regex search (NOTE(review): 'word'
    adds no word boundaries -- confirm whether whole-word matching was
    intended); 'pos' and 'hyper' defer to the searcher module.
    Implicitly returns None for any other searchtype.
    """
    if searchtype == 're' or searchtype == 'word':
        return re.search(search, line) #, re.IGNORECASE)
    elif searchtype == 'pos':
        return searcher.search_out(line, search)
    elif searchtype == 'hyper':
        return searcher.hypernym_search(line, search)
def get_subtitle_files(inputfile):
    """Return the .srt paths matching each input video, or False if none.

    Each video path has its final extension swapped for 'srt'; only
    files that actually exist on disk are returned.
    """
    srts = []
    for path in inputfile:
        parts = path.split('.')
        parts[-1] = 'srt'
        candidate = '.'.join(parts)
        if os.path.isfile(candidate):
            srts.append(candidate)
    if not srts:
        print("[!] No subtitle files were found.")
        return False
    return srts
def get_vtt_files(inputfile):
    """Return [{'vtt': subtitle_path, 'video': video_path}, ...] or False.

    For each input video, the first '<basename>*.vtt' glob match is
    paired with it; videos without a vtt are skipped.  Returns False
    (after printing a warning) when no vtt files were found at all.
    """
    vtts = []
    for video in inputfile:
        base = '.'.join(video.split('.')[0:-1])
        matches = glob(base + '*.vtt')
        if matches:
            vtts.append({'vtt': matches[0], 'video': video})
    if not vtts:
        print("[!] No vtt files were found.")
        return False
    return vtts
def compose_from_srts(srts, search, searchtype):
    """Takes a list of subtitle (srt) filenames, search term and search type
    and, returns a list of timestamps for composing a supercut.

    Each hit becomes a dict with 'file', 'time', 'start', 'end' and
    'line' keys.  NOTE(review): ``foundSearchTerm`` is set once and
    never reset, so the "not found" warning stops printing for later
    files after the first hit anywhere -- confirm if per-file warnings
    were intended.
    """
    composition = []
    foundSearchTerm = False
    # Iterate over each subtitles file.
    for srt in srts:
        print(srt)
        lines = clean_srt(srt)
        videofile = ""
        foundVideoFile = False
        print("[+] Searching for video file corresponding to '" + srt + "'.")
        # Try every supported container extension next to the srt.
        for ext in usable_extensions:
            tempVideoFile = srt.replace('.srt', '.' + ext)
            if os.path.isfile(tempVideoFile):
                videofile = tempVideoFile
                foundVideoFile = True
                print("[+] Found '" + tempVideoFile + "'.")
        # If a correspndong video file was found for this subtitles file...
        if foundVideoFile:
            # Check that the subtitles file contains subtitles.
            if lines:
                # Iterate over each line in the current subtitles file.
                for timespan in lines.keys():
                    line = lines[timespan].strip()
                    # If this line contains the search term
                    if search_line(line, search, searchtype):
                        foundSearchTerm = True
                        # Extract the timespan for this subtitle.
                        start, end = convert_timespan(timespan)
                        # Record this occurance of the search term.
                        composition.append({'file': videofile, 'time': timespan, 'start': start, 'end': end, 'line': line})
                # If the search was unsuccessful.
                if foundSearchTerm is False:
                    print("[!] Search term '" + search + "'" + " was not found is subtitle file '" + srt + "'.")
            # If no subtitles were found in the current file.
            else:
                print("[!] Subtitle file '" + srt + "' is empty.")
        # If no video file was found...
        else:
            print("[!] No video file was found which corresponds to subtitle file '" + srt + "'.")
            print("[!] The following video formats are currently supported:")
            extList = ""
            for ext in usable_extensions:
                extList += ext + ", "
            print(extList)
    return composition
def compose_from_transcript(files, search, searchtype):
    """Takes transcripts created by audiogrep/pocketsphinx, a search and search type
    and returns a list of timestamps for creating a supercut.

    Sentence-level types ('re', 'word', 'franken', 'fragment') are
    handed to audiogrep.search ('re' is mapped to its 'sentence' mode);
    word-level types ('hyper', 'pos') scan individual transcript words.
    Each segment dict gets 'file', 'line', 'start' and 'end' keys.
    """
    final_segments = []
    if searchtype in ['re', 'word', 'franken', 'fragment']:
        if searchtype == 're':
            searchtype = 'sentence'
        segments = audiogrep.search(search, files, mode=searchtype, regex=True)
        for seg in segments:
            # Map the transcription filename back to the media filename.
            seg['file'] = seg['file'].replace('.transcription.txt', '')
            seg['line'] = seg['words']
            final_segments.append(seg)
    elif searchtype in ['hyper', 'pos']:
        for s in audiogrep.convert_timestamps(files):
            # w is assumed to be (word, start, end) -- see float(w[1])/w[2].
            for w in s['words']:
                if search_line(w[0], search, searchtype):
                    seg = {
                        'file': s['file'].replace('.transcription.txt',''),
                        'line': w[0],
                        'start': float(w[1]),
                        'end': float(w[2])
                    }
                    final_segments.append(seg)
    return final_segments
def compose_from_vtt(files, search, searchtype):
    """Search vtt auto-subtitles and return supercut segments.

    ``files`` is a list of {'vtt': path, 'video': path} dicts (as built
    by get_vtt_files).  Word-level search types ('word', 'hyper',
    'pos') match individual words; any other type matches whole
    sentences.  Each hit becomes a dict with 'file', 'line', 'start'
    and 'end' keys.
    """
    final_segments = []
    for f in files:
        video = f['video']
        with open(f['vtt'], 'r') as infile:
            sentences = parse_auto_sub(infile.read())
        for sentence in sentences:
            if searchtype in ['word', 'hyper', 'pos']:
                for word in sentence['words']:
                    if search_line(word['word'], search, searchtype):
                        seg = {
                            'file': video,
                            'line': word['word'],
                            'start': word['start'],
                            'end': word['end']
                        }
                        final_segments.append(seg)
            else:
                if search_line(sentence['text'], search, searchtype):
                    seg = {
                        'file': video,
                        'line': sentence['text'],
                        'start': sentence['start'],
                        'end': sentence['end']
                    }
                    final_segments.append(seg)
    return final_segments
def videogrep(inputfile, outputfile, search, searchtype, maxclips=0, padding=0, test=False, randomize=False, sync=0, use_transcript=False, use_vtt=False, export_clips=False):
    """Search through and find all instances of the search term in an srt or transcript,
    create a supercut around that instance, and output a new video file
    comprised of those supercuts.

    ``padding`` and ``sync`` arrive in milliseconds and are converted
    to seconds here.  Output format depends on ``outputfile``'s
    extension ('.edl' writes an edit decision list) and on
    ``export_clips`` (individual files); otherwise a single supercut is
    rendered, in batches when there are more than BATCH_SIZE clips.
    """
    padding = padding / 1000.0
    sync = sync / 1000.0
    composition = []
    # NOTE(review): foundSearchTerm is assigned but never read here.
    foundSearchTerm = False
    if use_transcript:
        composition = compose_from_transcript(inputfile, search, searchtype)
    elif use_vtt:
        vtts = get_vtt_files(inputfile)
        composition = compose_from_vtt(vtts, search, searchtype)
    else:
        srts = get_subtitle_files(inputfile)
        composition = compose_from_srts(srts, search, searchtype)
    # If the search term was not found in any subtitle file...
    if len(composition) == 0:
        print("[!] Search term '" + search + "'" + " was not found in any file.")
        # NOTE(review): relies on the site builtin exit(); sys.exit(1)
        # would be safer if sys were imported.
        exit(1)
    else:
        print("[+] Search term '" + search + "'" + " was found in " + str(len(composition)) + " places.")
    # apply padding and sync
    for c in composition:
        c['start'] = c['start'] + sync - padding
        c['end'] = c['end'] + sync + padding
    if maxclips > 0:
        composition = composition[:maxclips]
    if randomize is True:
        random.shuffle(composition)
    if test is True:
        demo_supercut(composition, padding)
    else:
        if os.path.splitext(outputfile)[1].lower() == '.edl':
            make_edl(composition, outputfile)
        elif export_clips:
            split_clips(composition, outputfile)
        else:
            if len(composition) > BATCH_SIZE:
                # NOTE(review): "[+}" below looks like a typo for "[+]".
                print("[+} Starting batch job.")
                create_supercut_in_batches(composition, outputfile, padding)
            else:
                create_supercut(composition, outputfile, padding)
def main():
    """Command-line entry point: parse arguments and dispatch.

    Three modes: --transcribe (build pocketsphinx transcripts),
    --ngrams N (print the 100 most common n-grams), or the default
    supercut generation via videogrep().  NOTE(review): the --youtube
    argument is accepted but never read anywhere in this function.
    """
    import argparse
    parser = argparse.ArgumentParser(description='Generate a "supercut" of one or more video files by searching through subtitle tracks.')
    parser.add_argument('--input', '-i', dest='inputfile', nargs='*', required=True, help='video or subtitle file, or folder')
    parser.add_argument('--search', '-s', dest='search', help='search term')
    parser.add_argument('--search-type', '-st', dest='searchtype', default='re', choices=['re', 'pos', 'hyper', 'fragment', 'franken', 'word'], help='type of search')
    parser.add_argument('--use-transcript', '-t', action='store_true', dest='use_transcript', help='Use a transcript generated by pocketsphinx instead of srt files')
    parser.add_argument('--use-vtt', '-vtt', action='store_true', dest='use_vtt', help='Use a vtt file instead of srt')
    parser.add_argument('--max-clips', '-m', dest='maxclips', type=int, default=0, help='maximum number of clips to use for the supercut')
    parser.add_argument('--output', '-o', dest='outputfile', default='supercut.mp4', help='name of output file')
    parser.add_argument('--export-clips', '-ec', dest='export_clips', action='store_true', help='Export individual clips')
    parser.add_argument('--demo', '-d', action='store_true', help='show results without making the supercut')
    parser.add_argument('--randomize', '-r', action='store_true', help='randomize the clips')
    parser.add_argument('--youtube', '-yt', help='grab clips from youtube based on your search')
    parser.add_argument('--padding', '-p', dest='padding', default=0, type=int, help='padding in milliseconds to add to the start and end of each clip')
    parser.add_argument('--resyncsubs', '-rs', dest='sync', default=0, type=int, help='Subtitle re-synch delay +/- in milliseconds')
    parser.add_argument('--transcribe', '-tr', dest='transcribe', action='store_true', help='Transcribe the video using audiogrep. Requires pocketsphinx')
    parser.add_argument('--ngrams', '-n', dest='ngrams', type=int, default=0, help='Return ngrams for videos')
    args = parser.parse_args()
    # --search is mandatory unless transcribing or listing ngrams.
    if not args.transcribe and args.ngrams == 0:
        if args.search is None:
            parser.error('argument --search/-s is required')
    if args.transcribe:
        create_timestamps(args.inputfile)
    elif args.ngrams > 0:
        from collections import Counter
        grams = get_ngrams(args.inputfile, args.ngrams, args.use_transcript, args.use_vtt)
        most_common = Counter(grams).most_common(100)
        for ngram, count in most_common:
            print(' '.join(ngram), count)
    else:
        videogrep(args.inputfile, args.outputfile, args.search, args.searchtype, args.maxclips, args.padding, args.demo, args.randomize, args.sync, args.use_transcript, args.use_vtt, args.export_clips)
if __name__ == '__main__':
    main()
|
antiboredom/videogrep | videogrep/videogrep.py | make_edl | python | def make_edl(timestamps, name):
'''Converts an array of ordered timestamps into an EDL string'''
fpses = {}
out = "TITLE: {}\nFCM: NON-DROP FRAME\n\n".format(name)
rec_in = 0
for index, timestamp in enumerate(timestamps):
if timestamp['file'] not in fpses:
fpses[timestamp['file']] = get_fps(timestamp['file'])
fps = fpses[timestamp['file']]
n = str(index + 1).zfill(4)
time_in = timestamp['start']
time_out = timestamp['end']
duration = time_out - time_in
rec_out = rec_in + duration
full_name = 'reel_{}'.format(n)
filename = timestamp['file']
out += make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=fps)
rec_in = rec_out
with open(name, 'w') as outfile:
outfile.write(out) | Converts an array of ordered timestamps into an EDL string | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/videogrep.py#L91-L123 | [
"def get_fps(filename):\n process = subprocess.Popen(['ffmpeg', '-i', filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n returncode = process.wait()\n output = process.stdout.read()\n fps = re.findall(r'\\d+ fps', output, flags=re.MULTILINE)\n try:\n return int(fps[0].split(' ')[0]... | from __future__ import print_function
import os
import re
import random
import gc
import subprocess
from glob import glob
from collections import OrderedDict
from moviepy.editor import VideoFileClip, concatenate
import audiogrep
from .vtt import parse_auto_sub
from .timecode import Timecode
from . import searcher
usable_extensions = ['mp4', 'avi', 'mov', 'mkv', 'm4v']
BATCH_SIZE = 20
def get_fps(filename):
    """Return the frame rate of a video as an int, scraped from ffmpeg.

    Runs ``ffmpeg -i`` and extracts the first '<n> fps' field from its
    combined output.  Falls back to 25 when the output cannot be parsed.
    """
    process = subprocess.Popen(['ffmpeg', '-i', filename],
                               stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # communicate() avoids the wait()+read() pipe-deadlock pattern.
    output, _ = process.communicate()
    # ffmpeg output arrives as bytes under Python 3; the regex needs text.
    # (Matching a str pattern against bytes raised TypeError, which the
    # old bare except silently turned into a constant 25.)
    if isinstance(output, bytes):
        output = output.decode('utf-8', errors='replace')
    fps = re.findall(r'\d+ fps', output, flags=re.MULTILINE)
    try:
        return int(fps[0].split(' ')[0])
    except (IndexError, ValueError):
        return 25
def get_ngrams(inputfile, n=1, use_transcript=False, use_vtt=False):
'''
Get ngrams from a text
Sourced from:
https://gist.github.com/dannguyen/93c2c43f4e65328b85af
'''
words = []
if use_transcript:
for s in audiogrep.convert_timestamps(inputfile):
for w in s['words']:
words.append(w[0])
elif use_vtt:
vtts = get_vtt_files(inputfile)
for vtt in vtts:
with open(vtt['vtt'], 'r') as infile:
sentences = parse_auto_sub(infile.read())
for s in sentences:
for w in s['words']:
words.append(w['word'])
else:
text = ''
srts = get_subtitle_files(inputfile)
for srt in srts:
lines = clean_srt(srt)
if lines:
for timespan in lines.keys():
line = lines[timespan].strip()
text += line + ' '
words = re.split(r'[.?!,:\"]+\s*|\s+', text)
ngrams = zip(*[words[i:] for i in range(n)])
return ngrams
def make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=25):
reel = full_name
if len(full_name) > 7:
reel = full_name[0:7]
template = '{} {} AA/V C {} {} {} {}\n* FROM CLIP NAME: {}\n* COMMENT: \n FINAL CUT PRO REEL: {} REPLACED BY: {}\n\n'
out = template.format(
n,
full_name,
Timecode(fps, start_seconds=time_in),
Timecode(fps, start_seconds=time_out),
Timecode(fps, start_seconds=rec_in),
Timecode(fps, start_seconds=rec_out),
filename,
full_name,
reel
)
return out
def create_timestamps(inputfiles):
files = audiogrep.convert_to_wav(inputfiles)
audiogrep.transcribe(files)
def convert_timespan(timespan):
"""Convert an srt timespan into a start and end timestamp."""
start, end = timespan.split('-->')
start = convert_timestamp(start)
end = convert_timestamp(end)
return start, end
def convert_timestamp(timestamp):
"""Convert an srt timestamp into seconds."""
timestamp = timestamp.strip()
chunk, millis = timestamp.split(',')
hours, minutes, seconds = chunk.split(':')
hours = int(hours)
minutes = int(minutes)
seconds = int(seconds)
seconds = seconds + hours * 60 * 60 + minutes * 60 + float(millis) / 1000
return seconds
def clean_srt(srt):
"""Remove damaging line breaks and numbers from srt files and return a
dictionary.
"""
with open(srt, 'r') as f:
text = f.read()
text = re.sub(r'^\d+[\n\r]', '', text, flags=re.MULTILINE)
lines = text.splitlines()
output = OrderedDict()
key = ''
for line in lines:
line = line.strip()
if line.find('-->') > -1:
key = line
output[key] = ''
else:
if key != '':
output[key] += line + ' '
return output
def cleanup_log_files(outputfile):
    """Remove moviepy/ffmpeg '*.ogg.log' temp files beside ``outputfile``.

    os.listdir returns bare basenames, so each one must be re-joined
    with the directory before removal -- the original passed the bare
    name to os.remove, which targeted the current working directory
    instead of the output directory.
    """
    d = os.path.dirname(os.path.abspath(outputfile))
    logfiles = [f for f in os.listdir(d) if f.endswith('ogg.log')]
    for f in logfiles:
        os.remove(os.path.join(d, f))
def demo_supercut(composition, padding):
"""Print out timespans to be cut followed by the line number in the srt."""
for i, c in enumerate(composition):
line = c['line']
start = c['start']
end = c['end']
if i > 0 and composition[i - 1]['file'] == c['file'] and start < composition[i - 1]['end']:
start = start + padding
print("{1} to {2}:\t{0}".format(line, start, end))
def create_supercut(composition, outputfile, padding):
    """Concatenate video clips together and output finished video file to the
    output directory.

    composition: list of dicts with 'file', 'start', 'end' (and 'line') keys,
        as produced by the compose_from_* helpers.
    padding: seconds inserted between overlapping clips from the same file.
    """
    print("[+] Creating clips.")
    demo_supercut(composition, padding)
    # add padding when necessary: push a clip's start forward when it
    # overlaps the previous clip taken from the same source file.
    for (clip, nextclip) in zip(composition, composition[1:]):
        if ((nextclip['file'] == clip['file']) and (nextclip['start'] < clip['end'])):
            nextclip['start'] += padding
    # put all clips together:
    # open each source file once and reuse the clip object for every cut.
    all_filenames = set([c['file'] for c in composition])
    videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
    cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
    print("[+] Concatenating clips.")
    final_clip = concatenate(cut_clips)
    print("[+] Writing ouput file.")
    # NOTE(review): temp_audiofile lands in the current working directory —
    # presumably acceptable for CLI use; confirm for library callers.
    final_clip.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def create_supercut_in_batches(composition, outputfile, padding):
    """Create & concatenate video clips in groups of size BATCH_SIZE and output
    finished video file to output directory.

    Each batch renders to '<outputfile>.tmpN.mp4'; the partial files are
    concatenated at the end and then deleted.  A batch that fails to render
    is skipped (best-effort), leaving a shorter final cut.
    """
    total_clips = len(composition)
    start_index = 0
    end_index = BATCH_SIZE
    batch_comp = []
    while start_index < total_clips:
        filename = outputfile + '.tmp' + str(start_index) + '.mp4'
        try:
            create_supercut(composition[start_index:end_index], filename, padding)
            batch_comp.append(filename)
            gc.collect()
        except Exception:
            # BUGFIX: was a bare `except:` (which also swallowed
            # SystemExit/KeyboardInterrupt) followed by a stray no-op
            # `next` expression.  Failed batches are still skipped.
            pass
        start_index += BATCH_SIZE
        end_index += BATCH_SIZE
    clips = [VideoFileClip(filename) for filename in batch_comp]
    video = concatenate(clips)
    video.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
    # remove partial video files
    for filename in batch_comp:
        os.remove(filename)
    cleanup_log_files(outputfile)
def split_clips(composition, outputfile):
    """Write each clip in *composition* to its own numbered video file.

    Output names are '<basename>_00000<ext>', '<basename>_00001<ext>', ...
    derived from *outputfile*.
    """
    # Open each source file once and reuse the clip object for every cut.
    all_filenames = set([c['file'] for c in composition])
    videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
    cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
    basename, ext = os.path.splitext(outputfile)
    print("[+] Writing ouput files.")
    for i, clip in enumerate(cut_clips):
        clipfilename = basename + '_' + str(i).zfill(5) + ext
        clip.to_videofile(clipfilename, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def search_line(line, search, searchtype):
    """Return a truthy match object/result if *search* occurs in *line*.

    're' and 'word' use a regular-expression search; 'pos' and 'hyper'
    delegate to the part-of-speech / hypernym searcher.  Any other
    searchtype falls through and returns None.
    """
    if searchtype in ('re', 'word'):
        return re.search(search, line)
    if searchtype == 'pos':
        return searcher.search_out(line, search)
    if searchtype == 'hyper':
        return searcher.hypernym_search(line, search)
def get_subtitle_files(inputfile):
    """Return the .srt paths that exist alongside the given video files.

    Each input filename has its extension replaced with 'srt'.  Returns
    False (after printing a warning) when no subtitle files are found.
    """
    found = []
    for path in inputfile:
        parts = path.split('.')
        parts[-1] = 'srt'
        candidate = '.'.join(parts)
        if os.path.isfile(candidate):
            found.append(candidate)
    if not found:
        print("[!] No subtitle files were found.")
        return False
    return found
def get_vtt_files(inputfile):
    """Return [{'vtt': path, 'video': path}] pairs for videos with .vtt subs.

    Matches any '<stem>*.vtt' next to each video (the first glob hit wins).
    Returns False (after printing a warning) when none are found.
    """
    pairs = []
    for video in inputfile:
        stem = '.'.join(video.split('.')[0:-1])
        matches = glob(stem + '*.vtt')
        if matches:
            pairs.append({'vtt': matches[0], 'video': video})
    if not pairs:
        print("[!] No vtt files were found.")
        return False
    return pairs
def compose_from_srts(srts, search, searchtype):
    """Takes a list of subtitle (srt) filenames, search term and search type
    and returns a list of timestamps for composing a supercut.

    Each hit is a dict: {'file', 'time', 'start', 'end', 'line'}.
    """
    composition = []
    # Iterate over each subtitles file.
    for srt in srts:
        print(srt)
        lines = clean_srt(srt)
        videofile = ""
        foundVideoFile = False
        # BUGFIX: reset the found-flag for every file so the "not found"
        # warning below refers to *this* srt, not to any earlier match.
        foundSearchTerm = False
        print("[+] Searching for video file corresponding to '" + srt + "'.")
        for ext in usable_extensions:
            tempVideoFile = srt.replace('.srt', '.' + ext)
            if os.path.isfile(tempVideoFile):
                videofile = tempVideoFile
                foundVideoFile = True
                print("[+] Found '" + tempVideoFile + "'.")
        # If a corresponding video file was found for this subtitles file...
        if foundVideoFile:
            # Check that the subtitles file contains subtitles.
            if lines:
                # Iterate over each line in the current subtitles file.
                for timespan in lines.keys():
                    line = lines[timespan].strip()
                    # If this line contains the search term
                    if search_line(line, search, searchtype):
                        foundSearchTerm = True
                        # Extract the timespan for this subtitle.
                        start, end = convert_timespan(timespan)
                        # Record this occurrence of the search term.
                        composition.append({'file': videofile, 'time': timespan, 'start': start, 'end': end, 'line': line})
                # If the search was unsuccessful in this file.
                if foundSearchTerm is False:
                    # BUGFIX: message said "found is subtitle file".
                    print("[!] Search term '" + search + "'" + " was not found in subtitle file '" + srt + "'.")
            # If no subtitles were found in the current file.
            else:
                print("[!] Subtitle file '" + srt + "' is empty.")
        # If no video file was found...
        else:
            print("[!] No video file was found which corresponds to subtitle file '" + srt + "'.")
            print("[!] The following video formats are currently supported:")
            extList = ""
            for ext in usable_extensions:
                extList += ext + ", "
            print(extList)
    return composition
def compose_from_transcript(files, search, searchtype):
    """Takes transcripts created by audiogrep/pocketsphinx, a search and search type
    and returns a list of timestamps for creating a supercut.

    Each returned segment carries 'file', 'line', 'start' and 'end' keys
    (audiogrep-produced segments may include extra keys).
    """
    final_segments = []
    # Sentence/word/franken/fragment searches are delegated to audiogrep.
    if searchtype in ['re', 'word', 'franken', 'fragment']:
        # audiogrep calls its regex-over-sentences mode 'sentence'.
        if searchtype == 're':
            searchtype = 'sentence'
        segments = audiogrep.search(search, files, mode=searchtype, regex=True)
        for seg in segments:
            # Map the transcription filename back to the source media file.
            seg['file'] = seg['file'].replace('.transcription.txt', '')
            seg['line'] = seg['words']
            final_segments.append(seg)
    elif searchtype in ['hyper', 'pos']:
        # Word-level scan: each w is indexed as (word, start, end) below.
        for s in audiogrep.convert_timestamps(files):
            for w in s['words']:
                if search_line(w[0], search, searchtype):
                    seg = {
                        'file': s['file'].replace('.transcription.txt',''),
                        'line': w[0],
                        'start': float(w[1]),
                        'end': float(w[2])
                    }
                    final_segments.append(seg)
    return final_segments
def compose_from_vtt(files, search, searchtype):
    """Build supercut segments from vtt subtitle files.

    files: list of {'vtt': path, 'video': path} pairs (see get_vtt_files).
    Word-level search types ('word', 'hyper', 'pos') cut on individual
    word timings; any other search type cuts on whole sentences.
    """
    final_segments = []
    for f in files:
        video = f['video']
        with open(f['vtt'], 'r') as infile:
            sentences = parse_auto_sub(infile.read())
        for sentence in sentences:
            if searchtype in ['word', 'hyper', 'pos']:
                # Match each word so the clip is trimmed to the word itself.
                for word in sentence['words']:
                    if search_line(word['word'], search, searchtype):
                        seg = {
                            'file': video,
                            'line': word['word'],
                            'start': word['start'],
                            'end': word['end']
                        }
                        final_segments.append(seg)
            else:
                if search_line(sentence['text'], search, searchtype):
                    seg = {
                        'file': video,
                        'line': sentence['text'],
                        'start': sentence['start'],
                        'end': sentence['end']
                    }
                    final_segments.append(seg)
    return final_segments
def videogrep(inputfile, outputfile, search, searchtype, maxclips=0, padding=0, test=False, randomize=False, sync=0, use_transcript=False, use_vtt=False, export_clips=False):
    """Search through and find all instances of the search term in an srt or transcript,
    create a supercut around that instance, and output a new video file
    comprised of those supercuts.

    padding and sync are given in milliseconds; maxclips of 0 means no limit.
    (Removed an unused `foundSearchTerm` local and fixed the "[+}" typo in
    the batch-job message.)
    """
    padding = padding / 1000.0
    sync = sync / 1000.0
    composition = []
    # Build the composition from whichever subtitle source was requested.
    if use_transcript:
        composition = compose_from_transcript(inputfile, search, searchtype)
    elif use_vtt:
        vtts = get_vtt_files(inputfile)
        composition = compose_from_vtt(vtts, search, searchtype)
    else:
        srts = get_subtitle_files(inputfile)
        composition = compose_from_srts(srts, search, searchtype)
    # If the search term was not found in any subtitle file...
    if len(composition) == 0:
        print("[!] Search term '" + search + "'" + " was not found in any file.")
        # NOTE(review): exit() relies on the site module; sys.exit would be
        # more robust, but sys is not imported at file level.
        exit(1)
    else:
        print("[+] Search term '" + search + "'" + " was found in " + str(len(composition)) + " places.")
        # apply padding and sync
        for c in composition:
            c['start'] = c['start'] + sync - padding
            c['end'] = c['end'] + sync + padding
        if maxclips > 0:
            composition = composition[:maxclips]
        if randomize is True:
            random.shuffle(composition)
        if test is True:
            demo_supercut(composition, padding)
        else:
            if os.path.splitext(outputfile)[1].lower() == '.edl':
                make_edl(composition, outputfile)
            elif export_clips:
                split_clips(composition, outputfile)
            else:
                if len(composition) > BATCH_SIZE:
                    print("[+] Starting batch job.")
                    create_supercut_in_batches(composition, outputfile, padding)
                else:
                    create_supercut(composition, outputfile, padding)
def main():
    """Command-line entry point: parse arguments and dispatch to
    transcription, ngram counting, or the videogrep supercut pipeline.
    """
    import argparse
    parser = argparse.ArgumentParser(description='Generate a "supercut" of one or more video files by searching through subtitle tracks.')
    parser.add_argument('--input', '-i', dest='inputfile', nargs='*', required=True, help='video or subtitle file, or folder')
    parser.add_argument('--search', '-s', dest='search', help='search term')
    parser.add_argument('--search-type', '-st', dest='searchtype', default='re', choices=['re', 'pos', 'hyper', 'fragment', 'franken', 'word'], help='type of search')
    parser.add_argument('--use-transcript', '-t', action='store_true', dest='use_transcript', help='Use a transcript generated by pocketsphinx instead of srt files')
    parser.add_argument('--use-vtt', '-vtt', action='store_true', dest='use_vtt', help='Use a vtt file instead of srt')
    parser.add_argument('--max-clips', '-m', dest='maxclips', type=int, default=0, help='maximum number of clips to use for the supercut')
    parser.add_argument('--output', '-o', dest='outputfile', default='supercut.mp4', help='name of output file')
    parser.add_argument('--export-clips', '-ec', dest='export_clips', action='store_true', help='Export individual clips')
    parser.add_argument('--demo', '-d', action='store_true', help='show results without making the supercut')
    parser.add_argument('--randomize', '-r', action='store_true', help='randomize the clips')
    # NOTE(review): --youtube is accepted but never referenced in this
    # module — confirm whether it is dead or handled elsewhere.
    parser.add_argument('--youtube', '-yt', help='grab clips from youtube based on your search')
    parser.add_argument('--padding', '-p', dest='padding', default=0, type=int, help='padding in milliseconds to add to the start and end of each clip')
    parser.add_argument('--resyncsubs', '-rs', dest='sync', default=0, type=int, help='Subtitle re-synch delay +/- in milliseconds')
    parser.add_argument('--transcribe', '-tr', dest='transcribe', action='store_true', help='Transcribe the video using audiogrep. Requires pocketsphinx')
    parser.add_argument('--ngrams', '-n', dest='ngrams', type=int, default=0, help='Return ngrams for videos')
    args = parser.parse_args()
    # --search is only required when actually searching (not when merely
    # transcribing or computing ngrams).
    if not args.transcribe and args.ngrams == 0:
        if args.search is None:
            parser.error('argument --search/-s is required')
    if args.transcribe:
        create_timestamps(args.inputfile)
    elif args.ngrams > 0:
        from collections import Counter
        grams = get_ngrams(args.inputfile, args.ngrams, args.use_transcript, args.use_vtt)
        most_common = Counter(grams).most_common(100)
        for ngram, count in most_common:
            print(' '.join(ngram), count)
    else:
        videogrep(args.inputfile, args.outputfile, args.search, args.searchtype, args.maxclips, args.padding, args.demo, args.randomize, args.sync, args.use_transcript, args.use_vtt, args.export_clips)
if __name__ == '__main__':
    main()
|
antiboredom/videogrep | videogrep/videogrep.py | convert_timespan | python | def convert_timespan(timespan):
start, end = timespan.split('-->')
start = convert_timestamp(start)
end = convert_timestamp(end)
return start, end | Convert an srt timespan into a start and end timestamp. | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/videogrep.py#L131-L136 | [
"def convert_timestamp(timestamp):\n \"\"\"Convert an srt timestamp into seconds.\"\"\"\n timestamp = timestamp.strip()\n chunk, millis = timestamp.split(',')\n hours, minutes, seconds = chunk.split(':')\n hours = int(hours)\n minutes = int(minutes)\n seconds = int(seconds)\n seconds = secon... | from __future__ import print_function
import os
import re
import random
import gc
import subprocess
from glob import glob
from collections import OrderedDict
from moviepy.editor import VideoFileClip, concatenate
import audiogrep
from .vtt import parse_auto_sub
from .timecode import Timecode
from . import searcher
usable_extensions = ['mp4', 'avi', 'mov', 'mkv', 'm4v']
BATCH_SIZE = 20
def get_fps(filename):
    """Return the frame rate of *filename* as reported by ffmpeg.

    Falls back to 25 fps when ffmpeg's output cannot be parsed.

    BUGFIX: on Python 3 the pipe yields bytes, so the str-pattern
    re.findall raised TypeError *outside* the old try block; the output is
    now decoded first, and the bare except is narrowed.
    """
    process = subprocess.Popen(['ffmpeg', '-i', filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    process.wait()
    output = process.stdout.read().decode('utf-8', 'replace')
    fps = re.findall(r'\d+ fps', output, flags=re.MULTILINE)
    try:
        return int(fps[0].split(' ')[0])
    except (IndexError, ValueError):
        return 25
def get_ngrams(inputfile, n=1, use_transcript=False, use_vtt=False):
    '''
    Get ngrams from a text.

    Collects words from pocketsphinx transcripts, vtt files, or srt files
    (depending on the flags) and returns an iterable of n-word tuples.
    NOTE: on Python 3 the return value is a zip iterator, so it can only
    be consumed once.

    Sourced from:
    https://gist.github.com/dannguyen/93c2c43f4e65328b85af
    '''
    words = []
    if use_transcript:
        # Transcript words are indexed tuples; w[0] is the word text.
        for s in audiogrep.convert_timestamps(inputfile):
            for w in s['words']:
                words.append(w[0])
    elif use_vtt:
        vtts = get_vtt_files(inputfile)
        for vtt in vtts:
            with open(vtt['vtt'], 'r') as infile:
                sentences = parse_auto_sub(infile.read())
            for s in sentences:
                for w in s['words']:
                    words.append(w['word'])
    else:
        text = ''
        srts = get_subtitle_files(inputfile)
        for srt in srts:
            lines = clean_srt(srt)
            if lines:
                for timespan in lines.keys():
                    line = lines[timespan].strip()
                    text += line + ' '
        # Split the accumulated text on punctuation and whitespace.
        words = re.split(r'[.?!,:\"]+\s*|\s+', text)
    ngrams = zip(*[words[i:] for i in range(n)])
    return ngrams
def make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=25):
    """Format one EDL event (clip entry) with Final Cut Pro reel metadata.

    All four time values are seconds and are rendered as timecodes at *fps*.
    """
    # Reel names are truncated to seven characters for the EDL format.
    reel = full_name[0:7] if len(full_name) > 7 else full_name
    template = '{} {} AA/V C {} {} {} {}\n* FROM CLIP NAME: {}\n* COMMENT: \n FINAL CUT PRO REEL: {} REPLACED BY: {}\n\n'
    return template.format(
        n,
        full_name,
        Timecode(fps, start_seconds=time_in),
        Timecode(fps, start_seconds=time_out),
        Timecode(fps, start_seconds=rec_in),
        Timecode(fps, start_seconds=rec_out),
        filename,
        full_name,
        reel,
    )
def make_edl(timestamps, name):
    '''Converts an array of ordered timestamps into an EDL string and
    writes it to the file *name*.

    Record in/out times accumulate so events land back-to-back on the
    timeline.
    '''
    fpses = {}  # cache: filename -> fps, so ffmpeg is probed once per file
    out = "TITLE: {}\nFCM: NON-DROP FRAME\n\n".format(name)
    rec_in = 0
    for index, timestamp in enumerate(timestamps):
        if timestamp['file'] not in fpses:
            fpses[timestamp['file']] = get_fps(timestamp['file'])
        fps = fpses[timestamp['file']]
        # EDL event numbers are 1-based and zero-padded to four digits.
        n = str(index + 1).zfill(4)
        time_in = timestamp['start']
        time_out = timestamp['end']
        duration = time_out - time_in
        rec_out = rec_in + duration
        full_name = 'reel_{}'.format(n)
        filename = timestamp['file']
        out += make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=fps)
        rec_in = rec_out
    with open(name, 'w') as outfile:
        outfile.write(out)
def create_timestamps(inputfiles):
files = audiogrep.convert_to_wav(inputfiles)
audiogrep.transcribe(files)
def convert_timestamp(timestamp):
"""Convert an srt timestamp into seconds."""
timestamp = timestamp.strip()
chunk, millis = timestamp.split(',')
hours, minutes, seconds = chunk.split(':')
hours = int(hours)
minutes = int(minutes)
seconds = int(seconds)
seconds = seconds + hours * 60 * 60 + minutes * 60 + float(millis) / 1000
return seconds
def clean_srt(srt):
"""Remove damaging line breaks and numbers from srt files and return a
dictionary.
"""
with open(srt, 'r') as f:
text = f.read()
text = re.sub(r'^\d+[\n\r]', '', text, flags=re.MULTILINE)
lines = text.splitlines()
output = OrderedDict()
key = ''
for line in lines:
line = line.strip()
if line.find('-->') > -1:
key = line
output[key] = ''
else:
if key != '':
output[key] += line + ' '
return output
def cleanup_log_files(outputfile):
"""Search for and remove temp log files found in the output directory."""
d = os.path.dirname(os.path.abspath(outputfile))
logfiles = [f for f in os.listdir(d) if f.endswith('ogg.log')]
for f in logfiles:
os.remove(f)
def demo_supercut(composition, padding):
"""Print out timespans to be cut followed by the line number in the srt."""
for i, c in enumerate(composition):
line = c['line']
start = c['start']
end = c['end']
if i > 0 and composition[i - 1]['file'] == c['file'] and start < composition[i - 1]['end']:
start = start + padding
print("{1} to {2}:\t{0}".format(line, start, end))
def create_supercut(composition, outputfile, padding):
"""Concatenate video clips together and output finished video file to the
output directory.
"""
print("[+] Creating clips.")
demo_supercut(composition, padding)
# add padding when necessary
for (clip, nextclip) in zip(composition, composition[1:]):
if ((nextclip['file'] == clip['file']) and (nextclip['start'] < clip['end'])):
nextclip['start'] += padding
# put all clips together:
all_filenames = set([c['file'] for c in composition])
videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
print("[+] Concatenating clips.")
final_clip = concatenate(cut_clips)
print("[+] Writing ouput file.")
final_clip.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def create_supercut_in_batches(composition, outputfile, padding):
"""Create & concatenate video clips in groups of size BATCH_SIZE and output
finished video file to output directory.
"""
total_clips = len(composition)
start_index = 0
end_index = BATCH_SIZE
batch_comp = []
while start_index < total_clips:
filename = outputfile + '.tmp' + str(start_index) + '.mp4'
try:
create_supercut(composition[start_index:end_index], filename, padding)
batch_comp.append(filename)
gc.collect()
start_index += BATCH_SIZE
end_index += BATCH_SIZE
except:
start_index += BATCH_SIZE
end_index += BATCH_SIZE
next
clips = [VideoFileClip(filename) for filename in batch_comp]
video = concatenate(clips)
video.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
# remove partial video files
for filename in batch_comp:
os.remove(filename)
cleanup_log_files(outputfile)
def split_clips(composition, outputfile):
all_filenames = set([c['file'] for c in composition])
videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
basename, ext = os.path.splitext(outputfile)
print("[+] Writing ouput files.")
for i, clip in enumerate(cut_clips):
clipfilename = basename + '_' + str(i).zfill(5) + ext
clip.to_videofile(clipfilename, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def search_line(line, search, searchtype):
"""Return True if search term is found in given line, False otherwise."""
if searchtype == 're' or searchtype == 'word':
return re.search(search, line) #, re.IGNORECASE)
elif searchtype == 'pos':
return searcher.search_out(line, search)
elif searchtype == 'hyper':
return searcher.hypernym_search(line, search)
def get_subtitle_files(inputfile):
"""Return a list of subtitle files."""
srts = []
for f in inputfile:
filename = f.split('.')
filename[-1] = 'srt'
srt = '.'.join(filename)
if os.path.isfile(srt):
srts.append(srt)
if len(srts) == 0:
print("[!] No subtitle files were found.")
return False
return srts
def get_vtt_files(inputfile):
"""Return a list of vtt files."""
vtts = []
for f in inputfile:
filename = f.split('.')
filename = '.'.join(filename[0:-1])
vtt = glob(filename + '*.vtt')
if len(vtt) > 0:
vtts.append({'vtt': vtt[0], 'video': f})
if len(vtts) == 0:
print("[!] No vtt files were found.")
return False
return vtts
def compose_from_srts(srts, search, searchtype):
"""Takes a list of subtitle (srt) filenames, search term and search type
and, returns a list of timestamps for composing a supercut.
"""
composition = []
foundSearchTerm = False
# Iterate over each subtitles file.
for srt in srts:
print(srt)
lines = clean_srt(srt)
videofile = ""
foundVideoFile = False
print("[+] Searching for video file corresponding to '" + srt + "'.")
for ext in usable_extensions:
tempVideoFile = srt.replace('.srt', '.' + ext)
if os.path.isfile(tempVideoFile):
videofile = tempVideoFile
foundVideoFile = True
print("[+] Found '" + tempVideoFile + "'.")
# If a correspndong video file was found for this subtitles file...
if foundVideoFile:
# Check that the subtitles file contains subtitles.
if lines:
# Iterate over each line in the current subtitles file.
for timespan in lines.keys():
line = lines[timespan].strip()
# If this line contains the search term
if search_line(line, search, searchtype):
foundSearchTerm = True
# Extract the timespan for this subtitle.
start, end = convert_timespan(timespan)
# Record this occurance of the search term.
composition.append({'file': videofile, 'time': timespan, 'start': start, 'end': end, 'line': line})
# If the search was unsuccessful.
if foundSearchTerm is False:
print("[!] Search term '" + search + "'" + " was not found is subtitle file '" + srt + "'.")
# If no subtitles were found in the current file.
else:
print("[!] Subtitle file '" + srt + "' is empty.")
# If no video file was found...
else:
print("[!] No video file was found which corresponds to subtitle file '" + srt + "'.")
print("[!] The following video formats are currently supported:")
extList = ""
for ext in usable_extensions:
extList += ext + ", "
print(extList)
return composition
def compose_from_transcript(files, search, searchtype):
"""Takes transcripts created by audiogrep/pocketsphinx, a search and search type
and returns a list of timestamps for creating a supercut"""
final_segments = []
if searchtype in ['re', 'word', 'franken', 'fragment']:
if searchtype == 're':
searchtype = 'sentence'
segments = audiogrep.search(search, files, mode=searchtype, regex=True)
for seg in segments:
seg['file'] = seg['file'].replace('.transcription.txt', '')
seg['line'] = seg['words']
final_segments.append(seg)
elif searchtype in ['hyper', 'pos']:
for s in audiogrep.convert_timestamps(files):
for w in s['words']:
if search_line(w[0], search, searchtype):
seg = {
'file': s['file'].replace('.transcription.txt',''),
'line': w[0],
'start': float(w[1]),
'end': float(w[2])
}
final_segments.append(seg)
return final_segments
def compose_from_vtt(files, search, searchtype):
final_segments = []
for f in files:
video = f['video']
with open(f['vtt'], 'r') as infile:
sentences = parse_auto_sub(infile.read())
for sentence in sentences:
if searchtype in ['word', 'hyper', 'pos']:
for word in sentence['words']:
if search_line(word['word'], search, searchtype):
seg = {
'file': video,
'line': word['word'],
'start': word['start'],
'end': word['end']
}
final_segments.append(seg)
else:
if search_line(sentence['text'], search, searchtype):
seg = {
'file': video,
'line': sentence['text'],
'start': sentence['start'],
'end': sentence['end']
}
final_segments.append(seg)
return final_segments
def videogrep(inputfile, outputfile, search, searchtype, maxclips=0, padding=0, test=False, randomize=False, sync=0, use_transcript=False, use_vtt=False, export_clips=False):
"""Search through and find all instances of the search term in an srt or transcript,
create a supercut around that instance, and output a new video file
comprised of those supercuts.
"""
padding = padding / 1000.0
sync = sync / 1000.0
composition = []
foundSearchTerm = False
if use_transcript:
composition = compose_from_transcript(inputfile, search, searchtype)
elif use_vtt:
vtts = get_vtt_files(inputfile)
composition = compose_from_vtt(vtts, search, searchtype)
else:
srts = get_subtitle_files(inputfile)
composition = compose_from_srts(srts, search, searchtype)
# If the search term was not found in any subtitle file...
if len(composition) == 0:
print("[!] Search term '" + search + "'" + " was not found in any file.")
exit(1)
else:
print("[+] Search term '" + search + "'" + " was found in " + str(len(composition)) + " places.")
# apply padding and sync
for c in composition:
c['start'] = c['start'] + sync - padding
c['end'] = c['end'] + sync + padding
if maxclips > 0:
composition = composition[:maxclips]
if randomize is True:
random.shuffle(composition)
if test is True:
demo_supercut(composition, padding)
else:
if os.path.splitext(outputfile)[1].lower() == '.edl':
make_edl(composition, outputfile)
elif export_clips:
split_clips(composition, outputfile)
else:
if len(composition) > BATCH_SIZE:
print("[+} Starting batch job.")
create_supercut_in_batches(composition, outputfile, padding)
else:
create_supercut(composition, outputfile, padding)
def main():
import argparse
parser = argparse.ArgumentParser(description='Generate a "supercut" of one or more video files by searching through subtitle tracks.')
parser.add_argument('--input', '-i', dest='inputfile', nargs='*', required=True, help='video or subtitle file, or folder')
parser.add_argument('--search', '-s', dest='search', help='search term')
parser.add_argument('--search-type', '-st', dest='searchtype', default='re', choices=['re', 'pos', 'hyper', 'fragment', 'franken', 'word'], help='type of search')
parser.add_argument('--use-transcript', '-t', action='store_true', dest='use_transcript', help='Use a transcript generated by pocketsphinx instead of srt files')
parser.add_argument('--use-vtt', '-vtt', action='store_true', dest='use_vtt', help='Use a vtt file instead of srt')
parser.add_argument('--max-clips', '-m', dest='maxclips', type=int, default=0, help='maximum number of clips to use for the supercut')
parser.add_argument('--output', '-o', dest='outputfile', default='supercut.mp4', help='name of output file')
parser.add_argument('--export-clips', '-ec', dest='export_clips', action='store_true', help='Export individual clips')
parser.add_argument('--demo', '-d', action='store_true', help='show results without making the supercut')
parser.add_argument('--randomize', '-r', action='store_true', help='randomize the clips')
parser.add_argument('--youtube', '-yt', help='grab clips from youtube based on your search')
parser.add_argument('--padding', '-p', dest='padding', default=0, type=int, help='padding in milliseconds to add to the start and end of each clip')
parser.add_argument('--resyncsubs', '-rs', dest='sync', default=0, type=int, help='Subtitle re-synch delay +/- in milliseconds')
parser.add_argument('--transcribe', '-tr', dest='transcribe', action='store_true', help='Transcribe the video using audiogrep. Requires pocketsphinx')
parser.add_argument('--ngrams', '-n', dest='ngrams', type=int, default=0, help='Return ngrams for videos')
args = parser.parse_args()
if not args.transcribe and args.ngrams == 0:
if args.search is None:
parser.error('argument --search/-s is required')
if args.transcribe:
create_timestamps(args.inputfile)
elif args.ngrams > 0:
from collections import Counter
grams = get_ngrams(args.inputfile, args.ngrams, args.use_transcript, args.use_vtt)
most_common = Counter(grams).most_common(100)
for ngram, count in most_common:
print(' '.join(ngram), count)
else:
videogrep(args.inputfile, args.outputfile, args.search, args.searchtype, args.maxclips, args.padding, args.demo, args.randomize, args.sync, args.use_transcript, args.use_vtt, args.export_clips)
if __name__ == '__main__':
main()
|
antiboredom/videogrep | videogrep/videogrep.py | convert_timestamp | python | def convert_timestamp(timestamp):
timestamp = timestamp.strip()
chunk, millis = timestamp.split(',')
hours, minutes, seconds = chunk.split(':')
hours = int(hours)
minutes = int(minutes)
seconds = int(seconds)
seconds = seconds + hours * 60 * 60 + minutes * 60 + float(millis) / 1000
return seconds | Convert an srt timestamp into seconds. | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/videogrep.py#L139-L148 | null | from __future__ import print_function
import os
import re
import random
import gc
import subprocess
from glob import glob
from collections import OrderedDict
from moviepy.editor import VideoFileClip, concatenate
import audiogrep
from .vtt import parse_auto_sub
from .timecode import Timecode
from . import searcher
usable_extensions = ['mp4', 'avi', 'mov', 'mkv', 'm4v']
BATCH_SIZE = 20
def get_fps(filename):
process = subprocess.Popen(['ffmpeg', '-i', filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
returncode = process.wait()
output = process.stdout.read()
fps = re.findall(r'\d+ fps', output, flags=re.MULTILINE)
try:
return int(fps[0].split(' ')[0])
except:
return 25
def get_ngrams(inputfile, n=1, use_transcript=False, use_vtt=False):
'''
Get ngrams from a text
Sourced from:
https://gist.github.com/dannguyen/93c2c43f4e65328b85af
'''
words = []
if use_transcript:
for s in audiogrep.convert_timestamps(inputfile):
for w in s['words']:
words.append(w[0])
elif use_vtt:
vtts = get_vtt_files(inputfile)
for vtt in vtts:
with open(vtt['vtt'], 'r') as infile:
sentences = parse_auto_sub(infile.read())
for s in sentences:
for w in s['words']:
words.append(w['word'])
else:
text = ''
srts = get_subtitle_files(inputfile)
for srt in srts:
lines = clean_srt(srt)
if lines:
for timespan in lines.keys():
line = lines[timespan].strip()
text += line + ' '
words = re.split(r'[.?!,:\"]+\s*|\s+', text)
ngrams = zip(*[words[i:] for i in range(n)])
return ngrams
def make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=25):
reel = full_name
if len(full_name) > 7:
reel = full_name[0:7]
template = '{} {} AA/V C {} {} {} {}\n* FROM CLIP NAME: {}\n* COMMENT: \n FINAL CUT PRO REEL: {} REPLACED BY: {}\n\n'
out = template.format(
n,
full_name,
Timecode(fps, start_seconds=time_in),
Timecode(fps, start_seconds=time_out),
Timecode(fps, start_seconds=rec_in),
Timecode(fps, start_seconds=rec_out),
filename,
full_name,
reel
)
return out
def make_edl(timestamps, name):
'''Converts an array of ordered timestamps into an EDL string'''
fpses = {}
out = "TITLE: {}\nFCM: NON-DROP FRAME\n\n".format(name)
rec_in = 0
for index, timestamp in enumerate(timestamps):
if timestamp['file'] not in fpses:
fpses[timestamp['file']] = get_fps(timestamp['file'])
fps = fpses[timestamp['file']]
n = str(index + 1).zfill(4)
time_in = timestamp['start']
time_out = timestamp['end']
duration = time_out - time_in
rec_out = rec_in + duration
full_name = 'reel_{}'.format(n)
filename = timestamp['file']
out += make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=fps)
rec_in = rec_out
with open(name, 'w') as outfile:
outfile.write(out)
def create_timestamps(inputfiles):
files = audiogrep.convert_to_wav(inputfiles)
audiogrep.transcribe(files)
def convert_timespan(timespan):
"""Convert an srt timespan into a start and end timestamp."""
start, end = timespan.split('-->')
start = convert_timestamp(start)
end = convert_timestamp(end)
return start, end
def clean_srt(srt):
"""Remove damaging line breaks and numbers from srt files and return a
dictionary.
"""
with open(srt, 'r') as f:
text = f.read()
text = re.sub(r'^\d+[\n\r]', '', text, flags=re.MULTILINE)
lines = text.splitlines()
output = OrderedDict()
key = ''
for line in lines:
line = line.strip()
if line.find('-->') > -1:
key = line
output[key] = ''
else:
if key != '':
output[key] += line + ' '
return output
def cleanup_log_files(outputfile):
"""Search for and remove temp log files found in the output directory."""
d = os.path.dirname(os.path.abspath(outputfile))
logfiles = [f for f in os.listdir(d) if f.endswith('ogg.log')]
for f in logfiles:
os.remove(f)
def demo_supercut(composition, padding):
"""Print out timespans to be cut followed by the line number in the srt."""
for i, c in enumerate(composition):
line = c['line']
start = c['start']
end = c['end']
if i > 0 and composition[i - 1]['file'] == c['file'] and start < composition[i - 1]['end']:
start = start + padding
print("{1} to {2}:\t{0}".format(line, start, end))
def create_supercut(composition, outputfile, padding):
"""Concatenate video clips together and output finished video file to the
output directory.
"""
print("[+] Creating clips.")
demo_supercut(composition, padding)
# add padding when necessary
for (clip, nextclip) in zip(composition, composition[1:]):
if ((nextclip['file'] == clip['file']) and (nextclip['start'] < clip['end'])):
nextclip['start'] += padding
# put all clips together:
all_filenames = set([c['file'] for c in composition])
videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
print("[+] Concatenating clips.")
final_clip = concatenate(cut_clips)
print("[+] Writing ouput file.")
final_clip.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def create_supercut_in_batches(composition, outputfile, padding):
    """Create & concatenate video clips in groups of size BATCH_SIZE and output
    the finished video file to the output directory.

    Fixes: the bare ``except:`` (which also swallowed KeyboardInterrupt and
    SystemExit) is narrowed to ``Exception`` and now reports the failure, and
    the stray bare ``next`` expression (a no-op that was presumably meant as
    ``continue``) is removed.
    """
    total_clips = len(composition)
    start_index = 0
    end_index = BATCH_SIZE
    batch_comp = []
    while start_index < total_clips:
        filename = outputfile + '.tmp' + str(start_index) + '.mp4'
        try:
            create_supercut(composition[start_index:end_index], filename, padding)
            batch_comp.append(filename)
            gc.collect()
        except Exception as e:
            # Skip a failing batch but keep processing the rest.
            print("[!] Batch starting at clip {} failed: {}".format(start_index, e))
        start_index += BATCH_SIZE
        end_index += BATCH_SIZE

    clips = [VideoFileClip(filename) for filename in batch_comp]
    video = concatenate(clips)
    video.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')

    # remove partial video files
    for filename in batch_comp:
        os.remove(filename)

    cleanup_log_files(outputfile)
def split_clips(composition, outputfile):
    """Write each matched clip to its own numbered video file.

    Output files are named ``<basename>_00000<ext>``, ``<basename>_00001<ext>``,
    and so on. Fix: corrected the "ouput" typo in the progress message.
    """
    all_filenames = set([c['file'] for c in composition])
    videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
    cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]

    basename, ext = os.path.splitext(outputfile)
    print("[+] Writing output files.")
    for i, clip in enumerate(cut_clips):
        clipfilename = basename + '_' + str(i).zfill(5) + ext
        clip.to_videofile(clipfilename, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def search_line(line, search, searchtype):
    """Return a truthy match when *search* occurs in *line*, else a falsy value.

    're' and 'word' use a regex search; 'pos' and 'hyper' delegate to the
    project's searcher module. An unknown searchtype falls through to None.
    """
    if searchtype in ('re', 'word'):
        return re.search(search, line)
    if searchtype == 'pos':
        return searcher.search_out(line, search)
    if searchtype == 'hyper':
        return searcher.hypernym_search(line, search)
def get_subtitle_files(inputfile):
    """Map each input video path to an existing sibling .srt file.

    The .srt candidate is formed by swapping the final extension. Returns the
    list of found srt paths, or False when none exist.
    """
    found = []
    for path in inputfile:
        parts = path.split('.')
        parts[-1] = 'srt'
        candidate = '.'.join(parts)
        if os.path.isfile(candidate):
            found.append(candidate)
    if not found:
        print("[!] No subtitle files were found.")
        return False
    return found
def get_vtt_files(inputfile):
    """Pair each input video with the first matching .vtt file on disk.

    Returns a list of {'vtt': path, 'video': path} dicts, or False when no
    vtt file matches any input.
    """
    pairs = []
    for path in inputfile:
        stem = '.'.join(path.split('.')[0:-1])
        matches = glob(stem + '*.vtt')
        if matches:
            pairs.append({'vtt': matches[0], 'video': path})
    if not pairs:
        print("[!] No vtt files were found.")
        return False
    return pairs
def compose_from_srts(srts, search, searchtype):
    """Takes a list of subtitle (srt) filenames, search term and search type
    and returns a list of timestamps for composing a supercut.

    Fixes: the per-file "not found" message now fires for every file without
    a match (the flag was previously never reset between files, so it could
    only fire once), and its "found is" typo is corrected to "found in".
    """
    composition = []

    # Iterate over each subtitles file.
    for srt in srts:
        print(srt)
        lines = clean_srt(srt)
        videofile = ""
        foundVideoFile = False

        print("[+] Searching for video file corresponding to '" + srt + "'.")
        for ext in usable_extensions:
            tempVideoFile = srt.replace('.srt', '.' + ext)
            if os.path.isfile(tempVideoFile):
                videofile = tempVideoFile
                foundVideoFile = True
                print("[+] Found '" + tempVideoFile + "'.")

        # If a corresponding video file was found for this subtitles file...
        if foundVideoFile:
            # Check that the subtitles file contains subtitles.
            if lines:
                foundSearchTerm = False
                # Iterate over each line in the current subtitles file.
                for timespan in lines.keys():
                    line = lines[timespan].strip()
                    # If this line contains the search term
                    if search_line(line, search, searchtype):
                        foundSearchTerm = True
                        # Extract the timespan for this subtitle.
                        start, end = convert_timespan(timespan)
                        # Record this occurrence of the search term.
                        composition.append({'file': videofile, 'time': timespan, 'start': start, 'end': end, 'line': line})

                # If the search was unsuccessful in this file.
                if foundSearchTerm is False:
                    print("[!] Search term '" + search + "'" + " was not found in subtitle file '" + srt + "'.")

            # If no subtitles were found in the current file.
            else:
                print("[!] Subtitle file '" + srt + "' is empty.")

        # If no video file was found...
        else:
            print("[!] No video file was found which corresponds to subtitle file '" + srt + "'.")
            print("[!] The following video formats are currently supported:")
            extList = ""
            for ext in usable_extensions:
                extList += ext + ", "
            print(extList)

    return composition
def compose_from_transcript(files, search, searchtype):
    """Build supercut segments from audiogrep/pocketsphinx transcripts.

    Sentence-oriented search types delegate to audiogrep.search; word-level
    types ('hyper', 'pos') scan each transcribed word via search_line.
    """
    final_segments = []

    if searchtype in ['re', 'word', 'franken', 'fragment']:
        # 're' maps onto audiogrep's regex-enabled sentence mode.
        mode = 'sentence' if searchtype == 're' else searchtype
        for seg in audiogrep.search(search, files, mode=mode, regex=True):
            seg['file'] = seg['file'].replace('.transcription.txt', '')
            seg['line'] = seg['words']
            final_segments.append(seg)
    elif searchtype in ['hyper', 'pos']:
        for sentence in audiogrep.convert_timestamps(files):
            for word in sentence['words']:
                if search_line(word[0], search, searchtype):
                    final_segments.append({
                        'file': sentence['file'].replace('.transcription.txt', ''),
                        'line': word[0],
                        'start': float(word[1]),
                        'end': float(word[2]),
                    })

    return final_segments
def compose_from_vtt(files, search, searchtype):
    """Build supercut segments from auto-generated .vtt subtitle files.

    Word-level search types produce one segment per matching word; otherwise
    one segment per matching sentence.
    """
    final_segments = []

    for entry in files:
        video = entry['video']
        with open(entry['vtt'], 'r') as infile:
            sentences = parse_auto_sub(infile.read())

        for sentence in sentences:
            if searchtype in ['word', 'hyper', 'pos']:
                for word in sentence['words']:
                    if search_line(word['word'], search, searchtype):
                        final_segments.append({
                            'file': video,
                            'line': word['word'],
                            'start': word['start'],
                            'end': word['end'],
                        })
            elif search_line(sentence['text'], search, searchtype):
                final_segments.append({
                    'file': video,
                    'line': sentence['text'],
                    'start': sentence['start'],
                    'end': sentence['end'],
                })

    return final_segments
def videogrep(inputfile, outputfile, search, searchtype, maxclips=0, padding=0, test=False, randomize=False, sync=0, use_transcript=False, use_vtt=False, export_clips=False):
    """Search through and find all instances of the search term in an srt or
    transcript, create a supercut around that instance, and output a new video
    file comprised of those supercuts.

    Fixes: corrected the mismatched bracket in the "[+} Starting batch job."
    progress message and removed the unused ``foundSearchTerm`` variable and
    redundant ``composition`` pre-initialization.
    """
    # Caller-facing units are milliseconds; everything below works in seconds.
    padding = padding / 1000.0
    sync = sync / 1000.0

    if use_transcript:
        composition = compose_from_transcript(inputfile, search, searchtype)
    elif use_vtt:
        vtts = get_vtt_files(inputfile)
        composition = compose_from_vtt(vtts, search, searchtype)
    else:
        srts = get_subtitle_files(inputfile)
        composition = compose_from_srts(srts, search, searchtype)

    # If the search term was not found in any subtitle file...
    if len(composition) == 0:
        print("[!] Search term '" + search + "'" + " was not found in any file.")
        exit(1)
    else:
        print("[+] Search term '" + search + "'" + " was found in " + str(len(composition)) + " places.")

        # apply padding and sync
        for c in composition:
            c['start'] = c['start'] + sync - padding
            c['end'] = c['end'] + sync + padding

        if maxclips > 0:
            composition = composition[:maxclips]

        if randomize is True:
            random.shuffle(composition)

        if test is True:
            demo_supercut(composition, padding)
        else:
            if os.path.splitext(outputfile)[1].lower() == '.edl':
                make_edl(composition, outputfile)
            elif export_clips:
                split_clips(composition, outputfile)
            else:
                if len(composition) > BATCH_SIZE:
                    print("[+] Starting batch job.")
                    create_supercut_in_batches(composition, outputfile, padding)
                else:
                    create_supercut(composition, outputfile, padding)
def main():
    """Command-line entry point: parse arguments and dispatch to videogrep."""
    import argparse

    parser = argparse.ArgumentParser(description='Generate a "supercut" of one or more video files by searching through subtitle tracks.')

    # (flags, keyword-arguments) pairs, registered in display order.
    arg_specs = [
        (('--input', '-i'), dict(dest='inputfile', nargs='*', required=True, help='video or subtitle file, or folder')),
        (('--search', '-s'), dict(dest='search', help='search term')),
        (('--search-type', '-st'), dict(dest='searchtype', default='re', choices=['re', 'pos', 'hyper', 'fragment', 'franken', 'word'], help='type of search')),
        (('--use-transcript', '-t'), dict(action='store_true', dest='use_transcript', help='Use a transcript generated by pocketsphinx instead of srt files')),
        (('--use-vtt', '-vtt'), dict(action='store_true', dest='use_vtt', help='Use a vtt file instead of srt')),
        (('--max-clips', '-m'), dict(dest='maxclips', type=int, default=0, help='maximum number of clips to use for the supercut')),
        (('--output', '-o'), dict(dest='outputfile', default='supercut.mp4', help='name of output file')),
        (('--export-clips', '-ec'), dict(dest='export_clips', action='store_true', help='Export individual clips')),
        (('--demo', '-d'), dict(action='store_true', help='show results without making the supercut')),
        (('--randomize', '-r'), dict(action='store_true', help='randomize the clips')),
        (('--youtube', '-yt'), dict(help='grab clips from youtube based on your search')),
        (('--padding', '-p'), dict(dest='padding', default=0, type=int, help='padding in milliseconds to add to the start and end of each clip')),
        (('--resyncsubs', '-rs'), dict(dest='sync', default=0, type=int, help='Subtitle re-synch delay +/- in milliseconds')),
        (('--transcribe', '-tr'), dict(dest='transcribe', action='store_true', help='Transcribe the video using audiogrep. Requires pocketsphinx')),
        (('--ngrams', '-n'), dict(dest='ngrams', type=int, default=0, help='Return ngrams for videos')),
    ]
    for flags, kwargs in arg_specs:
        parser.add_argument(*flags, **kwargs)

    args = parser.parse_args()

    # --search is mandatory unless transcribing or listing ngrams.
    if not args.transcribe and args.ngrams == 0 and args.search is None:
        parser.error('argument --search/-s is required')

    if args.transcribe:
        create_timestamps(args.inputfile)
    elif args.ngrams > 0:
        from collections import Counter
        grams = get_ngrams(args.inputfile, args.ngrams, args.use_transcript, args.use_vtt)
        for ngram, count in Counter(grams).most_common(100):
            print(' '.join(ngram), count)
    else:
        videogrep(args.inputfile, args.outputfile, args.search, args.searchtype, args.maxclips, args.padding, args.demo, args.randomize, args.sync, args.use_transcript, args.use_vtt, args.export_clips)


if __name__ == '__main__':
    main()
|
antiboredom/videogrep | videogrep/videogrep.py | clean_srt | python | def clean_srt(srt):
with open(srt, 'r') as f:
text = f.read()
text = re.sub(r'^\d+[\n\r]', '', text, flags=re.MULTILINE)
lines = text.splitlines()
output = OrderedDict()
key = ''
for line in lines:
line = line.strip()
if line.find('-->') > -1:
key = line
output[key] = ''
else:
if key != '':
output[key] += line + ' '
return output | Remove damaging line breaks and numbers from srt files and return a
dictionary. | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/videogrep.py#L151-L171 | null | from __future__ import print_function
import os
import re
import random
import gc
import subprocess
from glob import glob
from collections import OrderedDict
from moviepy.editor import VideoFileClip, concatenate
import audiogrep
from .vtt import parse_auto_sub
from .timecode import Timecode
from . import searcher
usable_extensions = ['mp4', 'avi', 'mov', 'mkv', 'm4v']
BATCH_SIZE = 20
def get_fps(filename):
    """Return the video's frame rate as reported by ffmpeg, or 25 on failure.

    Fix: ``process.stdout.read()`` returns bytes on Python 3, so the output is
    decoded before the str-pattern regex search (previously this raised a
    TypeError that the bare ``except`` silently converted into the default).
    The except clause is also narrowed so unrelated errors are not hidden.
    """
    process = subprocess.Popen(['ffmpeg', '-i', filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    process.wait()
    output = process.stdout.read().decode('utf-8', errors='replace')
    fps = re.findall(r'\d+ fps', output, flags=re.MULTILINE)
    try:
        return int(fps[0].split(' ')[0])
    except (IndexError, ValueError):
        # No "NN fps" token in ffmpeg's output — fall back to a common rate.
        return 25
def get_ngrams(inputfile, n=1, use_transcript=False, use_vtt=False):
    """Collect word n-grams from transcripts, vtt files, or srt subtitles.

    Returns a zip iterator of n-tuples of consecutive words.
    Adapted from https://gist.github.com/dannguyen/93c2c43f4e65328b85af
    """
    words = []
    if use_transcript:
        for sentence in audiogrep.convert_timestamps(inputfile):
            words.extend(w[0] for w in sentence['words'])
    elif use_vtt:
        for vtt in get_vtt_files(inputfile):
            with open(vtt['vtt'], 'r') as infile:
                parsed = parse_auto_sub(infile.read())
            for sentence in parsed:
                words.extend(w['word'] for w in sentence['words'])
    else:
        text = ''
        for srt in get_subtitle_files(inputfile):
            lines = clean_srt(srt)
            if lines:
                for timespan in lines.keys():
                    text += lines[timespan].strip() + ' '
        words = re.split(r'[.?!,:\"]+\s*|\s+', text)

    return zip(*[words[i:] for i in range(n)])
def make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=25):
    """Format a single EDL event (one clip) as a text segment."""
    # Final Cut Pro reel names are capped at seven characters.
    reel = full_name if len(full_name) <= 7 else full_name[0:7]

    template = '{} {} AA/V C {} {} {} {}\n* FROM CLIP NAME: {}\n* COMMENT: \n FINAL CUT PRO REEL: {} REPLACED BY: {}\n\n'

    return template.format(
        n,
        full_name,
        Timecode(fps, start_seconds=time_in),
        Timecode(fps, start_seconds=time_out),
        Timecode(fps, start_seconds=rec_in),
        Timecode(fps, start_seconds=rec_out),
        filename,
        full_name,
        reel,
    )
def make_edl(timestamps, name):
    """Convert an ordered list of clip timestamps into an EDL file at *name*."""
    fps_cache = {}  # fps per source file, probed once via ffmpeg
    body = "TITLE: {}\nFCM: NON-DROP FRAME\n\n".format(name)
    rec_in = 0

    for index, timestamp in enumerate(timestamps):
        source = timestamp['file']
        if source not in fps_cache:
            fps_cache[source] = get_fps(source)
        fps = fps_cache[source]

        event = str(index + 1).zfill(4)
        time_in = timestamp['start']
        time_out = timestamp['end']
        # Record-side times run back to back on the output timeline.
        rec_out = rec_in + (time_out - time_in)

        body += make_edl_segment(event, time_in, time_out, rec_in, rec_out, 'reel_{}'.format(event), source, fps=fps)
        rec_in = rec_out

    with open(name, 'w') as outfile:
        outfile.write(body)
def create_timestamps(inputfiles):
    """Convert the inputs to wav and transcribe them via audiogrep/pocketsphinx."""
    wav_files = audiogrep.convert_to_wav(inputfiles)
    audiogrep.transcribe(wav_files)
def convert_timespan(timespan):
    """Split an srt 'start --> end' timespan into two second values."""
    start_stamp, end_stamp = timespan.split('-->')
    return convert_timestamp(start_stamp), convert_timestamp(end_stamp)
def convert_timestamp(timestamp):
    """Convert an srt timestamp ('HH:MM:SS,mmm') into seconds as a float."""
    hms, millis = timestamp.strip().split(',')
    hours, minutes, seconds = (int(part) for part in hms.split(':'))
    return hours * 3600 + minutes * 60 + seconds + float(millis) / 1000
def cleanup_log_files(outputfile):
"""Search for and remove temp log files found in the output directory."""
d = os.path.dirname(os.path.abspath(outputfile))
logfiles = [f for f in os.listdir(d) if f.endswith('ogg.log')]
for f in logfiles:
os.remove(f)
def demo_supercut(composition, padding):
"""Print out timespans to be cut followed by the line number in the srt."""
for i, c in enumerate(composition):
line = c['line']
start = c['start']
end = c['end']
if i > 0 and composition[i - 1]['file'] == c['file'] and start < composition[i - 1]['end']:
start = start + padding
print("{1} to {2}:\t{0}".format(line, start, end))
def create_supercut(composition, outputfile, padding):
"""Concatenate video clips together and output finished video file to the
output directory.
"""
print("[+] Creating clips.")
demo_supercut(composition, padding)
# add padding when necessary
for (clip, nextclip) in zip(composition, composition[1:]):
if ((nextclip['file'] == clip['file']) and (nextclip['start'] < clip['end'])):
nextclip['start'] += padding
# put all clips together:
all_filenames = set([c['file'] for c in composition])
videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
print("[+] Concatenating clips.")
final_clip = concatenate(cut_clips)
print("[+] Writing ouput file.")
final_clip.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def create_supercut_in_batches(composition, outputfile, padding):
"""Create & concatenate video clips in groups of size BATCH_SIZE and output
finished video file to output directory.
"""
total_clips = len(composition)
start_index = 0
end_index = BATCH_SIZE
batch_comp = []
while start_index < total_clips:
filename = outputfile + '.tmp' + str(start_index) + '.mp4'
try:
create_supercut(composition[start_index:end_index], filename, padding)
batch_comp.append(filename)
gc.collect()
start_index += BATCH_SIZE
end_index += BATCH_SIZE
except:
start_index += BATCH_SIZE
end_index += BATCH_SIZE
next
clips = [VideoFileClip(filename) for filename in batch_comp]
video = concatenate(clips)
video.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
# remove partial video files
for filename in batch_comp:
os.remove(filename)
cleanup_log_files(outputfile)
def split_clips(composition, outputfile):
all_filenames = set([c['file'] for c in composition])
videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
basename, ext = os.path.splitext(outputfile)
print("[+] Writing ouput files.")
for i, clip in enumerate(cut_clips):
clipfilename = basename + '_' + str(i).zfill(5) + ext
clip.to_videofile(clipfilename, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def search_line(line, search, searchtype):
"""Return True if search term is found in given line, False otherwise."""
if searchtype == 're' or searchtype == 'word':
return re.search(search, line) #, re.IGNORECASE)
elif searchtype == 'pos':
return searcher.search_out(line, search)
elif searchtype == 'hyper':
return searcher.hypernym_search(line, search)
def get_subtitle_files(inputfile):
"""Return a list of subtitle files."""
srts = []
for f in inputfile:
filename = f.split('.')
filename[-1] = 'srt'
srt = '.'.join(filename)
if os.path.isfile(srt):
srts.append(srt)
if len(srts) == 0:
print("[!] No subtitle files were found.")
return False
return srts
def get_vtt_files(inputfile):
"""Return a list of vtt files."""
vtts = []
for f in inputfile:
filename = f.split('.')
filename = '.'.join(filename[0:-1])
vtt = glob(filename + '*.vtt')
if len(vtt) > 0:
vtts.append({'vtt': vtt[0], 'video': f})
if len(vtts) == 0:
print("[!] No vtt files were found.")
return False
return vtts
def compose_from_srts(srts, search, searchtype):
"""Takes a list of subtitle (srt) filenames, search term and search type
and, returns a list of timestamps for composing a supercut.
"""
composition = []
foundSearchTerm = False
# Iterate over each subtitles file.
for srt in srts:
print(srt)
lines = clean_srt(srt)
videofile = ""
foundVideoFile = False
print("[+] Searching for video file corresponding to '" + srt + "'.")
for ext in usable_extensions:
tempVideoFile = srt.replace('.srt', '.' + ext)
if os.path.isfile(tempVideoFile):
videofile = tempVideoFile
foundVideoFile = True
print("[+] Found '" + tempVideoFile + "'.")
# If a correspndong video file was found for this subtitles file...
if foundVideoFile:
# Check that the subtitles file contains subtitles.
if lines:
# Iterate over each line in the current subtitles file.
for timespan in lines.keys():
line = lines[timespan].strip()
# If this line contains the search term
if search_line(line, search, searchtype):
foundSearchTerm = True
# Extract the timespan for this subtitle.
start, end = convert_timespan(timespan)
# Record this occurance of the search term.
composition.append({'file': videofile, 'time': timespan, 'start': start, 'end': end, 'line': line})
# If the search was unsuccessful.
if foundSearchTerm is False:
print("[!] Search term '" + search + "'" + " was not found is subtitle file '" + srt + "'.")
# If no subtitles were found in the current file.
else:
print("[!] Subtitle file '" + srt + "' is empty.")
# If no video file was found...
else:
print("[!] No video file was found which corresponds to subtitle file '" + srt + "'.")
print("[!] The following video formats are currently supported:")
extList = ""
for ext in usable_extensions:
extList += ext + ", "
print(extList)
return composition
def compose_from_transcript(files, search, searchtype):
"""Takes transcripts created by audiogrep/pocketsphinx, a search and search type
and returns a list of timestamps for creating a supercut"""
final_segments = []
if searchtype in ['re', 'word', 'franken', 'fragment']:
if searchtype == 're':
searchtype = 'sentence'
segments = audiogrep.search(search, files, mode=searchtype, regex=True)
for seg in segments:
seg['file'] = seg['file'].replace('.transcription.txt', '')
seg['line'] = seg['words']
final_segments.append(seg)
elif searchtype in ['hyper', 'pos']:
for s in audiogrep.convert_timestamps(files):
for w in s['words']:
if search_line(w[0], search, searchtype):
seg = {
'file': s['file'].replace('.transcription.txt',''),
'line': w[0],
'start': float(w[1]),
'end': float(w[2])
}
final_segments.append(seg)
return final_segments
def compose_from_vtt(files, search, searchtype):
final_segments = []
for f in files:
video = f['video']
with open(f['vtt'], 'r') as infile:
sentences = parse_auto_sub(infile.read())
for sentence in sentences:
if searchtype in ['word', 'hyper', 'pos']:
for word in sentence['words']:
if search_line(word['word'], search, searchtype):
seg = {
'file': video,
'line': word['word'],
'start': word['start'],
'end': word['end']
}
final_segments.append(seg)
else:
if search_line(sentence['text'], search, searchtype):
seg = {
'file': video,
'line': sentence['text'],
'start': sentence['start'],
'end': sentence['end']
}
final_segments.append(seg)
return final_segments
def videogrep(inputfile, outputfile, search, searchtype, maxclips=0, padding=0, test=False, randomize=False, sync=0, use_transcript=False, use_vtt=False, export_clips=False):
"""Search through and find all instances of the search term in an srt or transcript,
create a supercut around that instance, and output a new video file
comprised of those supercuts.
"""
padding = padding / 1000.0
sync = sync / 1000.0
composition = []
foundSearchTerm = False
if use_transcript:
composition = compose_from_transcript(inputfile, search, searchtype)
elif use_vtt:
vtts = get_vtt_files(inputfile)
composition = compose_from_vtt(vtts, search, searchtype)
else:
srts = get_subtitle_files(inputfile)
composition = compose_from_srts(srts, search, searchtype)
# If the search term was not found in any subtitle file...
if len(composition) == 0:
print("[!] Search term '" + search + "'" + " was not found in any file.")
exit(1)
else:
print("[+] Search term '" + search + "'" + " was found in " + str(len(composition)) + " places.")
# apply padding and sync
for c in composition:
c['start'] = c['start'] + sync - padding
c['end'] = c['end'] + sync + padding
if maxclips > 0:
composition = composition[:maxclips]
if randomize is True:
random.shuffle(composition)
if test is True:
demo_supercut(composition, padding)
else:
if os.path.splitext(outputfile)[1].lower() == '.edl':
make_edl(composition, outputfile)
elif export_clips:
split_clips(composition, outputfile)
else:
if len(composition) > BATCH_SIZE:
print("[+} Starting batch job.")
create_supercut_in_batches(composition, outputfile, padding)
else:
create_supercut(composition, outputfile, padding)
def main():
import argparse
parser = argparse.ArgumentParser(description='Generate a "supercut" of one or more video files by searching through subtitle tracks.')
parser.add_argument('--input', '-i', dest='inputfile', nargs='*', required=True, help='video or subtitle file, or folder')
parser.add_argument('--search', '-s', dest='search', help='search term')
parser.add_argument('--search-type', '-st', dest='searchtype', default='re', choices=['re', 'pos', 'hyper', 'fragment', 'franken', 'word'], help='type of search')
parser.add_argument('--use-transcript', '-t', action='store_true', dest='use_transcript', help='Use a transcript generated by pocketsphinx instead of srt files')
parser.add_argument('--use-vtt', '-vtt', action='store_true', dest='use_vtt', help='Use a vtt file instead of srt')
parser.add_argument('--max-clips', '-m', dest='maxclips', type=int, default=0, help='maximum number of clips to use for the supercut')
parser.add_argument('--output', '-o', dest='outputfile', default='supercut.mp4', help='name of output file')
parser.add_argument('--export-clips', '-ec', dest='export_clips', action='store_true', help='Export individual clips')
parser.add_argument('--demo', '-d', action='store_true', help='show results without making the supercut')
parser.add_argument('--randomize', '-r', action='store_true', help='randomize the clips')
parser.add_argument('--youtube', '-yt', help='grab clips from youtube based on your search')
parser.add_argument('--padding', '-p', dest='padding', default=0, type=int, help='padding in milliseconds to add to the start and end of each clip')
parser.add_argument('--resyncsubs', '-rs', dest='sync', default=0, type=int, help='Subtitle re-synch delay +/- in milliseconds')
parser.add_argument('--transcribe', '-tr', dest='transcribe', action='store_true', help='Transcribe the video using audiogrep. Requires pocketsphinx')
parser.add_argument('--ngrams', '-n', dest='ngrams', type=int, default=0, help='Return ngrams for videos')
args = parser.parse_args()
if not args.transcribe and args.ngrams == 0:
if args.search is None:
parser.error('argument --search/-s is required')
if args.transcribe:
create_timestamps(args.inputfile)
elif args.ngrams > 0:
from collections import Counter
grams = get_ngrams(args.inputfile, args.ngrams, args.use_transcript, args.use_vtt)
most_common = Counter(grams).most_common(100)
for ngram, count in most_common:
print(' '.join(ngram), count)
else:
videogrep(args.inputfile, args.outputfile, args.search, args.searchtype, args.maxclips, args.padding, args.demo, args.randomize, args.sync, args.use_transcript, args.use_vtt, args.export_clips)
if __name__ == '__main__':
main()
|
antiboredom/videogrep | videogrep/videogrep.py | cleanup_log_files | python | def cleanup_log_files(outputfile):
d = os.path.dirname(os.path.abspath(outputfile))
logfiles = [f for f in os.listdir(d) if f.endswith('ogg.log')]
for f in logfiles:
os.remove(f) | Search for and remove temp log files found in the output directory. | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/videogrep.py#L174-L179 | null | from __future__ import print_function
import os
import re
import random
import gc
import subprocess
from glob import glob
from collections import OrderedDict
from moviepy.editor import VideoFileClip, concatenate
import audiogrep
from .vtt import parse_auto_sub
from .timecode import Timecode
from . import searcher
usable_extensions = ['mp4', 'avi', 'mov', 'mkv', 'm4v']
BATCH_SIZE = 20
def get_fps(filename):
process = subprocess.Popen(['ffmpeg', '-i', filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
returncode = process.wait()
output = process.stdout.read()
fps = re.findall(r'\d+ fps', output, flags=re.MULTILINE)
try:
return int(fps[0].split(' ')[0])
except:
return 25
def get_ngrams(inputfile, n=1, use_transcript=False, use_vtt=False):
'''
Get ngrams from a text
Sourced from:
https://gist.github.com/dannguyen/93c2c43f4e65328b85af
'''
words = []
if use_transcript:
for s in audiogrep.convert_timestamps(inputfile):
for w in s['words']:
words.append(w[0])
elif use_vtt:
vtts = get_vtt_files(inputfile)
for vtt in vtts:
with open(vtt['vtt'], 'r') as infile:
sentences = parse_auto_sub(infile.read())
for s in sentences:
for w in s['words']:
words.append(w['word'])
else:
text = ''
srts = get_subtitle_files(inputfile)
for srt in srts:
lines = clean_srt(srt)
if lines:
for timespan in lines.keys():
line = lines[timespan].strip()
text += line + ' '
words = re.split(r'[.?!,:\"]+\s*|\s+', text)
ngrams = zip(*[words[i:] for i in range(n)])
return ngrams
def make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=25):
reel = full_name
if len(full_name) > 7:
reel = full_name[0:7]
template = '{} {} AA/V C {} {} {} {}\n* FROM CLIP NAME: {}\n* COMMENT: \n FINAL CUT PRO REEL: {} REPLACED BY: {}\n\n'
out = template.format(
n,
full_name,
Timecode(fps, start_seconds=time_in),
Timecode(fps, start_seconds=time_out),
Timecode(fps, start_seconds=rec_in),
Timecode(fps, start_seconds=rec_out),
filename,
full_name,
reel
)
return out
def make_edl(timestamps, name):
'''Converts an array of ordered timestamps into an EDL string'''
fpses = {}
out = "TITLE: {}\nFCM: NON-DROP FRAME\n\n".format(name)
rec_in = 0
for index, timestamp in enumerate(timestamps):
if timestamp['file'] not in fpses:
fpses[timestamp['file']] = get_fps(timestamp['file'])
fps = fpses[timestamp['file']]
n = str(index + 1).zfill(4)
time_in = timestamp['start']
time_out = timestamp['end']
duration = time_out - time_in
rec_out = rec_in + duration
full_name = 'reel_{}'.format(n)
filename = timestamp['file']
out += make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=fps)
rec_in = rec_out
with open(name, 'w') as outfile:
outfile.write(out)
def create_timestamps(inputfiles):
files = audiogrep.convert_to_wav(inputfiles)
audiogrep.transcribe(files)
def convert_timespan(timespan):
"""Convert an srt timespan into a start and end timestamp."""
start, end = timespan.split('-->')
start = convert_timestamp(start)
end = convert_timestamp(end)
return start, end
def convert_timestamp(timestamp):
"""Convert an srt timestamp into seconds."""
timestamp = timestamp.strip()
chunk, millis = timestamp.split(',')
hours, minutes, seconds = chunk.split(':')
hours = int(hours)
minutes = int(minutes)
seconds = int(seconds)
seconds = seconds + hours * 60 * 60 + minutes * 60 + float(millis) / 1000
return seconds
def clean_srt(srt):
"""Remove damaging line breaks and numbers from srt files and return a
dictionary.
"""
with open(srt, 'r') as f:
text = f.read()
text = re.sub(r'^\d+[\n\r]', '', text, flags=re.MULTILINE)
lines = text.splitlines()
output = OrderedDict()
key = ''
for line in lines:
line = line.strip()
if line.find('-->') > -1:
key = line
output[key] = ''
else:
if key != '':
output[key] += line + ' '
return output
def demo_supercut(composition, padding):
"""Print out timespans to be cut followed by the line number in the srt."""
for i, c in enumerate(composition):
line = c['line']
start = c['start']
end = c['end']
if i > 0 and composition[i - 1]['file'] == c['file'] and start < composition[i - 1]['end']:
start = start + padding
print("{1} to {2}:\t{0}".format(line, start, end))
def create_supercut(composition, outputfile, padding):
"""Concatenate video clips together and output finished video file to the
output directory.
"""
print("[+] Creating clips.")
demo_supercut(composition, padding)
# add padding when necessary
for (clip, nextclip) in zip(composition, composition[1:]):
if ((nextclip['file'] == clip['file']) and (nextclip['start'] < clip['end'])):
nextclip['start'] += padding
# put all clips together:
all_filenames = set([c['file'] for c in composition])
videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
print("[+] Concatenating clips.")
final_clip = concatenate(cut_clips)
print("[+] Writing ouput file.")
final_clip.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def create_supercut_in_batches(composition, outputfile, padding):
"""Create & concatenate video clips in groups of size BATCH_SIZE and output
finished video file to output directory.
"""
total_clips = len(composition)
start_index = 0
end_index = BATCH_SIZE
batch_comp = []
while start_index < total_clips:
filename = outputfile + '.tmp' + str(start_index) + '.mp4'
try:
create_supercut(composition[start_index:end_index], filename, padding)
batch_comp.append(filename)
gc.collect()
start_index += BATCH_SIZE
end_index += BATCH_SIZE
except:
start_index += BATCH_SIZE
end_index += BATCH_SIZE
next
clips = [VideoFileClip(filename) for filename in batch_comp]
video = concatenate(clips)
video.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
# remove partial video files
for filename in batch_comp:
os.remove(filename)
cleanup_log_files(outputfile)
def split_clips(composition, outputfile):
    """Write every clip in the composition to its own numbered video file
    ('<basename>_00000<ext>', '<basename>_00001<ext>', ...)."""
    sources = {c['file'] for c in composition}
    loaded = {f: VideoFileClip(f) for f in sources}
    cut_clips = [loaded[c['file']].subclip(c['start'], c['end']) for c in composition]

    basename, ext = os.path.splitext(outputfile)
    print("[+] Writing ouput files.")
    for i, clip in enumerate(cut_clips):
        clipfilename = '{}_{}{}'.format(basename, str(i).zfill(5), ext)
        clip.to_videofile(clipfilename, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def search_line(line, search, searchtype):
    """Return a truthy match when the search term is found in the line.

    're' and 'word' run a plain regex search; 'pos' and 'hyper' delegate to
    the part-of-speech / hypernym searcher module. Any other search type
    falls through and yields None.
    """
    if searchtype in ('re', 'word'):
        return re.search(search, line)  # , re.IGNORECASE)
    if searchtype == 'pos':
        return searcher.search_out(line, search)
    if searchtype == 'hyper':
        return searcher.hypernym_search(line, search)
def get_subtitle_files(inputfile):
    """Return a list of .srt paths that exist alongside the given videos.

    Returns an empty list (rather than False, as before) when nothing is
    found, so callers such as compose_from_srts can iterate the result
    without a TypeError.
    """
    srts = []
    for f in inputfile:
        # swap the extension for .srt; os.path.splitext also behaves
        # correctly for filenames with no extension at all
        srt = os.path.splitext(f)[0] + '.srt'
        if os.path.isfile(srt):
            srts.append(srt)
    if len(srts) == 0:
        print("[!] No subtitle files were found.")
    return srts
def get_vtt_files(inputfile):
    """Return a list of {'vtt': subtitle_path, 'video': video_path} dicts for
    each input video that has a matching .vtt file next to it.

    Returns an empty list (rather than False, as before) when nothing is
    found, so callers such as compose_from_vtt can iterate the result
    without a TypeError.
    """
    vtts = []
    for f in inputfile:
        # strip the extension and glob for any vtt that shares the basename
        # (auto-generated subs often carry a language tag, e.g. '.en.vtt');
        # os.path.splitext keeps extension-less names intact instead of
        # collapsing them to '' and matching every vtt in the directory
        base = os.path.splitext(f)[0]
        matches = glob(base + '*.vtt')
        if matches:
            vtts.append({'vtt': matches[0], 'video': f})
    if len(vtts) == 0:
        print("[!] No vtt files were found.")
    return vtts
def compose_from_srts(srts, search, searchtype):
    """Takes a list of subtitle (srt) filenames, search term and search type
    and, returns a list of timestamps for composing a supercut.

    Each hit is a dict with keys 'file' (matching video path), 'time' (raw
    srt timespan string), 'start'/'end' (seconds) and 'line' (subtitle text).
    """
    composition = []
    # NOTE(review): this flag is never reset between files, so the
    # "was not found" warning below is suppressed for every file after the
    # first one that produced a match — confirm whether that is intended.
    foundSearchTerm = False

    # Iterate over each subtitles file.
    for srt in srts:
        print(srt)
        # Parse the srt into an OrderedDict of {timespan line: subtitle text}.
        lines = clean_srt(srt)
        videofile = ""
        foundVideoFile = False

        print("[+] Searching for video file corresponding to '" + srt + "'.")
        # Probe for a sibling video with the same basename and a supported
        # extension; if several exist, the last extension tried wins.
        for ext in usable_extensions:
            tempVideoFile = srt.replace('.srt', '.' + ext)
            if os.path.isfile(tempVideoFile):
                videofile = tempVideoFile
                foundVideoFile = True
                print("[+] Found '" + tempVideoFile + "'.")

        # If a correspndong video file was found for this subtitles file...
        if foundVideoFile:
            # Check that the subtitles file contains subtitles.
            if lines:
                # Iterate over each line in the current subtitles file.
                for timespan in lines.keys():
                    line = lines[timespan].strip()

                    # If this line contains the search term
                    if search_line(line, search, searchtype):
                        foundSearchTerm = True

                        # Extract the timespan for this subtitle.
                        start, end = convert_timespan(timespan)

                        # Record this occurance of the search term.
                        composition.append({'file': videofile, 'time': timespan, 'start': start, 'end': end, 'line': line})

                # If the search was unsuccessful.
                if foundSearchTerm is False:
                    print("[!] Search term '" + search + "'" + " was not found is subtitle file '" + srt + "'.")

            # If no subtitles were found in the current file.
            else:
                print("[!] Subtitle file '" + srt + "' is empty.")

        # If no video file was found...
        else:
            print("[!] No video file was found which corresponds to subtitle file '" + srt + "'.")
            print("[!] The following video formats are currently supported:")
            extList = ""
            for ext in usable_extensions:
                extList += ext + ", "
            print(extList)

    return composition
def compose_from_transcript(files, search, searchtype):
    """Takes transcripts created by audiogrep/pocketsphinx, a search and search type
    and returns a list of timestamps for creating a supercut"""
    final_segments = []

    # Sentence/word/fragment style searches are delegated wholesale to
    # audiogrep, which understands its own transcript format.
    if searchtype in ['re', 'word', 'franken', 'fragment']:
        if searchtype == 're':
            # audiogrep calls regex-over-sentences mode 'sentence'
            searchtype = 'sentence'
        segments = audiogrep.search(search, files, mode=searchtype, regex=True)
        for seg in segments:
            # map audiogrep's fields onto the composition schema used by
            # create_supercut: 'file' must point at the source video and
            # 'line' carries the matched text
            seg['file'] = seg['file'].replace('.transcription.txt', '')
            seg['line'] = seg['words']
            final_segments.append(seg)

    # POS/hypernym searches are applied here, one transcript word at a time.
    elif searchtype in ['hyper', 'pos']:
        for s in audiogrep.convert_timestamps(files):
            for w in s['words']:
                # w[0] is the word text; w[1]/w[2] parse as start/end seconds
                if search_line(w[0], search, searchtype):
                    seg = {
                        'file': s['file'].replace('.transcription.txt',''),
                        'line': w[0],
                        'start': float(w[1]),
                        'end': float(w[2])
                    }
                    final_segments.append(seg)

    return final_segments
def compose_from_vtt(files, search, searchtype):
    """Build a supercut composition from auto-generated .vtt subtitles.

    Word-level search types ('word', 'hyper', 'pos') match individual words;
    every other type matches whole sentences. Returns a list of segment
    dicts with 'file', 'line', 'start' and 'end' keys.
    """
    word_level = searchtype in ['word', 'hyper', 'pos']
    segments = []
    for entry in files:
        video = entry['video']
        with open(entry['vtt'], 'r') as infile:
            sentences = parse_auto_sub(infile.read())
        for sentence in sentences:
            if word_level:
                for word in sentence['words']:
                    if search_line(word['word'], search, searchtype):
                        segments.append({
                            'file': video,
                            'line': word['word'],
                            'start': word['start'],
                            'end': word['end'],
                        })
            elif search_line(sentence['text'], search, searchtype):
                segments.append({
                    'file': video,
                    'line': sentence['text'],
                    'start': sentence['start'],
                    'end': sentence['end'],
                })
    return segments
def videogrep(inputfile, outputfile, search, searchtype, maxclips=0, padding=0, test=False, randomize=False, sync=0, use_transcript=False, use_vtt=False, export_clips=False):
    """Search through and find all instances of the search term in an srt or transcript,
    create a supercut around that instance, and output a new video file
    comprised of those supercuts.

    ``padding`` and ``sync`` are given in milliseconds; ``maxclips`` of 0
    means no limit. When ``test`` is True the planned cuts are only printed.
    An '.edl' output extension writes an edit decision list instead of
    rendering video; ``export_clips`` writes one file per clip.
    """
    # convert the millisecond CLI arguments to seconds for the clip math below
    padding = padding / 1000.0
    sync = sync / 1000.0
    composition = []
    foundSearchTerm = False  # NOTE(review): never read — candidate for removal

    # Build the composition from whichever subtitle/transcript source applies.
    if use_transcript:
        composition = compose_from_transcript(inputfile, search, searchtype)
    elif use_vtt:
        vtts = get_vtt_files(inputfile)
        composition = compose_from_vtt(vtts, search, searchtype)
    else:
        srts = get_subtitle_files(inputfile)
        composition = compose_from_srts(srts, search, searchtype)

    # If the search term was not found in any subtitle file...
    if len(composition) == 0:
        print("[!] Search term '" + search + "'" + " was not found in any file.")
        # NOTE(review): uses the site builtin exit(); sys.exit(1) is the
        # conventional form for library code.
        exit(1)

    else:
        print("[+] Search term '" + search + "'" + " was found in " + str(len(composition)) + " places.")

        # apply padding and sync
        for c in composition:
            c['start'] = c['start'] + sync - padding
            c['end'] = c['end'] + sync + padding

        if maxclips > 0:
            composition = composition[:maxclips]

        if randomize is True:
            random.shuffle(composition)

        if test is True:
            demo_supercut(composition, padding)
        else:
            if os.path.splitext(outputfile)[1].lower() == '.edl':
                make_edl(composition, outputfile)
            elif export_clips:
                split_clips(composition, outputfile)
            else:
                # large jobs are rendered in batches — presumably to bound
                # memory use; TODO confirm
                if len(composition) > BATCH_SIZE:
                    # NOTE(review): '[+}' looks like a typo for '[+]'
                    print("[+} Starting batch job.")
                    create_supercut_in_batches(composition, outputfile, padding)
                else:
                    create_supercut(composition, outputfile, padding)
def main():
    """Command-line entry point: parse arguments and dispatch to
    transcription, ngram listing, or supercut generation."""
    import argparse

    parser = argparse.ArgumentParser(description='Generate a "supercut" of one or more video files by searching through subtitle tracks.')
    parser.add_argument('--input', '-i', dest='inputfile', nargs='*', required=True, help='video or subtitle file, or folder')
    parser.add_argument('--search', '-s', dest='search', help='search term')
    parser.add_argument('--search-type', '-st', dest='searchtype', default='re', choices=['re', 'pos', 'hyper', 'fragment', 'franken', 'word'], help='type of search')
    parser.add_argument('--use-transcript', '-t', action='store_true', dest='use_transcript', help='Use a transcript generated by pocketsphinx instead of srt files')
    parser.add_argument('--use-vtt', '-vtt', action='store_true', dest='use_vtt', help='Use a vtt file instead of srt')
    parser.add_argument('--max-clips', '-m', dest='maxclips', type=int, default=0, help='maximum number of clips to use for the supercut')
    parser.add_argument('--output', '-o', dest='outputfile', default='supercut.mp4', help='name of output file')
    parser.add_argument('--export-clips', '-ec', dest='export_clips', action='store_true', help='Export individual clips')
    parser.add_argument('--demo', '-d', action='store_true', help='show results without making the supercut')
    parser.add_argument('--randomize', '-r', action='store_true', help='randomize the clips')
    # NOTE(review): --youtube is parsed but never read below — confirm whether
    # it is dead or handled elsewhere.
    parser.add_argument('--youtube', '-yt', help='grab clips from youtube based on your search')
    parser.add_argument('--padding', '-p', dest='padding', default=0, type=int, help='padding in milliseconds to add to the start and end of each clip')
    parser.add_argument('--resyncsubs', '-rs', dest='sync', default=0, type=int, help='Subtitle re-synch delay +/- in milliseconds')
    parser.add_argument('--transcribe', '-tr', dest='transcribe', action='store_true', help='Transcribe the video using audiogrep. Requires pocketsphinx')
    parser.add_argument('--ngrams', '-n', dest='ngrams', type=int, default=0, help='Return ngrams for videos')

    args = parser.parse_args()

    # --search is only optional in the transcribe and ngram modes
    if not args.transcribe and args.ngrams == 0:
        if args.search is None:
            parser.error('argument --search/-s is required')

    if args.transcribe:
        create_timestamps(args.inputfile)
    elif args.ngrams > 0:
        from collections import Counter
        grams = get_ngrams(args.inputfile, args.ngrams, args.use_transcript, args.use_vtt)
        most_common = Counter(grams).most_common(100)
        for ngram, count in most_common:
            print(' '.join(ngram), count)
    else:
        videogrep(args.inputfile, args.outputfile, args.search, args.searchtype, args.maxclips, args.padding, args.demo, args.randomize, args.sync, args.use_transcript, args.use_vtt, args.export_clips)


if __name__ == '__main__':
    main()
|
antiboredom/videogrep | videogrep/videogrep.py | demo_supercut | python | def demo_supercut(composition, padding):
for i, c in enumerate(composition):
line = c['line']
start = c['start']
end = c['end']
if i > 0 and composition[i - 1]['file'] == c['file'] and start < composition[i - 1]['end']:
start = start + padding
print("{1} to {2}:\t{0}".format(line, start, end)) | Print out timespans to be cut followed by the line number in the srt. | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/videogrep.py#L182-L190 | null | from __future__ import print_function
import os
import re
import random
import gc
import subprocess
from glob import glob
from collections import OrderedDict
from moviepy.editor import VideoFileClip, concatenate
import audiogrep
from .vtt import parse_auto_sub
from .timecode import Timecode
from . import searcher
usable_extensions = ['mp4', 'avi', 'mov', 'mkv', 'm4v']
BATCH_SIZE = 20
def get_fps(filename):
    """Return the frame rate of a video by scraping ffmpeg's probe output.

    Falls back to 25 when ffmpeg is unavailable, the file cannot be probed,
    or no 'N fps' marker appears in the output.
    """
    try:
        process = subprocess.Popen(
            ['ffmpeg', '-i', filename],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        # communicate() drains the pipe while waiting, avoiding the
        # wait()-then-read() deadlock risk of the original
        output = process.communicate()[0]
    except OSError:
        # ffmpeg is not installed / not on PATH
        return 25
    # Popen pipes yield bytes on Python 3; the original passed bytes straight
    # to re.findall with a str pattern, which raises TypeError outside the
    # try block and crashed the function. Decode first.
    if isinstance(output, bytes):
        output = output.decode('utf-8', 'ignore')
    fps = re.findall(r'\d+ fps', output, flags=re.MULTILINE)
    try:
        return int(fps[0].split(' ')[0])
    except (IndexError, ValueError):
        # narrowed from a bare except: only "no match / unparsable" fall back
        return 25
def get_ngrams(inputfile, n=1, use_transcript=False, use_vtt=False):
    '''
    Get ngrams from a text
    Sourced from:
    https://gist.github.com/dannguyen/93c2c43f4e65328b85af

    Harvests words from the pocketsphinx transcript, the .vtt files, or the
    .srt files accompanying the input videos, and returns an iterator of
    n-length word tuples.
    '''
    words = []
    if use_transcript:
        # transcript words arrive as tuples whose first element is the text
        for s in audiogrep.convert_timestamps(inputfile):
            for w in s['words']:
                words.append(w[0])
    elif use_vtt:
        vtts = get_vtt_files(inputfile)
        for vtt in vtts:
            with open(vtt['vtt'], 'r') as infile:
                sentences = parse_auto_sub(infile.read())
            for s in sentences:
                for w in s['words']:
                    words.append(w['word'])
    else:
        # srt path: concatenate all subtitle text, then split on whitespace
        # and sentence punctuation
        text = ''
        srts = get_subtitle_files(inputfile)
        for srt in srts:
            lines = clean_srt(srt)
            if lines:
                for timespan in lines.keys():
                    line = lines[timespan].strip()
                    text += line + ' '
        words = re.split(r'[.?!,:\"]+\s*|\s+', text)

    # slide an n-wide window over the word list to form the ngrams
    ngrams = zip(*[words[i:] for i in range(n)])
    return ngrams
def make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=25):
    """Render one EDL event block: source/record timecodes plus the clip
    name and reel comments."""
    # EDL reel names are limited to 7 characters
    reel = full_name[0:7] if len(full_name) > 7 else full_name

    template = '{} {} AA/V C {} {} {} {}\n* FROM CLIP NAME: {}\n* COMMENT: \n FINAL CUT PRO REEL: {} REPLACED BY: {}\n\n'
    timecodes = [
        Timecode(fps, start_seconds=seconds)
        for seconds in (time_in, time_out, rec_in, rec_out)
    ]
    return template.format(
        n,
        full_name,
        timecodes[0],
        timecodes[1],
        timecodes[2],
        timecodes[3],
        filename,
        full_name,
        reel,
    )
def make_edl(timestamps, name):
    '''Converts an array of ordered timestamps into an EDL string'''
    fpses = {}  # cache: video filename -> frames per second
    out = "TITLE: {}\nFCM: NON-DROP FRAME\n\n".format(name)
    rec_in = 0  # running position on the record (output) timeline, in seconds
    for index, timestamp in enumerate(timestamps):
        # probe each source file's fps only once
        if timestamp['file'] not in fpses:
            fpses[timestamp['file']] = get_fps(timestamp['file'])
        fps = fpses[timestamp['file']]
        n = str(index + 1).zfill(4)  # EDL event number, zero-padded to 4 digits
        time_in = timestamp['start']
        time_out = timestamp['end']
        duration = time_out - time_in
        rec_out = rec_in + duration
        full_name = 'reel_{}'.format(n)
        filename = timestamp['file']
        out += make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=fps)
        # next clip starts on the record timeline where this one ended
        rec_in = rec_out
    # despite the docstring, the EDL is written to disk, not returned
    with open(name, 'w') as outfile:
        outfile.write(out)
def create_timestamps(inputfiles):
    """Prepare input videos for transcript-based searching.

    Extracts audio to wav via audiogrep, then runs its pocketsphinx-based
    transcription over the extracted files.
    """
    files = audiogrep.convert_to_wav(inputfiles)
    audiogrep.transcribe(files)
def convert_timespan(timespan):
    """Split an srt timespan ('HH:MM:SS,mmm --> HH:MM:SS,mmm') into a
    (start, end) pair of seconds."""
    raw_start, raw_end = timespan.split('-->')
    return convert_timestamp(raw_start), convert_timestamp(raw_end)
def convert_timestamp(timestamp):
    """Convert an srt timestamp ('HH:MM:SS,mmm') into seconds as a float.

    Surrounding whitespace is tolerated.
    """
    clock, millis = timestamp.strip().split(',')
    hh, mm, ss = (int(part) for part in clock.split(':'))
    return hh * 3600 + mm * 60 + ss + float(millis) / 1000
def clean_srt(srt):
    """Parse an srt file into an OrderedDict mapping each timespan line
    ('HH:MM:SS,mmm --> HH:MM:SS,mmm') to its concatenated subtitle text.

    The numeric sequence-number lines are stripped out before parsing; any
    line that is not a timespan is appended (plus a trailing space) to the
    most recent timespan's text.
    """
    with open(srt, 'r') as f:
        raw = f.read()
    # drop the numeric index line that precedes every subtitle block
    raw = re.sub(r'^\d+[\n\r]', '', raw, flags=re.MULTILINE)

    subtitles = OrderedDict()
    current = ''
    for raw_line in raw.splitlines():
        raw_line = raw_line.strip()
        if '-->' in raw_line:
            current = raw_line
            subtitles[current] = ''
        elif current != '':
            subtitles[current] += raw_line + ' '
    return subtitles
def cleanup_log_files(outputfile):
    """Search for and remove temp '*.ogg.log' files found in the output
    file's directory."""
    d = os.path.dirname(os.path.abspath(outputfile))
    for name in os.listdir(d):
        if name.endswith('ogg.log'):
            # join with the directory: os.listdir returns bare names, and the
            # output directory is not necessarily the current working
            # directory (the original os.remove(f) failed in that case)
            os.remove(os.path.join(d, name))
def create_supercut(composition, outputfile, padding):
"""Concatenate video clips together and output finished video file to the
output directory.
"""
print("[+] Creating clips.")
demo_supercut(composition, padding)
# add padding when necessary
for (clip, nextclip) in zip(composition, composition[1:]):
if ((nextclip['file'] == clip['file']) and (nextclip['start'] < clip['end'])):
nextclip['start'] += padding
# put all clips together:
all_filenames = set([c['file'] for c in composition])
videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
print("[+] Concatenating clips.")
final_clip = concatenate(cut_clips)
print("[+] Writing ouput file.")
final_clip.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def create_supercut_in_batches(composition, outputfile, padding):
"""Create & concatenate video clips in groups of size BATCH_SIZE and output
finished video file to output directory.
"""
total_clips = len(composition)
start_index = 0
end_index = BATCH_SIZE
batch_comp = []
while start_index < total_clips:
filename = outputfile + '.tmp' + str(start_index) + '.mp4'
try:
create_supercut(composition[start_index:end_index], filename, padding)
batch_comp.append(filename)
gc.collect()
start_index += BATCH_SIZE
end_index += BATCH_SIZE
except:
start_index += BATCH_SIZE
end_index += BATCH_SIZE
next
clips = [VideoFileClip(filename) for filename in batch_comp]
video = concatenate(clips)
video.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
# remove partial video files
for filename in batch_comp:
os.remove(filename)
cleanup_log_files(outputfile)
def split_clips(composition, outputfile):
all_filenames = set([c['file'] for c in composition])
videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
basename, ext = os.path.splitext(outputfile)
print("[+] Writing ouput files.")
for i, clip in enumerate(cut_clips):
clipfilename = basename + '_' + str(i).zfill(5) + ext
clip.to_videofile(clipfilename, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def search_line(line, search, searchtype):
"""Return True if search term is found in given line, False otherwise."""
if searchtype == 're' or searchtype == 'word':
return re.search(search, line) #, re.IGNORECASE)
elif searchtype == 'pos':
return searcher.search_out(line, search)
elif searchtype == 'hyper':
return searcher.hypernym_search(line, search)
def get_subtitle_files(inputfile):
"""Return a list of subtitle files."""
srts = []
for f in inputfile:
filename = f.split('.')
filename[-1] = 'srt'
srt = '.'.join(filename)
if os.path.isfile(srt):
srts.append(srt)
if len(srts) == 0:
print("[!] No subtitle files were found.")
return False
return srts
def get_vtt_files(inputfile):
"""Return a list of vtt files."""
vtts = []
for f in inputfile:
filename = f.split('.')
filename = '.'.join(filename[0:-1])
vtt = glob(filename + '*.vtt')
if len(vtt) > 0:
vtts.append({'vtt': vtt[0], 'video': f})
if len(vtts) == 0:
print("[!] No vtt files were found.")
return False
return vtts
def compose_from_srts(srts, search, searchtype):
"""Takes a list of subtitle (srt) filenames, search term and search type
and, returns a list of timestamps for composing a supercut.
"""
composition = []
foundSearchTerm = False
# Iterate over each subtitles file.
for srt in srts:
print(srt)
lines = clean_srt(srt)
videofile = ""
foundVideoFile = False
print("[+] Searching for video file corresponding to '" + srt + "'.")
for ext in usable_extensions:
tempVideoFile = srt.replace('.srt', '.' + ext)
if os.path.isfile(tempVideoFile):
videofile = tempVideoFile
foundVideoFile = True
print("[+] Found '" + tempVideoFile + "'.")
# If a correspndong video file was found for this subtitles file...
if foundVideoFile:
# Check that the subtitles file contains subtitles.
if lines:
# Iterate over each line in the current subtitles file.
for timespan in lines.keys():
line = lines[timespan].strip()
# If this line contains the search term
if search_line(line, search, searchtype):
foundSearchTerm = True
# Extract the timespan for this subtitle.
start, end = convert_timespan(timespan)
# Record this occurance of the search term.
composition.append({'file': videofile, 'time': timespan, 'start': start, 'end': end, 'line': line})
# If the search was unsuccessful.
if foundSearchTerm is False:
print("[!] Search term '" + search + "'" + " was not found is subtitle file '" + srt + "'.")
# If no subtitles were found in the current file.
else:
print("[!] Subtitle file '" + srt + "' is empty.")
# If no video file was found...
else:
print("[!] No video file was found which corresponds to subtitle file '" + srt + "'.")
print("[!] The following video formats are currently supported:")
extList = ""
for ext in usable_extensions:
extList += ext + ", "
print(extList)
return composition
def compose_from_transcript(files, search, searchtype):
"""Takes transcripts created by audiogrep/pocketsphinx, a search and search type
and returns a list of timestamps for creating a supercut"""
final_segments = []
if searchtype in ['re', 'word', 'franken', 'fragment']:
if searchtype == 're':
searchtype = 'sentence'
segments = audiogrep.search(search, files, mode=searchtype, regex=True)
for seg in segments:
seg['file'] = seg['file'].replace('.transcription.txt', '')
seg['line'] = seg['words']
final_segments.append(seg)
elif searchtype in ['hyper', 'pos']:
for s in audiogrep.convert_timestamps(files):
for w in s['words']:
if search_line(w[0], search, searchtype):
seg = {
'file': s['file'].replace('.transcription.txt',''),
'line': w[0],
'start': float(w[1]),
'end': float(w[2])
}
final_segments.append(seg)
return final_segments
def compose_from_vtt(files, search, searchtype):
final_segments = []
for f in files:
video = f['video']
with open(f['vtt'], 'r') as infile:
sentences = parse_auto_sub(infile.read())
for sentence in sentences:
if searchtype in ['word', 'hyper', 'pos']:
for word in sentence['words']:
if search_line(word['word'], search, searchtype):
seg = {
'file': video,
'line': word['word'],
'start': word['start'],
'end': word['end']
}
final_segments.append(seg)
else:
if search_line(sentence['text'], search, searchtype):
seg = {
'file': video,
'line': sentence['text'],
'start': sentence['start'],
'end': sentence['end']
}
final_segments.append(seg)
return final_segments
def videogrep(inputfile, outputfile, search, searchtype, maxclips=0, padding=0, test=False, randomize=False, sync=0, use_transcript=False, use_vtt=False, export_clips=False):
"""Search through and find all instances of the search term in an srt or transcript,
create a supercut around that instance, and output a new video file
comprised of those supercuts.
"""
padding = padding / 1000.0
sync = sync / 1000.0
composition = []
foundSearchTerm = False
if use_transcript:
composition = compose_from_transcript(inputfile, search, searchtype)
elif use_vtt:
vtts = get_vtt_files(inputfile)
composition = compose_from_vtt(vtts, search, searchtype)
else:
srts = get_subtitle_files(inputfile)
composition = compose_from_srts(srts, search, searchtype)
# If the search term was not found in any subtitle file...
if len(composition) == 0:
print("[!] Search term '" + search + "'" + " was not found in any file.")
exit(1)
else:
print("[+] Search term '" + search + "'" + " was found in " + str(len(composition)) + " places.")
# apply padding and sync
for c in composition:
c['start'] = c['start'] + sync - padding
c['end'] = c['end'] + sync + padding
if maxclips > 0:
composition = composition[:maxclips]
if randomize is True:
random.shuffle(composition)
if test is True:
demo_supercut(composition, padding)
else:
if os.path.splitext(outputfile)[1].lower() == '.edl':
make_edl(composition, outputfile)
elif export_clips:
split_clips(composition, outputfile)
else:
if len(composition) > BATCH_SIZE:
print("[+} Starting batch job.")
create_supercut_in_batches(composition, outputfile, padding)
else:
create_supercut(composition, outputfile, padding)
def main():
import argparse
parser = argparse.ArgumentParser(description='Generate a "supercut" of one or more video files by searching through subtitle tracks.')
parser.add_argument('--input', '-i', dest='inputfile', nargs='*', required=True, help='video or subtitle file, or folder')
parser.add_argument('--search', '-s', dest='search', help='search term')
parser.add_argument('--search-type', '-st', dest='searchtype', default='re', choices=['re', 'pos', 'hyper', 'fragment', 'franken', 'word'], help='type of search')
parser.add_argument('--use-transcript', '-t', action='store_true', dest='use_transcript', help='Use a transcript generated by pocketsphinx instead of srt files')
parser.add_argument('--use-vtt', '-vtt', action='store_true', dest='use_vtt', help='Use a vtt file instead of srt')
parser.add_argument('--max-clips', '-m', dest='maxclips', type=int, default=0, help='maximum number of clips to use for the supercut')
parser.add_argument('--output', '-o', dest='outputfile', default='supercut.mp4', help='name of output file')
parser.add_argument('--export-clips', '-ec', dest='export_clips', action='store_true', help='Export individual clips')
parser.add_argument('--demo', '-d', action='store_true', help='show results without making the supercut')
parser.add_argument('--randomize', '-r', action='store_true', help='randomize the clips')
parser.add_argument('--youtube', '-yt', help='grab clips from youtube based on your search')
parser.add_argument('--padding', '-p', dest='padding', default=0, type=int, help='padding in milliseconds to add to the start and end of each clip')
parser.add_argument('--resyncsubs', '-rs', dest='sync', default=0, type=int, help='Subtitle re-synch delay +/- in milliseconds')
parser.add_argument('--transcribe', '-tr', dest='transcribe', action='store_true', help='Transcribe the video using audiogrep. Requires pocketsphinx')
parser.add_argument('--ngrams', '-n', dest='ngrams', type=int, default=0, help='Return ngrams for videos')
args = parser.parse_args()
if not args.transcribe and args.ngrams == 0:
if args.search is None:
parser.error('argument --search/-s is required')
if args.transcribe:
create_timestamps(args.inputfile)
elif args.ngrams > 0:
from collections import Counter
grams = get_ngrams(args.inputfile, args.ngrams, args.use_transcript, args.use_vtt)
most_common = Counter(grams).most_common(100)
for ngram, count in most_common:
print(' '.join(ngram), count)
else:
videogrep(args.inputfile, args.outputfile, args.search, args.searchtype, args.maxclips, args.padding, args.demo, args.randomize, args.sync, args.use_transcript, args.use_vtt, args.export_clips)
if __name__ == '__main__':
main()
|
antiboredom/videogrep | videogrep/videogrep.py | create_supercut | python | def create_supercut(composition, outputfile, padding):
print("[+] Creating clips.")
demo_supercut(composition, padding)
# add padding when necessary
for (clip, nextclip) in zip(composition, composition[1:]):
if ((nextclip['file'] == clip['file']) and (nextclip['start'] < clip['end'])):
nextclip['start'] += padding
# put all clips together:
all_filenames = set([c['file'] for c in composition])
videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
print("[+] Concatenating clips.")
final_clip = concatenate(cut_clips)
print("[+] Writing ouput file.")
final_clip.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac') | Concatenate video clips together and output finished video file to the
output directory. | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/videogrep.py#L193-L214 | [
"def demo_supercut(composition, padding):\n \"\"\"Print out timespans to be cut followed by the line number in the srt.\"\"\"\n for i, c in enumerate(composition):\n line = c['line']\n start = c['start']\n end = c['end']\n if i > 0 and composition[i - 1]['file'] == c['file'] and st... | from __future__ import print_function
import os
import re
import random
import gc
import subprocess
from glob import glob
from collections import OrderedDict
from moviepy.editor import VideoFileClip, concatenate
import audiogrep
from .vtt import parse_auto_sub
from .timecode import Timecode
from . import searcher
usable_extensions = ['mp4', 'avi', 'mov', 'mkv', 'm4v']
BATCH_SIZE = 20
def get_fps(filename):
process = subprocess.Popen(['ffmpeg', '-i', filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
returncode = process.wait()
output = process.stdout.read()
fps = re.findall(r'\d+ fps', output, flags=re.MULTILINE)
try:
return int(fps[0].split(' ')[0])
except:
return 25
def get_ngrams(inputfile, n=1, use_transcript=False, use_vtt=False):
'''
Get ngrams from a text
Sourced from:
https://gist.github.com/dannguyen/93c2c43f4e65328b85af
'''
words = []
if use_transcript:
for s in audiogrep.convert_timestamps(inputfile):
for w in s['words']:
words.append(w[0])
elif use_vtt:
vtts = get_vtt_files(inputfile)
for vtt in vtts:
with open(vtt['vtt'], 'r') as infile:
sentences = parse_auto_sub(infile.read())
for s in sentences:
for w in s['words']:
words.append(w['word'])
else:
text = ''
srts = get_subtitle_files(inputfile)
for srt in srts:
lines = clean_srt(srt)
if lines:
for timespan in lines.keys():
line = lines[timespan].strip()
text += line + ' '
words = re.split(r'[.?!,:\"]+\s*|\s+', text)
ngrams = zip(*[words[i:] for i in range(n)])
return ngrams
def make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=25):
reel = full_name
if len(full_name) > 7:
reel = full_name[0:7]
template = '{} {} AA/V C {} {} {} {}\n* FROM CLIP NAME: {}\n* COMMENT: \n FINAL CUT PRO REEL: {} REPLACED BY: {}\n\n'
out = template.format(
n,
full_name,
Timecode(fps, start_seconds=time_in),
Timecode(fps, start_seconds=time_out),
Timecode(fps, start_seconds=rec_in),
Timecode(fps, start_seconds=rec_out),
filename,
full_name,
reel
)
return out
def make_edl(timestamps, name):
'''Converts an array of ordered timestamps into an EDL string'''
fpses = {}
out = "TITLE: {}\nFCM: NON-DROP FRAME\n\n".format(name)
rec_in = 0
for index, timestamp in enumerate(timestamps):
if timestamp['file'] not in fpses:
fpses[timestamp['file']] = get_fps(timestamp['file'])
fps = fpses[timestamp['file']]
n = str(index + 1).zfill(4)
time_in = timestamp['start']
time_out = timestamp['end']
duration = time_out - time_in
rec_out = rec_in + duration
full_name = 'reel_{}'.format(n)
filename = timestamp['file']
out += make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=fps)
rec_in = rec_out
with open(name, 'w') as outfile:
outfile.write(out)
def create_timestamps(inputfiles):
files = audiogrep.convert_to_wav(inputfiles)
audiogrep.transcribe(files)
def convert_timespan(timespan):
"""Convert an srt timespan into a start and end timestamp."""
start, end = timespan.split('-->')
start = convert_timestamp(start)
end = convert_timestamp(end)
return start, end
def convert_timestamp(timestamp):
"""Convert an srt timestamp into seconds."""
timestamp = timestamp.strip()
chunk, millis = timestamp.split(',')
hours, minutes, seconds = chunk.split(':')
hours = int(hours)
minutes = int(minutes)
seconds = int(seconds)
seconds = seconds + hours * 60 * 60 + minutes * 60 + float(millis) / 1000
return seconds
def clean_srt(srt):
"""Remove damaging line breaks and numbers from srt files and return a
dictionary.
"""
with open(srt, 'r') as f:
text = f.read()
text = re.sub(r'^\d+[\n\r]', '', text, flags=re.MULTILINE)
lines = text.splitlines()
output = OrderedDict()
key = ''
for line in lines:
line = line.strip()
if line.find('-->') > -1:
key = line
output[key] = ''
else:
if key != '':
output[key] += line + ' '
return output
def cleanup_log_files(outputfile):
"""Search for and remove temp log files found in the output directory."""
d = os.path.dirname(os.path.abspath(outputfile))
logfiles = [f for f in os.listdir(d) if f.endswith('ogg.log')]
for f in logfiles:
os.remove(f)
def demo_supercut(composition, padding):
    """Print the timespan and subtitle text of every clip to be cut."""
    previous = None
    for c in composition:
        start, end = c['start'], c['end']
        # mirror create_supercut: pad the start when this clip overlaps the
        # previous clip from the same source file
        if previous is not None and previous['file'] == c['file'] and start < previous['end']:
            start += padding
        print("{1} to {2}:\t{0}".format(c['line'], start, end))
        previous = c
def create_supercut_in_batches(composition, outputfile, padding):
"""Create & concatenate video clips in groups of size BATCH_SIZE and output
finished video file to output directory.
"""
total_clips = len(composition)
start_index = 0
end_index = BATCH_SIZE
batch_comp = []
while start_index < total_clips:
filename = outputfile + '.tmp' + str(start_index) + '.mp4'
try:
create_supercut(composition[start_index:end_index], filename, padding)
batch_comp.append(filename)
gc.collect()
start_index += BATCH_SIZE
end_index += BATCH_SIZE
except:
start_index += BATCH_SIZE
end_index += BATCH_SIZE
next
clips = [VideoFileClip(filename) for filename in batch_comp]
video = concatenate(clips)
video.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
# remove partial video files
for filename in batch_comp:
os.remove(filename)
cleanup_log_files(outputfile)
def split_clips(composition, outputfile):
all_filenames = set([c['file'] for c in composition])
videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
basename, ext = os.path.splitext(outputfile)
print("[+] Writing ouput files.")
for i, clip in enumerate(cut_clips):
clipfilename = basename + '_' + str(i).zfill(5) + ext
clip.to_videofile(clipfilename, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def search_line(line, search, searchtype):
    """Return a truthy value if the search term matches the given line.

    For 're'/'word' a regex match object (or None) is returned; 'pos' and
    'hyper' delegate to the searcher module.  Unknown types return None.
    """
    if searchtype in ('re', 'word'):
        # Case-sensitive regex match.
        return re.search(search, line)
    if searchtype == 'pos':
        return searcher.search_out(line, search)
    if searchtype == 'hyper':
        return searcher.hypernym_search(line, search)
def get_subtitle_files(inputfile):
    """Return a list of .srt files corresponding to the given input files.

    Each input's final extension is swapped for 'srt' and the resulting path
    is kept if it exists on disk.  Returns False (after printing a warning)
    when no subtitle file exists for any input.
    """
    found = []
    for path in inputfile:
        parts = path.split('.')
        parts[-1] = 'srt'
        candidate = '.'.join(parts)
        if os.path.isfile(candidate):
            found.append(candidate)
    if not found:
        print("[!] No subtitle files were found.")
        return False
    return found
def get_vtt_files(inputfile):
    """Return {'vtt': ..., 'video': ...} pairs for inputs with .vtt subtitles.

    A vtt file matches when it starts with the video's path minus its final
    extension (so ``clip.en.vtt`` matches ``clip.mp4``).  Returns False and
    prints a warning when nothing matches.
    """
    pairs = []
    for video in inputfile:
        stem = '.'.join(video.split('.')[:-1])
        matches = glob(stem + '*.vtt')
        if matches:
            pairs.append({'vtt': matches[0], 'video': video})
    if not pairs:
        print("[!] No vtt files were found.")
        return False
    return pairs
def compose_from_srts(srts, search, searchtype):
    """Takes a list of subtitle (srt) filenames, search term and search type
    and, returns a list of timestamps for composing a supercut.

    Each hit is returned as a dict with the video ``file``, the raw srt
    ``time`` span, ``start``/``end`` in seconds, and the matched ``line``.
    """
    composition = []

    # Iterate over each subtitles file.
    for srt in srts:
        print(srt)
        lines = clean_srt(srt)

        videofile = ""
        foundVideoFile = False
        # Reset per subtitle file so the "not found" warning below is accurate
        # for every file, not suppressed after the first file with a hit.
        foundSearchTerm = False

        print("[+] Searching for video file corresponding to '" + srt + "'.")
        for ext in usable_extensions:
            tempVideoFile = srt.replace('.srt', '.' + ext)
            if os.path.isfile(tempVideoFile):
                videofile = tempVideoFile
                foundVideoFile = True
                print("[+] Found '" + tempVideoFile + "'.")

        # If a corresponding video file was found for this subtitles file...
        if foundVideoFile:
            # Check that the subtitles file contains subtitles.
            if lines:
                # Iterate over each line in the current subtitles file.
                for timespan in lines.keys():
                    line = lines[timespan].strip()

                    # If this line contains the search term
                    if search_line(line, search, searchtype):
                        foundSearchTerm = True

                        # Extract the timespan for this subtitle.
                        start, end = convert_timespan(timespan)

                        # Record this occurrence of the search term.
                        composition.append({'file': videofile, 'time': timespan, 'start': start, 'end': end, 'line': line})

                # If the search was unsuccessful.
                if foundSearchTerm is False:
                    print("[!] Search term '" + search + "'" + " was not found in subtitle file '" + srt + "'.")

            # If no subtitles were found in the current file.
            else:
                print("[!] Subtitle file '" + srt + "' is empty.")

        # If no video file was found...
        else:
            print("[!] No video file was found which corresponds to subtitle file '" + srt + "'.")
            print("[!] The following video formats are currently supported:")
            extList = ""
            for ext in usable_extensions:
                extList += ext + ", "
            print(extList)

    return composition
def compose_from_transcript(files, search, searchtype):
    """Takes transcripts created by audiogrep/pocketsphinx, a search and search type
    and returns a list of timestamps for creating a supercut"""
    final_segments = []

    if searchtype in ['re', 'word', 'franken', 'fragment']:
        # audiogrep uses 'sentence' for regex searches.
        mode = 'sentence' if searchtype == 're' else searchtype
        for seg in audiogrep.search(search, files, mode=mode, regex=True):
            seg['file'] = seg['file'].replace('.transcription.txt', '')
            seg['line'] = seg['words']
            final_segments.append(seg)
    elif searchtype in ['hyper', 'pos']:
        for sentence in audiogrep.convert_timestamps(files):
            source = sentence['file'].replace('.transcription.txt', '')
            for w in sentence['words']:
                # w is (text, start, end); skip non-matching words.
                if not search_line(w[0], search, searchtype):
                    continue
                final_segments.append({
                    'file': source,
                    'line': w[0],
                    'start': float(w[1]),
                    'end': float(w[2]),
                })

    return final_segments
def compose_from_vtt(files, search, searchtype):
    """Search vtt auto-subtitles and return the matching segments.

    For word-level search types ('word', 'hyper', 'pos') every matching word
    becomes its own segment; otherwise whole sentences are matched.
    """
    word_level_types = ('word', 'hyper', 'pos')
    final_segments = []
    for entry in files:
        video = entry['video']
        with open(entry['vtt'], 'r') as infile:
            sentences = parse_auto_sub(infile.read())
        for sentence in sentences:
            if searchtype in word_level_types:
                hits = [w for w in sentence['words'] if search_line(w['word'], search, searchtype)]
                for word in hits:
                    final_segments.append({
                        'file': video,
                        'line': word['word'],
                        'start': word['start'],
                        'end': word['end'],
                    })
            elif search_line(sentence['text'], search, searchtype):
                final_segments.append({
                    'file': video,
                    'line': sentence['text'],
                    'start': sentence['start'],
                    'end': sentence['end'],
                })
    return final_segments
def videogrep(inputfile, outputfile, search, searchtype, maxclips=0, padding=0, test=False, randomize=False, sync=0, use_transcript=False, use_vtt=False, export_clips=False):
    """Search through and find all instances of the search term in an srt or transcript,
    create a supercut around that instance, and output a new video file
    comprised of those supercuts.

    ``padding`` and ``sync`` are given in milliseconds and converted to
    seconds before being applied to every clip's start/end times.  Exits
    with status 1 when the search term is not found anywhere.
    """
    padding = padding / 1000.0
    sync = sync / 1000.0

    # Build the candidate clip list from whichever subtitle source is in use.
    if use_transcript:
        composition = compose_from_transcript(inputfile, search, searchtype)
    elif use_vtt:
        vtts = get_vtt_files(inputfile)
        composition = compose_from_vtt(vtts, search, searchtype)
    else:
        srts = get_subtitle_files(inputfile)
        composition = compose_from_srts(srts, search, searchtype)

    # If the search term was not found in any subtitle file...
    if len(composition) == 0:
        print("[!] Search term '" + search + "'" + " was not found in any file.")
        exit(1)

    else:
        print("[+] Search term '" + search + "'" + " was found in " + str(len(composition)) + " places.")

        # apply padding and sync
        for c in composition:
            c['start'] = c['start'] + sync - padding
            c['end'] = c['end'] + sync + padding

        # NOTE: clips are truncated before shuffling, so --max-clips keeps the
        # earliest hits even when --randomize is set.
        if maxclips > 0:
            composition = composition[:maxclips]

        if randomize is True:
            random.shuffle(composition)

        if test is True:
            demo_supercut(composition, padding)
        else:
            if os.path.splitext(outputfile)[1].lower() == '.edl':
                make_edl(composition, outputfile)
            elif export_clips:
                split_clips(composition, outputfile)
            else:
                if len(composition) > BATCH_SIZE:
                    print("[+] Starting batch job.")
                    create_supercut_in_batches(composition, outputfile, padding)
                else:
                    create_supercut(composition, outputfile, padding)
def main():
    """Command-line entry point: parse arguments and dispatch the action."""
    import argparse
    from collections import Counter

    parser = argparse.ArgumentParser(description='Generate a "supercut" of one or more video files by searching through subtitle tracks.')
    parser.add_argument('--input', '-i', dest='inputfile', nargs='*', required=True, help='video or subtitle file, or folder')
    parser.add_argument('--search', '-s', dest='search', help='search term')
    parser.add_argument('--search-type', '-st', dest='searchtype', default='re', choices=['re', 'pos', 'hyper', 'fragment', 'franken', 'word'], help='type of search')
    parser.add_argument('--use-transcript', '-t', action='store_true', dest='use_transcript', help='Use a transcript generated by pocketsphinx instead of srt files')
    parser.add_argument('--use-vtt', '-vtt', action='store_true', dest='use_vtt', help='Use a vtt file instead of srt')
    parser.add_argument('--max-clips', '-m', dest='maxclips', type=int, default=0, help='maximum number of clips to use for the supercut')
    parser.add_argument('--output', '-o', dest='outputfile', default='supercut.mp4', help='name of output file')
    parser.add_argument('--export-clips', '-ec', dest='export_clips', action='store_true', help='Export individual clips')
    parser.add_argument('--demo', '-d', action='store_true', help='show results without making the supercut')
    parser.add_argument('--randomize', '-r', action='store_true', help='randomize the clips')
    parser.add_argument('--youtube', '-yt', help='grab clips from youtube based on your search')
    parser.add_argument('--padding', '-p', dest='padding', default=0, type=int, help='padding in milliseconds to add to the start and end of each clip')
    parser.add_argument('--resyncsubs', '-rs', dest='sync', default=0, type=int, help='Subtitle re-synch delay +/- in milliseconds')
    parser.add_argument('--transcribe', '-tr', dest='transcribe', action='store_true', help='Transcribe the video using audiogrep. Requires pocketsphinx')
    parser.add_argument('--ngrams', '-n', dest='ngrams', type=int, default=0, help='Return ngrams for videos')

    opts = parser.parse_args()

    # --search is mandatory unless we are only transcribing or listing ngrams.
    if not opts.transcribe and opts.ngrams == 0 and opts.search is None:
        parser.error('argument --search/-s is required')

    if opts.transcribe:
        create_timestamps(opts.inputfile)
        return

    if opts.ngrams > 0:
        grams = get_ngrams(opts.inputfile, opts.ngrams, opts.use_transcript, opts.use_vtt)
        for ngram, count in Counter(grams).most_common(100):
            print(' '.join(ngram), count)
        return

    videogrep(opts.inputfile, opts.outputfile, opts.search, opts.searchtype, opts.maxclips, opts.padding, opts.demo, opts.randomize, opts.sync, opts.use_transcript, opts.use_vtt, opts.export_clips)


if __name__ == '__main__':
    main()
|
antiboredom/videogrep | videogrep/videogrep.py | create_supercut_in_batches | python | def create_supercut_in_batches(composition, outputfile, padding):
total_clips = len(composition)
start_index = 0
end_index = BATCH_SIZE
batch_comp = []
while start_index < total_clips:
filename = outputfile + '.tmp' + str(start_index) + '.mp4'
try:
create_supercut(composition[start_index:end_index], filename, padding)
batch_comp.append(filename)
gc.collect()
start_index += BATCH_SIZE
end_index += BATCH_SIZE
except:
start_index += BATCH_SIZE
end_index += BATCH_SIZE
next
clips = [VideoFileClip(filename) for filename in batch_comp]
video = concatenate(clips)
video.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
# remove partial video files
for filename in batch_comp:
os.remove(filename)
cleanup_log_files(outputfile) | Create & concatenate video clips in groups of size BATCH_SIZE and output
finished video file to output directory. | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/videogrep.py#L218-L248 | [
"def create_supercut(composition, outputfile, padding):\n \"\"\"Concatenate video clips together and output finished video file to the\n output directory.\n \"\"\"\n print(\"[+] Creating clips.\")\n demo_supercut(composition, padding)\n\n # add padding when necessary\n for (clip, nextclip) in z... | from __future__ import print_function
import os
import re
import random
import gc
import subprocess
from glob import glob
from collections import OrderedDict
from moviepy.editor import VideoFileClip, concatenate
import audiogrep
from .vtt import parse_auto_sub
from .timecode import Timecode
from . import searcher
usable_extensions = ['mp4', 'avi', 'mov', 'mkv', 'm4v']
BATCH_SIZE = 20
def get_fps(filename):
process = subprocess.Popen(['ffmpeg', '-i', filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
returncode = process.wait()
output = process.stdout.read()
fps = re.findall(r'\d+ fps', output, flags=re.MULTILINE)
try:
return int(fps[0].split(' ')[0])
except:
return 25
def get_ngrams(inputfile, n=1, use_transcript=False, use_vtt=False):
'''
Get ngrams from a text
Sourced from:
https://gist.github.com/dannguyen/93c2c43f4e65328b85af
'''
words = []
if use_transcript:
for s in audiogrep.convert_timestamps(inputfile):
for w in s['words']:
words.append(w[0])
elif use_vtt:
vtts = get_vtt_files(inputfile)
for vtt in vtts:
with open(vtt['vtt'], 'r') as infile:
sentences = parse_auto_sub(infile.read())
for s in sentences:
for w in s['words']:
words.append(w['word'])
else:
text = ''
srts = get_subtitle_files(inputfile)
for srt in srts:
lines = clean_srt(srt)
if lines:
for timespan in lines.keys():
line = lines[timespan].strip()
text += line + ' '
words = re.split(r'[.?!,:\"]+\s*|\s+', text)
ngrams = zip(*[words[i:] for i in range(n)])
return ngrams
def make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=25):
reel = full_name
if len(full_name) > 7:
reel = full_name[0:7]
template = '{} {} AA/V C {} {} {} {}\n* FROM CLIP NAME: {}\n* COMMENT: \n FINAL CUT PRO REEL: {} REPLACED BY: {}\n\n'
out = template.format(
n,
full_name,
Timecode(fps, start_seconds=time_in),
Timecode(fps, start_seconds=time_out),
Timecode(fps, start_seconds=rec_in),
Timecode(fps, start_seconds=rec_out),
filename,
full_name,
reel
)
return out
def make_edl(timestamps, name):
'''Converts an array of ordered timestamps into an EDL string'''
fpses = {}
out = "TITLE: {}\nFCM: NON-DROP FRAME\n\n".format(name)
rec_in = 0
for index, timestamp in enumerate(timestamps):
if timestamp['file'] not in fpses:
fpses[timestamp['file']] = get_fps(timestamp['file'])
fps = fpses[timestamp['file']]
n = str(index + 1).zfill(4)
time_in = timestamp['start']
time_out = timestamp['end']
duration = time_out - time_in
rec_out = rec_in + duration
full_name = 'reel_{}'.format(n)
filename = timestamp['file']
out += make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=fps)
rec_in = rec_out
with open(name, 'w') as outfile:
outfile.write(out)
def create_timestamps(inputfiles):
files = audiogrep.convert_to_wav(inputfiles)
audiogrep.transcribe(files)
def convert_timespan(timespan):
"""Convert an srt timespan into a start and end timestamp."""
start, end = timespan.split('-->')
start = convert_timestamp(start)
end = convert_timestamp(end)
return start, end
def convert_timestamp(timestamp):
"""Convert an srt timestamp into seconds."""
timestamp = timestamp.strip()
chunk, millis = timestamp.split(',')
hours, minutes, seconds = chunk.split(':')
hours = int(hours)
minutes = int(minutes)
seconds = int(seconds)
seconds = seconds + hours * 60 * 60 + minutes * 60 + float(millis) / 1000
return seconds
def clean_srt(srt):
"""Remove damaging line breaks and numbers from srt files and return a
dictionary.
"""
with open(srt, 'r') as f:
text = f.read()
text = re.sub(r'^\d+[\n\r]', '', text, flags=re.MULTILINE)
lines = text.splitlines()
output = OrderedDict()
key = ''
for line in lines:
line = line.strip()
if line.find('-->') > -1:
key = line
output[key] = ''
else:
if key != '':
output[key] += line + ' '
return output
def cleanup_log_files(outputfile):
"""Search for and remove temp log files found in the output directory."""
d = os.path.dirname(os.path.abspath(outputfile))
logfiles = [f for f in os.listdir(d) if f.endswith('ogg.log')]
for f in logfiles:
os.remove(f)
def demo_supercut(composition, padding):
"""Print out timespans to be cut followed by the line number in the srt."""
for i, c in enumerate(composition):
line = c['line']
start = c['start']
end = c['end']
if i > 0 and composition[i - 1]['file'] == c['file'] and start < composition[i - 1]['end']:
start = start + padding
print("{1} to {2}:\t{0}".format(line, start, end))
def create_supercut(composition, outputfile, padding):
"""Concatenate video clips together and output finished video file to the
output directory.
"""
print("[+] Creating clips.")
demo_supercut(composition, padding)
# add padding when necessary
for (clip, nextclip) in zip(composition, composition[1:]):
if ((nextclip['file'] == clip['file']) and (nextclip['start'] < clip['end'])):
nextclip['start'] += padding
# put all clips together:
all_filenames = set([c['file'] for c in composition])
videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
print("[+] Concatenating clips.")
final_clip = concatenate(cut_clips)
print("[+] Writing ouput file.")
final_clip.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def split_clips(composition, outputfile):
all_filenames = set([c['file'] for c in composition])
videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
basename, ext = os.path.splitext(outputfile)
print("[+] Writing ouput files.")
for i, clip in enumerate(cut_clips):
clipfilename = basename + '_' + str(i).zfill(5) + ext
clip.to_videofile(clipfilename, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def search_line(line, search, searchtype):
"""Return True if search term is found in given line, False otherwise."""
if searchtype == 're' or searchtype == 'word':
return re.search(search, line) #, re.IGNORECASE)
elif searchtype == 'pos':
return searcher.search_out(line, search)
elif searchtype == 'hyper':
return searcher.hypernym_search(line, search)
def get_subtitle_files(inputfile):
"""Return a list of subtitle files."""
srts = []
for f in inputfile:
filename = f.split('.')
filename[-1] = 'srt'
srt = '.'.join(filename)
if os.path.isfile(srt):
srts.append(srt)
if len(srts) == 0:
print("[!] No subtitle files were found.")
return False
return srts
def get_vtt_files(inputfile):
"""Return a list of vtt files."""
vtts = []
for f in inputfile:
filename = f.split('.')
filename = '.'.join(filename[0:-1])
vtt = glob(filename + '*.vtt')
if len(vtt) > 0:
vtts.append({'vtt': vtt[0], 'video': f})
if len(vtts) == 0:
print("[!] No vtt files were found.")
return False
return vtts
def compose_from_srts(srts, search, searchtype):
"""Takes a list of subtitle (srt) filenames, search term and search type
and, returns a list of timestamps for composing a supercut.
"""
composition = []
foundSearchTerm = False
# Iterate over each subtitles file.
for srt in srts:
print(srt)
lines = clean_srt(srt)
videofile = ""
foundVideoFile = False
print("[+] Searching for video file corresponding to '" + srt + "'.")
for ext in usable_extensions:
tempVideoFile = srt.replace('.srt', '.' + ext)
if os.path.isfile(tempVideoFile):
videofile = tempVideoFile
foundVideoFile = True
print("[+] Found '" + tempVideoFile + "'.")
# If a correspndong video file was found for this subtitles file...
if foundVideoFile:
# Check that the subtitles file contains subtitles.
if lines:
# Iterate over each line in the current subtitles file.
for timespan in lines.keys():
line = lines[timespan].strip()
# If this line contains the search term
if search_line(line, search, searchtype):
foundSearchTerm = True
# Extract the timespan for this subtitle.
start, end = convert_timespan(timespan)
# Record this occurance of the search term.
composition.append({'file': videofile, 'time': timespan, 'start': start, 'end': end, 'line': line})
# If the search was unsuccessful.
if foundSearchTerm is False:
print("[!] Search term '" + search + "'" + " was not found is subtitle file '" + srt + "'.")
# If no subtitles were found in the current file.
else:
print("[!] Subtitle file '" + srt + "' is empty.")
# If no video file was found...
else:
print("[!] No video file was found which corresponds to subtitle file '" + srt + "'.")
print("[!] The following video formats are currently supported:")
extList = ""
for ext in usable_extensions:
extList += ext + ", "
print(extList)
return composition
def compose_from_transcript(files, search, searchtype):
"""Takes transcripts created by audiogrep/pocketsphinx, a search and search type
and returns a list of timestamps for creating a supercut"""
final_segments = []
if searchtype in ['re', 'word', 'franken', 'fragment']:
if searchtype == 're':
searchtype = 'sentence'
segments = audiogrep.search(search, files, mode=searchtype, regex=True)
for seg in segments:
seg['file'] = seg['file'].replace('.transcription.txt', '')
seg['line'] = seg['words']
final_segments.append(seg)
elif searchtype in ['hyper', 'pos']:
for s in audiogrep.convert_timestamps(files):
for w in s['words']:
if search_line(w[0], search, searchtype):
seg = {
'file': s['file'].replace('.transcription.txt',''),
'line': w[0],
'start': float(w[1]),
'end': float(w[2])
}
final_segments.append(seg)
return final_segments
def compose_from_vtt(files, search, searchtype):
final_segments = []
for f in files:
video = f['video']
with open(f['vtt'], 'r') as infile:
sentences = parse_auto_sub(infile.read())
for sentence in sentences:
if searchtype in ['word', 'hyper', 'pos']:
for word in sentence['words']:
if search_line(word['word'], search, searchtype):
seg = {
'file': video,
'line': word['word'],
'start': word['start'],
'end': word['end']
}
final_segments.append(seg)
else:
if search_line(sentence['text'], search, searchtype):
seg = {
'file': video,
'line': sentence['text'],
'start': sentence['start'],
'end': sentence['end']
}
final_segments.append(seg)
return final_segments
def videogrep(inputfile, outputfile, search, searchtype, maxclips=0, padding=0, test=False, randomize=False, sync=0, use_transcript=False, use_vtt=False, export_clips=False):
"""Search through and find all instances of the search term in an srt or transcript,
create a supercut around that instance, and output a new video file
comprised of those supercuts.
"""
padding = padding / 1000.0
sync = sync / 1000.0
composition = []
foundSearchTerm = False
if use_transcript:
composition = compose_from_transcript(inputfile, search, searchtype)
elif use_vtt:
vtts = get_vtt_files(inputfile)
composition = compose_from_vtt(vtts, search, searchtype)
else:
srts = get_subtitle_files(inputfile)
composition = compose_from_srts(srts, search, searchtype)
# If the search term was not found in any subtitle file...
if len(composition) == 0:
print("[!] Search term '" + search + "'" + " was not found in any file.")
exit(1)
else:
print("[+] Search term '" + search + "'" + " was found in " + str(len(composition)) + " places.")
# apply padding and sync
for c in composition:
c['start'] = c['start'] + sync - padding
c['end'] = c['end'] + sync + padding
if maxclips > 0:
composition = composition[:maxclips]
if randomize is True:
random.shuffle(composition)
if test is True:
demo_supercut(composition, padding)
else:
if os.path.splitext(outputfile)[1].lower() == '.edl':
make_edl(composition, outputfile)
elif export_clips:
split_clips(composition, outputfile)
else:
if len(composition) > BATCH_SIZE:
print("[+} Starting batch job.")
create_supercut_in_batches(composition, outputfile, padding)
else:
create_supercut(composition, outputfile, padding)
def main():
import argparse
parser = argparse.ArgumentParser(description='Generate a "supercut" of one or more video files by searching through subtitle tracks.')
parser.add_argument('--input', '-i', dest='inputfile', nargs='*', required=True, help='video or subtitle file, or folder')
parser.add_argument('--search', '-s', dest='search', help='search term')
parser.add_argument('--search-type', '-st', dest='searchtype', default='re', choices=['re', 'pos', 'hyper', 'fragment', 'franken', 'word'], help='type of search')
parser.add_argument('--use-transcript', '-t', action='store_true', dest='use_transcript', help='Use a transcript generated by pocketsphinx instead of srt files')
parser.add_argument('--use-vtt', '-vtt', action='store_true', dest='use_vtt', help='Use a vtt file instead of srt')
parser.add_argument('--max-clips', '-m', dest='maxclips', type=int, default=0, help='maximum number of clips to use for the supercut')
parser.add_argument('--output', '-o', dest='outputfile', default='supercut.mp4', help='name of output file')
parser.add_argument('--export-clips', '-ec', dest='export_clips', action='store_true', help='Export individual clips')
parser.add_argument('--demo', '-d', action='store_true', help='show results without making the supercut')
parser.add_argument('--randomize', '-r', action='store_true', help='randomize the clips')
parser.add_argument('--youtube', '-yt', help='grab clips from youtube based on your search')
parser.add_argument('--padding', '-p', dest='padding', default=0, type=int, help='padding in milliseconds to add to the start and end of each clip')
parser.add_argument('--resyncsubs', '-rs', dest='sync', default=0, type=int, help='Subtitle re-synch delay +/- in milliseconds')
parser.add_argument('--transcribe', '-tr', dest='transcribe', action='store_true', help='Transcribe the video using audiogrep. Requires pocketsphinx')
parser.add_argument('--ngrams', '-n', dest='ngrams', type=int, default=0, help='Return ngrams for videos')
args = parser.parse_args()
if not args.transcribe and args.ngrams == 0:
if args.search is None:
parser.error('argument --search/-s is required')
if args.transcribe:
create_timestamps(args.inputfile)
elif args.ngrams > 0:
from collections import Counter
grams = get_ngrams(args.inputfile, args.ngrams, args.use_transcript, args.use_vtt)
most_common = Counter(grams).most_common(100)
for ngram, count in most_common:
print(' '.join(ngram), count)
else:
videogrep(args.inputfile, args.outputfile, args.search, args.searchtype, args.maxclips, args.padding, args.demo, args.randomize, args.sync, args.use_transcript, args.use_vtt, args.export_clips)
if __name__ == '__main__':
main()
|
antiboredom/videogrep | videogrep/videogrep.py | search_line | python | def search_line(line, search, searchtype):
if searchtype == 're' or searchtype == 'word':
return re.search(search, line) #, re.IGNORECASE)
elif searchtype == 'pos':
return searcher.search_out(line, search)
elif searchtype == 'hyper':
return searcher.hypernym_search(line, search) | Return True if search term is found in given line, False otherwise. | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/videogrep.py#L263-L270 | null | from __future__ import print_function
import os
import re
import random
import gc
import subprocess
from glob import glob
from collections import OrderedDict
from moviepy.editor import VideoFileClip, concatenate
import audiogrep
from .vtt import parse_auto_sub
from .timecode import Timecode
from . import searcher
usable_extensions = ['mp4', 'avi', 'mov', 'mkv', 'm4v']
BATCH_SIZE = 20
def get_fps(filename):
process = subprocess.Popen(['ffmpeg', '-i', filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
returncode = process.wait()
output = process.stdout.read()
fps = re.findall(r'\d+ fps', output, flags=re.MULTILINE)
try:
return int(fps[0].split(' ')[0])
except:
return 25
def get_ngrams(inputfile, n=1, use_transcript=False, use_vtt=False):
'''
Get ngrams from a text
Sourced from:
https://gist.github.com/dannguyen/93c2c43f4e65328b85af
'''
words = []
if use_transcript:
for s in audiogrep.convert_timestamps(inputfile):
for w in s['words']:
words.append(w[0])
elif use_vtt:
vtts = get_vtt_files(inputfile)
for vtt in vtts:
with open(vtt['vtt'], 'r') as infile:
sentences = parse_auto_sub(infile.read())
for s in sentences:
for w in s['words']:
words.append(w['word'])
else:
text = ''
srts = get_subtitle_files(inputfile)
for srt in srts:
lines = clean_srt(srt)
if lines:
for timespan in lines.keys():
line = lines[timespan].strip()
text += line + ' '
words = re.split(r'[.?!,:\"]+\s*|\s+', text)
ngrams = zip(*[words[i:] for i in range(n)])
return ngrams
def make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=25):
reel = full_name
if len(full_name) > 7:
reel = full_name[0:7]
template = '{} {} AA/V C {} {} {} {}\n* FROM CLIP NAME: {}\n* COMMENT: \n FINAL CUT PRO REEL: {} REPLACED BY: {}\n\n'
out = template.format(
n,
full_name,
Timecode(fps, start_seconds=time_in),
Timecode(fps, start_seconds=time_out),
Timecode(fps, start_seconds=rec_in),
Timecode(fps, start_seconds=rec_out),
filename,
full_name,
reel
)
return out
def make_edl(timestamps, name):
'''Converts an array of ordered timestamps into an EDL string'''
fpses = {}
out = "TITLE: {}\nFCM: NON-DROP FRAME\n\n".format(name)
rec_in = 0
for index, timestamp in enumerate(timestamps):
if timestamp['file'] not in fpses:
fpses[timestamp['file']] = get_fps(timestamp['file'])
fps = fpses[timestamp['file']]
n = str(index + 1).zfill(4)
time_in = timestamp['start']
time_out = timestamp['end']
duration = time_out - time_in
rec_out = rec_in + duration
full_name = 'reel_{}'.format(n)
filename = timestamp['file']
out += make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=fps)
rec_in = rec_out
with open(name, 'w') as outfile:
outfile.write(out)
def create_timestamps(inputfiles):
files = audiogrep.convert_to_wav(inputfiles)
audiogrep.transcribe(files)
def convert_timespan(timespan):
"""Convert an srt timespan into a start and end timestamp."""
start, end = timespan.split('-->')
start = convert_timestamp(start)
end = convert_timestamp(end)
return start, end
def convert_timestamp(timestamp):
"""Convert an srt timestamp into seconds."""
timestamp = timestamp.strip()
chunk, millis = timestamp.split(',')
hours, minutes, seconds = chunk.split(':')
hours = int(hours)
minutes = int(minutes)
seconds = int(seconds)
seconds = seconds + hours * 60 * 60 + minutes * 60 + float(millis) / 1000
return seconds
def clean_srt(srt):
"""Remove damaging line breaks and numbers from srt files and return a
dictionary.
"""
with open(srt, 'r') as f:
text = f.read()
text = re.sub(r'^\d+[\n\r]', '', text, flags=re.MULTILINE)
lines = text.splitlines()
output = OrderedDict()
key = ''
for line in lines:
line = line.strip()
if line.find('-->') > -1:
key = line
output[key] = ''
else:
if key != '':
output[key] += line + ' '
return output
def cleanup_log_files(outputfile):
"""Search for and remove temp log files found in the output directory."""
d = os.path.dirname(os.path.abspath(outputfile))
logfiles = [f for f in os.listdir(d) if f.endswith('ogg.log')]
for f in logfiles:
os.remove(f)
def demo_supercut(composition, padding):
"""Print out timespans to be cut followed by the line number in the srt."""
for i, c in enumerate(composition):
line = c['line']
start = c['start']
end = c['end']
if i > 0 and composition[i - 1]['file'] == c['file'] and start < composition[i - 1]['end']:
start = start + padding
print("{1} to {2}:\t{0}".format(line, start, end))
def create_supercut(composition, outputfile, padding):
"""Concatenate video clips together and output finished video file to the
output directory.
"""
print("[+] Creating clips.")
demo_supercut(composition, padding)
# add padding when necessary
for (clip, nextclip) in zip(composition, composition[1:]):
if ((nextclip['file'] == clip['file']) and (nextclip['start'] < clip['end'])):
nextclip['start'] += padding
# put all clips together:
all_filenames = set([c['file'] for c in composition])
videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
print("[+] Concatenating clips.")
final_clip = concatenate(cut_clips)
print("[+] Writing ouput file.")
final_clip.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def create_supercut_in_batches(composition, outputfile, padding):
"""Create & concatenate video clips in groups of size BATCH_SIZE and output
finished video file to output directory.
"""
total_clips = len(composition)
start_index = 0
end_index = BATCH_SIZE
batch_comp = []
while start_index < total_clips:
filename = outputfile + '.tmp' + str(start_index) + '.mp4'
try:
create_supercut(composition[start_index:end_index], filename, padding)
batch_comp.append(filename)
gc.collect()
start_index += BATCH_SIZE
end_index += BATCH_SIZE
except:
start_index += BATCH_SIZE
end_index += BATCH_SIZE
next
clips = [VideoFileClip(filename) for filename in batch_comp]
video = concatenate(clips)
video.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
# remove partial video files
for filename in batch_comp:
os.remove(filename)
cleanup_log_files(outputfile)
def split_clips(composition, outputfile):
all_filenames = set([c['file'] for c in composition])
videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
basename, ext = os.path.splitext(outputfile)
print("[+] Writing ouput files.")
for i, clip in enumerate(cut_clips):
clipfilename = basename + '_' + str(i).zfill(5) + ext
clip.to_videofile(clipfilename, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def get_subtitle_files(inputfile):
"""Return a list of subtitle files."""
srts = []
for f in inputfile:
filename = f.split('.')
filename[-1] = 'srt'
srt = '.'.join(filename)
if os.path.isfile(srt):
srts.append(srt)
if len(srts) == 0:
print("[!] No subtitle files were found.")
return False
return srts
def get_vtt_files(inputfile):
    """Return a list of {'vtt', 'video'} dicts for videos with .vtt captions, or False."""
    matches = []
    for video in inputfile:
        # Glob on the extension-less stem so language-tagged captions
        # (e.g. "name.en.vtt") are also picked up.
        stem = '.'.join(video.split('.')[0:-1])
        candidates = glob(stem + '*.vtt')
        if len(candidates) > 0:
            matches.append({'vtt': candidates[0], 'video': video})
    if not matches:
        print("[!] No vtt files were found.")
        return False
    return matches
def compose_from_srts(srts, search, searchtype):
    """Takes a list of subtitle (srt) filenames, search term and search type
    and, returns a list of timestamps for composing a supercut.
    """
    composition = []
    foundSearchTerm = False
    # Iterate over each subtitles file.
    for srt in srts:
        print(srt)
        lines = clean_srt(srt)
        videofile = ""
        foundVideoFile = False
        print("[+] Searching for video file corresponding to '" + srt + "'.")
        for ext in usable_extensions:
            tempVideoFile = srt.replace('.srt', '.' + ext)
            if os.path.isfile(tempVideoFile):
                videofile = tempVideoFile
                foundVideoFile = True
                print("[+] Found '" + tempVideoFile + "'.")
        # If a corresponding video file was found for this subtitles file...
        if foundVideoFile:
            # Check that the subtitles file contains subtitles.
            if lines:
                # Iterate over each line in the current subtitles file.
                for timespan in lines.keys():
                    line = lines[timespan].strip()
                    # If this line contains the search term
                    if search_line(line, search, searchtype):
                        foundSearchTerm = True
                        # Extract the timespan for this subtitle.
                        start, end = convert_timespan(timespan)
                        # Record this occurrence of the search term.
                        composition.append({'file': videofile, 'time': timespan, 'start': start, 'end': end, 'line': line})
                # If the search was unsuccessful.
                if foundSearchTerm is False:
                    # Typo fix: message previously read "was not found is subtitle file".
                    print("[!] Search term '" + search + "'" + " was not found in subtitle file '" + srt + "'.")
            # If no subtitles were found in the current file.
            else:
                print("[!] Subtitle file '" + srt + "' is empty.")
        # If no video file was found...
        else:
            print("[!] No video file was found which corresponds to subtitle file '" + srt + "'.")
            print("[!] The following video formats are currently supported:")
            extList = ""
            for ext in usable_extensions:
                extList += ext + ", "
            print(extList)
    return composition
def compose_from_transcript(files, search, searchtype):
    """Takes transcripts created by audiogrep/pocketsphinx, a search and search type
    and returns a list of timestamps for creating a supercut"""
    final_segments = []
    # Sentence/fragment-level modes are delegated wholesale to audiogrep.
    if searchtype in ['re', 'word', 'franken', 'fragment']:
        if searchtype == 're':
            # audiogrep calls regex-over-sentences mode 'sentence'.
            searchtype = 'sentence'
        segments = audiogrep.search(search, files, mode=searchtype, regex=True)
        for seg in segments:
            # Map the transcript filename back to the source media file.
            seg['file'] = seg['file'].replace('.transcription.txt', '')
            seg['line'] = seg['words']
            final_segments.append(seg)
    # Word-level modes: scan every timestamped word ourselves.
    elif searchtype in ['hyper', 'pos']:
        for s in audiogrep.convert_timestamps(files):
            for w in s['words']:
                # w is (word, start, end); search_line matches on the word text.
                if search_line(w[0], search, searchtype):
                    seg = {
                        'file': s['file'].replace('.transcription.txt',''),
                        'line': w[0],
                        'start': float(w[1]),
                        'end': float(w[2])
                    }
                    final_segments.append(seg)
    return final_segments
def compose_from_vtt(files, search, searchtype):
    # Build supercut clip dicts from vtt caption files (as returned by
    # get_vtt_files), matching at word or sentence granularity.
    final_segments = []
    for f in files:
        video = f['video']
        with open(f['vtt'], 'r') as infile:
            sentences = parse_auto_sub(infile.read())
            for sentence in sentences:
                # Word-level search types inspect each timestamped word.
                if searchtype in ['word', 'hyper', 'pos']:
                    for word in sentence['words']:
                        if search_line(word['word'], search, searchtype):
                            seg = {
                                'file': video,
                                'line': word['word'],
                                'start': word['start'],
                                'end': word['end']
                            }
                            final_segments.append(seg)
                else:
                    # Sentence-level search matches against the whole caption text.
                    if search_line(sentence['text'], search, searchtype):
                        seg = {
                            'file': video,
                            'line': sentence['text'],
                            'start': sentence['start'],
                            'end': sentence['end']
                        }
                        final_segments.append(seg)
    return final_segments
def videogrep(inputfile, outputfile, search, searchtype, maxclips=0, padding=0, test=False, randomize=False, sync=0, use_transcript=False, use_vtt=False, export_clips=False):
    """Search through and find all instances of the search term in an srt or transcript,
    create a supercut around that instance, and output a new video file
    comprised of those supercuts.

    Args mirror the CLI flags; padding and sync are given in milliseconds.
    """
    # Convert milliseconds to seconds for clip arithmetic.
    padding = padding / 1000.0
    sync = sync / 1000.0
    composition = []
    if use_transcript:
        composition = compose_from_transcript(inputfile, search, searchtype)
    elif use_vtt:
        vtts = get_vtt_files(inputfile)
        composition = compose_from_vtt(vtts, search, searchtype)
    else:
        srts = get_subtitle_files(inputfile)
        composition = compose_from_srts(srts, search, searchtype)
    # If the search term was not found in any subtitle file...
    if len(composition) == 0:
        print("[!] Search term '" + search + "'" + " was not found in any file.")
        exit(1)
    else:
        print("[+] Search term '" + search + "'" + " was found in " + str(len(composition)) + " places.")
        # apply padding and sync
        for c in composition:
            c['start'] = c['start'] + sync - padding
            c['end'] = c['end'] + sync + padding
        if maxclips > 0:
            composition = composition[:maxclips]
        if randomize is True:
            random.shuffle(composition)
        if test is True:
            demo_supercut(composition, padding)
        else:
            if os.path.splitext(outputfile)[1].lower() == '.edl':
                make_edl(composition, outputfile)
            elif export_clips:
                split_clips(composition, outputfile)
            else:
                if len(composition) > BATCH_SIZE:
                    # Typo fix: banner previously printed a mismatched "[+}".
                    print("[+] Starting batch job.")
                    create_supercut_in_batches(composition, outputfile, padding)
                else:
                    create_supercut(composition, outputfile, padding)
def main():
    # Command-line entry point: parse flags and dispatch to transcription,
    # ngram listing, or the supercut pipeline.
    import argparse
    parser = argparse.ArgumentParser(description='Generate a "supercut" of one or more video files by searching through subtitle tracks.')
    parser.add_argument('--input', '-i', dest='inputfile', nargs='*', required=True, help='video or subtitle file, or folder')
    parser.add_argument('--search', '-s', dest='search', help='search term')
    parser.add_argument('--search-type', '-st', dest='searchtype', default='re', choices=['re', 'pos', 'hyper', 'fragment', 'franken', 'word'], help='type of search')
    parser.add_argument('--use-transcript', '-t', action='store_true', dest='use_transcript', help='Use a transcript generated by pocketsphinx instead of srt files')
    parser.add_argument('--use-vtt', '-vtt', action='store_true', dest='use_vtt', help='Use a vtt file instead of srt')
    parser.add_argument('--max-clips', '-m', dest='maxclips', type=int, default=0, help='maximum number of clips to use for the supercut')
    parser.add_argument('--output', '-o', dest='outputfile', default='supercut.mp4', help='name of output file')
    parser.add_argument('--export-clips', '-ec', dest='export_clips', action='store_true', help='Export individual clips')
    parser.add_argument('--demo', '-d', action='store_true', help='show results without making the supercut')
    parser.add_argument('--randomize', '-r', action='store_true', help='randomize the clips')
    parser.add_argument('--youtube', '-yt', help='grab clips from youtube based on your search')
    parser.add_argument('--padding', '-p', dest='padding', default=0, type=int, help='padding in milliseconds to add to the start and end of each clip')
    parser.add_argument('--resyncsubs', '-rs', dest='sync', default=0, type=int, help='Subtitle re-synch delay +/- in milliseconds')
    parser.add_argument('--transcribe', '-tr', dest='transcribe', action='store_true', help='Transcribe the video using audiogrep. Requires pocketsphinx')
    parser.add_argument('--ngrams', '-n', dest='ngrams', type=int, default=0, help='Return ngrams for videos')
    args = parser.parse_args()
    # --search is only optional for transcription / ngram-listing runs.
    if not args.transcribe and args.ngrams == 0:
        if args.search is None:
            parser.error('argument --search/-s is required')
    if args.transcribe:
        create_timestamps(args.inputfile)
    elif args.ngrams > 0:
        # Print the 100 most common ngrams instead of cutting video.
        from collections import Counter
        grams = get_ngrams(args.inputfile, args.ngrams, args.use_transcript, args.use_vtt)
        most_common = Counter(grams).most_common(100)
        for ngram, count in most_common:
            print(' '.join(ngram), count)
    else:
        videogrep(args.inputfile, args.outputfile, args.search, args.searchtype, args.maxclips, args.padding, args.demo, args.randomize, args.sync, args.use_transcript, args.use_vtt, args.export_clips)


if __name__ == '__main__':
    main()
|
antiboredom/videogrep | videogrep/videogrep.py | get_subtitle_files | python | def get_subtitle_files(inputfile):
srts = []
for f in inputfile:
filename = f.split('.')
filename[-1] = 'srt'
srt = '.'.join(filename)
if os.path.isfile(srt):
srts.append(srt)
if len(srts) == 0:
print("[!] No subtitle files were found.")
return False
return srts | Return a list of subtitle files. | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/videogrep.py#L273-L288 | null | from __future__ import print_function
import os
import re
import random
import gc
import subprocess
from glob import glob
from collections import OrderedDict
from moviepy.editor import VideoFileClip, concatenate
import audiogrep
from .vtt import parse_auto_sub
from .timecode import Timecode
from . import searcher
usable_extensions = ['mp4', 'avi', 'mov', 'mkv', 'm4v']
BATCH_SIZE = 20
def get_fps(filename):
    """Return the frame rate of a video file by parsing ffmpeg's stderr output.

    Falls back to 25 fps when ffmpeg is unavailable or no rate can be parsed.
    """
    try:
        process = subprocess.Popen(['ffmpeg', '-i', filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        process.wait()
        # Bug fix: on Python 3 the pipe yields bytes; decode before regex
        # matching, otherwise re.findall raised and 25 was always returned.
        output = process.stdout.read().decode('utf-8', errors='replace')
        fps = re.findall(r'\d+ fps', output, flags=re.MULTILINE)
        return int(fps[0].split(' ')[0])
    except Exception:
        # Common default frame rate when detection fails.
        return 25
def get_ngrams(inputfile, n=1, use_transcript=False, use_vtt=False):
    '''
    Get ngrams from a text
    Sourced from:
    https://gist.github.com/dannguyen/93c2c43f4e65328b85af
    '''
    words = []
    # Collect the word stream from whichever caption source is selected.
    if use_transcript:
        # audiogrep transcripts: each sentence carries (word, start, end) tuples.
        for s in audiogrep.convert_timestamps(inputfile):
            for w in s['words']:
                words.append(w[0])
    elif use_vtt:
        # vtt captions: parse_auto_sub yields sentences with word dicts.
        vtts = get_vtt_files(inputfile)
        for vtt in vtts:
            with open(vtt['vtt'], 'r') as infile:
                sentences = parse_auto_sub(infile.read())
                for s in sentences:
                    for w in s['words']:
                        words.append(w['word'])
    else:
        # srt subtitles: flatten all subtitle lines into one text blob.
        text = ''
        srts = get_subtitle_files(inputfile)
        for srt in srts:
            lines = clean_srt(srt)
            if lines:
                for timespan in lines.keys():
                    line = lines[timespan].strip()
                    text += line + ' '
        # Split on punctuation and whitespace to get bare words.
        words = re.split(r'[.?!,:\"]+\s*|\s+', text)
    # Sliding window of n consecutive words; yields tuples lazily (zip object).
    ngrams = zip(*[words[i:] for i in range(n)])
    return ngrams
def make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=25):
    """Format one EDL event entry for a single clip.

    time_in/time_out are source timestamps, rec_in/rec_out positions on the
    output timeline; all four are given in seconds.
    """
    # EDL reel names are capped at 7 characters.
    reel = full_name[0:7] if len(full_name) > 7 else full_name
    template = '{} {} AA/V C {} {} {} {}\n* FROM CLIP NAME: {}\n* COMMENT: \n FINAL CUT PRO REEL: {} REPLACED BY: {}\n\n'
    return template.format(
        n,
        full_name,
        Timecode(fps, start_seconds=time_in),
        Timecode(fps, start_seconds=time_out),
        Timecode(fps, start_seconds=rec_in),
        Timecode(fps, start_seconds=rec_out),
        filename,
        full_name,
        reel,
    )
def make_edl(timestamps, name):
    '''Converts an array of ordered timestamps into an EDL string'''
    # Cache fps per source file so ffmpeg is only probed once per file.
    fpses = {}
    out = "TITLE: {}\nFCM: NON-DROP FRAME\n\n".format(name)
    # Running position on the record (output) timeline, in seconds.
    rec_in = 0
    for index, timestamp in enumerate(timestamps):
        if timestamp['file'] not in fpses:
            fpses[timestamp['file']] = get_fps(timestamp['file'])
        fps = fpses[timestamp['file']]
        # EDL event numbers are 1-based and zero-padded to four digits.
        n = str(index + 1).zfill(4)
        time_in = timestamp['start']
        time_out = timestamp['end']
        duration = time_out - time_in
        rec_out = rec_in + duration
        full_name = 'reel_{}'.format(n)
        filename = timestamp['file']
        out += make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=fps)
        # Next clip starts where this one ended on the output timeline.
        rec_in = rec_out
    with open(name, 'w') as outfile:
        outfile.write(out)
def create_timestamps(inputfiles):
    # Transcribe the given media files with audiogrep/pocketsphinx:
    # convert to wav first, then write .transcription.txt files.
    files = audiogrep.convert_to_wav(inputfiles)
    audiogrep.transcribe(files)
def convert_timespan(timespan):
    """Convert an srt timespan ("start --> end") into a (start, end) pair of seconds."""
    start_raw, end_raw = timespan.split('-->')
    return convert_timestamp(start_raw), convert_timestamp(end_raw)
def convert_timestamp(timestamp):
    """Convert an srt timestamp ("HH:MM:SS,mmm") into seconds as a float."""
    hms, millis = timestamp.strip().split(',')
    hours, minutes, secs = (int(part) for part in hms.split(':'))
    # Milliseconds contribute the fractional part.
    return secs + hours * 60 * 60 + minutes * 60 + float(millis) / 1000
def clean_srt(srt):
    """Parse an srt file into an OrderedDict mapping timespan lines to subtitle text.

    Cue index numbers are stripped; wrapped subtitle lines are joined with
    spaces under their timespan key.
    """
    with open(srt, 'r') as f:
        raw = f.read()
    # Drop the numeric cue indices so only timespans and text remain.
    raw = re.sub(r'^\d+[\n\r]', '', raw, flags=re.MULTILINE)
    output = OrderedDict()
    current = ''
    for entry in raw.splitlines():
        entry = entry.strip()
        if '-->' in entry:
            # A timespan line starts a new subtitle entry.
            current = entry
            output[current] = ''
        elif current:
            output[current] += entry + ' '
    return output
def cleanup_log_files(outputfile):
    """Search for and remove temp '*ogg.log' files found in the output directory."""
    d = os.path.dirname(os.path.abspath(outputfile))
    logfiles = [f for f in os.listdir(d) if f.endswith('ogg.log')]
    for f in logfiles:
        # Bug fix: os.listdir returns bare names, so the original os.remove(f)
        # deleted from the CWD rather than from the output directory.
        os.remove(os.path.join(d, f))
def demo_supercut(composition, padding):
    """Print the timespan to be cut followed by the matched line for each clip."""
    previous = None
    for clip in composition:
        start = clip['start']
        end = clip['end']
        # When consecutive clips from the same file overlap, shift the
        # printed start by the padding, mirroring what the cut will do.
        if previous is not None and previous['file'] == clip['file'] and start < previous['end']:
            start = start + padding
        print("{1} to {2}:\t{0}".format(clip['line'], start, end))
        previous = clip
def create_supercut(composition, outputfile, padding):
    """Concatenate video clips together and output finished video file to the
    output directory.
    """
    print("[+] Creating clips.")
    demo_supercut(composition, padding)
    # Nudge the start of any clip that overlaps its same-file predecessor
    # so the two cuts don't repeat frames.
    for (clip, nextclip) in zip(composition, composition[1:]):
        if ((nextclip['file'] == clip['file']) and (nextclip['start'] < clip['end'])):
            nextclip['start'] += padding
    # Open each source video once, then cut every requested span from it.
    all_filenames = set([c['file'] for c in composition])
    videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
    cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
    print("[+] Concatenating clips.")
    final_clip = concatenate(cut_clips)
    # Typo fix: "ouput" -> "output".
    print("[+] Writing output file.")
    final_clip.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def create_supercut_in_batches(composition, outputfile, padding):
    """Create & concatenate video clips in groups of size BATCH_SIZE and output
    finished video file to output directory.

    Args:
        composition: list of clip dicts with 'file', 'start' and 'end' keys.
        outputfile: path of the final concatenated video file.
        padding: seconds of padding applied around each clip.
    """
    total_clips = len(composition)
    start_index = 0
    end_index = BATCH_SIZE
    batch_comp = []
    while start_index < total_clips:
        filename = outputfile + '.tmp' + str(start_index) + '.mp4'
        try:
            create_supercut(composition[start_index:end_index], filename, padding)
            batch_comp.append(filename)
            gc.collect()
        except Exception:
            # Bug fix: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit, and ended the handler with a
            # bare `next` expression that did nothing.  A failed batch is
            # skipped and the remaining batches are still processed.
            pass
        # Advance to the next batch whether or not this one succeeded.
        start_index += BATCH_SIZE
        end_index += BATCH_SIZE
    clips = [VideoFileClip(filename) for filename in batch_comp]
    video = concatenate(clips)
    video.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
    # remove partial video files
    for filename in batch_comp:
        os.remove(filename)
    cleanup_log_files(outputfile)
def split_clips(composition, outputfile):
    """Write each matched span as its own numbered video file.

    Output files are named ``<basename>_00000<ext>``, ``<basename>_00001<ext>``,
    etc., derived from *outputfile*.
    """
    # Open each source video once, then cut every requested span from it.
    all_filenames = set([c['file'] for c in composition])
    videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
    cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
    basename, ext = os.path.splitext(outputfile)
    # Typo fix: "ouput" -> "output".
    print("[+] Writing output files.")
    for i, clip in enumerate(cut_clips):
        clipfilename = basename + '_' + str(i).zfill(5) + ext
        clip.to_videofile(clipfilename, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def search_line(line, search, searchtype):
    """Return a truthy value if the search term matches the line, falsy otherwise."""
    if searchtype in ('re', 'word'):
        # Plain (case-sensitive) regex match.
        return re.search(search, line)
    if searchtype == 'pos':
        return searcher.search_out(line, search)
    if searchtype == 'hyper':
        return searcher.hypernym_search(line, search)
def get_vtt_files(inputfile):
    """Return a list of {'vtt', 'video'} dicts for videos with .vtt captions, or False."""
    matches = []
    for video in inputfile:
        # Glob on the extension-less stem so language-tagged captions
        # (e.g. "name.en.vtt") are also picked up.
        stem = '.'.join(video.split('.')[0:-1])
        candidates = glob(stem + '*.vtt')
        if len(candidates) > 0:
            matches.append({'vtt': candidates[0], 'video': video})
    if not matches:
        print("[!] No vtt files were found.")
        return False
    return matches
def compose_from_srts(srts, search, searchtype):
    """Takes a list of subtitle (srt) filenames, search term and search type
    and, returns a list of timestamps for composing a supercut.
    """
    composition = []
    foundSearchTerm = False
    # Iterate over each subtitles file.
    for srt in srts:
        print(srt)
        lines = clean_srt(srt)
        videofile = ""
        foundVideoFile = False
        print("[+] Searching for video file corresponding to '" + srt + "'.")
        for ext in usable_extensions:
            tempVideoFile = srt.replace('.srt', '.' + ext)
            if os.path.isfile(tempVideoFile):
                videofile = tempVideoFile
                foundVideoFile = True
                print("[+] Found '" + tempVideoFile + "'.")
        # If a corresponding video file was found for this subtitles file...
        if foundVideoFile:
            # Check that the subtitles file contains subtitles.
            if lines:
                # Iterate over each line in the current subtitles file.
                for timespan in lines.keys():
                    line = lines[timespan].strip()
                    # If this line contains the search term
                    if search_line(line, search, searchtype):
                        foundSearchTerm = True
                        # Extract the timespan for this subtitle.
                        start, end = convert_timespan(timespan)
                        # Record this occurrence of the search term.
                        composition.append({'file': videofile, 'time': timespan, 'start': start, 'end': end, 'line': line})
                # If the search was unsuccessful.
                if foundSearchTerm is False:
                    # Typo fix: message previously read "was not found is subtitle file".
                    print("[!] Search term '" + search + "'" + " was not found in subtitle file '" + srt + "'.")
            # If no subtitles were found in the current file.
            else:
                print("[!] Subtitle file '" + srt + "' is empty.")
        # If no video file was found...
        else:
            print("[!] No video file was found which corresponds to subtitle file '" + srt + "'.")
            print("[!] The following video formats are currently supported:")
            extList = ""
            for ext in usable_extensions:
                extList += ext + ", "
            print(extList)
    return composition
def compose_from_transcript(files, search, searchtype):
    """Takes transcripts created by audiogrep/pocketsphinx, a search and search type
    and returns a list of timestamps for creating a supercut"""
    final_segments = []
    # Sentence/fragment-level modes are delegated wholesale to audiogrep.
    if searchtype in ['re', 'word', 'franken', 'fragment']:
        if searchtype == 're':
            # audiogrep calls regex-over-sentences mode 'sentence'.
            searchtype = 'sentence'
        segments = audiogrep.search(search, files, mode=searchtype, regex=True)
        for seg in segments:
            # Map the transcript filename back to the source media file.
            seg['file'] = seg['file'].replace('.transcription.txt', '')
            seg['line'] = seg['words']
            final_segments.append(seg)
    # Word-level modes: scan every timestamped word ourselves.
    elif searchtype in ['hyper', 'pos']:
        for s in audiogrep.convert_timestamps(files):
            for w in s['words']:
                # w is (word, start, end); search_line matches on the word text.
                if search_line(w[0], search, searchtype):
                    seg = {
                        'file': s['file'].replace('.transcription.txt',''),
                        'line': w[0],
                        'start': float(w[1]),
                        'end': float(w[2])
                    }
                    final_segments.append(seg)
    return final_segments
def compose_from_vtt(files, search, searchtype):
    # Build supercut clip dicts from vtt caption files (as returned by
    # get_vtt_files), matching at word or sentence granularity.
    final_segments = []
    for f in files:
        video = f['video']
        with open(f['vtt'], 'r') as infile:
            sentences = parse_auto_sub(infile.read())
            for sentence in sentences:
                # Word-level search types inspect each timestamped word.
                if searchtype in ['word', 'hyper', 'pos']:
                    for word in sentence['words']:
                        if search_line(word['word'], search, searchtype):
                            seg = {
                                'file': video,
                                'line': word['word'],
                                'start': word['start'],
                                'end': word['end']
                            }
                            final_segments.append(seg)
                else:
                    # Sentence-level search matches against the whole caption text.
                    if search_line(sentence['text'], search, searchtype):
                        seg = {
                            'file': video,
                            'line': sentence['text'],
                            'start': sentence['start'],
                            'end': sentence['end']
                        }
                        final_segments.append(seg)
    return final_segments
def videogrep(inputfile, outputfile, search, searchtype, maxclips=0, padding=0, test=False, randomize=False, sync=0, use_transcript=False, use_vtt=False, export_clips=False):
    """Search through and find all instances of the search term in an srt or transcript,
    create a supercut around that instance, and output a new video file
    comprised of those supercuts.

    Args mirror the CLI flags; padding and sync are given in milliseconds.
    """
    # Convert milliseconds to seconds for clip arithmetic.
    padding = padding / 1000.0
    sync = sync / 1000.0
    composition = []
    if use_transcript:
        composition = compose_from_transcript(inputfile, search, searchtype)
    elif use_vtt:
        vtts = get_vtt_files(inputfile)
        composition = compose_from_vtt(vtts, search, searchtype)
    else:
        srts = get_subtitle_files(inputfile)
        composition = compose_from_srts(srts, search, searchtype)
    # If the search term was not found in any subtitle file...
    if len(composition) == 0:
        print("[!] Search term '" + search + "'" + " was not found in any file.")
        exit(1)
    else:
        print("[+] Search term '" + search + "'" + " was found in " + str(len(composition)) + " places.")
        # apply padding and sync
        for c in composition:
            c['start'] = c['start'] + sync - padding
            c['end'] = c['end'] + sync + padding
        if maxclips > 0:
            composition = composition[:maxclips]
        if randomize is True:
            random.shuffle(composition)
        if test is True:
            demo_supercut(composition, padding)
        else:
            if os.path.splitext(outputfile)[1].lower() == '.edl':
                make_edl(composition, outputfile)
            elif export_clips:
                split_clips(composition, outputfile)
            else:
                if len(composition) > BATCH_SIZE:
                    # Typo fix: banner previously printed a mismatched "[+}".
                    print("[+] Starting batch job.")
                    create_supercut_in_batches(composition, outputfile, padding)
                else:
                    create_supercut(composition, outputfile, padding)
def main():
    # Command-line entry point: parse flags and dispatch to transcription,
    # ngram listing, or the supercut pipeline.
    import argparse
    parser = argparse.ArgumentParser(description='Generate a "supercut" of one or more video files by searching through subtitle tracks.')
    parser.add_argument('--input', '-i', dest='inputfile', nargs='*', required=True, help='video or subtitle file, or folder')
    parser.add_argument('--search', '-s', dest='search', help='search term')
    parser.add_argument('--search-type', '-st', dest='searchtype', default='re', choices=['re', 'pos', 'hyper', 'fragment', 'franken', 'word'], help='type of search')
    parser.add_argument('--use-transcript', '-t', action='store_true', dest='use_transcript', help='Use a transcript generated by pocketsphinx instead of srt files')
    parser.add_argument('--use-vtt', '-vtt', action='store_true', dest='use_vtt', help='Use a vtt file instead of srt')
    parser.add_argument('--max-clips', '-m', dest='maxclips', type=int, default=0, help='maximum number of clips to use for the supercut')
    parser.add_argument('--output', '-o', dest='outputfile', default='supercut.mp4', help='name of output file')
    parser.add_argument('--export-clips', '-ec', dest='export_clips', action='store_true', help='Export individual clips')
    parser.add_argument('--demo', '-d', action='store_true', help='show results without making the supercut')
    parser.add_argument('--randomize', '-r', action='store_true', help='randomize the clips')
    parser.add_argument('--youtube', '-yt', help='grab clips from youtube based on your search')
    parser.add_argument('--padding', '-p', dest='padding', default=0, type=int, help='padding in milliseconds to add to the start and end of each clip')
    parser.add_argument('--resyncsubs', '-rs', dest='sync', default=0, type=int, help='Subtitle re-synch delay +/- in milliseconds')
    parser.add_argument('--transcribe', '-tr', dest='transcribe', action='store_true', help='Transcribe the video using audiogrep. Requires pocketsphinx')
    parser.add_argument('--ngrams', '-n', dest='ngrams', type=int, default=0, help='Return ngrams for videos')
    args = parser.parse_args()
    # --search is only optional for transcription / ngram-listing runs.
    if not args.transcribe and args.ngrams == 0:
        if args.search is None:
            parser.error('argument --search/-s is required')
    if args.transcribe:
        create_timestamps(args.inputfile)
    elif args.ngrams > 0:
        # Print the 100 most common ngrams instead of cutting video.
        from collections import Counter
        grams = get_ngrams(args.inputfile, args.ngrams, args.use_transcript, args.use_vtt)
        most_common = Counter(grams).most_common(100)
        for ngram, count in most_common:
            print(' '.join(ngram), count)
    else:
        videogrep(args.inputfile, args.outputfile, args.search, args.searchtype, args.maxclips, args.padding, args.demo, args.randomize, args.sync, args.use_transcript, args.use_vtt, args.export_clips)


if __name__ == '__main__':
    main()
|
antiboredom/videogrep | videogrep/videogrep.py | get_vtt_files | python | def get_vtt_files(inputfile):
vtts = []
for f in inputfile:
filename = f.split('.')
filename = '.'.join(filename[0:-1])
vtt = glob(filename + '*.vtt')
if len(vtt) > 0:
vtts.append({'vtt': vtt[0], 'video': f})
if len(vtts) == 0:
print("[!] No vtt files were found.")
return False
return vtts | Return a list of vtt files. | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/videogrep.py#L291-L306 | null | from __future__ import print_function
import os
import re
import random
import gc
import subprocess
from glob import glob
from collections import OrderedDict
from moviepy.editor import VideoFileClip, concatenate
import audiogrep
from .vtt import parse_auto_sub
from .timecode import Timecode
from . import searcher
usable_extensions = ['mp4', 'avi', 'mov', 'mkv', 'm4v']
BATCH_SIZE = 20
def get_fps(filename):
    """Return the frame rate of a video file by parsing ffmpeg's stderr output.

    Falls back to 25 fps when ffmpeg is unavailable or no rate can be parsed.
    """
    try:
        process = subprocess.Popen(['ffmpeg', '-i', filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        process.wait()
        # Bug fix: on Python 3 the pipe yields bytes; decode before regex
        # matching, otherwise re.findall raised and 25 was always returned.
        output = process.stdout.read().decode('utf-8', errors='replace')
        fps = re.findall(r'\d+ fps', output, flags=re.MULTILINE)
        return int(fps[0].split(' ')[0])
    except Exception:
        # Common default frame rate when detection fails.
        return 25
def get_ngrams(inputfile, n=1, use_transcript=False, use_vtt=False):
    '''
    Get ngrams from a text
    Sourced from:
    https://gist.github.com/dannguyen/93c2c43f4e65328b85af
    '''
    words = []
    # Collect the word stream from whichever caption source is selected.
    if use_transcript:
        # audiogrep transcripts: each sentence carries (word, start, end) tuples.
        for s in audiogrep.convert_timestamps(inputfile):
            for w in s['words']:
                words.append(w[0])
    elif use_vtt:
        # vtt captions: parse_auto_sub yields sentences with word dicts.
        vtts = get_vtt_files(inputfile)
        for vtt in vtts:
            with open(vtt['vtt'], 'r') as infile:
                sentences = parse_auto_sub(infile.read())
                for s in sentences:
                    for w in s['words']:
                        words.append(w['word'])
    else:
        # srt subtitles: flatten all subtitle lines into one text blob.
        text = ''
        srts = get_subtitle_files(inputfile)
        for srt in srts:
            lines = clean_srt(srt)
            if lines:
                for timespan in lines.keys():
                    line = lines[timespan].strip()
                    text += line + ' '
        # Split on punctuation and whitespace to get bare words.
        words = re.split(r'[.?!,:\"]+\s*|\s+', text)
    # Sliding window of n consecutive words; yields tuples lazily (zip object).
    ngrams = zip(*[words[i:] for i in range(n)])
    return ngrams
def make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=25):
    """Format one EDL event entry for a single clip.

    time_in/time_out are source timestamps, rec_in/rec_out positions on the
    output timeline; all four are given in seconds.
    """
    # EDL reel names are capped at 7 characters.
    reel = full_name[0:7] if len(full_name) > 7 else full_name
    template = '{} {} AA/V C {} {} {} {}\n* FROM CLIP NAME: {}\n* COMMENT: \n FINAL CUT PRO REEL: {} REPLACED BY: {}\n\n'
    return template.format(
        n,
        full_name,
        Timecode(fps, start_seconds=time_in),
        Timecode(fps, start_seconds=time_out),
        Timecode(fps, start_seconds=rec_in),
        Timecode(fps, start_seconds=rec_out),
        filename,
        full_name,
        reel,
    )
def make_edl(timestamps, name):
    '''Converts an array of ordered timestamps into an EDL string'''
    # Cache fps per source file so ffmpeg is only probed once per file.
    fpses = {}
    out = "TITLE: {}\nFCM: NON-DROP FRAME\n\n".format(name)
    # Running position on the record (output) timeline, in seconds.
    rec_in = 0
    for index, timestamp in enumerate(timestamps):
        if timestamp['file'] not in fpses:
            fpses[timestamp['file']] = get_fps(timestamp['file'])
        fps = fpses[timestamp['file']]
        # EDL event numbers are 1-based and zero-padded to four digits.
        n = str(index + 1).zfill(4)
        time_in = timestamp['start']
        time_out = timestamp['end']
        duration = time_out - time_in
        rec_out = rec_in + duration
        full_name = 'reel_{}'.format(n)
        filename = timestamp['file']
        out += make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=fps)
        # Next clip starts where this one ended on the output timeline.
        rec_in = rec_out
    with open(name, 'w') as outfile:
        outfile.write(out)
def create_timestamps(inputfiles):
    # Transcribe the given media files with audiogrep/pocketsphinx:
    # convert to wav first, then write .transcription.txt files.
    files = audiogrep.convert_to_wav(inputfiles)
    audiogrep.transcribe(files)
def convert_timespan(timespan):
    """Convert an srt timespan ("start --> end") into a (start, end) pair of seconds."""
    start_raw, end_raw = timespan.split('-->')
    return convert_timestamp(start_raw), convert_timestamp(end_raw)
def convert_timestamp(timestamp):
    """Convert an srt timestamp ("HH:MM:SS,mmm") into seconds as a float."""
    hms, millis = timestamp.strip().split(',')
    hours, minutes, secs = (int(part) for part in hms.split(':'))
    # Milliseconds contribute the fractional part.
    return secs + hours * 60 * 60 + minutes * 60 + float(millis) / 1000
def clean_srt(srt):
    """Parse an srt file into an OrderedDict mapping timespan lines to subtitle text.

    Cue index numbers are stripped; wrapped subtitle lines are joined with
    spaces under their timespan key.
    """
    with open(srt, 'r') as f:
        raw = f.read()
    # Drop the numeric cue indices so only timespans and text remain.
    raw = re.sub(r'^\d+[\n\r]', '', raw, flags=re.MULTILINE)
    output = OrderedDict()
    current = ''
    for entry in raw.splitlines():
        entry = entry.strip()
        if '-->' in entry:
            # A timespan line starts a new subtitle entry.
            current = entry
            output[current] = ''
        elif current:
            output[current] += entry + ' '
    return output
def cleanup_log_files(outputfile):
    """Search for and remove temp '*ogg.log' files found in the output directory."""
    d = os.path.dirname(os.path.abspath(outputfile))
    logfiles = [f for f in os.listdir(d) if f.endswith('ogg.log')]
    for f in logfiles:
        # Bug fix: os.listdir returns bare names, so the original os.remove(f)
        # deleted from the CWD rather than from the output directory.
        os.remove(os.path.join(d, f))
def demo_supercut(composition, padding):
    """Print the timespan to be cut followed by the matched line for each clip."""
    previous = None
    for clip in composition:
        start = clip['start']
        end = clip['end']
        # When consecutive clips from the same file overlap, shift the
        # printed start by the padding, mirroring what the cut will do.
        if previous is not None and previous['file'] == clip['file'] and start < previous['end']:
            start = start + padding
        print("{1} to {2}:\t{0}".format(clip['line'], start, end))
        previous = clip
def create_supercut(composition, outputfile, padding):
    """Concatenate video clips together and output finished video file to the
    output directory.
    """
    print("[+] Creating clips.")
    demo_supercut(composition, padding)
    # Nudge the start of any clip that overlaps its same-file predecessor
    # so the two cuts don't repeat frames.
    for (clip, nextclip) in zip(composition, composition[1:]):
        if ((nextclip['file'] == clip['file']) and (nextclip['start'] < clip['end'])):
            nextclip['start'] += padding
    # Open each source video once, then cut every requested span from it.
    all_filenames = set([c['file'] for c in composition])
    videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
    cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
    print("[+] Concatenating clips.")
    final_clip = concatenate(cut_clips)
    # Typo fix: "ouput" -> "output".
    print("[+] Writing output file.")
    final_clip.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def create_supercut_in_batches(composition, outputfile, padding):
    """Create & concatenate video clips in groups of size BATCH_SIZE and output
    finished video file to output directory.

    Args:
        composition: list of clip dicts with 'file', 'start' and 'end' keys.
        outputfile: path of the final concatenated video file.
        padding: seconds of padding applied around each clip.
    """
    total_clips = len(composition)
    start_index = 0
    end_index = BATCH_SIZE
    batch_comp = []
    while start_index < total_clips:
        filename = outputfile + '.tmp' + str(start_index) + '.mp4'
        try:
            create_supercut(composition[start_index:end_index], filename, padding)
            batch_comp.append(filename)
            gc.collect()
        except Exception:
            # Bug fix: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit, and ended the handler with a
            # bare `next` expression that did nothing.  A failed batch is
            # skipped and the remaining batches are still processed.
            pass
        # Advance to the next batch whether or not this one succeeded.
        start_index += BATCH_SIZE
        end_index += BATCH_SIZE
    clips = [VideoFileClip(filename) for filename in batch_comp]
    video = concatenate(clips)
    video.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
    # remove partial video files
    for filename in batch_comp:
        os.remove(filename)
    cleanup_log_files(outputfile)
def split_clips(composition, outputfile):
    """Write each matched span as its own numbered video file.

    Output files are named ``<basename>_00000<ext>``, ``<basename>_00001<ext>``,
    etc., derived from *outputfile*.
    """
    # Open each source video once, then cut every requested span from it.
    all_filenames = set([c['file'] for c in composition])
    videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
    cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
    basename, ext = os.path.splitext(outputfile)
    # Typo fix: "ouput" -> "output".
    print("[+] Writing output files.")
    for i, clip in enumerate(cut_clips):
        clipfilename = basename + '_' + str(i).zfill(5) + ext
        clip.to_videofile(clipfilename, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def search_line(line, search, searchtype):
    """Return a truthy value if the search term matches the line, falsy otherwise."""
    if searchtype in ('re', 'word'):
        # Plain (case-sensitive) regex match.
        return re.search(search, line)
    if searchtype == 'pos':
        return searcher.search_out(line, search)
    if searchtype == 'hyper':
        return searcher.hypernym_search(line, search)
def get_subtitle_files(inputfile):
    """Return a list of .srt files matching the given input files, or False."""
    found = []
    for path in inputfile:
        # Swap the extension for .srt and check the sibling file exists.
        parts = path.split('.')
        parts[-1] = 'srt'
        candidate = '.'.join(parts)
        if os.path.isfile(candidate):
            found.append(candidate)
    if not found:
        print("[!] No subtitle files were found.")
        return False
    return found
def compose_from_srts(srts, search, searchtype):
"""Takes a list of subtitle (srt) filenames, search term and search type
and, returns a list of timestamps for composing a supercut.
"""
composition = []
foundSearchTerm = False
# Iterate over each subtitles file.
for srt in srts:
print(srt)
lines = clean_srt(srt)
videofile = ""
foundVideoFile = False
print("[+] Searching for video file corresponding to '" + srt + "'.")
for ext in usable_extensions:
tempVideoFile = srt.replace('.srt', '.' + ext)
if os.path.isfile(tempVideoFile):
videofile = tempVideoFile
foundVideoFile = True
print("[+] Found '" + tempVideoFile + "'.")
# If a correspndong video file was found for this subtitles file...
if foundVideoFile:
# Check that the subtitles file contains subtitles.
if lines:
# Iterate over each line in the current subtitles file.
for timespan in lines.keys():
line = lines[timespan].strip()
# If this line contains the search term
if search_line(line, search, searchtype):
foundSearchTerm = True
# Extract the timespan for this subtitle.
start, end = convert_timespan(timespan)
# Record this occurance of the search term.
composition.append({'file': videofile, 'time': timespan, 'start': start, 'end': end, 'line': line})
# If the search was unsuccessful.
if foundSearchTerm is False:
print("[!] Search term '" + search + "'" + " was not found is subtitle file '" + srt + "'.")
# If no subtitles were found in the current file.
else:
print("[!] Subtitle file '" + srt + "' is empty.")
# If no video file was found...
else:
print("[!] No video file was found which corresponds to subtitle file '" + srt + "'.")
print("[!] The following video formats are currently supported:")
extList = ""
for ext in usable_extensions:
extList += ext + ", "
print(extList)
return composition
def compose_from_transcript(files, search, searchtype):
"""Takes transcripts created by audiogrep/pocketsphinx, a search and search type
and returns a list of timestamps for creating a supercut"""
final_segments = []
if searchtype in ['re', 'word', 'franken', 'fragment']:
if searchtype == 're':
searchtype = 'sentence'
segments = audiogrep.search(search, files, mode=searchtype, regex=True)
for seg in segments:
seg['file'] = seg['file'].replace('.transcription.txt', '')
seg['line'] = seg['words']
final_segments.append(seg)
elif searchtype in ['hyper', 'pos']:
for s in audiogrep.convert_timestamps(files):
for w in s['words']:
if search_line(w[0], search, searchtype):
seg = {
'file': s['file'].replace('.transcription.txt',''),
'line': w[0],
'start': float(w[1]),
'end': float(w[2])
}
final_segments.append(seg)
return final_segments
def compose_from_vtt(files, search, searchtype):
final_segments = []
for f in files:
video = f['video']
with open(f['vtt'], 'r') as infile:
sentences = parse_auto_sub(infile.read())
for sentence in sentences:
if searchtype in ['word', 'hyper', 'pos']:
for word in sentence['words']:
if search_line(word['word'], search, searchtype):
seg = {
'file': video,
'line': word['word'],
'start': word['start'],
'end': word['end']
}
final_segments.append(seg)
else:
if search_line(sentence['text'], search, searchtype):
seg = {
'file': video,
'line': sentence['text'],
'start': sentence['start'],
'end': sentence['end']
}
final_segments.append(seg)
return final_segments
def videogrep(inputfile, outputfile, search, searchtype, maxclips=0, padding=0, test=False, randomize=False, sync=0, use_transcript=False, use_vtt=False, export_clips=False):
"""Search through and find all instances of the search term in an srt or transcript,
create a supercut around that instance, and output a new video file
comprised of those supercuts.
"""
padding = padding / 1000.0
sync = sync / 1000.0
composition = []
foundSearchTerm = False
if use_transcript:
composition = compose_from_transcript(inputfile, search, searchtype)
elif use_vtt:
vtts = get_vtt_files(inputfile)
composition = compose_from_vtt(vtts, search, searchtype)
else:
srts = get_subtitle_files(inputfile)
composition = compose_from_srts(srts, search, searchtype)
# If the search term was not found in any subtitle file...
if len(composition) == 0:
print("[!] Search term '" + search + "'" + " was not found in any file.")
exit(1)
else:
print("[+] Search term '" + search + "'" + " was found in " + str(len(composition)) + " places.")
# apply padding and sync
for c in composition:
c['start'] = c['start'] + sync - padding
c['end'] = c['end'] + sync + padding
if maxclips > 0:
composition = composition[:maxclips]
if randomize is True:
random.shuffle(composition)
if test is True:
demo_supercut(composition, padding)
else:
if os.path.splitext(outputfile)[1].lower() == '.edl':
make_edl(composition, outputfile)
elif export_clips:
split_clips(composition, outputfile)
else:
if len(composition) > BATCH_SIZE:
print("[+} Starting batch job.")
create_supercut_in_batches(composition, outputfile, padding)
else:
create_supercut(composition, outputfile, padding)
def main():
import argparse
parser = argparse.ArgumentParser(description='Generate a "supercut" of one or more video files by searching through subtitle tracks.')
parser.add_argument('--input', '-i', dest='inputfile', nargs='*', required=True, help='video or subtitle file, or folder')
parser.add_argument('--search', '-s', dest='search', help='search term')
parser.add_argument('--search-type', '-st', dest='searchtype', default='re', choices=['re', 'pos', 'hyper', 'fragment', 'franken', 'word'], help='type of search')
parser.add_argument('--use-transcript', '-t', action='store_true', dest='use_transcript', help='Use a transcript generated by pocketsphinx instead of srt files')
parser.add_argument('--use-vtt', '-vtt', action='store_true', dest='use_vtt', help='Use a vtt file instead of srt')
parser.add_argument('--max-clips', '-m', dest='maxclips', type=int, default=0, help='maximum number of clips to use for the supercut')
parser.add_argument('--output', '-o', dest='outputfile', default='supercut.mp4', help='name of output file')
parser.add_argument('--export-clips', '-ec', dest='export_clips', action='store_true', help='Export individual clips')
parser.add_argument('--demo', '-d', action='store_true', help='show results without making the supercut')
parser.add_argument('--randomize', '-r', action='store_true', help='randomize the clips')
parser.add_argument('--youtube', '-yt', help='grab clips from youtube based on your search')
parser.add_argument('--padding', '-p', dest='padding', default=0, type=int, help='padding in milliseconds to add to the start and end of each clip')
parser.add_argument('--resyncsubs', '-rs', dest='sync', default=0, type=int, help='Subtitle re-synch delay +/- in milliseconds')
parser.add_argument('--transcribe', '-tr', dest='transcribe', action='store_true', help='Transcribe the video using audiogrep. Requires pocketsphinx')
parser.add_argument('--ngrams', '-n', dest='ngrams', type=int, default=0, help='Return ngrams for videos')
args = parser.parse_args()
if not args.transcribe and args.ngrams == 0:
if args.search is None:
parser.error('argument --search/-s is required')
if args.transcribe:
create_timestamps(args.inputfile)
elif args.ngrams > 0:
from collections import Counter
grams = get_ngrams(args.inputfile, args.ngrams, args.use_transcript, args.use_vtt)
most_common = Counter(grams).most_common(100)
for ngram, count in most_common:
print(' '.join(ngram), count)
else:
videogrep(args.inputfile, args.outputfile, args.search, args.searchtype, args.maxclips, args.padding, args.demo, args.randomize, args.sync, args.use_transcript, args.use_vtt, args.export_clips)
if __name__ == '__main__':
main()
|
antiboredom/videogrep | videogrep/videogrep.py | compose_from_srts | python | def compose_from_srts(srts, search, searchtype):
composition = []
foundSearchTerm = False
# Iterate over each subtitles file.
for srt in srts:
print(srt)
lines = clean_srt(srt)
videofile = ""
foundVideoFile = False
print("[+] Searching for video file corresponding to '" + srt + "'.")
for ext in usable_extensions:
tempVideoFile = srt.replace('.srt', '.' + ext)
if os.path.isfile(tempVideoFile):
videofile = tempVideoFile
foundVideoFile = True
print("[+] Found '" + tempVideoFile + "'.")
# If a correspndong video file was found for this subtitles file...
if foundVideoFile:
# Check that the subtitles file contains subtitles.
if lines:
# Iterate over each line in the current subtitles file.
for timespan in lines.keys():
line = lines[timespan].strip()
# If this line contains the search term
if search_line(line, search, searchtype):
foundSearchTerm = True
# Extract the timespan for this subtitle.
start, end = convert_timespan(timespan)
# Record this occurance of the search term.
composition.append({'file': videofile, 'time': timespan, 'start': start, 'end': end, 'line': line})
# If the search was unsuccessful.
if foundSearchTerm is False:
print("[!] Search term '" + search + "'" + " was not found is subtitle file '" + srt + "'.")
# If no subtitles were found in the current file.
else:
print("[!] Subtitle file '" + srt + "' is empty.")
# If no video file was found...
else:
print("[!] No video file was found which corresponds to subtitle file '" + srt + "'.")
print("[!] The following video formats are currently supported:")
extList = ""
for ext in usable_extensions:
extList += ext + ", "
print(extList)
return composition | Takes a list of subtitle (srt) filenames, search term and search type
and, returns a list of timestamps for composing a supercut. | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/videogrep.py#L309-L371 | [
"def clean_srt(srt):\n \"\"\"Remove damaging line breaks and numbers from srt files and return a\n dictionary.\n \"\"\"\n with open(srt, 'r') as f:\n text = f.read()\n text = re.sub(r'^\\d+[\\n\\r]', '', text, flags=re.MULTILINE)\n lines = text.splitlines()\n output = OrderedDict()\n ... | from __future__ import print_function
import os
import re
import random
import gc
import subprocess
from glob import glob
from collections import OrderedDict
from moviepy.editor import VideoFileClip, concatenate
import audiogrep
from .vtt import parse_auto_sub
from .timecode import Timecode
from . import searcher
usable_extensions = ['mp4', 'avi', 'mov', 'mkv', 'm4v']
BATCH_SIZE = 20
def get_fps(filename):
process = subprocess.Popen(['ffmpeg', '-i', filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
returncode = process.wait()
output = process.stdout.read()
fps = re.findall(r'\d+ fps', output, flags=re.MULTILINE)
try:
return int(fps[0].split(' ')[0])
except:
return 25
def get_ngrams(inputfile, n=1, use_transcript=False, use_vtt=False):
'''
Get ngrams from a text
Sourced from:
https://gist.github.com/dannguyen/93c2c43f4e65328b85af
'''
words = []
if use_transcript:
for s in audiogrep.convert_timestamps(inputfile):
for w in s['words']:
words.append(w[0])
elif use_vtt:
vtts = get_vtt_files(inputfile)
for vtt in vtts:
with open(vtt['vtt'], 'r') as infile:
sentences = parse_auto_sub(infile.read())
for s in sentences:
for w in s['words']:
words.append(w['word'])
else:
text = ''
srts = get_subtitle_files(inputfile)
for srt in srts:
lines = clean_srt(srt)
if lines:
for timespan in lines.keys():
line = lines[timespan].strip()
text += line + ' '
words = re.split(r'[.?!,:\"]+\s*|\s+', text)
ngrams = zip(*[words[i:] for i in range(n)])
return ngrams
def make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=25):
reel = full_name
if len(full_name) > 7:
reel = full_name[0:7]
template = '{} {} AA/V C {} {} {} {}\n* FROM CLIP NAME: {}\n* COMMENT: \n FINAL CUT PRO REEL: {} REPLACED BY: {}\n\n'
out = template.format(
n,
full_name,
Timecode(fps, start_seconds=time_in),
Timecode(fps, start_seconds=time_out),
Timecode(fps, start_seconds=rec_in),
Timecode(fps, start_seconds=rec_out),
filename,
full_name,
reel
)
return out
def make_edl(timestamps, name):
'''Converts an array of ordered timestamps into an EDL string'''
fpses = {}
out = "TITLE: {}\nFCM: NON-DROP FRAME\n\n".format(name)
rec_in = 0
for index, timestamp in enumerate(timestamps):
if timestamp['file'] not in fpses:
fpses[timestamp['file']] = get_fps(timestamp['file'])
fps = fpses[timestamp['file']]
n = str(index + 1).zfill(4)
time_in = timestamp['start']
time_out = timestamp['end']
duration = time_out - time_in
rec_out = rec_in + duration
full_name = 'reel_{}'.format(n)
filename = timestamp['file']
out += make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=fps)
rec_in = rec_out
with open(name, 'w') as outfile:
outfile.write(out)
def create_timestamps(inputfiles):
files = audiogrep.convert_to_wav(inputfiles)
audiogrep.transcribe(files)
def convert_timespan(timespan):
"""Convert an srt timespan into a start and end timestamp."""
start, end = timespan.split('-->')
start = convert_timestamp(start)
end = convert_timestamp(end)
return start, end
def convert_timestamp(timestamp):
"""Convert an srt timestamp into seconds."""
timestamp = timestamp.strip()
chunk, millis = timestamp.split(',')
hours, minutes, seconds = chunk.split(':')
hours = int(hours)
minutes = int(minutes)
seconds = int(seconds)
seconds = seconds + hours * 60 * 60 + minutes * 60 + float(millis) / 1000
return seconds
def clean_srt(srt):
"""Remove damaging line breaks and numbers from srt files and return a
dictionary.
"""
with open(srt, 'r') as f:
text = f.read()
text = re.sub(r'^\d+[\n\r]', '', text, flags=re.MULTILINE)
lines = text.splitlines()
output = OrderedDict()
key = ''
for line in lines:
line = line.strip()
if line.find('-->') > -1:
key = line
output[key] = ''
else:
if key != '':
output[key] += line + ' '
return output
def cleanup_log_files(outputfile):
"""Search for and remove temp log files found in the output directory."""
d = os.path.dirname(os.path.abspath(outputfile))
logfiles = [f for f in os.listdir(d) if f.endswith('ogg.log')]
for f in logfiles:
os.remove(f)
def demo_supercut(composition, padding):
"""Print out timespans to be cut followed by the line number in the srt."""
for i, c in enumerate(composition):
line = c['line']
start = c['start']
end = c['end']
if i > 0 and composition[i - 1]['file'] == c['file'] and start < composition[i - 1]['end']:
start = start + padding
print("{1} to {2}:\t{0}".format(line, start, end))
def create_supercut(composition, outputfile, padding):
"""Concatenate video clips together and output finished video file to the
output directory.
"""
print("[+] Creating clips.")
demo_supercut(composition, padding)
# add padding when necessary
for (clip, nextclip) in zip(composition, composition[1:]):
if ((nextclip['file'] == clip['file']) and (nextclip['start'] < clip['end'])):
nextclip['start'] += padding
# put all clips together:
all_filenames = set([c['file'] for c in composition])
videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
print("[+] Concatenating clips.")
final_clip = concatenate(cut_clips)
print("[+] Writing ouput file.")
final_clip.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def create_supercut_in_batches(composition, outputfile, padding):
"""Create & concatenate video clips in groups of size BATCH_SIZE and output
finished video file to output directory.
"""
total_clips = len(composition)
start_index = 0
end_index = BATCH_SIZE
batch_comp = []
while start_index < total_clips:
filename = outputfile + '.tmp' + str(start_index) + '.mp4'
try:
create_supercut(composition[start_index:end_index], filename, padding)
batch_comp.append(filename)
gc.collect()
start_index += BATCH_SIZE
end_index += BATCH_SIZE
except:
start_index += BATCH_SIZE
end_index += BATCH_SIZE
next
clips = [VideoFileClip(filename) for filename in batch_comp]
video = concatenate(clips)
video.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
# remove partial video files
for filename in batch_comp:
os.remove(filename)
cleanup_log_files(outputfile)
def split_clips(composition, outputfile):
all_filenames = set([c['file'] for c in composition])
videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
basename, ext = os.path.splitext(outputfile)
print("[+] Writing ouput files.")
for i, clip in enumerate(cut_clips):
clipfilename = basename + '_' + str(i).zfill(5) + ext
clip.to_videofile(clipfilename, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def search_line(line, search, searchtype):
"""Return True if search term is found in given line, False otherwise."""
if searchtype == 're' or searchtype == 'word':
return re.search(search, line) #, re.IGNORECASE)
elif searchtype == 'pos':
return searcher.search_out(line, search)
elif searchtype == 'hyper':
return searcher.hypernym_search(line, search)
def get_subtitle_files(inputfile):
"""Return a list of subtitle files."""
srts = []
for f in inputfile:
filename = f.split('.')
filename[-1] = 'srt'
srt = '.'.join(filename)
if os.path.isfile(srt):
srts.append(srt)
if len(srts) == 0:
print("[!] No subtitle files were found.")
return False
return srts
def get_vtt_files(inputfile):
"""Return a list of vtt files."""
vtts = []
for f in inputfile:
filename = f.split('.')
filename = '.'.join(filename[0:-1])
vtt = glob(filename + '*.vtt')
if len(vtt) > 0:
vtts.append({'vtt': vtt[0], 'video': f})
if len(vtts) == 0:
print("[!] No vtt files were found.")
return False
return vtts
def compose_from_transcript(files, search, searchtype):
"""Takes transcripts created by audiogrep/pocketsphinx, a search and search type
and returns a list of timestamps for creating a supercut"""
final_segments = []
if searchtype in ['re', 'word', 'franken', 'fragment']:
if searchtype == 're':
searchtype = 'sentence'
segments = audiogrep.search(search, files, mode=searchtype, regex=True)
for seg in segments:
seg['file'] = seg['file'].replace('.transcription.txt', '')
seg['line'] = seg['words']
final_segments.append(seg)
elif searchtype in ['hyper', 'pos']:
for s in audiogrep.convert_timestamps(files):
for w in s['words']:
if search_line(w[0], search, searchtype):
seg = {
'file': s['file'].replace('.transcription.txt',''),
'line': w[0],
'start': float(w[1]),
'end': float(w[2])
}
final_segments.append(seg)
return final_segments
def compose_from_vtt(files, search, searchtype):
final_segments = []
for f in files:
video = f['video']
with open(f['vtt'], 'r') as infile:
sentences = parse_auto_sub(infile.read())
for sentence in sentences:
if searchtype in ['word', 'hyper', 'pos']:
for word in sentence['words']:
if search_line(word['word'], search, searchtype):
seg = {
'file': video,
'line': word['word'],
'start': word['start'],
'end': word['end']
}
final_segments.append(seg)
else:
if search_line(sentence['text'], search, searchtype):
seg = {
'file': video,
'line': sentence['text'],
'start': sentence['start'],
'end': sentence['end']
}
final_segments.append(seg)
return final_segments
def videogrep(inputfile, outputfile, search, searchtype, maxclips=0, padding=0, test=False, randomize=False, sync=0, use_transcript=False, use_vtt=False, export_clips=False):
"""Search through and find all instances of the search term in an srt or transcript,
create a supercut around that instance, and output a new video file
comprised of those supercuts.
"""
padding = padding / 1000.0
sync = sync / 1000.0
composition = []
foundSearchTerm = False
if use_transcript:
composition = compose_from_transcript(inputfile, search, searchtype)
elif use_vtt:
vtts = get_vtt_files(inputfile)
composition = compose_from_vtt(vtts, search, searchtype)
else:
srts = get_subtitle_files(inputfile)
composition = compose_from_srts(srts, search, searchtype)
# If the search term was not found in any subtitle file...
if len(composition) == 0:
print("[!] Search term '" + search + "'" + " was not found in any file.")
exit(1)
else:
print("[+] Search term '" + search + "'" + " was found in " + str(len(composition)) + " places.")
# apply padding and sync
for c in composition:
c['start'] = c['start'] + sync - padding
c['end'] = c['end'] + sync + padding
if maxclips > 0:
composition = composition[:maxclips]
if randomize is True:
random.shuffle(composition)
if test is True:
demo_supercut(composition, padding)
else:
if os.path.splitext(outputfile)[1].lower() == '.edl':
make_edl(composition, outputfile)
elif export_clips:
split_clips(composition, outputfile)
else:
if len(composition) > BATCH_SIZE:
print("[+} Starting batch job.")
create_supercut_in_batches(composition, outputfile, padding)
else:
create_supercut(composition, outputfile, padding)
def main():
import argparse
parser = argparse.ArgumentParser(description='Generate a "supercut" of one or more video files by searching through subtitle tracks.')
parser.add_argument('--input', '-i', dest='inputfile', nargs='*', required=True, help='video or subtitle file, or folder')
parser.add_argument('--search', '-s', dest='search', help='search term')
parser.add_argument('--search-type', '-st', dest='searchtype', default='re', choices=['re', 'pos', 'hyper', 'fragment', 'franken', 'word'], help='type of search')
parser.add_argument('--use-transcript', '-t', action='store_true', dest='use_transcript', help='Use a transcript generated by pocketsphinx instead of srt files')
parser.add_argument('--use-vtt', '-vtt', action='store_true', dest='use_vtt', help='Use a vtt file instead of srt')
parser.add_argument('--max-clips', '-m', dest='maxclips', type=int, default=0, help='maximum number of clips to use for the supercut')
parser.add_argument('--output', '-o', dest='outputfile', default='supercut.mp4', help='name of output file')
parser.add_argument('--export-clips', '-ec', dest='export_clips', action='store_true', help='Export individual clips')
parser.add_argument('--demo', '-d', action='store_true', help='show results without making the supercut')
parser.add_argument('--randomize', '-r', action='store_true', help='randomize the clips')
parser.add_argument('--youtube', '-yt', help='grab clips from youtube based on your search')
parser.add_argument('--padding', '-p', dest='padding', default=0, type=int, help='padding in milliseconds to add to the start and end of each clip')
parser.add_argument('--resyncsubs', '-rs', dest='sync', default=0, type=int, help='Subtitle re-synch delay +/- in milliseconds')
parser.add_argument('--transcribe', '-tr', dest='transcribe', action='store_true', help='Transcribe the video using audiogrep. Requires pocketsphinx')
parser.add_argument('--ngrams', '-n', dest='ngrams', type=int, default=0, help='Return ngrams for videos')
args = parser.parse_args()
if not args.transcribe and args.ngrams == 0:
if args.search is None:
parser.error('argument --search/-s is required')
if args.transcribe:
create_timestamps(args.inputfile)
elif args.ngrams > 0:
from collections import Counter
grams = get_ngrams(args.inputfile, args.ngrams, args.use_transcript, args.use_vtt)
most_common = Counter(grams).most_common(100)
for ngram, count in most_common:
print(' '.join(ngram), count)
else:
videogrep(args.inputfile, args.outputfile, args.search, args.searchtype, args.maxclips, args.padding, args.demo, args.randomize, args.sync, args.use_transcript, args.use_vtt, args.export_clips)
if __name__ == '__main__':
main()
|
antiboredom/videogrep | videogrep/videogrep.py | compose_from_transcript | python | def compose_from_transcript(files, search, searchtype):
final_segments = []
if searchtype in ['re', 'word', 'franken', 'fragment']:
if searchtype == 're':
searchtype = 'sentence'
segments = audiogrep.search(search, files, mode=searchtype, regex=True)
for seg in segments:
seg['file'] = seg['file'].replace('.transcription.txt', '')
seg['line'] = seg['words']
final_segments.append(seg)
elif searchtype in ['hyper', 'pos']:
for s in audiogrep.convert_timestamps(files):
for w in s['words']:
if search_line(w[0], search, searchtype):
seg = {
'file': s['file'].replace('.transcription.txt',''),
'line': w[0],
'start': float(w[1]),
'end': float(w[2])
}
final_segments.append(seg)
return final_segments | Takes transcripts created by audiogrep/pocketsphinx, a search and search type
and returns a list of timestamps for creating a supercut | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/videogrep.py#L374-L402 | [
"def search_line(line, search, searchtype):\n \"\"\"Return True if search term is found in given line, False otherwise.\"\"\"\n if searchtype == 're' or searchtype == 'word':\n return re.search(search, line) #, re.IGNORECASE)\n elif searchtype == 'pos':\n return searcher.search_out(line, sea... | from __future__ import print_function
import os
import re
import random
import gc
import subprocess
from glob import glob
from collections import OrderedDict
from moviepy.editor import VideoFileClip, concatenate
import audiogrep
from .vtt import parse_auto_sub
from .timecode import Timecode
from . import searcher
usable_extensions = ['mp4', 'avi', 'mov', 'mkv', 'm4v']
BATCH_SIZE = 20
def get_fps(filename):
process = subprocess.Popen(['ffmpeg', '-i', filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
returncode = process.wait()
output = process.stdout.read()
fps = re.findall(r'\d+ fps', output, flags=re.MULTILINE)
try:
return int(fps[0].split(' ')[0])
except:
return 25
def get_ngrams(inputfile, n=1, use_transcript=False, use_vtt=False):
'''
Get ngrams from a text
Sourced from:
https://gist.github.com/dannguyen/93c2c43f4e65328b85af
'''
words = []
if use_transcript:
for s in audiogrep.convert_timestamps(inputfile):
for w in s['words']:
words.append(w[0])
elif use_vtt:
vtts = get_vtt_files(inputfile)
for vtt in vtts:
with open(vtt['vtt'], 'r') as infile:
sentences = parse_auto_sub(infile.read())
for s in sentences:
for w in s['words']:
words.append(w['word'])
else:
text = ''
srts = get_subtitle_files(inputfile)
for srt in srts:
lines = clean_srt(srt)
if lines:
for timespan in lines.keys():
line = lines[timespan].strip()
text += line + ' '
words = re.split(r'[.?!,:\"]+\s*|\s+', text)
ngrams = zip(*[words[i:] for i in range(n)])
return ngrams
def make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=25):
reel = full_name
if len(full_name) > 7:
reel = full_name[0:7]
template = '{} {} AA/V C {} {} {} {}\n* FROM CLIP NAME: {}\n* COMMENT: \n FINAL CUT PRO REEL: {} REPLACED BY: {}\n\n'
out = template.format(
n,
full_name,
Timecode(fps, start_seconds=time_in),
Timecode(fps, start_seconds=time_out),
Timecode(fps, start_seconds=rec_in),
Timecode(fps, start_seconds=rec_out),
filename,
full_name,
reel
)
return out
def make_edl(timestamps, name):
'''Converts an array of ordered timestamps into an EDL string'''
fpses = {}
out = "TITLE: {}\nFCM: NON-DROP FRAME\n\n".format(name)
rec_in = 0
for index, timestamp in enumerate(timestamps):
if timestamp['file'] not in fpses:
fpses[timestamp['file']] = get_fps(timestamp['file'])
fps = fpses[timestamp['file']]
n = str(index + 1).zfill(4)
time_in = timestamp['start']
time_out = timestamp['end']
duration = time_out - time_in
rec_out = rec_in + duration
full_name = 'reel_{}'.format(n)
filename = timestamp['file']
out += make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=fps)
rec_in = rec_out
with open(name, 'w') as outfile:
outfile.write(out)
def create_timestamps(inputfiles):
files = audiogrep.convert_to_wav(inputfiles)
audiogrep.transcribe(files)
def convert_timespan(timespan):
"""Convert an srt timespan into a start and end timestamp."""
start, end = timespan.split('-->')
start = convert_timestamp(start)
end = convert_timestamp(end)
return start, end
def convert_timestamp(timestamp):
"""Convert an srt timestamp into seconds."""
timestamp = timestamp.strip()
chunk, millis = timestamp.split(',')
hours, minutes, seconds = chunk.split(':')
hours = int(hours)
minutes = int(minutes)
seconds = int(seconds)
seconds = seconds + hours * 60 * 60 + minutes * 60 + float(millis) / 1000
return seconds
def clean_srt(srt):
"""Remove damaging line breaks and numbers from srt files and return a
dictionary.
"""
with open(srt, 'r') as f:
text = f.read()
text = re.sub(r'^\d+[\n\r]', '', text, flags=re.MULTILINE)
lines = text.splitlines()
output = OrderedDict()
key = ''
for line in lines:
line = line.strip()
if line.find('-->') > -1:
key = line
output[key] = ''
else:
if key != '':
output[key] += line + ' '
return output
def cleanup_log_files(outputfile):
"""Search for and remove temp log files found in the output directory."""
d = os.path.dirname(os.path.abspath(outputfile))
logfiles = [f for f in os.listdir(d) if f.endswith('ogg.log')]
for f in logfiles:
os.remove(f)
def demo_supercut(composition, padding):
"""Print out timespans to be cut followed by the line number in the srt."""
for i, c in enumerate(composition):
line = c['line']
start = c['start']
end = c['end']
if i > 0 and composition[i - 1]['file'] == c['file'] and start < composition[i - 1]['end']:
start = start + padding
print("{1} to {2}:\t{0}".format(line, start, end))
def create_supercut(composition, outputfile, padding):
"""Concatenate video clips together and output finished video file to the
output directory.
"""
print("[+] Creating clips.")
demo_supercut(composition, padding)
# add padding when necessary
for (clip, nextclip) in zip(composition, composition[1:]):
if ((nextclip['file'] == clip['file']) and (nextclip['start'] < clip['end'])):
nextclip['start'] += padding
# put all clips together:
all_filenames = set([c['file'] for c in composition])
videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
print("[+] Concatenating clips.")
final_clip = concatenate(cut_clips)
print("[+] Writing ouput file.")
final_clip.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def create_supercut_in_batches(composition, outputfile, padding):
"""Create & concatenate video clips in groups of size BATCH_SIZE and output
finished video file to output directory.
"""
total_clips = len(composition)
start_index = 0
end_index = BATCH_SIZE
batch_comp = []
while start_index < total_clips:
filename = outputfile + '.tmp' + str(start_index) + '.mp4'
try:
create_supercut(composition[start_index:end_index], filename, padding)
batch_comp.append(filename)
gc.collect()
start_index += BATCH_SIZE
end_index += BATCH_SIZE
except:
start_index += BATCH_SIZE
end_index += BATCH_SIZE
next
clips = [VideoFileClip(filename) for filename in batch_comp]
video = concatenate(clips)
video.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
# remove partial video files
for filename in batch_comp:
os.remove(filename)
cleanup_log_files(outputfile)
def split_clips(composition, outputfile):
all_filenames = set([c['file'] for c in composition])
videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
basename, ext = os.path.splitext(outputfile)
print("[+] Writing ouput files.")
for i, clip in enumerate(cut_clips):
clipfilename = basename + '_' + str(i).zfill(5) + ext
clip.to_videofile(clipfilename, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def search_line(line, search, searchtype):
"""Return True if search term is found in given line, False otherwise."""
if searchtype == 're' or searchtype == 'word':
return re.search(search, line) #, re.IGNORECASE)
elif searchtype == 'pos':
return searcher.search_out(line, search)
elif searchtype == 'hyper':
return searcher.hypernym_search(line, search)
def get_subtitle_files(inputfile):
"""Return a list of subtitle files."""
srts = []
for f in inputfile:
filename = f.split('.')
filename[-1] = 'srt'
srt = '.'.join(filename)
if os.path.isfile(srt):
srts.append(srt)
if len(srts) == 0:
print("[!] No subtitle files were found.")
return False
return srts
def get_vtt_files(inputfile):
    """Return {'vtt': ..., 'video': ...} pairs for each video with a .vtt file.

    Prints a warning and returns False when nothing matches.
    """
    pairs = []
    for video in inputfile:
        # Drop the extension and glob for any .vtt sharing the stem
        # (auto-generated subs often embed a language code in the name).
        stem = '.'.join(video.split('.')[0:-1])
        matches = glob(stem + '*.vtt')
        if matches:
            pairs.append({'vtt': matches[0], 'video': video})
    if not pairs:
        print("[!] No vtt files were found.")
        return False
    return pairs
def compose_from_srts(srts, search, searchtype):
    """Takes a list of subtitle (srt) filenames, search term and search type
    and returns a list of timestamps for composing a supercut.
    """
    composition = []
    # Iterate over each subtitles file.
    for srt in srts:
        print(srt)
        lines = clean_srt(srt)
        videofile = ""
        foundVideoFile = False
        # Reset per file: previously this flag was set once globally, so the
        # "not found" warning was suppressed for every file after the first hit.
        foundSearchTerm = False
        print("[+] Searching for video file corresponding to '" + srt + "'.")
        for ext in usable_extensions:
            tempVideoFile = srt.replace('.srt', '.' + ext)
            if os.path.isfile(tempVideoFile):
                videofile = tempVideoFile
                foundVideoFile = True
                print("[+] Found '" + tempVideoFile + "'.")
        # If a corresponding video file was found for this subtitles file...
        if foundVideoFile:
            # Check that the subtitles file contains subtitles.
            if lines:
                # Iterate over each line in the current subtitles file.
                for timespan in lines.keys():
                    line = lines[timespan].strip()
                    # If this line contains the search term...
                    if search_line(line, search, searchtype):
                        foundSearchTerm = True
                        # Extract the timespan for this subtitle.
                        start, end = convert_timespan(timespan)
                        # Record this occurrence of the search term.
                        composition.append({'file': videofile, 'time': timespan, 'start': start, 'end': end, 'line': line})
                # If the search was unsuccessful, warn (message typo fixed:
                # "was not found is" -> "was not found in").
                if foundSearchTerm is False:
                    print("[!] Search term '" + search + "'" + " was not found in subtitle file '" + srt + "'.")
            # If no subtitles were found in the current file.
            else:
                print("[!] Subtitle file '" + srt + "' is empty.")
        # If no video file was found...
        else:
            print("[!] No video file was found which corresponds to subtitle file '" + srt + "'.")
            print("[!] The following video formats are currently supported:")
            extList = ""
            for ext in usable_extensions:
                extList += ext + ", "
            print(extList)
    return composition
def compose_from_vtt(files, search, searchtype):
    """Build supercut segments from auto-generated .vtt subtitle files.

    *files* is a list of {'vtt': ..., 'video': ...} pairs (see get_vtt_files).
    Word-level search types match individual words; anything else matches
    whole sentences. Returns a list of segment dicts with 'file', 'line',
    'start' and 'end' keys.
    """
    matches = []
    word_level = searchtype in ['word', 'hyper', 'pos']
    for entry in files:
        videopath = entry['video']
        with open(entry['vtt'], 'r') as handle:
            sentences = parse_auto_sub(handle.read())
        for sentence in sentences:
            if word_level:
                for token in sentence['words']:
                    if search_line(token['word'], search, searchtype):
                        matches.append({
                            'file': videopath,
                            'line': token['word'],
                            'start': token['start'],
                            'end': token['end']
                        })
            elif search_line(sentence['text'], search, searchtype):
                matches.append({
                    'file': videopath,
                    'line': sentence['text'],
                    'start': sentence['start'],
                    'end': sentence['end']
                })
    return matches
def videogrep(inputfile, outputfile, search, searchtype, maxclips=0, padding=0, test=False, randomize=False, sync=0, use_transcript=False, use_vtt=False, export_clips=False):
    """Search through and find all instances of the search term in an srt or transcript,
    create a supercut around that instance, and output a new video file
    comprised of those supercuts.
    """
    # padding and sync arrive in milliseconds; downstream code works in seconds.
    padding = padding / 1000.0
    sync = sync / 1000.0
    composition = []
    # Pick the subtitle source: pocketsphinx transcript, .vtt, or .srt files.
    if use_transcript:
        composition = compose_from_transcript(inputfile, search, searchtype)
    elif use_vtt:
        vtts = get_vtt_files(inputfile)
        composition = compose_from_vtt(vtts, search, searchtype)
    else:
        srts = get_subtitle_files(inputfile)
        composition = compose_from_srts(srts, search, searchtype)
    # If the search term was not found in any subtitle file...
    if len(composition) == 0:
        print("[!] Search term '" + search + "'" + " was not found in any file.")
        exit(1)
    else:
        print("[+] Search term '" + search + "'" + " was found in " + str(len(composition)) + " places.")
        # apply padding and sync
        for c in composition:
            c['start'] = c['start'] + sync - padding
            c['end'] = c['end'] + sync + padding
        if maxclips > 0:
            composition = composition[:maxclips]
        if randomize is True:
            random.shuffle(composition)
        if test is True:
            # Demo mode: just print what would be cut.
            demo_supercut(composition, padding)
        else:
            if os.path.splitext(outputfile)[1].lower() == '.edl':
                make_edl(composition, outputfile)
            elif export_clips:
                split_clips(composition, outputfile)
            else:
                if len(composition) > BATCH_SIZE:
                    # Fixed malformed status prefix ("[+}" -> "[+]").
                    print("[+] Starting batch job.")
                    create_supercut_in_batches(composition, outputfile, padding)
                else:
                    create_supercut(composition, outputfile, padding)
def main():
    """Command-line entry point: parse arguments and dispatch."""
    import argparse
    ap = argparse.ArgumentParser(description='Generate a "supercut" of one or more video files by searching through subtitle tracks.')
    ap.add_argument('--input', '-i', dest='inputfile', nargs='*', required=True, help='video or subtitle file, or folder')
    ap.add_argument('--search', '-s', dest='search', help='search term')
    ap.add_argument('--search-type', '-st', dest='searchtype', default='re', choices=['re', 'pos', 'hyper', 'fragment', 'franken', 'word'], help='type of search')
    ap.add_argument('--use-transcript', '-t', action='store_true', dest='use_transcript', help='Use a transcript generated by pocketsphinx instead of srt files')
    ap.add_argument('--use-vtt', '-vtt', action='store_true', dest='use_vtt', help='Use a vtt file instead of srt')
    ap.add_argument('--max-clips', '-m', dest='maxclips', type=int, default=0, help='maximum number of clips to use for the supercut')
    ap.add_argument('--output', '-o', dest='outputfile', default='supercut.mp4', help='name of output file')
    ap.add_argument('--export-clips', '-ec', dest='export_clips', action='store_true', help='Export individual clips')
    ap.add_argument('--demo', '-d', action='store_true', help='show results without making the supercut')
    ap.add_argument('--randomize', '-r', action='store_true', help='randomize the clips')
    ap.add_argument('--youtube', '-yt', help='grab clips from youtube based on your search')
    ap.add_argument('--padding', '-p', dest='padding', default=0, type=int, help='padding in milliseconds to add to the start and end of each clip')
    ap.add_argument('--resyncsubs', '-rs', dest='sync', default=0, type=int, help='Subtitle re-synch delay +/- in milliseconds')
    ap.add_argument('--transcribe', '-tr', dest='transcribe', action='store_true', help='Transcribe the video using audiogrep. Requires pocketsphinx')
    ap.add_argument('--ngrams', '-n', dest='ngrams', type=int, default=0, help='Return ngrams for videos')
    opts = ap.parse_args()
    # --search is mandatory unless we are only transcribing or listing ngrams.
    if not opts.transcribe and opts.ngrams == 0 and opts.search is None:
        ap.error('argument --search/-s is required')
    if opts.transcribe:
        create_timestamps(opts.inputfile)
    elif opts.ngrams > 0:
        from collections import Counter
        grams = get_ngrams(opts.inputfile, opts.ngrams, opts.use_transcript, opts.use_vtt)
        for ngram, count in Counter(grams).most_common(100):
            print(' '.join(ngram), count)
    else:
        videogrep(opts.inputfile, opts.outputfile, opts.search, opts.searchtype, opts.maxclips, opts.padding, opts.demo, opts.randomize, opts.sync, opts.use_transcript, opts.use_vtt, opts.export_clips)
# Allow the module to be executed directly as a script.
if __name__ == '__main__':
    main()
|
antiboredom/videogrep | videogrep/videogrep.py | videogrep | python | def videogrep(inputfile, outputfile, search, searchtype, maxclips=0, padding=0, test=False, randomize=False, sync=0, use_transcript=False, use_vtt=False, export_clips=False):
padding = padding / 1000.0
sync = sync / 1000.0
composition = []
foundSearchTerm = False
if use_transcript:
composition = compose_from_transcript(inputfile, search, searchtype)
elif use_vtt:
vtts = get_vtt_files(inputfile)
composition = compose_from_vtt(vtts, search, searchtype)
else:
srts = get_subtitle_files(inputfile)
composition = compose_from_srts(srts, search, searchtype)
# If the search term was not found in any subtitle file...
if len(composition) == 0:
print("[!] Search term '" + search + "'" + " was not found in any file.")
exit(1)
else:
print("[+] Search term '" + search + "'" + " was found in " + str(len(composition)) + " places.")
# apply padding and sync
for c in composition:
c['start'] = c['start'] + sync - padding
c['end'] = c['end'] + sync + padding
if maxclips > 0:
composition = composition[:maxclips]
if randomize is True:
random.shuffle(composition)
if test is True:
demo_supercut(composition, padding)
else:
if os.path.splitext(outputfile)[1].lower() == '.edl':
make_edl(composition, outputfile)
elif export_clips:
split_clips(composition, outputfile)
else:
if len(composition) > BATCH_SIZE:
print("[+} Starting batch job.")
create_supercut_in_batches(composition, outputfile, padding)
else:
create_supercut(composition, outputfile, padding) | Search through and find all instances of the search term in an srt or transcript,
create a supercut around that instance, and output a new video file
comprised of those supercuts. | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/videogrep.py#L438-L490 | [
"def create_supercut_in_batches(composition, outputfile, padding):\n \"\"\"Create & concatenate video clips in groups of size BATCH_SIZE and output\n finished video file to output directory.\n \"\"\"\n total_clips = len(composition)\n start_index = 0\n end_index = BATCH_SIZE\n batch_comp = []\n... | from __future__ import print_function
import os
import re
import random
import gc
import subprocess
from glob import glob
from collections import OrderedDict
from moviepy.editor import VideoFileClip, concatenate
import audiogrep
from .vtt import parse_auto_sub
from .timecode import Timecode
from . import searcher
usable_extensions = ['mp4', 'avi', 'mov', 'mkv', 'm4v']
BATCH_SIZE = 20
def get_fps(filename):
process = subprocess.Popen(['ffmpeg', '-i', filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
returncode = process.wait()
output = process.stdout.read()
fps = re.findall(r'\d+ fps', output, flags=re.MULTILINE)
try:
return int(fps[0].split(' ')[0])
except:
return 25
def get_ngrams(inputfile, n=1, use_transcript=False, use_vtt=False):
'''
Get ngrams from a text
Sourced from:
https://gist.github.com/dannguyen/93c2c43f4e65328b85af
'''
words = []
if use_transcript:
for s in audiogrep.convert_timestamps(inputfile):
for w in s['words']:
words.append(w[0])
elif use_vtt:
vtts = get_vtt_files(inputfile)
for vtt in vtts:
with open(vtt['vtt'], 'r') as infile:
sentences = parse_auto_sub(infile.read())
for s in sentences:
for w in s['words']:
words.append(w['word'])
else:
text = ''
srts = get_subtitle_files(inputfile)
for srt in srts:
lines = clean_srt(srt)
if lines:
for timespan in lines.keys():
line = lines[timespan].strip()
text += line + ' '
words = re.split(r'[.?!,:\"]+\s*|\s+', text)
ngrams = zip(*[words[i:] for i in range(n)])
return ngrams
def make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=25):
reel = full_name
if len(full_name) > 7:
reel = full_name[0:7]
template = '{} {} AA/V C {} {} {} {}\n* FROM CLIP NAME: {}\n* COMMENT: \n FINAL CUT PRO REEL: {} REPLACED BY: {}\n\n'
out = template.format(
n,
full_name,
Timecode(fps, start_seconds=time_in),
Timecode(fps, start_seconds=time_out),
Timecode(fps, start_seconds=rec_in),
Timecode(fps, start_seconds=rec_out),
filename,
full_name,
reel
)
return out
def make_edl(timestamps, name):
'''Converts an array of ordered timestamps into an EDL string'''
fpses = {}
out = "TITLE: {}\nFCM: NON-DROP FRAME\n\n".format(name)
rec_in = 0
for index, timestamp in enumerate(timestamps):
if timestamp['file'] not in fpses:
fpses[timestamp['file']] = get_fps(timestamp['file'])
fps = fpses[timestamp['file']]
n = str(index + 1).zfill(4)
time_in = timestamp['start']
time_out = timestamp['end']
duration = time_out - time_in
rec_out = rec_in + duration
full_name = 'reel_{}'.format(n)
filename = timestamp['file']
out += make_edl_segment(n, time_in, time_out, rec_in, rec_out, full_name, filename, fps=fps)
rec_in = rec_out
with open(name, 'w') as outfile:
outfile.write(out)
def create_timestamps(inputfiles):
files = audiogrep.convert_to_wav(inputfiles)
audiogrep.transcribe(files)
def convert_timespan(timespan):
"""Convert an srt timespan into a start and end timestamp."""
start, end = timespan.split('-->')
start = convert_timestamp(start)
end = convert_timestamp(end)
return start, end
def convert_timestamp(timestamp):
"""Convert an srt timestamp into seconds."""
timestamp = timestamp.strip()
chunk, millis = timestamp.split(',')
hours, minutes, seconds = chunk.split(':')
hours = int(hours)
minutes = int(minutes)
seconds = int(seconds)
seconds = seconds + hours * 60 * 60 + minutes * 60 + float(millis) / 1000
return seconds
def clean_srt(srt):
"""Remove damaging line breaks and numbers from srt files and return a
dictionary.
"""
with open(srt, 'r') as f:
text = f.read()
text = re.sub(r'^\d+[\n\r]', '', text, flags=re.MULTILINE)
lines = text.splitlines()
output = OrderedDict()
key = ''
for line in lines:
line = line.strip()
if line.find('-->') > -1:
key = line
output[key] = ''
else:
if key != '':
output[key] += line + ' '
return output
def cleanup_log_files(outputfile):
"""Search for and remove temp log files found in the output directory."""
d = os.path.dirname(os.path.abspath(outputfile))
logfiles = [f for f in os.listdir(d) if f.endswith('ogg.log')]
for f in logfiles:
os.remove(f)
def demo_supercut(composition, padding):
"""Print out timespans to be cut followed by the line number in the srt."""
for i, c in enumerate(composition):
line = c['line']
start = c['start']
end = c['end']
if i > 0 and composition[i - 1]['file'] == c['file'] and start < composition[i - 1]['end']:
start = start + padding
print("{1} to {2}:\t{0}".format(line, start, end))
def create_supercut(composition, outputfile, padding):
"""Concatenate video clips together and output finished video file to the
output directory.
"""
print("[+] Creating clips.")
demo_supercut(composition, padding)
# add padding when necessary
for (clip, nextclip) in zip(composition, composition[1:]):
if ((nextclip['file'] == clip['file']) and (nextclip['start'] < clip['end'])):
nextclip['start'] += padding
# put all clips together:
all_filenames = set([c['file'] for c in composition])
videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
print("[+] Concatenating clips.")
final_clip = concatenate(cut_clips)
print("[+] Writing ouput file.")
final_clip.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def create_supercut_in_batches(composition, outputfile, padding):
"""Create & concatenate video clips in groups of size BATCH_SIZE and output
finished video file to output directory.
"""
total_clips = len(composition)
start_index = 0
end_index = BATCH_SIZE
batch_comp = []
while start_index < total_clips:
filename = outputfile + '.tmp' + str(start_index) + '.mp4'
try:
create_supercut(composition[start_index:end_index], filename, padding)
batch_comp.append(filename)
gc.collect()
start_index += BATCH_SIZE
end_index += BATCH_SIZE
except:
start_index += BATCH_SIZE
end_index += BATCH_SIZE
next
clips = [VideoFileClip(filename) for filename in batch_comp]
video = concatenate(clips)
video.to_videofile(outputfile, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
# remove partial video files
for filename in batch_comp:
os.remove(filename)
cleanup_log_files(outputfile)
def split_clips(composition, outputfile):
all_filenames = set([c['file'] for c in composition])
videofileclips = dict([(f, VideoFileClip(f)) for f in all_filenames])
cut_clips = [videofileclips[c['file']].subclip(c['start'], c['end']) for c in composition]
basename, ext = os.path.splitext(outputfile)
print("[+] Writing ouput files.")
for i, clip in enumerate(cut_clips):
clipfilename = basename + '_' + str(i).zfill(5) + ext
clip.to_videofile(clipfilename, codec="libx264", temp_audiofile='temp-audio.m4a', remove_temp=True, audio_codec='aac')
def search_line(line, search, searchtype):
"""Return True if search term is found in given line, False otherwise."""
if searchtype == 're' or searchtype == 'word':
return re.search(search, line) #, re.IGNORECASE)
elif searchtype == 'pos':
return searcher.search_out(line, search)
elif searchtype == 'hyper':
return searcher.hypernym_search(line, search)
def get_subtitle_files(inputfile):
"""Return a list of subtitle files."""
srts = []
for f in inputfile:
filename = f.split('.')
filename[-1] = 'srt'
srt = '.'.join(filename)
if os.path.isfile(srt):
srts.append(srt)
if len(srts) == 0:
print("[!] No subtitle files were found.")
return False
return srts
def get_vtt_files(inputfile):
"""Return a list of vtt files."""
vtts = []
for f in inputfile:
filename = f.split('.')
filename = '.'.join(filename[0:-1])
vtt = glob(filename + '*.vtt')
if len(vtt) > 0:
vtts.append({'vtt': vtt[0], 'video': f})
if len(vtts) == 0:
print("[!] No vtt files were found.")
return False
return vtts
def compose_from_srts(srts, search, searchtype):
"""Takes a list of subtitle (srt) filenames, search term and search type
and, returns a list of timestamps for composing a supercut.
"""
composition = []
foundSearchTerm = False
# Iterate over each subtitles file.
for srt in srts:
print(srt)
lines = clean_srt(srt)
videofile = ""
foundVideoFile = False
print("[+] Searching for video file corresponding to '" + srt + "'.")
for ext in usable_extensions:
tempVideoFile = srt.replace('.srt', '.' + ext)
if os.path.isfile(tempVideoFile):
videofile = tempVideoFile
foundVideoFile = True
print("[+] Found '" + tempVideoFile + "'.")
# If a correspndong video file was found for this subtitles file...
if foundVideoFile:
# Check that the subtitles file contains subtitles.
if lines:
# Iterate over each line in the current subtitles file.
for timespan in lines.keys():
line = lines[timespan].strip()
# If this line contains the search term
if search_line(line, search, searchtype):
foundSearchTerm = True
# Extract the timespan for this subtitle.
start, end = convert_timespan(timespan)
# Record this occurance of the search term.
composition.append({'file': videofile, 'time': timespan, 'start': start, 'end': end, 'line': line})
# If the search was unsuccessful.
if foundSearchTerm is False:
print("[!] Search term '" + search + "'" + " was not found is subtitle file '" + srt + "'.")
# If no subtitles were found in the current file.
else:
print("[!] Subtitle file '" + srt + "' is empty.")
# If no video file was found...
else:
print("[!] No video file was found which corresponds to subtitle file '" + srt + "'.")
print("[!] The following video formats are currently supported:")
extList = ""
for ext in usable_extensions:
extList += ext + ", "
print(extList)
return composition
def compose_from_transcript(files, search, searchtype):
"""Takes transcripts created by audiogrep/pocketsphinx, a search and search type
and returns a list of timestamps for creating a supercut"""
final_segments = []
if searchtype in ['re', 'word', 'franken', 'fragment']:
if searchtype == 're':
searchtype = 'sentence'
segments = audiogrep.search(search, files, mode=searchtype, regex=True)
for seg in segments:
seg['file'] = seg['file'].replace('.transcription.txt', '')
seg['line'] = seg['words']
final_segments.append(seg)
elif searchtype in ['hyper', 'pos']:
for s in audiogrep.convert_timestamps(files):
for w in s['words']:
if search_line(w[0], search, searchtype):
seg = {
'file': s['file'].replace('.transcription.txt',''),
'line': w[0],
'start': float(w[1]),
'end': float(w[2])
}
final_segments.append(seg)
return final_segments
def compose_from_vtt(files, search, searchtype):
final_segments = []
for f in files:
video = f['video']
with open(f['vtt'], 'r') as infile:
sentences = parse_auto_sub(infile.read())
for sentence in sentences:
if searchtype in ['word', 'hyper', 'pos']:
for word in sentence['words']:
if search_line(word['word'], search, searchtype):
seg = {
'file': video,
'line': word['word'],
'start': word['start'],
'end': word['end']
}
final_segments.append(seg)
else:
if search_line(sentence['text'], search, searchtype):
seg = {
'file': video,
'line': sentence['text'],
'start': sentence['start'],
'end': sentence['end']
}
final_segments.append(seg)
return final_segments
def main():
import argparse
parser = argparse.ArgumentParser(description='Generate a "supercut" of one or more video files by searching through subtitle tracks.')
parser.add_argument('--input', '-i', dest='inputfile', nargs='*', required=True, help='video or subtitle file, or folder')
parser.add_argument('--search', '-s', dest='search', help='search term')
parser.add_argument('--search-type', '-st', dest='searchtype', default='re', choices=['re', 'pos', 'hyper', 'fragment', 'franken', 'word'], help='type of search')
parser.add_argument('--use-transcript', '-t', action='store_true', dest='use_transcript', help='Use a transcript generated by pocketsphinx instead of srt files')
parser.add_argument('--use-vtt', '-vtt', action='store_true', dest='use_vtt', help='Use a vtt file instead of srt')
parser.add_argument('--max-clips', '-m', dest='maxclips', type=int, default=0, help='maximum number of clips to use for the supercut')
parser.add_argument('--output', '-o', dest='outputfile', default='supercut.mp4', help='name of output file')
parser.add_argument('--export-clips', '-ec', dest='export_clips', action='store_true', help='Export individual clips')
parser.add_argument('--demo', '-d', action='store_true', help='show results without making the supercut')
parser.add_argument('--randomize', '-r', action='store_true', help='randomize the clips')
parser.add_argument('--youtube', '-yt', help='grab clips from youtube based on your search')
parser.add_argument('--padding', '-p', dest='padding', default=0, type=int, help='padding in milliseconds to add to the start and end of each clip')
parser.add_argument('--resyncsubs', '-rs', dest='sync', default=0, type=int, help='Subtitle re-synch delay +/- in milliseconds')
parser.add_argument('--transcribe', '-tr', dest='transcribe', action='store_true', help='Transcribe the video using audiogrep. Requires pocketsphinx')
parser.add_argument('--ngrams', '-n', dest='ngrams', type=int, default=0, help='Return ngrams for videos')
args = parser.parse_args()
if not args.transcribe and args.ngrams == 0:
if args.search is None:
parser.error('argument --search/-s is required')
if args.transcribe:
create_timestamps(args.inputfile)
elif args.ngrams > 0:
from collections import Counter
grams = get_ngrams(args.inputfile, args.ngrams, args.use_transcript, args.use_vtt)
most_common = Counter(grams).most_common(100)
for ngram, count in most_common:
print(' '.join(ngram), count)
else:
videogrep(args.inputfile, args.outputfile, args.search, args.searchtype, args.maxclips, args.padding, args.demo, args.randomize, args.sync, args.use_transcript, args.use_vtt, args.export_clips)
if __name__ == '__main__':
main()
|
antiboredom/videogrep | videogrep/tools/getyoutubecc.py | getyoutubecc._parseXml | python | def _parseXml(self,cc):
htmlpar = HTMLParser.HTMLParser()
cc = cc.split("</text>") # ['<text start="2997.929">So, it will\nhas time', '<text start="3000.929">blah', ..]
captions = []
for line in cc:
if re.search('text', line):
time = re.search(r'start="(\d+)(?:\.(\d+)){0,1}', line).groups() # ('2997','929')
time = ( int(time[0]), int(0 if not time[1] else time[1]) )
#convert seconds and millisec to int
text = re.search(r'">(.*)', line, re.DOTALL).group(1) # extract text i.e. 'So, it will\nhas time'
textlines = [ htmlpar.unescape(htmlpar.unescape( unicode(lineunparsed,"utf-8") )) for lineunparsed in text.split('\n') ]
#unscape chars like & or '
ntime = {'hours':time[0]/3600,"min":time[0]%3600/60,"sec":time[0]%3600%60,"msec":time[1]}
captions.append({'time':ntime,'textlines':textlines})
return captions | INPUT: XML file with captions
OUTPUT: parsed object like:
[{'texlines': [u"So, I'm going to rewrite this", 'in a more concise form as'],
'time': {'hours':'1', 'min':'2','sec':44,'msec':232} }] | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/tools/getyoutubecc.py#L57-L76 | null | class getyoutubecc():
""" This class allows you to download the caption from a video from you tube
Example:
>>> import getyoutubecc
#import the library
>>> cc = getyoutubecc.getyoutubecc('2XraaWefBd8','en')
# Now in cc.caption_obj are the parsed captions, its syntax is like:
# [{'texlines': [u"caption first line", 'caption second line'],
# 'time': {'hours':'1', 'min':'2','sec':44,'msec':232} }]
# Modify the caption as you want if desired
>>> cc.writeSrtFile('captionsfile.srt')
#write the contents to a srt file
Note:
MULTITRACK VIDEO
if video is a multitrack video (or the track has a name) you need
to specify the name of the track:
>>> cc = getyoutubecc.getyoutubecc('pNiFoYt69-w','fr','french')
TRANSLATE VIDEO
if you prefer the automatic translation to another language use
the lang code
>>> cc = getyoutubecc.getyoutubecc('pNiFoYt69-w','fr','french', tlang:'es')
"""
caption_obj = {}
""" This object contains the fetched captions. Use this to treat the captions or whatever"""
def __init__(self, video_id, lang="en", track="", tlang="" ):
""" """
#Obtain the file from internet
cc_url = "http://www.youtube.com/api/timedtext?v=" + video_id + "&lang=" + lang + "&name=" + track + "&tlang=" + tlang
print "video id: " + video_id
print "video language: " + lang
print "video track: " + track
print "translate video to: " + tlang
try:
cc = urllib.urlopen(cc_url).read()
except:
print "Problem with connection"
#parse the file to make a easy to modify object with the captions and its time
if self.caption_obj == []:
print "url " + cc_url + " was an empty response. Multitrack video?"
self.caption_obj = self._parseXml(cc);
def writeSrtFile(self,filename="caption"):
srt_lines = self._generateSrt(self.caption_obj) #generate the srt file
srtfile = open(filename,'w')
for line in srt_lines:
srtfile.write( line.encode('utf8') + "\n")
def _generateSrt(self,captions):
""" INPUT: array with captions, i.e.
[{'texlines': [u"So, I'm going to rewrite this", 'in a more concise form as'],
'time': {'hours':'1', 'min':'2','sec':44,'msec':232} }]
OUTPUT: srtformated string
"""
caption_number = 0
srt_output = []
for caption in captions:
caption_number += 1
#CAPTION NUMBER
srt_output.append(str(caption_number))
#TIME
time_from = ( caption['time']['hours'], caption['time']['min'], caption['time']['sec'], caption['time']['msec'] )
if len(captions)>caption_number:
#display caption until next one
next_caption_time = captions[caption_number]['time']
time_to = ( next_caption_time['hours'], next_caption_time['min'], next_caption_time['sec'], next_caption_time['msec'] )
else:
#display caption for 2 seconds
time_to = (time_from[0],time_from[1]+2,time_from[2],time_from[3])
srt_output.append( (":").join([str(i) for i in time_from[0:-1]])+","+str(time_from[-1])+" --> "+(":").join([str(i) for i in time_to[0:-1]])+","+str(time_to[-1]))
#CAPTIONS
for caption_line in caption['textlines']:
srt_output.append(caption_line)
#Add two empty lines to serarate every caption showed
srt_output.append("")
srt_output.append("")
return srt_output
|
antiboredom/videogrep | videogrep/tools/getyoutubecc.py | getyoutubecc._generateSrt | python | def _generateSrt(self,captions):
caption_number = 0
srt_output = []
for caption in captions:
caption_number += 1
#CAPTION NUMBER
srt_output.append(str(caption_number))
#TIME
time_from = ( caption['time']['hours'], caption['time']['min'], caption['time']['sec'], caption['time']['msec'] )
if len(captions)>caption_number:
#display caption until next one
next_caption_time = captions[caption_number]['time']
time_to = ( next_caption_time['hours'], next_caption_time['min'], next_caption_time['sec'], next_caption_time['msec'] )
else:
#display caption for 2 seconds
time_to = (time_from[0],time_from[1]+2,time_from[2],time_from[3])
srt_output.append( (":").join([str(i) for i in time_from[0:-1]])+","+str(time_from[-1])+" --> "+(":").join([str(i) for i in time_to[0:-1]])+","+str(time_to[-1]))
#CAPTIONS
for caption_line in caption['textlines']:
srt_output.append(caption_line)
#Add two empty lines to serarate every caption showed
srt_output.append("")
srt_output.append("")
return srt_output | INPUT: array with captions, i.e.
[{'texlines': [u"So, I'm going to rewrite this", 'in a more concise form as'],
'time': {'hours':'1', 'min':'2','sec':44,'msec':232} }]
OUTPUT: srtformated string | train | https://github.com/antiboredom/videogrep/blob/faffd3446d96242677757f1af7db23b6dfc429cf/videogrep/tools/getyoutubecc.py#L78-L106 | null | class getyoutubecc():
""" This class allows you to download the caption from a video from you tube
Example:
>>> import getyoutubecc
#import the library
>>> cc = getyoutubecc.getyoutubecc('2XraaWefBd8','en')
# Now in cc.caption_obj are the parsed captions, its syntax is like:
# [{'texlines': [u"caption first line", 'caption second line'],
# 'time': {'hours':'1', 'min':'2','sec':44,'msec':232} }]
# Modify the caption as you want if desired
>>> cc.writeSrtFile('captionsfile.srt')
#write the contents to a srt file
Note:
MULTITRACK VIDEO
if video is a multitrack video (or the track has a name) you need
to specify the name of the track:
>>> cc = getyoutubecc.getyoutubecc('pNiFoYt69-w','fr','french')
TRANSLATE VIDEO
if you prefer the automatic translation to another language use
the lang code
>>> cc = getyoutubecc.getyoutubecc('pNiFoYt69-w','fr','french', tlang:'es')
"""
caption_obj = {}
""" This object contains the fetched captions. Use this to treat the captions or whatever"""
def __init__(self, video_id, lang="en", track="", tlang="" ):
""" """
#Obtain the file from internet
cc_url = "http://www.youtube.com/api/timedtext?v=" + video_id + "&lang=" + lang + "&name=" + track + "&tlang=" + tlang
print "video id: " + video_id
print "video language: " + lang
print "video track: " + track
print "translate video to: " + tlang
try:
cc = urllib.urlopen(cc_url).read()
except:
print "Problem with connection"
#parse the file to make a easy to modify object with the captions and its time
if self.caption_obj == []:
print "url " + cc_url + " was an empty response. Multitrack video?"
self.caption_obj = self._parseXml(cc);
def writeSrtFile(self,filename="caption"):
srt_lines = self._generateSrt(self.caption_obj) #generate the srt file
srtfile = open(filename,'w')
for line in srt_lines:
srtfile.write( line.encode('utf8') + "\n")
def _parseXml(self,cc):
""" INPUT: XML file with captions
OUTPUT: parsed object like:
[{'texlines': [u"So, I'm going to rewrite this", 'in a more concise form as'],
'time': {'hours':'1', 'min':'2','sec':44,'msec':232} }]
"""
htmlpar = HTMLParser.HTMLParser()
cc = cc.split("</text>") # ['<text start="2997.929">So, it will\nhas time', '<text start="3000.929">blah', ..]
captions = []
for line in cc:
if re.search('text', line):
time = re.search(r'start="(\d+)(?:\.(\d+)){0,1}', line).groups() # ('2997','929')
time = ( int(time[0]), int(0 if not time[1] else time[1]) )
#convert seconds and millisec to int
text = re.search(r'">(.*)', line, re.DOTALL).group(1) # extract text i.e. 'So, it will\nhas time'
textlines = [ htmlpar.unescape(htmlpar.unescape( unicode(lineunparsed,"utf-8") )) for lineunparsed in text.split('\n') ]
#unscape chars like & or '
ntime = {'hours':time[0]/3600,"min":time[0]%3600/60,"sec":time[0]%3600%60,"msec":time[1]}
captions.append({'time':ntime,'textlines':textlines})
return captions
|
tomturner/django-tenants | django_tenants/postgresql_backend/introspection.py | DatabaseSchemaIntrospection.get_table_list | python | def get_table_list(self, cursor):
"""
Returns a list of table names in the current database and schema.
"""
cursor.execute("""
SELECT c.relname, c.relkind
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v', '')
AND n.nspname = '%s'
AND pg_catalog.pg_table_is_visible(c.oid)""" % self.connection.schema_name)
return [TableInfo(row[0], {'r': 't', 'v': 'v'}.get(row[1]))
for row in cursor.fetchall()
if row[0] not in self.ignored_tables] | Returns a list of table names in the current database and schema. | train | https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/django_tenants/postgresql_backend/introspection.py#L22-L37 | null | class DatabaseSchemaIntrospection(DatabaseIntrospection):
"""
database schema introspection class
"""
_get_indexes_query = """
SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
FROM pg_catalog.pg_class c,
INNER JOIN pg_catalog.pg_index idx ON c.oid = idx.indrelid
INNER JOIN pg_catalog.pg_class c2 ON idx.indexrelid = c2.oid
INNER JOIN pg_catalog.pg_attribute attr ON attr.attrelid = c.oid and attr.attnum = idx.indkey[0]
INNER JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relname = %s AND n.nspname = %s
"""
def get_table_list(self, cursor):
"""
Returns a list of table names in the current database and schema.
"""
cursor.execute("""
SELECT c.relname, c.relkind
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v', '')
AND n.nspname = '%s'
AND pg_catalog.pg_table_is_visible(c.oid)""" % self.connection.schema_name)
return [TableInfo(row[0], {'r': 't', 'v': 'v'}.get(row[1]))
for row in cursor.fetchall()
if row[0] not in self.ignored_tables]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
# As cursor.description does not return reliably the nullable property,
# we have to query the information_schema (#7783)
cursor.execute("""
SELECT column_name, is_nullable, column_default
FROM information_schema.columns
WHERE table_schema = %s and table_name = %s""", [self.connection.schema_name, table_name])
field_map = {line[0]: line[1:] for line in cursor.fetchall()}
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
return [FieldInfo(*((force_text(line[0]),) + line[1:6] +
(field_map[force_text(line[0])][0] == 'YES', field_map[force_text(line[0])][1])))
for line in cursor.description]
def get_indexes(self, cursor, table_name):
# This query retrieves each index on the given table, including the
# first associated field name
cursor.execute(self._get_indexes_query, [table_name, self.connection.schema_name])
indexes = {}
for row in cursor.fetchall():
# row[1] (idx.indkey) is stored in the DB as an array. It comes out as
# a string of space-separated integers. This designates the field
# indexes (1-based) of the fields that have indexes on the table.
# Here, we skip any indexes across multiple fields.
if ' ' in row[1]:
continue
if row[0] not in indexes:
indexes[row[0]] = {'primary_key': False, 'unique': False}
# It's possible to have the unique and PK constraints in separate indexes.
if row[3]:
indexes[row[0]]['primary_key'] = True
if row[2]:
indexes[row[0]]['unique'] = True
return indexes
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
cursor.execute("""
SELECT c2.relname, a1.attname, a2.attname
FROM pg_constraint con
LEFT JOIN pg_class c1 ON con.conrelid = c1.oid
LEFT JOIN pg_namespace n ON n.oid = c1.relnamespace
LEFT JOIN pg_class c2 ON con.confrelid = c2.oid
LEFT JOIN pg_attribute a1 ON c1.oid = a1.attrelid AND a1.attnum = con.conkey[1]
LEFT JOIN pg_attribute a2 ON c2.oid = a2.attrelid AND a2.attnum = con.confkey[1]
WHERE c1.relname = %s and n.nspname = %s
AND con.contype = 'f'""", [table_name, self.connection.schema_name])
relations = {}
for row in cursor.fetchall():
relations[row[1]] = (row[2], row[0])
return relations
get_constraints = _constraints.get_constraints
def get_key_columns(self, cursor, table_name):
key_columns = []
cursor.execute("""
SELECT kcu.column_name, ccu.table_name AS referenced_table, ccu.column_name AS referenced_column
FROM information_schema.constraint_column_usage ccu
LEFT JOIN information_schema.key_column_usage kcu
ON ccu.constraint_catalog = kcu.constraint_catalog
AND ccu.constraint_schema = kcu.constraint_schema
AND ccu.constraint_name = kcu.constraint_name
LEFT JOIN information_schema.table_constraints tc
ON ccu.constraint_catalog = tc.constraint_catalog
AND ccu.constraint_schema = tc.constraint_schema
AND ccu.constraint_name = tc.constraint_name
WHERE kcu.table_name = %s AND tc.constraint_type = 'FOREIGN KEY' AND tc.table_schema = %s
""", [table_name, self.connection.schema_name])
key_columns.extend(cursor.fetchall())
return key_columns
|
tomturner/django-tenants | django_tenants/postgresql_backend/introspection.py | DatabaseSchemaIntrospection.get_table_description | python | def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
# As cursor.description does not return reliably the nullable property,
# we have to query the information_schema (#7783)
cursor.execute("""
SELECT column_name, is_nullable, column_default
FROM information_schema.columns
WHERE table_schema = %s and table_name = %s""", [self.connection.schema_name, table_name])
field_map = {line[0]: line[1:] for line in cursor.fetchall()}
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
return [FieldInfo(*((force_text(line[0]),) + line[1:6] +
(field_map[force_text(line[0])][0] == 'YES', field_map[force_text(line[0])][1])))
for line in cursor.description] | Returns a description of the table, with the DB-API cursor.description interface. | train | https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/django_tenants/postgresql_backend/introspection.py#L39-L51 | null | class DatabaseSchemaIntrospection(DatabaseIntrospection):
"""
database schema introspection class
"""
_get_indexes_query = """
SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
FROM pg_catalog.pg_class c,
INNER JOIN pg_catalog.pg_index idx ON c.oid = idx.indrelid
INNER JOIN pg_catalog.pg_class c2 ON idx.indexrelid = c2.oid
INNER JOIN pg_catalog.pg_attribute attr ON attr.attrelid = c.oid and attr.attnum = idx.indkey[0]
INNER JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relname = %s AND n.nspname = %s
"""
def get_table_list(self, cursor):
"""
Returns a list of table names in the current database and schema.
"""
cursor.execute("""
SELECT c.relname, c.relkind
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v', '')
AND n.nspname = '%s'
AND pg_catalog.pg_table_is_visible(c.oid)""" % self.connection.schema_name)
return [TableInfo(row[0], {'r': 't', 'v': 'v'}.get(row[1]))
for row in cursor.fetchall()
if row[0] not in self.ignored_tables]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
# As cursor.description does not return reliably the nullable property,
# we have to query the information_schema (#7783)
cursor.execute("""
SELECT column_name, is_nullable, column_default
FROM information_schema.columns
WHERE table_schema = %s and table_name = %s""", [self.connection.schema_name, table_name])
field_map = {line[0]: line[1:] for line in cursor.fetchall()}
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
return [FieldInfo(*((force_text(line[0]),) + line[1:6] +
(field_map[force_text(line[0])][0] == 'YES', field_map[force_text(line[0])][1])))
for line in cursor.description]
def get_indexes(self, cursor, table_name):
# This query retrieves each index on the given table, including the
# first associated field name
cursor.execute(self._get_indexes_query, [table_name, self.connection.schema_name])
indexes = {}
for row in cursor.fetchall():
# row[1] (idx.indkey) is stored in the DB as an array. It comes out as
# a string of space-separated integers. This designates the field
# indexes (1-based) of the fields that have indexes on the table.
# Here, we skip any indexes across multiple fields.
if ' ' in row[1]:
continue
if row[0] not in indexes:
indexes[row[0]] = {'primary_key': False, 'unique': False}
# It's possible to have the unique and PK constraints in separate indexes.
if row[3]:
indexes[row[0]]['primary_key'] = True
if row[2]:
indexes[row[0]]['unique'] = True
return indexes
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
cursor.execute("""
SELECT c2.relname, a1.attname, a2.attname
FROM pg_constraint con
LEFT JOIN pg_class c1 ON con.conrelid = c1.oid
LEFT JOIN pg_namespace n ON n.oid = c1.relnamespace
LEFT JOIN pg_class c2 ON con.confrelid = c2.oid
LEFT JOIN pg_attribute a1 ON c1.oid = a1.attrelid AND a1.attnum = con.conkey[1]
LEFT JOIN pg_attribute a2 ON c2.oid = a2.attrelid AND a2.attnum = con.confkey[1]
WHERE c1.relname = %s and n.nspname = %s
AND con.contype = 'f'""", [table_name, self.connection.schema_name])
relations = {}
for row in cursor.fetchall():
relations[row[1]] = (row[2], row[0])
return relations
get_constraints = _constraints.get_constraints
def get_key_columns(self, cursor, table_name):
key_columns = []
cursor.execute("""
SELECT kcu.column_name, ccu.table_name AS referenced_table, ccu.column_name AS referenced_column
FROM information_schema.constraint_column_usage ccu
LEFT JOIN information_schema.key_column_usage kcu
ON ccu.constraint_catalog = kcu.constraint_catalog
AND ccu.constraint_schema = kcu.constraint_schema
AND ccu.constraint_name = kcu.constraint_name
LEFT JOIN information_schema.table_constraints tc
ON ccu.constraint_catalog = tc.constraint_catalog
AND ccu.constraint_schema = tc.constraint_schema
AND ccu.constraint_name = tc.constraint_name
WHERE kcu.table_name = %s AND tc.constraint_type = 'FOREIGN KEY' AND tc.table_schema = %s
""", [table_name, self.connection.schema_name])
key_columns.extend(cursor.fetchall())
return key_columns
|
tomturner/django-tenants | django_tenants/models.py | TenantMixin._drop_schema | python | def _drop_schema(self, force_drop=False):
connection = connections[get_tenant_database_alias()]
has_schema = hasattr(connection, 'schema_name')
if has_schema and connection.schema_name not in (self.schema_name, get_public_schema_name()):
raise Exception("Can't delete tenant outside it's own schema or "
"the public schema. Current schema is %s."
% connection.schema_name)
if has_schema and schema_exists(self.schema_name) and (self.auto_drop_schema or force_drop):
self.pre_drop()
cursor = connection.cursor()
cursor.execute('DROP SCHEMA %s CASCADE' % self.schema_name) | Drops the schema | train | https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/django_tenants/models.py#L128-L140 | [
"def get_public_schema_name():\n return getattr(settings, 'PUBLIC_SCHEMA_NAME', 'public')\n",
"def get_tenant_database_alias():\n return getattr(settings, 'TENANT_DB_ALIAS', DEFAULT_DB_ALIAS)\n",
"def schema_exists(schema_name):\n _connection = connections[get_tenant_database_alias()]\n cursor = _co... | class TenantMixin(models.Model):
"""
All tenant models must inherit this class.
"""
auto_drop_schema = False
"""
USE THIS WITH CAUTION!
Set this flag to true on a parent class if you want the schema to be
automatically deleted if the tenant row gets deleted.
"""
auto_create_schema = True
"""
Set this flag to false on a parent class if you don't want the schema
to be automatically created upon save.
"""
schema_name = models.CharField(max_length=63, unique=True, db_index=True,
validators=[_check_schema_name])
domain_url = None
"""
Leave this as None. Stores the current domain url so it can be used in the logs
"""
_previous_tenant = []
class Meta:
abstract = True
def __enter__(self):
"""
Syntax sugar which helps in celery tasks, cron jobs, and other scripts
Usage:
with Tenant.objects.get(schema_name='test') as tenant:
# run some code in tenant test
# run some code in previous tenant (public probably)
"""
connection = connections[get_tenant_database_alias()]
self._previous_tenant.append(connection.tenant)
self.activate()
def __exit__(self, exc_type, exc_val, exc_tb):
connection = connections[get_tenant_database_alias()]
connection.set_tenant(self._previous_tenant.pop())
def activate(self):
"""
Syntax sugar that helps at django shell with fast tenant changing
Usage:
Tenant.objects.get(schema_name='test').activate()
"""
connection = connections[get_tenant_database_alias()]
connection.set_tenant(self)
@classmethod
def deactivate(cls):
"""
Syntax sugar, return to public schema
Usage:
test_tenant.deactivate()
# or simpler
Tenant.deactivate()
"""
connection = connections[get_tenant_database_alias()]
connection.set_schema_to_public()
def save(self, verbosity=1, *args, **kwargs):
connection = connections[get_tenant_database_alias()]
is_new = self.pk is None
has_schema = hasattr(connection, 'schema_name')
if has_schema and is_new and connection.schema_name != get_public_schema_name():
raise Exception("Can't create tenant outside the public schema. "
"Current schema is %s." % connection.schema_name)
elif has_schema and not is_new and connection.schema_name not in (self.schema_name, get_public_schema_name()):
raise Exception("Can't update tenant outside it's own schema or "
"the public schema. Current schema is %s."
% connection.schema_name)
super().save(*args, **kwargs)
if has_schema and is_new and self.auto_create_schema:
try:
self.create_schema(check_if_exists=True, verbosity=verbosity)
post_schema_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
except Exception:
# We failed creating the tenant, delete what we created and
# re-raise the exception
self.delete(force_drop=True)
raise
elif is_new:
# although we are not using the schema functions directly, the signal might be registered by a listener
schema_needs_to_be_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
elif not is_new and self.auto_create_schema and not schema_exists(self.schema_name):
# Create schemas for existing models, deleting only the schema on failure
try:
self.create_schema(check_if_exists=True, verbosity=verbosity)
post_schema_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
except Exception:
# We failed creating the schema, delete what we created and
# re-raise the exception
self._drop_schema()
raise
def serializable_fields(self):
""" in certain cases the user model isn't serializable so you may want to only send the id """
return self
def pre_drop(self):
"""
This is a routine which you could override to backup the tenant schema before dropping.
:return:
"""
def delete(self, force_drop=False, *args, **kwargs):
"""
Deletes this row. Drops the tenant's schema if the attribute
auto_drop_schema set to True.
"""
self._drop_schema(force_drop)
super().delete(*args, **kwargs)
def create_schema(self, check_if_exists=False, sync_schema=True,
verbosity=1):
"""
Creates the schema 'schema_name' for this tenant. Optionally checks if
the schema already exists before creating it. Returns true if the
schema was created, false otherwise.
"""
# safety check
connection = connections[get_tenant_database_alias()]
_check_schema_name(self.schema_name)
cursor = connection.cursor()
if check_if_exists and schema_exists(self.schema_name):
return False
fake_migrations = get_creation_fakes_migrations()
if sync_schema:
if fake_migrations:
# copy tables and data from provided model schema
base_schema = get_tenant_base_schema()
clone_schema = CloneSchema()
clone_schema.clone_schema(base_schema, self.schema_name)
call_command('migrate_schemas',
tenant=True,
fake=True,
schema_name=self.schema_name,
interactive=False,
verbosity=verbosity)
else:
# create the schema
cursor.execute('CREATE SCHEMA %s' % self.schema_name)
call_command('migrate_schemas',
tenant=True,
schema_name=self.schema_name,
interactive=False,
verbosity=verbosity)
connection.set_schema_to_public()
def get_primary_domain(self):
"""
Returns the primary domain of the tenant
"""
try:
domain = self.domains.get(is_primary=True)
return domain
except get_tenant_domain_model().DoesNotExist:
return None
def reverse(self, request, view_name):
"""
Returns the URL of this tenant.
"""
http_type = 'https://' if request.is_secure() else 'http://'
domain = get_current_site(request).domain
url = ''.join((http_type, self.schema_name, '.', domain, reverse(view_name)))
return url
|
tomturner/django-tenants | django_tenants/models.py | TenantMixin.delete | python | def delete(self, force_drop=False, *args, **kwargs):
self._drop_schema(force_drop)
super().delete(*args, **kwargs) | Deletes this row. Drops the tenant's schema if the attribute
auto_drop_schema set to True. | train | https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/django_tenants/models.py#L148-L154 | [
"def _drop_schema(self, force_drop=False):\n \"\"\" Drops the schema\"\"\"\n connection = connections[get_tenant_database_alias()]\n has_schema = hasattr(connection, 'schema_name')\n if has_schema and connection.schema_name not in (self.schema_name, get_public_schema_name()):\n raise Exception(\"... | class TenantMixin(models.Model):
"""
All tenant models must inherit this class.
"""
auto_drop_schema = False
"""
USE THIS WITH CAUTION!
Set this flag to true on a parent class if you want the schema to be
automatically deleted if the tenant row gets deleted.
"""
auto_create_schema = True
"""
Set this flag to false on a parent class if you don't want the schema
to be automatically created upon save.
"""
schema_name = models.CharField(max_length=63, unique=True, db_index=True,
validators=[_check_schema_name])
domain_url = None
"""
Leave this as None. Stores the current domain url so it can be used in the logs
"""
_previous_tenant = []
class Meta:
abstract = True
def __enter__(self):
"""
Syntax sugar which helps in celery tasks, cron jobs, and other scripts
Usage:
with Tenant.objects.get(schema_name='test') as tenant:
# run some code in tenant test
# run some code in previous tenant (public probably)
"""
connection = connections[get_tenant_database_alias()]
self._previous_tenant.append(connection.tenant)
self.activate()
def __exit__(self, exc_type, exc_val, exc_tb):
connection = connections[get_tenant_database_alias()]
connection.set_tenant(self._previous_tenant.pop())
def activate(self):
"""
Syntax sugar that helps at django shell with fast tenant changing
Usage:
Tenant.objects.get(schema_name='test').activate()
"""
connection = connections[get_tenant_database_alias()]
connection.set_tenant(self)
@classmethod
def deactivate(cls):
"""
Syntax sugar, return to public schema
Usage:
test_tenant.deactivate()
# or simpler
Tenant.deactivate()
"""
connection = connections[get_tenant_database_alias()]
connection.set_schema_to_public()
def save(self, verbosity=1, *args, **kwargs):
connection = connections[get_tenant_database_alias()]
is_new = self.pk is None
has_schema = hasattr(connection, 'schema_name')
if has_schema and is_new and connection.schema_name != get_public_schema_name():
raise Exception("Can't create tenant outside the public schema. "
"Current schema is %s." % connection.schema_name)
elif has_schema and not is_new and connection.schema_name not in (self.schema_name, get_public_schema_name()):
raise Exception("Can't update tenant outside it's own schema or "
"the public schema. Current schema is %s."
% connection.schema_name)
super().save(*args, **kwargs)
if has_schema and is_new and self.auto_create_schema:
try:
self.create_schema(check_if_exists=True, verbosity=verbosity)
post_schema_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
except Exception:
# We failed creating the tenant, delete what we created and
# re-raise the exception
self.delete(force_drop=True)
raise
elif is_new:
# although we are not using the schema functions directly, the signal might be registered by a listener
schema_needs_to_be_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
elif not is_new and self.auto_create_schema and not schema_exists(self.schema_name):
# Create schemas for existing models, deleting only the schema on failure
try:
self.create_schema(check_if_exists=True, verbosity=verbosity)
post_schema_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
except Exception:
# We failed creating the schema, delete what we created and
# re-raise the exception
self._drop_schema()
raise
def serializable_fields(self):
""" in certain cases the user model isn't serializable so you may want to only send the id """
return self
def _drop_schema(self, force_drop=False):
""" Drops the schema"""
connection = connections[get_tenant_database_alias()]
has_schema = hasattr(connection, 'schema_name')
if has_schema and connection.schema_name not in (self.schema_name, get_public_schema_name()):
raise Exception("Can't delete tenant outside it's own schema or "
"the public schema. Current schema is %s."
% connection.schema_name)
if has_schema and schema_exists(self.schema_name) and (self.auto_drop_schema or force_drop):
self.pre_drop()
cursor = connection.cursor()
cursor.execute('DROP SCHEMA %s CASCADE' % self.schema_name)
def pre_drop(self):
"""
This is a routine which you could override to backup the tenant schema before dropping.
:return:
"""
def create_schema(self, check_if_exists=False, sync_schema=True,
verbosity=1):
"""
Creates the schema 'schema_name' for this tenant. Optionally checks if
the schema already exists before creating it. Returns true if the
schema was created, false otherwise.
"""
# safety check
connection = connections[get_tenant_database_alias()]
_check_schema_name(self.schema_name)
cursor = connection.cursor()
if check_if_exists and schema_exists(self.schema_name):
return False
fake_migrations = get_creation_fakes_migrations()
if sync_schema:
if fake_migrations:
# copy tables and data from provided model schema
base_schema = get_tenant_base_schema()
clone_schema = CloneSchema()
clone_schema.clone_schema(base_schema, self.schema_name)
call_command('migrate_schemas',
tenant=True,
fake=True,
schema_name=self.schema_name,
interactive=False,
verbosity=verbosity)
else:
# create the schema
cursor.execute('CREATE SCHEMA %s' % self.schema_name)
call_command('migrate_schemas',
tenant=True,
schema_name=self.schema_name,
interactive=False,
verbosity=verbosity)
connection.set_schema_to_public()
def get_primary_domain(self):
"""
Returns the primary domain of the tenant
"""
try:
domain = self.domains.get(is_primary=True)
return domain
except get_tenant_domain_model().DoesNotExist:
return None
def reverse(self, request, view_name):
"""
Returns the URL of this tenant.
"""
http_type = 'https://' if request.is_secure() else 'http://'
domain = get_current_site(request).domain
url = ''.join((http_type, self.schema_name, '.', domain, reverse(view_name)))
return url
|
tomturner/django-tenants | django_tenants/models.py | TenantMixin.create_schema | python | def create_schema(self, check_if_exists=False, sync_schema=True,
verbosity=1):
# safety check
connection = connections[get_tenant_database_alias()]
_check_schema_name(self.schema_name)
cursor = connection.cursor()
if check_if_exists and schema_exists(self.schema_name):
return False
fake_migrations = get_creation_fakes_migrations()
if sync_schema:
if fake_migrations:
# copy tables and data from provided model schema
base_schema = get_tenant_base_schema()
clone_schema = CloneSchema()
clone_schema.clone_schema(base_schema, self.schema_name)
call_command('migrate_schemas',
tenant=True,
fake=True,
schema_name=self.schema_name,
interactive=False,
verbosity=verbosity)
else:
# create the schema
cursor.execute('CREATE SCHEMA %s' % self.schema_name)
call_command('migrate_schemas',
tenant=True,
schema_name=self.schema_name,
interactive=False,
verbosity=verbosity)
connection.set_schema_to_public() | Creates the schema 'schema_name' for this tenant. Optionally checks if
the schema already exists before creating it. Returns true if the
schema was created, false otherwise. | train | https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/django_tenants/models.py#L156-L196 | [
"def _check_schema_name(name):\n if not _is_valid_schema_name(name):\n raise ValidationError(\"Invalid string used for the schema name.\")\n",
"def get_tenant_database_alias():\n return getattr(settings, 'TENANT_DB_ALIAS', DEFAULT_DB_ALIAS)\n",
"def schema_exists(schema_name):\n _connection = co... | class TenantMixin(models.Model):
"""
All tenant models must inherit this class.
"""
auto_drop_schema = False
"""
USE THIS WITH CAUTION!
Set this flag to true on a parent class if you want the schema to be
automatically deleted if the tenant row gets deleted.
"""
auto_create_schema = True
"""
Set this flag to false on a parent class if you don't want the schema
to be automatically created upon save.
"""
schema_name = models.CharField(max_length=63, unique=True, db_index=True,
validators=[_check_schema_name])
domain_url = None
"""
Leave this as None. Stores the current domain url so it can be used in the logs
"""
_previous_tenant = []
class Meta:
abstract = True
def __enter__(self):
"""
Syntax sugar which helps in celery tasks, cron jobs, and other scripts
Usage:
with Tenant.objects.get(schema_name='test') as tenant:
# run some code in tenant test
# run some code in previous tenant (public probably)
"""
connection = connections[get_tenant_database_alias()]
self._previous_tenant.append(connection.tenant)
self.activate()
def __exit__(self, exc_type, exc_val, exc_tb):
connection = connections[get_tenant_database_alias()]
connection.set_tenant(self._previous_tenant.pop())
def activate(self):
"""
Syntax sugar that helps at django shell with fast tenant changing
Usage:
Tenant.objects.get(schema_name='test').activate()
"""
connection = connections[get_tenant_database_alias()]
connection.set_tenant(self)
@classmethod
def deactivate(cls):
"""
Syntax sugar, return to public schema
Usage:
test_tenant.deactivate()
# or simpler
Tenant.deactivate()
"""
connection = connections[get_tenant_database_alias()]
connection.set_schema_to_public()
def save(self, verbosity=1, *args, **kwargs):
connection = connections[get_tenant_database_alias()]
is_new = self.pk is None
has_schema = hasattr(connection, 'schema_name')
if has_schema and is_new and connection.schema_name != get_public_schema_name():
raise Exception("Can't create tenant outside the public schema. "
"Current schema is %s." % connection.schema_name)
elif has_schema and not is_new and connection.schema_name not in (self.schema_name, get_public_schema_name()):
raise Exception("Can't update tenant outside it's own schema or "
"the public schema. Current schema is %s."
% connection.schema_name)
super().save(*args, **kwargs)
if has_schema and is_new and self.auto_create_schema:
try:
self.create_schema(check_if_exists=True, verbosity=verbosity)
post_schema_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
except Exception:
# We failed creating the tenant, delete what we created and
# re-raise the exception
self.delete(force_drop=True)
raise
elif is_new:
# although we are not using the schema functions directly, the signal might be registered by a listener
schema_needs_to_be_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
elif not is_new and self.auto_create_schema and not schema_exists(self.schema_name):
# Create schemas for existing models, deleting only the schema on failure
try:
self.create_schema(check_if_exists=True, verbosity=verbosity)
post_schema_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
except Exception:
# We failed creating the schema, delete what we created and
# re-raise the exception
self._drop_schema()
raise
def serializable_fields(self):
""" in certain cases the user model isn't serializable so you may want to only send the id """
return self
def _drop_schema(self, force_drop=False):
""" Drops the schema"""
connection = connections[get_tenant_database_alias()]
has_schema = hasattr(connection, 'schema_name')
if has_schema and connection.schema_name not in (self.schema_name, get_public_schema_name()):
raise Exception("Can't delete tenant outside it's own schema or "
"the public schema. Current schema is %s."
% connection.schema_name)
if has_schema and schema_exists(self.schema_name) and (self.auto_drop_schema or force_drop):
self.pre_drop()
cursor = connection.cursor()
cursor.execute('DROP SCHEMA %s CASCADE' % self.schema_name)
def pre_drop(self):
"""
This is a routine which you could override to backup the tenant schema before dropping.
:return:
"""
def delete(self, force_drop=False, *args, **kwargs):
"""
Deletes this row. Drops the tenant's schema if the attribute
auto_drop_schema set to True.
"""
self._drop_schema(force_drop)
super().delete(*args, **kwargs)
def get_primary_domain(self):
"""
Returns the primary domain of the tenant
"""
try:
domain = self.domains.get(is_primary=True)
return domain
except get_tenant_domain_model().DoesNotExist:
return None
def reverse(self, request, view_name):
"""
Returns the URL of this tenant.
"""
http_type = 'https://' if request.is_secure() else 'http://'
domain = get_current_site(request).domain
url = ''.join((http_type, self.schema_name, '.', domain, reverse(view_name)))
return url
|
tomturner/django-tenants | django_tenants/models.py | TenantMixin.get_primary_domain | python | def get_primary_domain(self):
try:
domain = self.domains.get(is_primary=True)
return domain
except get_tenant_domain_model().DoesNotExist:
return None | Returns the primary domain of the tenant | train | https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/django_tenants/models.py#L198-L206 | [
"def get_tenant_domain_model():\n return get_model(settings.TENANT_DOMAIN_MODEL)\n"
] | class TenantMixin(models.Model):
"""
All tenant models must inherit this class.
"""
auto_drop_schema = False
"""
USE THIS WITH CAUTION!
Set this flag to true on a parent class if you want the schema to be
automatically deleted if the tenant row gets deleted.
"""
auto_create_schema = True
"""
Set this flag to false on a parent class if you don't want the schema
to be automatically created upon save.
"""
schema_name = models.CharField(max_length=63, unique=True, db_index=True,
validators=[_check_schema_name])
domain_url = None
"""
Leave this as None. Stores the current domain url so it can be used in the logs
"""
_previous_tenant = []
class Meta:
abstract = True
def __enter__(self):
"""
Syntax sugar which helps in celery tasks, cron jobs, and other scripts
Usage:
with Tenant.objects.get(schema_name='test') as tenant:
# run some code in tenant test
# run some code in previous tenant (public probably)
"""
connection = connections[get_tenant_database_alias()]
self._previous_tenant.append(connection.tenant)
self.activate()
def __exit__(self, exc_type, exc_val, exc_tb):
connection = connections[get_tenant_database_alias()]
connection.set_tenant(self._previous_tenant.pop())
def activate(self):
"""
Syntax sugar that helps at django shell with fast tenant changing
Usage:
Tenant.objects.get(schema_name='test').activate()
"""
connection = connections[get_tenant_database_alias()]
connection.set_tenant(self)
@classmethod
def deactivate(cls):
"""
Syntax sugar, return to public schema
Usage:
test_tenant.deactivate()
# or simpler
Tenant.deactivate()
"""
connection = connections[get_tenant_database_alias()]
connection.set_schema_to_public()
def save(self, verbosity=1, *args, **kwargs):
connection = connections[get_tenant_database_alias()]
is_new = self.pk is None
has_schema = hasattr(connection, 'schema_name')
if has_schema and is_new and connection.schema_name != get_public_schema_name():
raise Exception("Can't create tenant outside the public schema. "
"Current schema is %s." % connection.schema_name)
elif has_schema and not is_new and connection.schema_name not in (self.schema_name, get_public_schema_name()):
raise Exception("Can't update tenant outside it's own schema or "
"the public schema. Current schema is %s."
% connection.schema_name)
super().save(*args, **kwargs)
if has_schema and is_new and self.auto_create_schema:
try:
self.create_schema(check_if_exists=True, verbosity=verbosity)
post_schema_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
except Exception:
# We failed creating the tenant, delete what we created and
# re-raise the exception
self.delete(force_drop=True)
raise
elif is_new:
# although we are not using the schema functions directly, the signal might be registered by a listener
schema_needs_to_be_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
elif not is_new and self.auto_create_schema and not schema_exists(self.schema_name):
# Create schemas for existing models, deleting only the schema on failure
try:
self.create_schema(check_if_exists=True, verbosity=verbosity)
post_schema_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
except Exception:
# We failed creating the schema, delete what we created and
# re-raise the exception
self._drop_schema()
raise
def serializable_fields(self):
""" in certain cases the user model isn't serializable so you may want to only send the id """
return self
def _drop_schema(self, force_drop=False):
""" Drops the schema"""
connection = connections[get_tenant_database_alias()]
has_schema = hasattr(connection, 'schema_name')
if has_schema and connection.schema_name not in (self.schema_name, get_public_schema_name()):
raise Exception("Can't delete tenant outside it's own schema or "
"the public schema. Current schema is %s."
% connection.schema_name)
if has_schema and schema_exists(self.schema_name) and (self.auto_drop_schema or force_drop):
self.pre_drop()
cursor = connection.cursor()
cursor.execute('DROP SCHEMA %s CASCADE' % self.schema_name)
def pre_drop(self):
"""
This is a routine which you could override to backup the tenant schema before dropping.
:return:
"""
def delete(self, force_drop=False, *args, **kwargs):
"""
Deletes this row. Drops the tenant's schema if the attribute
auto_drop_schema set to True.
"""
self._drop_schema(force_drop)
super().delete(*args, **kwargs)
def create_schema(self, check_if_exists=False, sync_schema=True,
verbosity=1):
"""
Creates the schema 'schema_name' for this tenant. Optionally checks if
the schema already exists before creating it. Returns true if the
schema was created, false otherwise.
"""
# safety check
connection = connections[get_tenant_database_alias()]
_check_schema_name(self.schema_name)
cursor = connection.cursor()
if check_if_exists and schema_exists(self.schema_name):
return False
fake_migrations = get_creation_fakes_migrations()
if sync_schema:
if fake_migrations:
# copy tables and data from provided model schema
base_schema = get_tenant_base_schema()
clone_schema = CloneSchema()
clone_schema.clone_schema(base_schema, self.schema_name)
call_command('migrate_schemas',
tenant=True,
fake=True,
schema_name=self.schema_name,
interactive=False,
verbosity=verbosity)
else:
# create the schema
cursor.execute('CREATE SCHEMA %s' % self.schema_name)
call_command('migrate_schemas',
tenant=True,
schema_name=self.schema_name,
interactive=False,
verbosity=verbosity)
connection.set_schema_to_public()
def reverse(self, request, view_name):
"""
Returns the URL of this tenant.
"""
http_type = 'https://' if request.is_secure() else 'http://'
domain = get_current_site(request).domain
url = ''.join((http_type, self.schema_name, '.', domain, reverse(view_name)))
return url
|
tomturner/django-tenants | django_tenants/models.py | TenantMixin.reverse | python | def reverse(self, request, view_name):
http_type = 'https://' if request.is_secure() else 'http://'
domain = get_current_site(request).domain
url = ''.join((http_type, self.schema_name, '.', domain, reverse(view_name)))
return url | Returns the URL of this tenant. | train | https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/django_tenants/models.py#L208-L218 | null | class TenantMixin(models.Model):
"""
All tenant models must inherit this class.
"""
auto_drop_schema = False
"""
USE THIS WITH CAUTION!
Set this flag to true on a parent class if you want the schema to be
automatically deleted if the tenant row gets deleted.
"""
auto_create_schema = True
"""
Set this flag to false on a parent class if you don't want the schema
to be automatically created upon save.
"""
schema_name = models.CharField(max_length=63, unique=True, db_index=True,
validators=[_check_schema_name])
domain_url = None
"""
Leave this as None. Stores the current domain url so it can be used in the logs
"""
_previous_tenant = []
class Meta:
abstract = True
def __enter__(self):
"""
Syntax sugar which helps in celery tasks, cron jobs, and other scripts
Usage:
with Tenant.objects.get(schema_name='test') as tenant:
# run some code in tenant test
# run some code in previous tenant (public probably)
"""
connection = connections[get_tenant_database_alias()]
self._previous_tenant.append(connection.tenant)
self.activate()
def __exit__(self, exc_type, exc_val, exc_tb):
connection = connections[get_tenant_database_alias()]
connection.set_tenant(self._previous_tenant.pop())
def activate(self):
"""
Syntax sugar that helps at django shell with fast tenant changing
Usage:
Tenant.objects.get(schema_name='test').activate()
"""
connection = connections[get_tenant_database_alias()]
connection.set_tenant(self)
@classmethod
def deactivate(cls):
"""
Syntax sugar, return to public schema
Usage:
test_tenant.deactivate()
# or simpler
Tenant.deactivate()
"""
connection = connections[get_tenant_database_alias()]
connection.set_schema_to_public()
def save(self, verbosity=1, *args, **kwargs):
connection = connections[get_tenant_database_alias()]
is_new = self.pk is None
has_schema = hasattr(connection, 'schema_name')
if has_schema and is_new and connection.schema_name != get_public_schema_name():
raise Exception("Can't create tenant outside the public schema. "
"Current schema is %s." % connection.schema_name)
elif has_schema and not is_new and connection.schema_name not in (self.schema_name, get_public_schema_name()):
raise Exception("Can't update tenant outside it's own schema or "
"the public schema. Current schema is %s."
% connection.schema_name)
super().save(*args, **kwargs)
if has_schema and is_new and self.auto_create_schema:
try:
self.create_schema(check_if_exists=True, verbosity=verbosity)
post_schema_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
except Exception:
# We failed creating the tenant, delete what we created and
# re-raise the exception
self.delete(force_drop=True)
raise
elif is_new:
# although we are not using the schema functions directly, the signal might be registered by a listener
schema_needs_to_be_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
elif not is_new and self.auto_create_schema and not schema_exists(self.schema_name):
# Create schemas for existing models, deleting only the schema on failure
try:
self.create_schema(check_if_exists=True, verbosity=verbosity)
post_schema_sync.send(sender=TenantMixin, tenant=self.serializable_fields())
except Exception:
# We failed creating the schema, delete what we created and
# re-raise the exception
self._drop_schema()
raise
def serializable_fields(self):
""" in certain cases the user model isn't serializable so you may want to only send the id """
return self
def _drop_schema(self, force_drop=False):
""" Drops the schema"""
connection = connections[get_tenant_database_alias()]
has_schema = hasattr(connection, 'schema_name')
if has_schema and connection.schema_name not in (self.schema_name, get_public_schema_name()):
raise Exception("Can't delete tenant outside it's own schema or "
"the public schema. Current schema is %s."
% connection.schema_name)
if has_schema and schema_exists(self.schema_name) and (self.auto_drop_schema or force_drop):
self.pre_drop()
cursor = connection.cursor()
cursor.execute('DROP SCHEMA %s CASCADE' % self.schema_name)
def pre_drop(self):
"""
This is a routine which you could override to backup the tenant schema before dropping.
:return:
"""
def delete(self, force_drop=False, *args, **kwargs):
"""
Deletes this row. Drops the tenant's schema if the attribute
auto_drop_schema set to True.
"""
self._drop_schema(force_drop)
super().delete(*args, **kwargs)
def create_schema(self, check_if_exists=False, sync_schema=True,
verbosity=1):
"""
Creates the schema 'schema_name' for this tenant. Optionally checks if
the schema already exists before creating it. Returns true if the
schema was created, false otherwise.
"""
# safety check
connection = connections[get_tenant_database_alias()]
_check_schema_name(self.schema_name)
cursor = connection.cursor()
if check_if_exists and schema_exists(self.schema_name):
return False
fake_migrations = get_creation_fakes_migrations()
if sync_schema:
if fake_migrations:
# copy tables and data from provided model schema
base_schema = get_tenant_base_schema()
clone_schema = CloneSchema()
clone_schema.clone_schema(base_schema, self.schema_name)
call_command('migrate_schemas',
tenant=True,
fake=True,
schema_name=self.schema_name,
interactive=False,
verbosity=verbosity)
else:
# create the schema
cursor.execute('CREATE SCHEMA %s' % self.schema_name)
call_command('migrate_schemas',
tenant=True,
schema_name=self.schema_name,
interactive=False,
verbosity=verbosity)
connection.set_schema_to_public()
def get_primary_domain(self):
"""
Returns the primary domain of the tenant
"""
try:
domain = self.domains.get(is_primary=True)
return domain
except get_tenant_domain_model().DoesNotExist:
return None
|
tomturner/django-tenants | django_tenants/routers.py | TenantSyncRouter.app_in_list | python | def app_in_list(self, app_label, apps_list):
appconfig = django_apps.get_app_config(app_label)
appconfig_full_name = '{}.{}'.format(
appconfig.__module__, appconfig.__class__.__name__)
return (appconfig.name in apps_list) or (appconfig_full_name in apps_list) | Is 'app_label' present in 'apps_list'?
apps_list is either settings.SHARED_APPS or settings.TENANT_APPS, a
list of app names.
We check the presence of the app's name or the full path to the apps's
AppConfig class.
https://docs.djangoproject.com/en/1.8/ref/applications/#configuring-applications | train | https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/django_tenants/routers.py#L11-L25 | null | class TenantSyncRouter(object):
"""
A router to control which applications will be synced,
depending if we are syncing the shared apps or the tenant apps.
"""
def allow_migrate(self, db, app_label, model_name=None, **hints):
# the imports below need to be done here else django <1.5 goes crazy
# https://code.djangoproject.com/ticket/20704
from django.db import connections
from django_tenants.utils import get_public_schema_name, get_tenant_database_alias
if db != get_tenant_database_alias():
return False
connection = connections[db]
if connection.schema_name == get_public_schema_name():
if not self.app_in_list(app_label, settings.SHARED_APPS):
return False
else:
if not self.app_in_list(app_label, settings.TENANT_APPS):
return False
return None
|
tomturner/django-tenants | django_tenants/staticfiles/finders.py | TenantFileSystemFinder.locations | python | def locations(self):
if self._locations.get(connection.schema_name, None) is None:
schema_locations = []
for root in settings.MULTITENANT_STATICFILES_DIRS:
root = utils.parse_tenant_config_path(root)
if isinstance(root, (list, tuple)):
prefix, root = root
else:
prefix = ""
if (prefix, root) not in schema_locations:
schema_locations.append((prefix, root))
self._locations[connection.schema_name] = schema_locations
return self._locations[connection.schema_name] | Lazy retrieval of list of locations with static files based on current tenant schema.
:return: The list of static file dirs that have been configured for this tenant. | train | https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/django_tenants/staticfiles/finders.py#L31-L51 | [
"def parse_tenant_config_path(config_path):\n \"\"\"\n Convenience function for parsing django-tenants' path configuration strings.\n\n If the string contains '%s', then the current tenant's schema name will be inserted at that location. Otherwise\n the schema name will be appended to the end of the str... | class TenantFileSystemFinder(FileSystemFinder):
"""
A static files finder that uses the ``MULTITENANT_STATICFILES_DIRS`` setting
to locate files for different tenants.
The only difference between this and the standard FileSystemFinder implementation
is that we need to keep references to the storage locations of the static files,
as well as maps of dir paths to an appropriate storage instance, for each tenant.
"""
def __init__(self, app_names=None, *args, **kwargs):
# Don't call parent's init method as settings.STATICFILES_DIRS will be loaded
# by the standard FileSystemFinder already.
# Instead of initializing the locations and storages now, we'll do so lazily
# the first time they are needed.
self._locations = {}
self._storages = {}
@property
@locations.setter
def locations(self, value):
self._locations[connection.schema_name] = value
@property
def storages(self):
"""
Lazy retrieval of list of storage handlers for the current tenant.
:return: A ,a[ pf dir paths to an appropriate storage instance.
"""
if self._storages.get(connection.schema_name, None) is None:
schema_storages = OrderedDict()
for prefix, root in self.locations:
filesystem_storage = TenantStaticFilesStorage(location=root)
filesystem_storage.prefix = prefix
schema_storages[root] = filesystem_storage
self._storages[connection.schema_name] = schema_storages
return self._storages[connection.schema_name]
@storages.setter
def storages(self, value):
self._storages[connection.schema_name] = value
def check(self, **kwargs):
"""
In addition to parent class' checks, also ensure that MULTITENANT_STATICFILES_DIRS
is a tuple or a list.
"""
errors = super().check(**kwargs)
multitenant_staticfiles_dirs = settings.MULTITENANT_STATICFILES_DIRS
if not isinstance(multitenant_staticfiles_dirs, (list, tuple)):
errors.append(
Error(
"Your MULTITENANT_STATICFILES_DIRS setting is not a tuple or list.",
hint="Perhaps you forgot a trailing comma?",
)
)
return errors
|
tomturner/django-tenants | django_tenants/staticfiles/finders.py | TenantFileSystemFinder.storages | python | def storages(self):
if self._storages.get(connection.schema_name, None) is None:
schema_storages = OrderedDict()
for prefix, root in self.locations:
filesystem_storage = TenantStaticFilesStorage(location=root)
filesystem_storage.prefix = prefix
schema_storages[root] = filesystem_storage
self._storages[connection.schema_name] = schema_storages
return self._storages[connection.schema_name] | Lazy retrieval of list of storage handlers for the current tenant.
:return: A ,a[ pf dir paths to an appropriate storage instance. | train | https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/django_tenants/staticfiles/finders.py#L58-L73 | null | class TenantFileSystemFinder(FileSystemFinder):
"""
A static files finder that uses the ``MULTITENANT_STATICFILES_DIRS`` setting
to locate files for different tenants.
The only difference between this and the standard FileSystemFinder implementation
is that we need to keep references to the storage locations of the static files,
as well as maps of dir paths to an appropriate storage instance, for each tenant.
"""
def __init__(self, app_names=None, *args, **kwargs):
# Don't call parent's init method as settings.STATICFILES_DIRS will be loaded
# by the standard FileSystemFinder already.
# Instead of initializing the locations and storages now, we'll do so lazily
# the first time they are needed.
self._locations = {}
self._storages = {}
@property
def locations(self):
"""
Lazy retrieval of list of locations with static files based on current tenant schema.
:return: The list of static file dirs that have been configured for this tenant.
"""
if self._locations.get(connection.schema_name, None) is None:
schema_locations = []
for root in settings.MULTITENANT_STATICFILES_DIRS:
root = utils.parse_tenant_config_path(root)
if isinstance(root, (list, tuple)):
prefix, root = root
else:
prefix = ""
if (prefix, root) not in schema_locations:
schema_locations.append((prefix, root))
self._locations[connection.schema_name] = schema_locations
return self._locations[connection.schema_name]
@locations.setter
def locations(self, value):
self._locations[connection.schema_name] = value
@property
@storages.setter
def storages(self, value):
self._storages[connection.schema_name] = value
def check(self, **kwargs):
"""
In addition to parent class' checks, also ensure that MULTITENANT_STATICFILES_DIRS
is a tuple or a list.
"""
errors = super().check(**kwargs)
multitenant_staticfiles_dirs = settings.MULTITENANT_STATICFILES_DIRS
if not isinstance(multitenant_staticfiles_dirs, (list, tuple)):
errors.append(
Error(
"Your MULTITENANT_STATICFILES_DIRS setting is not a tuple or list.",
hint="Perhaps you forgot a trailing comma?",
)
)
return errors
|
tomturner/django-tenants | django_tenants/staticfiles/finders.py | TenantFileSystemFinder.check | python | def check(self, **kwargs):
errors = super().check(**kwargs)
multitenant_staticfiles_dirs = settings.MULTITENANT_STATICFILES_DIRS
if not isinstance(multitenant_staticfiles_dirs, (list, tuple)):
errors.append(
Error(
"Your MULTITENANT_STATICFILES_DIRS setting is not a tuple or list.",
hint="Perhaps you forgot a trailing comma?",
)
)
return errors | In addition to parent class' checks, also ensure that MULTITENANT_STATICFILES_DIRS
is a tuple or a list. | train | https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/django_tenants/staticfiles/finders.py#L79-L95 | null | class TenantFileSystemFinder(FileSystemFinder):
"""
A static files finder that uses the ``MULTITENANT_STATICFILES_DIRS`` setting
to locate files for different tenants.
The only difference between this and the standard FileSystemFinder implementation
is that we need to keep references to the storage locations of the static files,
as well as maps of dir paths to an appropriate storage instance, for each tenant.
"""
def __init__(self, app_names=None, *args, **kwargs):
# Don't call parent's init method as settings.STATICFILES_DIRS will be loaded
# by the standard FileSystemFinder already.
# Instead of initializing the locations and storages now, we'll do so lazily
# the first time they are needed.
self._locations = {}
self._storages = {}
@property
def locations(self):
"""
Lazy retrieval of list of locations with static files based on current tenant schema.
:return: The list of static file dirs that have been configured for this tenant.
"""
if self._locations.get(connection.schema_name, None) is None:
schema_locations = []
for root in settings.MULTITENANT_STATICFILES_DIRS:
root = utils.parse_tenant_config_path(root)
if isinstance(root, (list, tuple)):
prefix, root = root
else:
prefix = ""
if (prefix, root) not in schema_locations:
schema_locations.append((prefix, root))
self._locations[connection.schema_name] = schema_locations
return self._locations[connection.schema_name]
@locations.setter
def locations(self, value):
self._locations[connection.schema_name] = value
@property
def storages(self):
"""
Lazy retrieval of list of storage handlers for the current tenant.
:return: A ,a[ pf dir paths to an appropriate storage instance.
"""
if self._storages.get(connection.schema_name, None) is None:
schema_storages = OrderedDict()
for prefix, root in self.locations:
filesystem_storage = TenantStaticFilesStorage(location=root)
filesystem_storage.prefix = prefix
schema_storages[root] = filesystem_storage
self._storages[connection.schema_name] = schema_storages
return self._storages[connection.schema_name]
@storages.setter
def storages(self, value):
self._storages[connection.schema_name] = value
|
tomturner/django-tenants | django_tenants/postgresql_backend/_constraints.py | get_constraints | python | def get_constraints(self, cursor, table_name):
constraints = {}
# Loop over the key table, collecting things as constraints. The column
# array must return column names in the same order in which they were
# created.
# The subquery containing generate_series can be replaced with
# "WITH ORDINALITY" when support for PostgreSQL 9.3 is dropped.
cursor.execute("""
SELECT
c.conname,
array(
SELECT attname
FROM (
SELECT unnest(c.conkey) AS colid,
generate_series(1, array_length(c.conkey, 1)) AS arridx
) AS cols
JOIN pg_attribute AS ca ON cols.colid = ca.attnum
WHERE ca.attrelid = c.conrelid
ORDER BY cols.arridx
),
c.contype,
(SELECT fkc.relname || '.' || fka.attname
FROM pg_attribute AS fka
JOIN pg_class AS fkc ON fka.attrelid = fkc.oid
WHERE fka.attrelid = c.confrelid AND fka.attnum = c.confkey[1]),
cl.reloptions
FROM pg_constraint AS c
JOIN pg_class AS cl ON c.conrelid = cl.oid
JOIN pg_namespace AS ns ON cl.relnamespace = ns.oid
WHERE ns.nspname = %s AND cl.relname = %s
""", [self.connection.schema_name, table_name])
for constraint, columns, kind, used_cols, options in cursor.fetchall():
constraints[constraint] = {
"columns": columns,
"primary_key": kind == "p",
"unique": kind in ["p", "u"],
"foreign_key": tuple(used_cols.split(".", 1)) if kind == "f" else None,
"check": kind == "c",
"index": False,
"definition": None,
"options": options,
}
# Now get indexes
# The row_number() function for ordering the index fields can be
# replaced by WITH ORDINALITY in the unnest() functions when support
# for PostgreSQL 9.3 is dropped.
cursor.execute("""
SELECT
indexname, array_agg(attname ORDER BY rnum), indisunique, indisprimary,
array_agg(ordering ORDER BY rnum), amname, exprdef, s2.attoptions
FROM (
SELECT
row_number() OVER () as rnum, c2.relname as indexname,
idx.*, attr.attname, am.amname,
CASE
WHEN idx.indexprs IS NOT NULL THEN
pg_get_indexdef(idx.indexrelid)
END AS exprdef,
CASE am.amname
WHEN 'btree' THEN
CASE (option & 1)
WHEN 1 THEN 'DESC' ELSE 'ASC'
END
END as ordering,
c2.reloptions as attoptions
FROM (
SELECT
*, unnest(i.indkey) as key, unnest(i.indoption) as option
FROM pg_index i
) idx
LEFT JOIN pg_class c ON idx.indrelid = c.oid
LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
LEFT JOIN pg_class c2 ON idx.indexrelid = c2.oid
LEFT JOIN pg_am am ON c2.relam = am.oid
LEFT JOIN pg_attribute attr ON attr.attrelid = c.oid AND attr.attnum = idx.key
WHERE c.relname = %s and n.nspname = %s
) s2
GROUP BY indexname, indisunique, indisprimary, amname, exprdef, attoptions;
""", [table_name, self.connection.schema_name])
for index, columns, unique, primary, orders, type_, definition, options in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": columns if columns != [None] else [],
"orders": orders if orders != [None] else [],
"primary_key": primary,
"unique": unique,
"foreign_key": None,
"check": False,
"index": True,
"type": Index.suffix if type_ == 'btree' else type_,
"definition": definition,
"options": options,
}
return constraints | Retrieve any constraints or keys (unique, pk, fk, check, index) across
one or more columns. Also retrieve the definition of expression-based
indexes. | train | https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/django_tenants/postgresql_backend/_constraints.py#L4-L102 | null | from django.db.models.indexes import Index
|
tomturner/django-tenants | django_tenants/postgresql_backend/base.py | DatabaseWrapper.set_tenant | python | def set_tenant(self, tenant, include_public=True):
self.tenant = tenant
self.schema_name = tenant.schema_name
self.include_public_schema = include_public
self.set_settings_schema(self.schema_name)
self.search_path_set = False | Main API method to current database schema,
but it does not actually modify the db connection. | train | https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/django_tenants/postgresql_backend/base.py#L67-L76 | [
"def set_settings_schema(self, schema_name):\n self.settings_dict['SCHEMA'] = schema_name\n"
] | class DatabaseWrapper(original_backend.DatabaseWrapper):
"""
Adds the capability to manipulate the search_path using set_tenant and set_schema_name
"""
include_public_schema = True
def __init__(self, *args, **kwargs):
self.search_path_set = None
self.tenant = None
self.schema_name = None
super().__init__(*args, **kwargs)
# Use a patched version of the DatabaseIntrospection that only returns the table list for the
# currently selected schema.
self.introspection = DatabaseSchemaIntrospection(self)
self.set_schema_to_public()
def close(self):
self.search_path_set = False
super().close()
def set_schema(self, schema_name, include_public=True):
"""
Main API method to current database schema,
but it does not actually modify the db connection.
"""
self.tenant = FakeTenant(schema_name=schema_name)
self.schema_name = schema_name
self.include_public_schema = include_public
self.set_settings_schema(schema_name)
self.search_path_set = False
# Content type can no longer be cached as public and tenant schemas
# have different models. If someone wants to change this, the cache
# needs to be separated between public and shared schemas. If this
# cache isn't cleared, this can cause permission problems. For example,
# on public, a particular model has id 14, but on the tenants it has
# the id 15. if 14 is cached instead of 15, the permissions for the
# wrong model will be fetched.
ContentType.objects.clear_cache()
def set_schema_to_public(self):
"""
Instructs to stay in the common 'public' schema.
"""
self.tenant = FakeTenant(schema_name=get_public_schema_name())
self.schema_name = get_public_schema_name()
self.set_settings_schema(self.schema_name)
self.search_path_set = False
def set_settings_schema(self, schema_name):
self.settings_dict['SCHEMA'] = schema_name
def get_schema(self):
warnings.warn("connection.get_schema() is deprecated, use connection.schema_name instead.",
category=DeprecationWarning)
return self.schema_name
def get_tenant(self):
warnings.warn("connection.get_tenant() is deprecated, use connection.tenant instead.",
category=DeprecationWarning)
return self.tenant
def _cursor(self, name=None):
"""
Here it happens. We hope every Django db operation using PostgreSQL
must go through this to get the cursor handle. We change the path.
"""
if name:
# Only supported and required by Django 1.11 (server-side cursor)
cursor = super()._cursor(name=name)
else:
cursor = super()._cursor()
# optionally limit the number of executions - under load, the execution
# of `set search_path` can be quite time consuming
if (not get_limit_set_calls()) or not self.search_path_set:
# Actual search_path modification for the cursor. Database will
# search schemata from left to right when looking for the object
# (table, index, sequence, etc.).
if not self.schema_name:
raise ImproperlyConfigured("Database schema not set. Did you forget "
"to call set_schema() or set_tenant()?")
_check_schema_name(self.schema_name)
public_schema_name = get_public_schema_name()
search_paths = []
if self.schema_name == public_schema_name:
search_paths = [public_schema_name]
elif self.include_public_schema:
search_paths = [self.schema_name, public_schema_name]
else:
search_paths = [self.schema_name]
search_paths.extend(EXTRA_SEARCH_PATHS)
if name:
# Named cursor can only be used once
cursor_for_search_path = self.connection.cursor()
else:
# Reuse
cursor_for_search_path = cursor
# In the event that an error already happened in this transaction and we are going
# to rollback we should just ignore database error when setting the search_path
# if the next instruction is not a rollback it will just fail also, so
# we do not have to worry that it's not the good one
try:
cursor_for_search_path.execute('SET search_path = {0}'.format(','.join(search_paths)))
except (django.db.utils.DatabaseError, psycopg2.InternalError):
self.search_path_set = False
else:
self.search_path_set = True
if name:
cursor_for_search_path.close()
return cursor
|
tomturner/django-tenants | django_tenants/postgresql_backend/base.py | DatabaseWrapper.set_schema_to_public | python | def set_schema_to_public(self):
self.tenant = FakeTenant(schema_name=get_public_schema_name())
self.schema_name = get_public_schema_name()
self.set_settings_schema(self.schema_name)
self.search_path_set = False | Instructs to stay in the common 'public' schema. | train | https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/django_tenants/postgresql_backend/base.py#L97-L104 | [
"def get_public_schema_name():\n return getattr(settings, 'PUBLIC_SCHEMA_NAME', 'public')\n",
"def set_settings_schema(self, schema_name):\n self.settings_dict['SCHEMA'] = schema_name\n"
] | class DatabaseWrapper(original_backend.DatabaseWrapper):
"""
Adds the capability to manipulate the search_path using set_tenant and set_schema_name
"""
include_public_schema = True
def __init__(self, *args, **kwargs):
self.search_path_set = None
self.tenant = None
self.schema_name = None
super().__init__(*args, **kwargs)
# Use a patched version of the DatabaseIntrospection that only returns the table list for the
# currently selected schema.
self.introspection = DatabaseSchemaIntrospection(self)
self.set_schema_to_public()
def close(self):
self.search_path_set = False
super().close()
def set_tenant(self, tenant, include_public=True):
"""
Main API method to current database schema,
but it does not actually modify the db connection.
"""
self.tenant = tenant
self.schema_name = tenant.schema_name
self.include_public_schema = include_public
self.set_settings_schema(self.schema_name)
self.search_path_set = False
def set_schema(self, schema_name, include_public=True):
"""
Main API method to current database schema,
but it does not actually modify the db connection.
"""
self.tenant = FakeTenant(schema_name=schema_name)
self.schema_name = schema_name
self.include_public_schema = include_public
self.set_settings_schema(schema_name)
self.search_path_set = False
# Content type can no longer be cached as public and tenant schemas
# have different models. If someone wants to change this, the cache
# needs to be separated between public and shared schemas. If this
# cache isn't cleared, this can cause permission problems. For example,
# on public, a particular model has id 14, but on the tenants it has
# the id 15. if 14 is cached instead of 15, the permissions for the
# wrong model will be fetched.
ContentType.objects.clear_cache()
def set_settings_schema(self, schema_name):
self.settings_dict['SCHEMA'] = schema_name
def get_schema(self):
warnings.warn("connection.get_schema() is deprecated, use connection.schema_name instead.",
category=DeprecationWarning)
return self.schema_name
def get_tenant(self):
warnings.warn("connection.get_tenant() is deprecated, use connection.tenant instead.",
category=DeprecationWarning)
return self.tenant
def _cursor(self, name=None):
"""
Here it happens. We hope every Django db operation using PostgreSQL
must go through this to get the cursor handle. We change the path.
"""
if name:
# Only supported and required by Django 1.11 (server-side cursor)
cursor = super()._cursor(name=name)
else:
cursor = super()._cursor()
# optionally limit the number of executions - under load, the execution
# of `set search_path` can be quite time consuming
if (not get_limit_set_calls()) or not self.search_path_set:
# Actual search_path modification for the cursor. Database will
# search schemata from left to right when looking for the object
# (table, index, sequence, etc.).
if not self.schema_name:
raise ImproperlyConfigured("Database schema not set. Did you forget "
"to call set_schema() or set_tenant()?")
_check_schema_name(self.schema_name)
public_schema_name = get_public_schema_name()
search_paths = []
if self.schema_name == public_schema_name:
search_paths = [public_schema_name]
elif self.include_public_schema:
search_paths = [self.schema_name, public_schema_name]
else:
search_paths = [self.schema_name]
search_paths.extend(EXTRA_SEARCH_PATHS)
if name:
# Named cursor can only be used once
cursor_for_search_path = self.connection.cursor()
else:
# Reuse
cursor_for_search_path = cursor
# In the event that an error already happened in this transaction and we are going
# to rollback we should just ignore database error when setting the search_path
# if the next instruction is not a rollback it will just fail also, so
# we do not have to worry that it's not the good one
try:
cursor_for_search_path.execute('SET search_path = {0}'.format(','.join(search_paths)))
except (django.db.utils.DatabaseError, psycopg2.InternalError):
self.search_path_set = False
else:
self.search_path_set = True
if name:
cursor_for_search_path.close()
return cursor
|
tomturner/django-tenants | django_tenants/template/loaders/cached.py | Loader.cache_key | python | def cache_key(self, template_name, skip=None):
dirs_prefix = ''
skip_prefix = ''
tenant_prefix = ''
if skip:
matching = [origin.name for origin in skip if origin.template_name == template_name]
if matching:
skip_prefix = self.generate_hash(matching)
if connection.tenant:
tenant_prefix = str(connection.tenant.pk)
return '-'.join(s for s in (str(template_name), tenant_prefix, skip_prefix, dirs_prefix) if s) | Generate a cache key for the template name, dirs, and skip.
If skip is provided, only origins that match template_name are included
in the cache key. This ensures each template is only parsed and cached
once if contained in different extend chains like:
x -> a -> a
y -> a -> a
z -> a -> a | train | https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/django_tenants/template/loaders/cached.py#L13-L38 | null | class Loader(BaseLoader):
|
tomturner/django-tenants | django_tenants/utils.py | get_creation_fakes_migrations | python | def get_creation_fakes_migrations():
faked = getattr(settings, 'TENANT_CREATION_FAKES_MIGRATIONS', False)
if faked:
if not getattr(settings, 'TENANT_BASE_SCHEMA', False):
raise ImproperlyConfigured(
'You must specify a schema name in TENANT_BASE_SCHEMA if '
'TENANT_CREATION_FAKES_MIGRATIONS is enabled.'
)
return faked | If TENANT_CREATION_FAKES_MIGRATIONS, tenants will be created by cloning an
existing schema specified by TENANT_CLONE_BASE. | train | https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/django_tenants/utils.py#L37-L49 | null | import os
from contextlib import ContextDecorator
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import connections, DEFAULT_DB_ALIAS, connection
try:
from django.apps import apps
get_model = apps.get_model
except ImportError:
from django.db.models.loading import get_model
from django.core import mail
def get_tenant_model():
return get_model(settings.TENANT_MODEL)
def get_tenant_domain_model():
return get_model(settings.TENANT_DOMAIN_MODEL)
def get_tenant_database_alias():
return getattr(settings, 'TENANT_DB_ALIAS', DEFAULT_DB_ALIAS)
def get_public_schema_name():
return getattr(settings, 'PUBLIC_SCHEMA_NAME', 'public')
def get_limit_set_calls():
return getattr(settings, 'TENANT_LIMIT_SET_CALLS', False)
def get_tenant_base_schema():
"""
If TENANT_CREATION_FAKES_MIGRATIONS, tenants will be created by cloning an
existing schema specified by TENANT_CLONE_BASE.
"""
schema = getattr(settings, 'TENANT_BASE_SCHEMA', False)
if schema:
if not getattr(settings, 'TENANT_CREATION_FAKES_MIGRATIONS', False):
raise ImproperlyConfigured(
'TENANT_CREATION_FAKES_MIGRATIONS setting must be True to use '
'TENANT_BASE_SCHEMA for cloning.'
)
return schema
class schema_context(ContextDecorator):
def __init__(self, *args, **kwargs):
self.schema_name = args[0]
super().__init__()
def __enter__(self):
self.connection = connections[get_tenant_database_alias()]
self.previous_tenant = connection.tenant
self.connection.set_schema(self.schema_name)
def __exit__(self, *exc):
if self.previous_tenant is None:
self.connection.set_schema_to_public()
else:
self.connection.set_tenant(self.previous_tenant)
class tenant_context(ContextDecorator):
def __init__(self, *args, **kwargs):
self.tenant = args[0]
super().__init__()
def __enter__(self):
self.connection = connections[get_tenant_database_alias()]
self.previous_tenant = connection.tenant
self.connection.set_tenant(self.tenant)
def __exit__(self, *exc):
if self.previous_tenant is None:
self.connection.set_schema_to_public()
else:
self.connection.set_tenant(self.previous_tenant)
def clean_tenant_url(url_string):
"""
Removes the TENANT_TOKEN from a particular string
"""
if hasattr(settings, 'PUBLIC_SCHEMA_URLCONF'):
if (settings.PUBLIC_SCHEMA_URLCONF and
url_string.startswith(settings.PUBLIC_SCHEMA_URLCONF)):
url_string = url_string[len(settings.PUBLIC_SCHEMA_URLCONF):]
return url_string
def remove_www_and_dev(hostname):
"""
Legacy function - just in case someone is still using the old name
"""
return remove_www(hostname)
def remove_www(hostname):
"""
Removes www. from the beginning of the address. Only for
routing purposes. www.test.com/login/ and test.com/login/ should
find the same tenant.
"""
if hostname.startswith("www."):
return hostname[4:]
return hostname
def django_is_in_test_mode():
"""
I know this is very ugly! I'm looking for more elegant solutions.
See: http://stackoverflow.com/questions/6957016/detect-django-testing-mode
"""
return hasattr(mail, 'outbox')
def schema_exists(schema_name):
_connection = connections[get_tenant_database_alias()]
cursor = _connection.cursor()
# check if this schema already exists in the db
sql = 'SELECT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace WHERE LOWER(nspname) = LOWER(%s))'
cursor.execute(sql, (schema_name, ))
row = cursor.fetchone()
if row:
exists = row[0]
else:
exists = False
cursor.close()
return exists
def app_labels(apps_list):
"""
Returns a list of app labels of the given apps_list
"""
return [app.split('.')[-1] for app in apps_list]
def parse_tenant_config_path(config_path):
"""
Convenience function for parsing django-tenants' path configuration strings.
If the string contains '%s', then the current tenant's schema name will be inserted at that location. Otherwise
the schema name will be appended to the end of the string.
:param config_path: A configuration path string that optionally contains '%s' to indicate where the tenant
schema name should be inserted.
:return: The formatted string containing the schema name
"""
try:
# Insert schema name
return config_path % connection.schema_name
except (TypeError, ValueError):
# No %s in string; append schema name at the end
return os.path.join(config_path, connection.schema_name)
|
tomturner/django-tenants | django_tenants/utils.py | get_tenant_base_schema | python | def get_tenant_base_schema():
schema = getattr(settings, 'TENANT_BASE_SCHEMA', False)
if schema:
if not getattr(settings, 'TENANT_CREATION_FAKES_MIGRATIONS', False):
raise ImproperlyConfigured(
'TENANT_CREATION_FAKES_MIGRATIONS setting must be True to use '
'TENANT_BASE_SCHEMA for cloning.'
)
return schema | If TENANT_CREATION_FAKES_MIGRATIONS, tenants will be created by cloning an
existing schema specified by TENANT_CLONE_BASE. | train | https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/django_tenants/utils.py#L52-L64 | null | import os
from contextlib import ContextDecorator
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import connections, DEFAULT_DB_ALIAS, connection
try:
from django.apps import apps
get_model = apps.get_model
except ImportError:
from django.db.models.loading import get_model
from django.core import mail
def get_tenant_model():
return get_model(settings.TENANT_MODEL)
def get_tenant_domain_model():
return get_model(settings.TENANT_DOMAIN_MODEL)
def get_tenant_database_alias():
return getattr(settings, 'TENANT_DB_ALIAS', DEFAULT_DB_ALIAS)
def get_public_schema_name():
return getattr(settings, 'PUBLIC_SCHEMA_NAME', 'public')
def get_limit_set_calls():
return getattr(settings, 'TENANT_LIMIT_SET_CALLS', False)
def get_creation_fakes_migrations():
"""
If TENANT_CREATION_FAKES_MIGRATIONS, tenants will be created by cloning an
existing schema specified by TENANT_CLONE_BASE.
"""
faked = getattr(settings, 'TENANT_CREATION_FAKES_MIGRATIONS', False)
if faked:
if not getattr(settings, 'TENANT_BASE_SCHEMA', False):
raise ImproperlyConfigured(
'You must specify a schema name in TENANT_BASE_SCHEMA if '
'TENANT_CREATION_FAKES_MIGRATIONS is enabled.'
)
return faked
class schema_context(ContextDecorator):
def __init__(self, *args, **kwargs):
self.schema_name = args[0]
super().__init__()
def __enter__(self):
self.connection = connections[get_tenant_database_alias()]
self.previous_tenant = connection.tenant
self.connection.set_schema(self.schema_name)
def __exit__(self, *exc):
if self.previous_tenant is None:
self.connection.set_schema_to_public()
else:
self.connection.set_tenant(self.previous_tenant)
class tenant_context(ContextDecorator):
def __init__(self, *args, **kwargs):
self.tenant = args[0]
super().__init__()
def __enter__(self):
self.connection = connections[get_tenant_database_alias()]
self.previous_tenant = connection.tenant
self.connection.set_tenant(self.tenant)
def __exit__(self, *exc):
if self.previous_tenant is None:
self.connection.set_schema_to_public()
else:
self.connection.set_tenant(self.previous_tenant)
def clean_tenant_url(url_string):
"""
Removes the TENANT_TOKEN from a particular string
"""
if hasattr(settings, 'PUBLIC_SCHEMA_URLCONF'):
if (settings.PUBLIC_SCHEMA_URLCONF and
url_string.startswith(settings.PUBLIC_SCHEMA_URLCONF)):
url_string = url_string[len(settings.PUBLIC_SCHEMA_URLCONF):]
return url_string
def remove_www_and_dev(hostname):
"""
Legacy function - just in case someone is still using the old name
"""
return remove_www(hostname)
def remove_www(hostname):
"""
Removes www. from the beginning of the address. Only for
routing purposes. www.test.com/login/ and test.com/login/ should
find the same tenant.
"""
if hostname.startswith("www."):
return hostname[4:]
return hostname
def django_is_in_test_mode():
"""
I know this is very ugly! I'm looking for more elegant solutions.
See: http://stackoverflow.com/questions/6957016/detect-django-testing-mode
"""
return hasattr(mail, 'outbox')
def schema_exists(schema_name):
_connection = connections[get_tenant_database_alias()]
cursor = _connection.cursor()
# check if this schema already exists in the db
sql = 'SELECT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace WHERE LOWER(nspname) = LOWER(%s))'
cursor.execute(sql, (schema_name, ))
row = cursor.fetchone()
if row:
exists = row[0]
else:
exists = False
cursor.close()
return exists
def app_labels(apps_list):
"""
Returns a list of app labels of the given apps_list
"""
return [app.split('.')[-1] for app in apps_list]
def parse_tenant_config_path(config_path):
"""
Convenience function for parsing django-tenants' path configuration strings.
If the string contains '%s', then the current tenant's schema name will be inserted at that location. Otherwise
the schema name will be appended to the end of the string.
:param config_path: A configuration path string that optionally contains '%s' to indicate where the tenant
schema name should be inserted.
:return: The formatted string containing the schema name
"""
try:
# Insert schema name
return config_path % connection.schema_name
except (TypeError, ValueError):
# No %s in string; append schema name at the end
return os.path.join(config_path, connection.schema_name)
|
tomturner/django-tenants | django_tenants/utils.py | parse_tenant_config_path | python | def parse_tenant_config_path(config_path):
try:
# Insert schema name
return config_path % connection.schema_name
except (TypeError, ValueError):
# No %s in string; append schema name at the end
return os.path.join(config_path, connection.schema_name) | Convenience function for parsing django-tenants' path configuration strings.
If the string contains '%s', then the current tenant's schema name will be inserted at that location. Otherwise
the schema name will be appended to the end of the string.
:param config_path: A configuration path string that optionally contains '%s' to indicate where the tenant
schema name should be inserted.
:return: The formatted string containing the schema name | train | https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/django_tenants/utils.py#L165-L182 | null | import os
from contextlib import ContextDecorator
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import connections, DEFAULT_DB_ALIAS, connection
try:
from django.apps import apps
get_model = apps.get_model
except ImportError:
from django.db.models.loading import get_model
from django.core import mail
def get_tenant_model():
return get_model(settings.TENANT_MODEL)
def get_tenant_domain_model():
return get_model(settings.TENANT_DOMAIN_MODEL)
def get_tenant_database_alias():
return getattr(settings, 'TENANT_DB_ALIAS', DEFAULT_DB_ALIAS)
def get_public_schema_name():
return getattr(settings, 'PUBLIC_SCHEMA_NAME', 'public')
def get_limit_set_calls():
return getattr(settings, 'TENANT_LIMIT_SET_CALLS', False)
def get_creation_fakes_migrations():
"""
If TENANT_CREATION_FAKES_MIGRATIONS, tenants will be created by cloning an
existing schema specified by TENANT_CLONE_BASE.
"""
faked = getattr(settings, 'TENANT_CREATION_FAKES_MIGRATIONS', False)
if faked:
if not getattr(settings, 'TENANT_BASE_SCHEMA', False):
raise ImproperlyConfigured(
'You must specify a schema name in TENANT_BASE_SCHEMA if '
'TENANT_CREATION_FAKES_MIGRATIONS is enabled.'
)
return faked
def get_tenant_base_schema():
"""
If TENANT_CREATION_FAKES_MIGRATIONS, tenants will be created by cloning an
existing schema specified by TENANT_CLONE_BASE.
"""
schema = getattr(settings, 'TENANT_BASE_SCHEMA', False)
if schema:
if not getattr(settings, 'TENANT_CREATION_FAKES_MIGRATIONS', False):
raise ImproperlyConfigured(
'TENANT_CREATION_FAKES_MIGRATIONS setting must be True to use '
'TENANT_BASE_SCHEMA for cloning.'
)
return schema
class schema_context(ContextDecorator):
def __init__(self, *args, **kwargs):
self.schema_name = args[0]
super().__init__()
def __enter__(self):
self.connection = connections[get_tenant_database_alias()]
self.previous_tenant = connection.tenant
self.connection.set_schema(self.schema_name)
def __exit__(self, *exc):
if self.previous_tenant is None:
self.connection.set_schema_to_public()
else:
self.connection.set_tenant(self.previous_tenant)
class tenant_context(ContextDecorator):
def __init__(self, *args, **kwargs):
self.tenant = args[0]
super().__init__()
def __enter__(self):
self.connection = connections[get_tenant_database_alias()]
self.previous_tenant = connection.tenant
self.connection.set_tenant(self.tenant)
def __exit__(self, *exc):
if self.previous_tenant is None:
self.connection.set_schema_to_public()
else:
self.connection.set_tenant(self.previous_tenant)
def clean_tenant_url(url_string):
"""
Removes the TENANT_TOKEN from a particular string
"""
if hasattr(settings, 'PUBLIC_SCHEMA_URLCONF'):
if (settings.PUBLIC_SCHEMA_URLCONF and
url_string.startswith(settings.PUBLIC_SCHEMA_URLCONF)):
url_string = url_string[len(settings.PUBLIC_SCHEMA_URLCONF):]
return url_string
def remove_www_and_dev(hostname):
"""
Legacy function - just in case someone is still using the old name
"""
return remove_www(hostname)
def remove_www(hostname):
"""
Removes www. from the beginning of the address. Only for
routing purposes. www.test.com/login/ and test.com/login/ should
find the same tenant.
"""
if hostname.startswith("www."):
return hostname[4:]
return hostname
def django_is_in_test_mode():
"""
I know this is very ugly! I'm looking for more elegant solutions.
See: http://stackoverflow.com/questions/6957016/detect-django-testing-mode
"""
return hasattr(mail, 'outbox')
def schema_exists(schema_name):
_connection = connections[get_tenant_database_alias()]
cursor = _connection.cursor()
# check if this schema already exists in the db
sql = 'SELECT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace WHERE LOWER(nspname) = LOWER(%s))'
cursor.execute(sql, (schema_name, ))
row = cursor.fetchone()
if row:
exists = row[0]
else:
exists = False
cursor.close()
return exists
def app_labels(apps_list):
"""
Returns a list of app labels of the given apps_list
"""
return [app.split('.')[-1] for app in apps_list]
|
tomturner/django-tenants | django_tenants/clone.py | CloneSchema._create_clone_schema_function | python | def _create_clone_schema_function(self):
cursor = connection.cursor()
cursor.execute(CLONE_SCHEMA_FUNCTION)
cursor.close() | Creates a postgres function `clone_schema` that copies a schema and its
contents. Will replace any existing `clone_schema` functions owned by the
`postgres` superuser. | train | https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/django_tenants/clone.py#L203-L211 | null | class CloneSchema(object):
def clone_schema(self, base_schema_name, new_schema_name):
"""
Creates a new schema `new_schema_name` as a clone of an existing schema
`old_schema_name`.
"""
connection.set_schema_to_public()
cursor = connection.cursor()
# check if the clone_schema function already exists in the db
try:
cursor.execute("SELECT 'clone_schema'::regproc")
except ProgrammingError:
self._create_clone_schema_function()
transaction.commit()
sql = 'SELECT clone_schema(%(base_schema)s, %(new_schema)s, TRUE)'
cursor.execute(
sql,
{'base_schema': base_schema_name, 'new_schema': new_schema_name}
)
cursor.close()
|
tomturner/django-tenants | django_tenants/clone.py | CloneSchema.clone_schema | python | def clone_schema(self, base_schema_name, new_schema_name):
connection.set_schema_to_public()
cursor = connection.cursor()
# check if the clone_schema function already exists in the db
try:
cursor.execute("SELECT 'clone_schema'::regproc")
except ProgrammingError:
self._create_clone_schema_function()
transaction.commit()
sql = 'SELECT clone_schema(%(base_schema)s, %(new_schema)s, TRUE)'
cursor.execute(
sql,
{'base_schema': base_schema_name, 'new_schema': new_schema_name}
)
cursor.close() | Creates a new schema `new_schema_name` as a clone of an existing schema
`old_schema_name`. | train | https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/django_tenants/clone.py#L213-L233 | [
"def _create_clone_schema_function(self):\n \"\"\"\n Creates a postgres function `clone_schema` that copies a schema and its\n contents. Will replace any existing `clone_schema` functions owned by the\n `postgres` superuser.\n \"\"\"\n cursor = connection.cursor()\n cursor.execute(CLONE_SCHEMA_... | class CloneSchema(object):
def _create_clone_schema_function(self):
"""
Creates a postgres function `clone_schema` that copies a schema and its
contents. Will replace any existing `clone_schema` functions owned by the
`postgres` superuser.
"""
cursor = connection.cursor()
cursor.execute(CLONE_SCHEMA_FUNCTION)
cursor.close()
|
tomturner/django-tenants | fabfile.py | install_database | python | def install_database(name, owner, template='template0', encoding='UTF8', locale='en_US.UTF-8'):
create_database(name, owner, template=template, encoding=encoding,
locale=locale) | Require a PostgreSQL database.
::
from fabtools import require
require.postgres.database('myapp', owner='dbuser') | train | https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/fabfile.py#L67-L81 | null | from fabric.decorators import task, hosts
from fabric.api import env, run, sudo
from fabtools.postgres import drop_database
from fabtools.vagrant import vagrant
from fabtools.deb import update_index
import fabtools
from fabric.context_managers import cd
from fabtools.require.postgres import create_database
@task
def vagrant():
env.user = 'vagrant'
env.hosts = ['127.0.0.1:2020']
env.passwords = {'vagrant@127.0.0.1:2020': 'vagrant'}
env.psql_db = 'tenant_tutorial'
env.psql_user = 'tenant_tutorial'
env.psql_password = 'qwerty'
env.backup_path = '/vagrant/database_backup/'
env.user = 'vagrant'
env.deploy_user = 'vagrant'
env.passwords = {'vagrant@127.0.0.1:2020': 'vagrant'}
env.vagrant = True
return env.hosts
@task
def provision_vagrant():
vagrant()
update_index()
# fabtools.require.postfix.server('example.com')
update_requirements()
create_pg_database()
django_manage("migrate")
django_migrate()
@task
def create_superuser():
django_manage("createsuperuser")
@task
def django_manage(command):
with cd("/vagrant/examples/tenant_tutorial/"):
run("python3 manage.py %s" % command)
def update_requirements():
fabtools.require.deb.packages(['python3',
'python-virtualenv',
'python3-dev',
'python3-setuptools',
'libffi-dev',
'libxslt1-dev',
'python3-pip',
'python3-psycopg2',
'git',
'libboost-python1.58.0',
'pkg-config',
'postgresql-server-dev-9.5',
'postgresql-contrib',
])
sudo("pip3 install django==2.1.5")
@task
def create_pg_database():
# fabtools.require.postgres.server()
fabtools.require.postgres.user(env.psql_user, env.psql_password, createdb=True)
# fabtools.require.postgres.database(env.psql_db, env.psql_user)
sudo("sed -i 's/all peer/all"
" md5/g' /etc/postgresql/9.5/main/pg_hba.conf")
sudo('service postgresql restart')
install_database(env.psql_db, env.psql_user)
@task
def reset_database():
sudo('service postgresql restart')
try:
drop_database(env.psql_db)
except:
pass
create_pg_database()
django_migrate()
def django_migrate():
django_manage("migrate_schemas")
@task
def create_tenant():
django_manage("create_tenant")
@task
def runserver():
django_manage("runserver 0.0.0.0:8088")
|
tomturner/django-tenants | django_tenants/template/loaders/filesystem.py | Loader.dirs | python | def dirs(self):
if self._dirs.get(connection.schema_name, None) is None:
try:
# Use directories configured via MULTITENANT_TEMPLATE_DIRS
dirs = [
utils.parse_tenant_config_path(dir_)
for dir_ in settings.MULTITENANT_TEMPLATE_DIRS
]
except AttributeError:
raise ImproperlyConfigured(
"To use {}.{} you must define the MULTITENANT_TEMPLATE_DIRS setting.".format(
__name__, Loader.__name__
)
)
self.dirs = dirs
return self._dirs[connection.schema_name] | Lazy retrieval of list of template directories based on current tenant schema.
:return: The list of template file dirs that have been configured for this tenant. | train | https://github.com/tomturner/django-tenants/blob/f3e06e2b0facee7ed797e5694bcac433df3e5315/django_tenants/template/loaders/filesystem.py#L24-L45 | null | class Loader(BaseLoader):
def __init__(self, engine, dirs=None):
self._dirs = {}
super().__init__(engine)
if dirs is not None:
self.dirs = dirs
@property
@dirs.setter
def dirs(self, value):
self._dirs[connection.schema_name] = value
|
awkman/pywifi | pywifi/wifi.py | PyWiFi.interfaces | python | def interfaces(self):
self._ifaces = []
wifi_ctrl = wifiutil.WifiUtil()
for interface in wifi_ctrl.interfaces():
iface = Interface(interface)
self._ifaces.append(iface)
self._logger.info("Get interface: %s", iface.name())
if not self._ifaces:
self._logger.error("Can't get wifi interface")
return self._ifaces | Collect the available wlan interfaces. | train | https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/wifi.py#L36-L50 | [
"def interfaces(self):\n \"\"\"Get the wifi interface lists.\"\"\"\n\n ifaces = []\n\n if self._wlan_open_handle(CLIENT_VERSION,\n byref(self._nego_version),\n byref(self._handle)) \\\n is not ERROR_SUCCESS:\n self._logger.error(\"Open ... | class PyWiFi:
"""PyWiFi provides operations to manipulate wifi devices."""
_ifaces = []
_logger = None
def __init__(self):
self._logger = logging.getLogger('pywifi')
|
awkman/pywifi | pywifi/_wifiutil_win.py | WifiUtil.scan_results | python | def scan_results(self, obj):
avail_network_list = pointer(WLAN_AVAILABLE_NETWORK_LIST())
self._wlan_get_available_network_list(self._handle,
byref(obj['guid']), byref(avail_network_list))
networks = cast(avail_network_list.contents.Network,
POINTER(WLAN_AVAILABLE_NETWORK))
self._logger.debug("Scan found %d networks.",
avail_network_list.contents.dwNumberOfItems)
network_list = []
for i in range(avail_network_list.contents.dwNumberOfItems):
if networks[i].dot11BssType == 1 and networks[i].bNetworkConnectable :
ssid = ''
for j in range(networks[i].dot11Ssid.uSSIDLength):
if networks[i].dot11Ssid.ucSSID != b'':
ssid += "%c" % networks[i].dot11Ssid.ucSSID[j]
bss_list = pointer(WLAN_BSS_LIST())
self._wlan_get_network_bss_list(self._handle,
byref(obj['guid']), byref(bss_list), networks[i].dot11Ssid, networks[i].bSecurityEnabled)
bsses = cast(bss_list.contents.wlanBssEntries,
POINTER(WLAN_BSS_ENTRY))
if networks[i].bSecurityEnabled:
akm = self._get_akm(networks[i].dot11DefaultCipherAlgorithm)
auth_alg = self._get_auth_alg(networks[i].dot11DefaultAuthAlgorithm)
else:
akm = [AKM_TYPE_NONE]
auth_alg = [AUTH_ALG_OPEN]
for j in range(bss_list.contents.dwNumberOfItems):
network = Profile()
network.ssid = ssid
network.bssid = ''
for k in range(6):
network.bssid += "%02x:" % bsses[j].dot11Bssid[k]
network.signal = bsses[j].lRssi
network.freq = bsses[j].ulChCenterFrequency
network.auth = auth_alg
network.akm = akm
network_list.append(network)
return network_list | Get the AP list after scanning. | train | https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/_wifiutil_win.py#L258-L309 | [
"def _wlan_get_available_network_list(self, handle,\n iface_guid,\n network_list):\n\n func = native_wifi.WlanGetAvailableNetworkList\n func.argtypes = [HANDLE, POINTER(GUID), DWORD, c_void_p, POINTER(\n POINTER(WLAN_AVAILABLE_NETW... | class WifiUtil():
"""WifiUtil implements the wifi functions in Windows."""
_nego_version = DWORD()
_handle = HANDLE()
_ifaces = pointer(WLAN_INTERFACE_INFO_LIST())
_logger = logging.getLogger('pywifi')
def scan(self, obj):
"""Trigger the wifi interface to scan."""
self._wlan_scan(self._handle, byref(obj['guid']))
def connect(self, obj, params):
"""Connect to the specified AP."""
connect_params = WLAN_CONNECTION_PARAMETERS()
connect_params.wlanConnectionMode = 0 # Profile
connect_params.dot11BssType = 1 # infra
profile_name = create_unicode_buffer(params.ssid)
connect_params.strProfile = profile_name.value
ret = self._wlan_connect(
self._handle, obj['guid'], byref(connect_params))
self._logger.debug('connect result: %d', ret)
def disconnect(self, obj):
"""Disconnect to the specified AP."""
self._wlan_disconnect(self._handle, obj['guid'])
def add_network_profile(self, obj, params):
"""Add an AP profile for connecting to afterward."""
reason_code = DWORD()
params.process_akm()
profile_data = {}
profile_data['ssid'] = params.ssid
if AKM_TYPE_NONE in params.akm:
profile_data['auth'] = auth_value_to_str_dict[params.auth]
profile_data['encrypt'] = "none"
else:
profile_data['auth'] = akm_value_to_str_dict[params.akm[-1]]
profile_data['encrypt'] = cipher_value_to_str_dict[params.cipher]
profile_data['key'] = params.key
profile_data['protected'] = 'false'
profile_data['profile_name'] = params.ssid
xml = """<?xml version="1.0"?>
<WLANProfile xmlns="http://www.microsoft.com/networking/WLAN/profile/v1">
<name>{profile_name}</name>
<SSIDConfig>
<SSID>
<name>{ssid}</name>
</SSID>
</SSIDConfig>
<connectionType>ESS</connectionType>
<connectionMode>manual</connectionMode>
<MSM>
<security>
<authEncryption>
<authentication>{auth}</authentication>
<encryption>{encrypt}</encryption>
<useOneX>false</useOneX>
</authEncryption>
"""
if AKM_TYPE_NONE not in params.akm:
xml += """<sharedKey>
<keyType>passPhrase</keyType>
<protected>{protected}</protected>
<keyMaterial>{key}</keyMaterial>
</sharedKey>"""
xml += """
</security>
</MSM>"""
xml += """<MacRandomization xmlns="http://www.microsoft.com/networking/WLAN/profile/v3">
<enableRandomization>false</enableRandomization>
</MacRandomization>
</WLANProfile>
"""
xml = xml.format(**profile_data)
status = self._wlan_set_profile(self._handle, obj['guid'], xml,
True, byref(reason_code))
if status != ERROR_SUCCESS:
self._logger.debug("Status %d: Add profile failed", status)
buf_size = DWORD(64)
buf = create_unicode_buffer(64)
self._wlan_reason_code_to_str(reason_code, buf_size, buf)
return params
def network_profile_name_list(self, obj):
"""Get AP profile names."""
profile_list = pointer(WLAN_PROFILE_INFO_LIST())
self._wlan_get_profile_list(self._handle,
byref(obj['guid']),
byref(profile_list))
profiles = cast(profile_list.contents.ProfileInfo,
POINTER(WLAN_PROFILE_INFO))
profile_name_list = []
for i in range(profile_list.contents.dwNumberOfItems):
profile_name = ''
for j in range(len(profiles[i].strProfileName)):
profile_name += profiles[i].strProfileName[j]
profile_name_list.append(profile_name)
return profile_name_list
def network_profiles(self, obj):
"""Get AP profiles."""
profile_name_list = self.network_profile_name_list(obj)
profile_list = []
for profile_name in profile_name_list:
profile = Profile()
flags = DWORD()
access = DWORD()
xml = LPWSTR()
self._wlan_get_profile(self._handle, obj['guid'],
profile_name, byref(xml), byref(flags),
byref(access))
# fill profile info
profile.ssid = re.search(r'<name>(.*)</name>', xml.value).group(1)
auth = re.search(r'<authentication>(.*)</authentication>',
xml.value).group(1).upper()
profile.akm = []
if auth not in akm_str_to_value_dict:
if auth not in auth_str_to_value_dict:
profile.auth = AUTH_ALG_OPEN
else:
profile.auth = auth_str_to_value_dict[auth]
profile.akm.append(AKM_TYPE_NONE)
else:
profile.auth = AUTH_ALG_OPEN
profile.akm.append(akm_str_to_value_dict[auth])
profile_list.append(profile)
return profile_list
def remove_network_profile(self, obj, params):
"""Remove the specified AP profile."""
self._logger.debug("delete profile: %s", params.ssid)
str_buf = create_unicode_buffer(params.ssid)
ret = self._wlan_delete_profile(self._handle, obj['guid'], str_buf)
self._logger.debug("delete result %d", ret)
def remove_all_network_profiles(self, obj):
"""Remove all the AP profiles."""
profile_name_list = self.network_profile_name_list(obj)
for profile_name in profile_name_list:
self._logger.debug("delete profile: %s", profile_name)
str_buf = create_unicode_buffer(profile_name)
ret = self._wlan_delete_profile(self._handle, obj['guid'], str_buf)
self._logger.debug("delete result %d", ret)
def status(self, obj):
"""Get the wifi interface status."""
data_size = DWORD()
data = PDWORD()
opcode_value_type = DWORD()
self._wlan_query_interface(self._handle, obj['guid'], 6,
byref(data_size), byref(data),
byref(opcode_value_type))
return status_dict[data.contents.value]
def interfaces(self):
"""Get the wifi interface lists."""
ifaces = []
if self._wlan_open_handle(CLIENT_VERSION,
byref(self._nego_version),
byref(self._handle)) \
is not ERROR_SUCCESS:
self._logger.error("Open handle failed!")
if self._wlan_enum_interfaces(self._handle, byref(self._ifaces)) \
is not ERROR_SUCCESS:
self._logger.error("Enum interface failed!")
interfaces = cast(self._ifaces.contents.InterfaceInfo,
POINTER(WLAN_INTERFACE_INFO))
for i in range(0, self._ifaces.contents.dwNumberOfItems):
iface = {}
iface['guid'] = interfaces[i].InterfaceGuid
iface['name'] = interfaces[i].strInterfaceDescription
ifaces.append(iface)
return ifaces
def _wlan_open_handle(self, client_version, _nego_version, handle):
func = native_wifi.WlanOpenHandle
func.argtypes = [DWORD, c_void_p, POINTER(DWORD), POINTER(HANDLE)]
func.restypes = [DWORD]
return func(client_version, None, _nego_version, handle)
def _wlan_close_handle(self, handle):
func = native_wifi.WlanCloseHandle
func.argtypes = [HANDLE, c_void_p]
func.restypes = [DWORD]
return func(handle, None)
def _wlan_enum_interfaces(self, handle, ifaces):
func = native_wifi.WlanEnumInterfaces
func.argtypes = [HANDLE, c_void_p, POINTER(
POINTER(WLAN_INTERFACE_INFO_LIST))]
func.restypes = [DWORD]
return func(handle, None, ifaces)
def _wlan_get_available_network_list(self, handle,
iface_guid,
network_list):
func = native_wifi.WlanGetAvailableNetworkList
func.argtypes = [HANDLE, POINTER(GUID), DWORD, c_void_p, POINTER(
POINTER(WLAN_AVAILABLE_NETWORK_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, 2, None, network_list)
def _wlan_get_network_bss_list(self, handle, iface_guid, bss_list, ssid = None, security = False):
func = native_wifi.WlanGetNetworkBssList
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
DOT11_SSID), c_uint, c_bool, c_void_p, POINTER(POINTER(WLAN_BSS_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, ssid, 1, security, None, bss_list)
def _wlan_scan(self, handle, iface_guid):
func = native_wifi.WlanScan
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
DOT11_SSID), POINTER(WLAN_RAW_DATA), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, None, None, None)
def _wlan_connect(self, handle, iface_guid, params):
func = native_wifi.WlanConnect
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
WLAN_CONNECTION_PARAMETERS), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, params, None)
def _wlan_set_profile(self, handle, iface_guid, xml, overwrite, reason_code):
func = native_wifi.WlanSetProfile
func.argtypes = [HANDLE, POINTER(
GUID), DWORD, c_wchar_p, c_wchar_p, c_bool, c_void_p, POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, 2, xml, None, overwrite, None, reason_code)
def _wlan_reason_code_to_str(self, reason_code, buf_size, buf):
func = native_wifi.WlanReasonCodeToString
func.argtypes = [DWORD, DWORD, PWCHAR, c_void_p]
func.restypes = [DWORD]
return func(reason_code, buf_size, buf, None)
def _wlan_get_profile_list(self, handle, iface_guid, profile_list):
func = native_wifi.WlanGetProfileList
func.argtypes = [HANDLE, POINTER(GUID), c_void_p, POINTER(
POINTER(WLAN_PROFILE_INFO_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, None, profile_list)
def _wlan_get_profile(self, handle, iface_guid, profile_name, xml, flags, access):
func = native_wifi.WlanGetProfile
func.argtypes = [HANDLE, POINTER(GUID), c_wchar_p, c_void_p, POINTER(
c_wchar_p), POINTER(DWORD), POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, profile_name, None, xml, flags, access)
def _wlan_delete_profile(self, handle, iface_guid, profile_name):
func = native_wifi.WlanDeleteProfile
func.argtypes = [HANDLE, POINTER(GUID), c_wchar_p, c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, profile_name, None)
def _wlan_query_interface(self, handle, iface_guid, opcode, data_size, data, opcode_value_type):
func = native_wifi.WlanQueryInterface
func.argtypes = [HANDLE, POINTER(GUID), DWORD, c_void_p, POINTER(
DWORD), POINTER(POINTER(DWORD)), POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, opcode, None, data_size, data, opcode_value_type)
def _wlan_disconnect(self, handle, iface_guid):
func = native_wifi.WlanDisconnect
func.argtypes = [HANDLE, POINTER(GUID), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, None)
def _get_auth_alg(self, auth_val):
auth_alg = []
if auth_val in [1, 3, 4, 6, 7]:
auth_alg.append(AUTH_ALG_OPEN)
elif auth_val == 2:
auth_alg.append(AUTH_ALG_SHARED)
return auth_alg
def _get_akm(self, akm_val):
akm = []
if akm_val == 2:
akm.append(AKM_TYPE_WPAPSK)
elif akm_val == 4:
akm.append(AKM_TYPE_WPA2PSK)
return akm
|
awkman/pywifi | pywifi/_wifiutil_win.py | WifiUtil.connect | python | def connect(self, obj, params):
connect_params = WLAN_CONNECTION_PARAMETERS()
connect_params.wlanConnectionMode = 0 # Profile
connect_params.dot11BssType = 1 # infra
profile_name = create_unicode_buffer(params.ssid)
connect_params.strProfile = profile_name.value
ret = self._wlan_connect(
self._handle, obj['guid'], byref(connect_params))
self._logger.debug('connect result: %d', ret) | Connect to the specified AP. | train | https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/_wifiutil_win.py#L311-L322 | [
"def _wlan_connect(self, handle, iface_guid, params):\n\n func = native_wifi.WlanConnect\n func.argtypes = [HANDLE, POINTER(GUID), POINTER(\n WLAN_CONNECTION_PARAMETERS), c_void_p]\n func.restypes = [DWORD]\n return func(handle, iface_guid, params, None)\n"
] | class WifiUtil():
"""WifiUtil implements the wifi functions in Windows."""
_nego_version = DWORD()
_handle = HANDLE()
_ifaces = pointer(WLAN_INTERFACE_INFO_LIST())
_logger = logging.getLogger('pywifi')
def scan(self, obj):
"""Trigger the wifi interface to scan."""
self._wlan_scan(self._handle, byref(obj['guid']))
def scan_results(self, obj):
"""Get the AP list after scanning."""
avail_network_list = pointer(WLAN_AVAILABLE_NETWORK_LIST())
self._wlan_get_available_network_list(self._handle,
byref(obj['guid']), byref(avail_network_list))
networks = cast(avail_network_list.contents.Network,
POINTER(WLAN_AVAILABLE_NETWORK))
self._logger.debug("Scan found %d networks.",
avail_network_list.contents.dwNumberOfItems)
network_list = []
for i in range(avail_network_list.contents.dwNumberOfItems):
if networks[i].dot11BssType == 1 and networks[i].bNetworkConnectable :
ssid = ''
for j in range(networks[i].dot11Ssid.uSSIDLength):
if networks[i].dot11Ssid.ucSSID != b'':
ssid += "%c" % networks[i].dot11Ssid.ucSSID[j]
bss_list = pointer(WLAN_BSS_LIST())
self._wlan_get_network_bss_list(self._handle,
byref(obj['guid']), byref(bss_list), networks[i].dot11Ssid, networks[i].bSecurityEnabled)
bsses = cast(bss_list.contents.wlanBssEntries,
POINTER(WLAN_BSS_ENTRY))
if networks[i].bSecurityEnabled:
akm = self._get_akm(networks[i].dot11DefaultCipherAlgorithm)
auth_alg = self._get_auth_alg(networks[i].dot11DefaultAuthAlgorithm)
else:
akm = [AKM_TYPE_NONE]
auth_alg = [AUTH_ALG_OPEN]
for j in range(bss_list.contents.dwNumberOfItems):
network = Profile()
network.ssid = ssid
network.bssid = ''
for k in range(6):
network.bssid += "%02x:" % bsses[j].dot11Bssid[k]
network.signal = bsses[j].lRssi
network.freq = bsses[j].ulChCenterFrequency
network.auth = auth_alg
network.akm = akm
network_list.append(network)
return network_list
def disconnect(self, obj):
"""Disconnect to the specified AP."""
self._wlan_disconnect(self._handle, obj['guid'])
def add_network_profile(self, obj, params):
"""Add an AP profile for connecting to afterward."""
reason_code = DWORD()
params.process_akm()
profile_data = {}
profile_data['ssid'] = params.ssid
if AKM_TYPE_NONE in params.akm:
profile_data['auth'] = auth_value_to_str_dict[params.auth]
profile_data['encrypt'] = "none"
else:
profile_data['auth'] = akm_value_to_str_dict[params.akm[-1]]
profile_data['encrypt'] = cipher_value_to_str_dict[params.cipher]
profile_data['key'] = params.key
profile_data['protected'] = 'false'
profile_data['profile_name'] = params.ssid
xml = """<?xml version="1.0"?>
<WLANProfile xmlns="http://www.microsoft.com/networking/WLAN/profile/v1">
<name>{profile_name}</name>
<SSIDConfig>
<SSID>
<name>{ssid}</name>
</SSID>
</SSIDConfig>
<connectionType>ESS</connectionType>
<connectionMode>manual</connectionMode>
<MSM>
<security>
<authEncryption>
<authentication>{auth}</authentication>
<encryption>{encrypt}</encryption>
<useOneX>false</useOneX>
</authEncryption>
"""
if AKM_TYPE_NONE not in params.akm:
xml += """<sharedKey>
<keyType>passPhrase</keyType>
<protected>{protected}</protected>
<keyMaterial>{key}</keyMaterial>
</sharedKey>"""
xml += """
</security>
</MSM>"""
xml += """<MacRandomization xmlns="http://www.microsoft.com/networking/WLAN/profile/v3">
<enableRandomization>false</enableRandomization>
</MacRandomization>
</WLANProfile>
"""
xml = xml.format(**profile_data)
status = self._wlan_set_profile(self._handle, obj['guid'], xml,
True, byref(reason_code))
if status != ERROR_SUCCESS:
self._logger.debug("Status %d: Add profile failed", status)
buf_size = DWORD(64)
buf = create_unicode_buffer(64)
self._wlan_reason_code_to_str(reason_code, buf_size, buf)
return params
def network_profile_name_list(self, obj):
"""Get AP profile names."""
profile_list = pointer(WLAN_PROFILE_INFO_LIST())
self._wlan_get_profile_list(self._handle,
byref(obj['guid']),
byref(profile_list))
profiles = cast(profile_list.contents.ProfileInfo,
POINTER(WLAN_PROFILE_INFO))
profile_name_list = []
for i in range(profile_list.contents.dwNumberOfItems):
profile_name = ''
for j in range(len(profiles[i].strProfileName)):
profile_name += profiles[i].strProfileName[j]
profile_name_list.append(profile_name)
return profile_name_list
def network_profiles(self, obj):
"""Get AP profiles."""
profile_name_list = self.network_profile_name_list(obj)
profile_list = []
for profile_name in profile_name_list:
profile = Profile()
flags = DWORD()
access = DWORD()
xml = LPWSTR()
self._wlan_get_profile(self._handle, obj['guid'],
profile_name, byref(xml), byref(flags),
byref(access))
# fill profile info
profile.ssid = re.search(r'<name>(.*)</name>', xml.value).group(1)
auth = re.search(r'<authentication>(.*)</authentication>',
xml.value).group(1).upper()
profile.akm = []
if auth not in akm_str_to_value_dict:
if auth not in auth_str_to_value_dict:
profile.auth = AUTH_ALG_OPEN
else:
profile.auth = auth_str_to_value_dict[auth]
profile.akm.append(AKM_TYPE_NONE)
else:
profile.auth = AUTH_ALG_OPEN
profile.akm.append(akm_str_to_value_dict[auth])
profile_list.append(profile)
return profile_list
def remove_network_profile(self, obj, params):
"""Remove the specified AP profile."""
self._logger.debug("delete profile: %s", params.ssid)
str_buf = create_unicode_buffer(params.ssid)
ret = self._wlan_delete_profile(self._handle, obj['guid'], str_buf)
self._logger.debug("delete result %d", ret)
def remove_all_network_profiles(self, obj):
"""Remove all the AP profiles."""
profile_name_list = self.network_profile_name_list(obj)
for profile_name in profile_name_list:
self._logger.debug("delete profile: %s", profile_name)
str_buf = create_unicode_buffer(profile_name)
ret = self._wlan_delete_profile(self._handle, obj['guid'], str_buf)
self._logger.debug("delete result %d", ret)
def status(self, obj):
"""Get the wifi interface status."""
data_size = DWORD()
data = PDWORD()
opcode_value_type = DWORD()
self._wlan_query_interface(self._handle, obj['guid'], 6,
byref(data_size), byref(data),
byref(opcode_value_type))
return status_dict[data.contents.value]
def interfaces(self):
"""Get the wifi interface lists."""
ifaces = []
if self._wlan_open_handle(CLIENT_VERSION,
byref(self._nego_version),
byref(self._handle)) \
is not ERROR_SUCCESS:
self._logger.error("Open handle failed!")
if self._wlan_enum_interfaces(self._handle, byref(self._ifaces)) \
is not ERROR_SUCCESS:
self._logger.error("Enum interface failed!")
interfaces = cast(self._ifaces.contents.InterfaceInfo,
POINTER(WLAN_INTERFACE_INFO))
for i in range(0, self._ifaces.contents.dwNumberOfItems):
iface = {}
iface['guid'] = interfaces[i].InterfaceGuid
iface['name'] = interfaces[i].strInterfaceDescription
ifaces.append(iface)
return ifaces
def _wlan_open_handle(self, client_version, _nego_version, handle):
func = native_wifi.WlanOpenHandle
func.argtypes = [DWORD, c_void_p, POINTER(DWORD), POINTER(HANDLE)]
func.restypes = [DWORD]
return func(client_version, None, _nego_version, handle)
def _wlan_close_handle(self, handle):
func = native_wifi.WlanCloseHandle
func.argtypes = [HANDLE, c_void_p]
func.restypes = [DWORD]
return func(handle, None)
def _wlan_enum_interfaces(self, handle, ifaces):
func = native_wifi.WlanEnumInterfaces
func.argtypes = [HANDLE, c_void_p, POINTER(
POINTER(WLAN_INTERFACE_INFO_LIST))]
func.restypes = [DWORD]
return func(handle, None, ifaces)
def _wlan_get_available_network_list(self, handle,
iface_guid,
network_list):
func = native_wifi.WlanGetAvailableNetworkList
func.argtypes = [HANDLE, POINTER(GUID), DWORD, c_void_p, POINTER(
POINTER(WLAN_AVAILABLE_NETWORK_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, 2, None, network_list)
def _wlan_get_network_bss_list(self, handle, iface_guid, bss_list, ssid = None, security = False):
func = native_wifi.WlanGetNetworkBssList
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
DOT11_SSID), c_uint, c_bool, c_void_p, POINTER(POINTER(WLAN_BSS_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, ssid, 1, security, None, bss_list)
def _wlan_scan(self, handle, iface_guid):
func = native_wifi.WlanScan
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
DOT11_SSID), POINTER(WLAN_RAW_DATA), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, None, None, None)
def _wlan_connect(self, handle, iface_guid, params):
func = native_wifi.WlanConnect
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
WLAN_CONNECTION_PARAMETERS), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, params, None)
def _wlan_set_profile(self, handle, iface_guid, xml, overwrite, reason_code):
func = native_wifi.WlanSetProfile
func.argtypes = [HANDLE, POINTER(
GUID), DWORD, c_wchar_p, c_wchar_p, c_bool, c_void_p, POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, 2, xml, None, overwrite, None, reason_code)
def _wlan_reason_code_to_str(self, reason_code, buf_size, buf):
func = native_wifi.WlanReasonCodeToString
func.argtypes = [DWORD, DWORD, PWCHAR, c_void_p]
func.restypes = [DWORD]
return func(reason_code, buf_size, buf, None)
def _wlan_get_profile_list(self, handle, iface_guid, profile_list):
func = native_wifi.WlanGetProfileList
func.argtypes = [HANDLE, POINTER(GUID), c_void_p, POINTER(
POINTER(WLAN_PROFILE_INFO_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, None, profile_list)
def _wlan_get_profile(self, handle, iface_guid, profile_name, xml, flags, access):
func = native_wifi.WlanGetProfile
func.argtypes = [HANDLE, POINTER(GUID), c_wchar_p, c_void_p, POINTER(
c_wchar_p), POINTER(DWORD), POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, profile_name, None, xml, flags, access)
def _wlan_delete_profile(self, handle, iface_guid, profile_name):
func = native_wifi.WlanDeleteProfile
func.argtypes = [HANDLE, POINTER(GUID), c_wchar_p, c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, profile_name, None)
def _wlan_query_interface(self, handle, iface_guid, opcode, data_size, data, opcode_value_type):
func = native_wifi.WlanQueryInterface
func.argtypes = [HANDLE, POINTER(GUID), DWORD, c_void_p, POINTER(
DWORD), POINTER(POINTER(DWORD)), POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, opcode, None, data_size, data, opcode_value_type)
def _wlan_disconnect(self, handle, iface_guid):
func = native_wifi.WlanDisconnect
func.argtypes = [HANDLE, POINTER(GUID), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, None)
def _get_auth_alg(self, auth_val):
auth_alg = []
if auth_val in [1, 3, 4, 6, 7]:
auth_alg.append(AUTH_ALG_OPEN)
elif auth_val == 2:
auth_alg.append(AUTH_ALG_SHARED)
return auth_alg
def _get_akm(self, akm_val):
akm = []
if akm_val == 2:
akm.append(AKM_TYPE_WPAPSK)
elif akm_val == 4:
akm.append(AKM_TYPE_WPA2PSK)
return akm
|
awkman/pywifi | pywifi/_wifiutil_win.py | WifiUtil.add_network_profile | python | def add_network_profile(self, obj, params):
reason_code = DWORD()
params.process_akm()
profile_data = {}
profile_data['ssid'] = params.ssid
if AKM_TYPE_NONE in params.akm:
profile_data['auth'] = auth_value_to_str_dict[params.auth]
profile_data['encrypt'] = "none"
else:
profile_data['auth'] = akm_value_to_str_dict[params.akm[-1]]
profile_data['encrypt'] = cipher_value_to_str_dict[params.cipher]
profile_data['key'] = params.key
profile_data['protected'] = 'false'
profile_data['profile_name'] = params.ssid
xml = """<?xml version="1.0"?>
<WLANProfile xmlns="http://www.microsoft.com/networking/WLAN/profile/v1">
<name>{profile_name}</name>
<SSIDConfig>
<SSID>
<name>{ssid}</name>
</SSID>
</SSIDConfig>
<connectionType>ESS</connectionType>
<connectionMode>manual</connectionMode>
<MSM>
<security>
<authEncryption>
<authentication>{auth}</authentication>
<encryption>{encrypt}</encryption>
<useOneX>false</useOneX>
</authEncryption>
"""
if AKM_TYPE_NONE not in params.akm:
xml += """<sharedKey>
<keyType>passPhrase</keyType>
<protected>{protected}</protected>
<keyMaterial>{key}</keyMaterial>
</sharedKey>"""
xml += """
</security>
</MSM>"""
xml += """<MacRandomization xmlns="http://www.microsoft.com/networking/WLAN/profile/v3">
<enableRandomization>false</enableRandomization>
</MacRandomization>
</WLANProfile>
"""
xml = xml.format(**profile_data)
status = self._wlan_set_profile(self._handle, obj['guid'], xml,
True, byref(reason_code))
if status != ERROR_SUCCESS:
self._logger.debug("Status %d: Add profile failed", status)
buf_size = DWORD(64)
buf = create_unicode_buffer(64)
self._wlan_reason_code_to_str(reason_code, buf_size, buf)
return params | Add an AP profile for connecting to afterward. | train | https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/_wifiutil_win.py#L329-L397 | [
"def _wlan_set_profile(self, handle, iface_guid, xml, overwrite, reason_code):\n\n func = native_wifi.WlanSetProfile\n func.argtypes = [HANDLE, POINTER(\n GUID), DWORD, c_wchar_p, c_wchar_p, c_bool, c_void_p, POINTER(DWORD)]\n func.restypes = [DWORD]\n return func(handle, iface_guid, 2, xml, None... | class WifiUtil():
"""WifiUtil implements the wifi functions in Windows."""
_nego_version = DWORD()
_handle = HANDLE()
_ifaces = pointer(WLAN_INTERFACE_INFO_LIST())
_logger = logging.getLogger('pywifi')
def scan(self, obj):
"""Trigger the wifi interface to scan."""
self._wlan_scan(self._handle, byref(obj['guid']))
def scan_results(self, obj):
"""Get the AP list after scanning."""
avail_network_list = pointer(WLAN_AVAILABLE_NETWORK_LIST())
self._wlan_get_available_network_list(self._handle,
byref(obj['guid']), byref(avail_network_list))
networks = cast(avail_network_list.contents.Network,
POINTER(WLAN_AVAILABLE_NETWORK))
self._logger.debug("Scan found %d networks.",
avail_network_list.contents.dwNumberOfItems)
network_list = []
for i in range(avail_network_list.contents.dwNumberOfItems):
if networks[i].dot11BssType == 1 and networks[i].bNetworkConnectable :
ssid = ''
for j in range(networks[i].dot11Ssid.uSSIDLength):
if networks[i].dot11Ssid.ucSSID != b'':
ssid += "%c" % networks[i].dot11Ssid.ucSSID[j]
bss_list = pointer(WLAN_BSS_LIST())
self._wlan_get_network_bss_list(self._handle,
byref(obj['guid']), byref(bss_list), networks[i].dot11Ssid, networks[i].bSecurityEnabled)
bsses = cast(bss_list.contents.wlanBssEntries,
POINTER(WLAN_BSS_ENTRY))
if networks[i].bSecurityEnabled:
akm = self._get_akm(networks[i].dot11DefaultCipherAlgorithm)
auth_alg = self._get_auth_alg(networks[i].dot11DefaultAuthAlgorithm)
else:
akm = [AKM_TYPE_NONE]
auth_alg = [AUTH_ALG_OPEN]
for j in range(bss_list.contents.dwNumberOfItems):
network = Profile()
network.ssid = ssid
network.bssid = ''
for k in range(6):
network.bssid += "%02x:" % bsses[j].dot11Bssid[k]
network.signal = bsses[j].lRssi
network.freq = bsses[j].ulChCenterFrequency
network.auth = auth_alg
network.akm = akm
network_list.append(network)
return network_list
def connect(self, obj, params):
"""Connect to the specified AP."""
connect_params = WLAN_CONNECTION_PARAMETERS()
connect_params.wlanConnectionMode = 0 # Profile
connect_params.dot11BssType = 1 # infra
profile_name = create_unicode_buffer(params.ssid)
connect_params.strProfile = profile_name.value
ret = self._wlan_connect(
self._handle, obj['guid'], byref(connect_params))
self._logger.debug('connect result: %d', ret)
def disconnect(self, obj):
"""Disconnect to the specified AP."""
self._wlan_disconnect(self._handle, obj['guid'])
def add_network_profile(self, obj, params):
"""Add an AP profile for connecting to afterward."""
reason_code = DWORD()
params.process_akm()
profile_data = {}
profile_data['ssid'] = params.ssid
if AKM_TYPE_NONE in params.akm:
profile_data['auth'] = auth_value_to_str_dict[params.auth]
profile_data['encrypt'] = "none"
else:
profile_data['auth'] = akm_value_to_str_dict[params.akm[-1]]
profile_data['encrypt'] = cipher_value_to_str_dict[params.cipher]
profile_data['key'] = params.key
profile_data['protected'] = 'false'
profile_data['profile_name'] = params.ssid
xml = """<?xml version="1.0"?>
<WLANProfile xmlns="http://www.microsoft.com/networking/WLAN/profile/v1">
<name>{profile_name}</name>
<SSIDConfig>
<SSID>
<name>{ssid}</name>
</SSID>
</SSIDConfig>
<connectionType>ESS</connectionType>
<connectionMode>manual</connectionMode>
<MSM>
<security>
<authEncryption>
<authentication>{auth}</authentication>
<encryption>{encrypt}</encryption>
<useOneX>false</useOneX>
</authEncryption>
"""
if AKM_TYPE_NONE not in params.akm:
xml += """<sharedKey>
<keyType>passPhrase</keyType>
<protected>{protected}</protected>
<keyMaterial>{key}</keyMaterial>
</sharedKey>"""
xml += """
</security>
</MSM>"""
xml += """<MacRandomization xmlns="http://www.microsoft.com/networking/WLAN/profile/v3">
<enableRandomization>false</enableRandomization>
</MacRandomization>
</WLANProfile>
"""
xml = xml.format(**profile_data)
status = self._wlan_set_profile(self._handle, obj['guid'], xml,
True, byref(reason_code))
if status != ERROR_SUCCESS:
self._logger.debug("Status %d: Add profile failed", status)
buf_size = DWORD(64)
buf = create_unicode_buffer(64)
self._wlan_reason_code_to_str(reason_code, buf_size, buf)
return params
def network_profile_name_list(self, obj):
"""Get AP profile names."""
profile_list = pointer(WLAN_PROFILE_INFO_LIST())
self._wlan_get_profile_list(self._handle,
byref(obj['guid']),
byref(profile_list))
profiles = cast(profile_list.contents.ProfileInfo,
POINTER(WLAN_PROFILE_INFO))
profile_name_list = []
for i in range(profile_list.contents.dwNumberOfItems):
profile_name = ''
for j in range(len(profiles[i].strProfileName)):
profile_name += profiles[i].strProfileName[j]
profile_name_list.append(profile_name)
return profile_name_list
def network_profiles(self, obj):
"""Get AP profiles."""
profile_name_list = self.network_profile_name_list(obj)
profile_list = []
for profile_name in profile_name_list:
profile = Profile()
flags = DWORD()
access = DWORD()
xml = LPWSTR()
self._wlan_get_profile(self._handle, obj['guid'],
profile_name, byref(xml), byref(flags),
byref(access))
# fill profile info
profile.ssid = re.search(r'<name>(.*)</name>', xml.value).group(1)
auth = re.search(r'<authentication>(.*)</authentication>',
xml.value).group(1).upper()
profile.akm = []
if auth not in akm_str_to_value_dict:
if auth not in auth_str_to_value_dict:
profile.auth = AUTH_ALG_OPEN
else:
profile.auth = auth_str_to_value_dict[auth]
profile.akm.append(AKM_TYPE_NONE)
else:
profile.auth = AUTH_ALG_OPEN
profile.akm.append(akm_str_to_value_dict[auth])
profile_list.append(profile)
return profile_list
def remove_network_profile(self, obj, params):
"""Remove the specified AP profile."""
self._logger.debug("delete profile: %s", params.ssid)
str_buf = create_unicode_buffer(params.ssid)
ret = self._wlan_delete_profile(self._handle, obj['guid'], str_buf)
self._logger.debug("delete result %d", ret)
def remove_all_network_profiles(self, obj):
"""Remove all the AP profiles."""
profile_name_list = self.network_profile_name_list(obj)
for profile_name in profile_name_list:
self._logger.debug("delete profile: %s", profile_name)
str_buf = create_unicode_buffer(profile_name)
ret = self._wlan_delete_profile(self._handle, obj['guid'], str_buf)
self._logger.debug("delete result %d", ret)
def status(self, obj):
"""Get the wifi interface status."""
data_size = DWORD()
data = PDWORD()
opcode_value_type = DWORD()
self._wlan_query_interface(self._handle, obj['guid'], 6,
byref(data_size), byref(data),
byref(opcode_value_type))
return status_dict[data.contents.value]
def interfaces(self):
"""Get the wifi interface lists."""
ifaces = []
if self._wlan_open_handle(CLIENT_VERSION,
byref(self._nego_version),
byref(self._handle)) \
is not ERROR_SUCCESS:
self._logger.error("Open handle failed!")
if self._wlan_enum_interfaces(self._handle, byref(self._ifaces)) \
is not ERROR_SUCCESS:
self._logger.error("Enum interface failed!")
interfaces = cast(self._ifaces.contents.InterfaceInfo,
POINTER(WLAN_INTERFACE_INFO))
for i in range(0, self._ifaces.contents.dwNumberOfItems):
iface = {}
iface['guid'] = interfaces[i].InterfaceGuid
iface['name'] = interfaces[i].strInterfaceDescription
ifaces.append(iface)
return ifaces
def _wlan_open_handle(self, client_version, _nego_version, handle):
func = native_wifi.WlanOpenHandle
func.argtypes = [DWORD, c_void_p, POINTER(DWORD), POINTER(HANDLE)]
func.restypes = [DWORD]
return func(client_version, None, _nego_version, handle)
def _wlan_close_handle(self, handle):
func = native_wifi.WlanCloseHandle
func.argtypes = [HANDLE, c_void_p]
func.restypes = [DWORD]
return func(handle, None)
def _wlan_enum_interfaces(self, handle, ifaces):
func = native_wifi.WlanEnumInterfaces
func.argtypes = [HANDLE, c_void_p, POINTER(
POINTER(WLAN_INTERFACE_INFO_LIST))]
func.restypes = [DWORD]
return func(handle, None, ifaces)
def _wlan_get_available_network_list(self, handle,
iface_guid,
network_list):
func = native_wifi.WlanGetAvailableNetworkList
func.argtypes = [HANDLE, POINTER(GUID), DWORD, c_void_p, POINTER(
POINTER(WLAN_AVAILABLE_NETWORK_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, 2, None, network_list)
def _wlan_get_network_bss_list(self, handle, iface_guid, bss_list, ssid = None, security = False):
func = native_wifi.WlanGetNetworkBssList
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
DOT11_SSID), c_uint, c_bool, c_void_p, POINTER(POINTER(WLAN_BSS_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, ssid, 1, security, None, bss_list)
def _wlan_scan(self, handle, iface_guid):
func = native_wifi.WlanScan
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
DOT11_SSID), POINTER(WLAN_RAW_DATA), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, None, None, None)
def _wlan_connect(self, handle, iface_guid, params):
func = native_wifi.WlanConnect
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
WLAN_CONNECTION_PARAMETERS), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, params, None)
def _wlan_set_profile(self, handle, iface_guid, xml, overwrite, reason_code):
func = native_wifi.WlanSetProfile
func.argtypes = [HANDLE, POINTER(
GUID), DWORD, c_wchar_p, c_wchar_p, c_bool, c_void_p, POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, 2, xml, None, overwrite, None, reason_code)
def _wlan_reason_code_to_str(self, reason_code, buf_size, buf):
func = native_wifi.WlanReasonCodeToString
func.argtypes = [DWORD, DWORD, PWCHAR, c_void_p]
func.restypes = [DWORD]
return func(reason_code, buf_size, buf, None)
def _wlan_get_profile_list(self, handle, iface_guid, profile_list):
func = native_wifi.WlanGetProfileList
func.argtypes = [HANDLE, POINTER(GUID), c_void_p, POINTER(
POINTER(WLAN_PROFILE_INFO_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, None, profile_list)
def _wlan_get_profile(self, handle, iface_guid, profile_name, xml, flags, access):
func = native_wifi.WlanGetProfile
func.argtypes = [HANDLE, POINTER(GUID), c_wchar_p, c_void_p, POINTER(
c_wchar_p), POINTER(DWORD), POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, profile_name, None, xml, flags, access)
def _wlan_delete_profile(self, handle, iface_guid, profile_name):
func = native_wifi.WlanDeleteProfile
func.argtypes = [HANDLE, POINTER(GUID), c_wchar_p, c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, profile_name, None)
def _wlan_query_interface(self, handle, iface_guid, opcode, data_size, data, opcode_value_type):
func = native_wifi.WlanQueryInterface
func.argtypes = [HANDLE, POINTER(GUID), DWORD, c_void_p, POINTER(
DWORD), POINTER(POINTER(DWORD)), POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, opcode, None, data_size, data, opcode_value_type)
def _wlan_disconnect(self, handle, iface_guid):
func = native_wifi.WlanDisconnect
func.argtypes = [HANDLE, POINTER(GUID), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, None)
def _get_auth_alg(self, auth_val):
auth_alg = []
if auth_val in [1, 3, 4, 6, 7]:
auth_alg.append(AUTH_ALG_OPEN)
elif auth_val == 2:
auth_alg.append(AUTH_ALG_SHARED)
return auth_alg
def _get_akm(self, akm_val):
akm = []
if akm_val == 2:
akm.append(AKM_TYPE_WPAPSK)
elif akm_val == 4:
akm.append(AKM_TYPE_WPA2PSK)
return akm
|
awkman/pywifi | pywifi/_wifiutil_win.py | WifiUtil.network_profile_name_list | python | def network_profile_name_list(self, obj):
profile_list = pointer(WLAN_PROFILE_INFO_LIST())
self._wlan_get_profile_list(self._handle,
byref(obj['guid']),
byref(profile_list))
profiles = cast(profile_list.contents.ProfileInfo,
POINTER(WLAN_PROFILE_INFO))
profile_name_list = []
for i in range(profile_list.contents.dwNumberOfItems):
profile_name = ''
for j in range(len(profiles[i].strProfileName)):
profile_name += profiles[i].strProfileName[j]
profile_name_list.append(profile_name)
return profile_name_list | Get AP profile names. | train | https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/_wifiutil_win.py#L399-L416 | [
"def _wlan_get_profile_list(self, handle, iface_guid, profile_list):\n\n func = native_wifi.WlanGetProfileList\n func.argtypes = [HANDLE, POINTER(GUID), c_void_p, POINTER(\n POINTER(WLAN_PROFILE_INFO_LIST))]\n func.restypes = [DWORD]\n return func(handle, iface_guid, None, profile_list)\n"
] | class WifiUtil():
"""WifiUtil implements the wifi functions in Windows."""
_nego_version = DWORD()
_handle = HANDLE()
_ifaces = pointer(WLAN_INTERFACE_INFO_LIST())
_logger = logging.getLogger('pywifi')
def scan(self, obj):
"""Trigger the wifi interface to scan."""
self._wlan_scan(self._handle, byref(obj['guid']))
def scan_results(self, obj):
"""Get the AP list after scanning."""
avail_network_list = pointer(WLAN_AVAILABLE_NETWORK_LIST())
self._wlan_get_available_network_list(self._handle,
byref(obj['guid']), byref(avail_network_list))
networks = cast(avail_network_list.contents.Network,
POINTER(WLAN_AVAILABLE_NETWORK))
self._logger.debug("Scan found %d networks.",
avail_network_list.contents.dwNumberOfItems)
network_list = []
for i in range(avail_network_list.contents.dwNumberOfItems):
if networks[i].dot11BssType == 1 and networks[i].bNetworkConnectable :
ssid = ''
for j in range(networks[i].dot11Ssid.uSSIDLength):
if networks[i].dot11Ssid.ucSSID != b'':
ssid += "%c" % networks[i].dot11Ssid.ucSSID[j]
bss_list = pointer(WLAN_BSS_LIST())
self._wlan_get_network_bss_list(self._handle,
byref(obj['guid']), byref(bss_list), networks[i].dot11Ssid, networks[i].bSecurityEnabled)
bsses = cast(bss_list.contents.wlanBssEntries,
POINTER(WLAN_BSS_ENTRY))
if networks[i].bSecurityEnabled:
akm = self._get_akm(networks[i].dot11DefaultCipherAlgorithm)
auth_alg = self._get_auth_alg(networks[i].dot11DefaultAuthAlgorithm)
else:
akm = [AKM_TYPE_NONE]
auth_alg = [AUTH_ALG_OPEN]
for j in range(bss_list.contents.dwNumberOfItems):
network = Profile()
network.ssid = ssid
network.bssid = ''
for k in range(6):
network.bssid += "%02x:" % bsses[j].dot11Bssid[k]
network.signal = bsses[j].lRssi
network.freq = bsses[j].ulChCenterFrequency
network.auth = auth_alg
network.akm = akm
network_list.append(network)
return network_list
def connect(self, obj, params):
"""Connect to the specified AP."""
connect_params = WLAN_CONNECTION_PARAMETERS()
connect_params.wlanConnectionMode = 0 # Profile
connect_params.dot11BssType = 1 # infra
profile_name = create_unicode_buffer(params.ssid)
connect_params.strProfile = profile_name.value
ret = self._wlan_connect(
self._handle, obj['guid'], byref(connect_params))
self._logger.debug('connect result: %d', ret)
def disconnect(self, obj):
"""Disconnect to the specified AP."""
self._wlan_disconnect(self._handle, obj['guid'])
def add_network_profile(self, obj, params):
"""Add an AP profile for connecting to afterward."""
reason_code = DWORD()
params.process_akm()
profile_data = {}
profile_data['ssid'] = params.ssid
if AKM_TYPE_NONE in params.akm:
profile_data['auth'] = auth_value_to_str_dict[params.auth]
profile_data['encrypt'] = "none"
else:
profile_data['auth'] = akm_value_to_str_dict[params.akm[-1]]
profile_data['encrypt'] = cipher_value_to_str_dict[params.cipher]
profile_data['key'] = params.key
profile_data['protected'] = 'false'
profile_data['profile_name'] = params.ssid
xml = """<?xml version="1.0"?>
<WLANProfile xmlns="http://www.microsoft.com/networking/WLAN/profile/v1">
<name>{profile_name}</name>
<SSIDConfig>
<SSID>
<name>{ssid}</name>
</SSID>
</SSIDConfig>
<connectionType>ESS</connectionType>
<connectionMode>manual</connectionMode>
<MSM>
<security>
<authEncryption>
<authentication>{auth}</authentication>
<encryption>{encrypt}</encryption>
<useOneX>false</useOneX>
</authEncryption>
"""
if AKM_TYPE_NONE not in params.akm:
xml += """<sharedKey>
<keyType>passPhrase</keyType>
<protected>{protected}</protected>
<keyMaterial>{key}</keyMaterial>
</sharedKey>"""
xml += """
</security>
</MSM>"""
xml += """<MacRandomization xmlns="http://www.microsoft.com/networking/WLAN/profile/v3">
<enableRandomization>false</enableRandomization>
</MacRandomization>
</WLANProfile>
"""
xml = xml.format(**profile_data)
status = self._wlan_set_profile(self._handle, obj['guid'], xml,
True, byref(reason_code))
if status != ERROR_SUCCESS:
self._logger.debug("Status %d: Add profile failed", status)
buf_size = DWORD(64)
buf = create_unicode_buffer(64)
self._wlan_reason_code_to_str(reason_code, buf_size, buf)
return params
def network_profiles(self, obj):
"""Get AP profiles."""
profile_name_list = self.network_profile_name_list(obj)
profile_list = []
for profile_name in profile_name_list:
profile = Profile()
flags = DWORD()
access = DWORD()
xml = LPWSTR()
self._wlan_get_profile(self._handle, obj['guid'],
profile_name, byref(xml), byref(flags),
byref(access))
# fill profile info
profile.ssid = re.search(r'<name>(.*)</name>', xml.value).group(1)
auth = re.search(r'<authentication>(.*)</authentication>',
xml.value).group(1).upper()
profile.akm = []
if auth not in akm_str_to_value_dict:
if auth not in auth_str_to_value_dict:
profile.auth = AUTH_ALG_OPEN
else:
profile.auth = auth_str_to_value_dict[auth]
profile.akm.append(AKM_TYPE_NONE)
else:
profile.auth = AUTH_ALG_OPEN
profile.akm.append(akm_str_to_value_dict[auth])
profile_list.append(profile)
return profile_list
def remove_network_profile(self, obj, params):
"""Remove the specified AP profile."""
self._logger.debug("delete profile: %s", params.ssid)
str_buf = create_unicode_buffer(params.ssid)
ret = self._wlan_delete_profile(self._handle, obj['guid'], str_buf)
self._logger.debug("delete result %d", ret)
def remove_all_network_profiles(self, obj):
    """Delete every saved AP profile on the given interface."""
    # Iterate the profile names directly; no need to keep the list around.
    for name in self.network_profile_name_list(obj):
        self._logger.debug("delete profile: %s", name)
        name_buf = create_unicode_buffer(name)
        result = self._wlan_delete_profile(self._handle, obj['guid'], name_buf)
        self._logger.debug("delete result %d", result)
def status(self, obj):
    """Get the wifi interface status.

    :param obj: interface dict with a ``'guid'`` key.
    :return: the pywifi status constant looked up via ``status_dict``.
    """
    data_size = DWORD()
    data = PDWORD()  # out-param: receives a pointer to the state value
    opcode_value_type = DWORD()
    # Opcode 6 — presumably wlan_intf_opcode_interface_state; confirm
    # against the WlanQueryInterface documentation.
    self._wlan_query_interface(self._handle, obj['guid'], 6,
                               byref(data_size), byref(data),
                               byref(opcode_value_type))
    # Dereference the returned pointer and translate the native state code
    # into pywifi's status constant.
    return status_dict[data.contents.value]
def interfaces(self):
    """Enumerate wifi interfaces.

    Opens the WLAN service handle, enumerates interfaces, and returns a
    list of dicts with ``'guid'`` and ``'name'`` keys. Failures are logged
    but not raised, matching the original best-effort behavior.
    """
    ifaces = []
    # Fix: status codes must be compared with ``!=``, not ``is not`` —
    # identity comparison against an int only worked by accident of
    # CPython's small-int caching.
    if self._wlan_open_handle(CLIENT_VERSION,
                              byref(self._nego_version),
                              byref(self._handle)) != ERROR_SUCCESS:
        self._logger.error("Open handle failed!")
    if self._wlan_enum_interfaces(self._handle, byref(self._ifaces)) \
            != ERROR_SUCCESS:
        self._logger.error("Enum interface failed!")
    interfaces = cast(self._ifaces.contents.InterfaceInfo,
                      POINTER(WLAN_INTERFACE_INFO))
    for i in range(self._ifaces.contents.dwNumberOfItems):
        iface = {
            'guid': interfaces[i].InterfaceGuid,
            'name': interfaces[i].strInterfaceDescription,
        }
        ifaces.append(iface)
    return ifaces
def _wlan_open_handle(self, client_version, _nego_version, handle):
    """Wrap the native WlanOpenHandle call; returns its DWORD status."""
    func = native_wifi.WlanOpenHandle
    func.argtypes = [DWORD, c_void_p, POINTER(DWORD), POINTER(HANDLE)]
    # Fix: ctypes reads the singular ``restype`` attribute (a single type,
    # not a list); the previous ``restypes = [DWORD]`` was silently ignored.
    func.restype = DWORD
    return func(client_version, None, _nego_version, handle)
def _wlan_close_handle(self, handle):
    """Wrap the native WlanCloseHandle call; returns its DWORD status."""
    func = native_wifi.WlanCloseHandle
    func.argtypes = [HANDLE, c_void_p]
    # Fix: use ``restype`` (singular); ``restypes`` is not a ctypes attribute.
    func.restype = DWORD
    return func(handle, None)
def _wlan_enum_interfaces(self, handle, ifaces):
    """Wrap the native WlanEnumInterfaces call; returns its DWORD status."""
    func = native_wifi.WlanEnumInterfaces
    func.argtypes = [HANDLE, c_void_p, POINTER(
        POINTER(WLAN_INTERFACE_INFO_LIST))]
    # Fix: use ``restype`` (singular); ``restypes`` is not a ctypes attribute.
    func.restype = DWORD
    return func(handle, None, ifaces)
def _wlan_get_available_network_list(self, handle,
                                     iface_guid,
                                     network_list):
    """Wrap the native WlanGetAvailableNetworkList call.

    The hard-coded flag value 2 is passed as dwFlags (presumably
    "include adhoc profiles" — confirm against the wlanapi docs).
    """
    func = native_wifi.WlanGetAvailableNetworkList
    func.argtypes = [HANDLE, POINTER(GUID), DWORD, c_void_p, POINTER(
        POINTER(WLAN_AVAILABLE_NETWORK_LIST))]
    # Fix: use ``restype`` (singular); ``restypes`` is not a ctypes attribute.
    func.restype = DWORD
    return func(handle, iface_guid, 2, None, network_list)
def _wlan_get_network_bss_list(self, handle, iface_guid, bss_list, ssid = None, security = False):
    """Wrap the native WlanGetNetworkBssList call; returns its DWORD status."""
    func = native_wifi.WlanGetNetworkBssList
    func.argtypes = [HANDLE, POINTER(GUID), POINTER(
        DOT11_SSID), c_uint, c_bool, c_void_p, POINTER(POINTER(WLAN_BSS_LIST))]
    # Fix: use ``restype`` (singular); ``restypes`` is not a ctypes attribute.
    func.restype = DWORD
    # 1 = infrastructure BSS type.
    return func(handle, iface_guid, ssid, 1, security, None, bss_list)
def _wlan_scan(self, handle, iface_guid):
func = native_wifi.WlanScan
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
DOT11_SSID), POINTER(WLAN_RAW_DATA), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, None, None, None)
def _wlan_connect(self, handle, iface_guid, params):
    """Wrap the native WlanConnect call; returns its DWORD status."""
    func = native_wifi.WlanConnect
    func.argtypes = [HANDLE, POINTER(GUID), POINTER(
        WLAN_CONNECTION_PARAMETERS), c_void_p]
    # Fix: use ``restype`` (singular); ``restypes`` is not a ctypes attribute.
    func.restype = DWORD
    return func(handle, iface_guid, params, None)
def _wlan_set_profile(self, handle, iface_guid, xml, overwrite, reason_code):
    """Wrap the native WlanSetProfile call; returns its DWORD status."""
    func = native_wifi.WlanSetProfile
    func.argtypes = [HANDLE, POINTER(
        GUID), DWORD, c_wchar_p, c_wchar_p, c_bool, c_void_p, POINTER(DWORD)]
    # Fix: use ``restype`` (singular); ``restypes`` is not a ctypes attribute.
    func.restype = DWORD
    return func(handle, iface_guid, 2, xml, None, overwrite, None, reason_code)
def _wlan_reason_code_to_str(self, reason_code, buf_size, buf):
    """Wrap the native WlanReasonCodeToString call; returns its DWORD status."""
    func = native_wifi.WlanReasonCodeToString
    func.argtypes = [DWORD, DWORD, PWCHAR, c_void_p]
    # Fix: use ``restype`` (singular); ``restypes`` is not a ctypes attribute.
    func.restype = DWORD
    return func(reason_code, buf_size, buf, None)
def _wlan_get_profile_list(self, handle, iface_guid, profile_list):
    """Wrap the native WlanGetProfileList call; returns its DWORD status."""
    func = native_wifi.WlanGetProfileList
    func.argtypes = [HANDLE, POINTER(GUID), c_void_p, POINTER(
        POINTER(WLAN_PROFILE_INFO_LIST))]
    # Fix: use ``restype`` (singular); ``restypes`` is not a ctypes attribute.
    func.restype = DWORD
    return func(handle, iface_guid, None, profile_list)
def _wlan_get_profile(self, handle, iface_guid, profile_name, xml, flags, access):
    """Wrap the native WlanGetProfile call; returns its DWORD status."""
    func = native_wifi.WlanGetProfile
    func.argtypes = [HANDLE, POINTER(GUID), c_wchar_p, c_void_p, POINTER(
        c_wchar_p), POINTER(DWORD), POINTER(DWORD)]
    # Fix: use ``restype`` (singular); ``restypes`` is not a ctypes attribute.
    func.restype = DWORD
    return func(handle, iface_guid, profile_name, None, xml, flags, access)
def _wlan_delete_profile(self, handle, iface_guid, profile_name):
    """Wrap the native WlanDeleteProfile call; returns its DWORD status."""
    func = native_wifi.WlanDeleteProfile
    func.argtypes = [HANDLE, POINTER(GUID), c_wchar_p, c_void_p]
    # Fix: use ``restype`` (singular); ``restypes`` is not a ctypes attribute.
    func.restype = DWORD
    return func(handle, iface_guid, profile_name, None)
def _wlan_query_interface(self, handle, iface_guid, opcode, data_size, data, opcode_value_type):
    """Wrap the native WlanQueryInterface call; returns its DWORD status."""
    func = native_wifi.WlanQueryInterface
    func.argtypes = [HANDLE, POINTER(GUID), DWORD, c_void_p, POINTER(
        DWORD), POINTER(POINTER(DWORD)), POINTER(DWORD)]
    # Fix: use ``restype`` (singular); ``restypes`` is not a ctypes attribute.
    func.restype = DWORD
    return func(handle, iface_guid, opcode, None, data_size, data, opcode_value_type)
def _wlan_disconnect(self, handle, iface_guid):
    """Wrap the native WlanDisconnect call; returns its DWORD status."""
    func = native_wifi.WlanDisconnect
    func.argtypes = [HANDLE, POINTER(GUID), c_void_p]
    # Fix: use ``restype`` (singular); ``restypes`` is not a ctypes attribute.
    func.restype = DWORD
    return func(handle, iface_guid, None)
def _get_auth_alg(self, auth_val):
    """Translate a native auth-algorithm code into pywifi's auth-alg list.

    The numeric codes presumably follow the DOT11_AUTH_ALGORITHM
    enumeration — confirm against the wlanapi documentation.
    """
    if auth_val in (1, 3, 4, 6, 7):
        return [AUTH_ALG_OPEN]
    if auth_val == 2:
        return [AUTH_ALG_SHARED]
    # Unrecognized code: no algorithm.
    return []
def _get_akm(self, akm_val):
    """Translate a native code into pywifi's AKM list.

    Only WPA-PSK (2) and WPA2-PSK (4) are recognized; anything else maps
    to an empty list.
    """
    code_map = {2: AKM_TYPE_WPAPSK, 4: AKM_TYPE_WPA2PSK}
    return [code_map[akm_val]] if akm_val in code_map else []
|
awkman/pywifi | pywifi/_wifiutil_win.py | WifiUtil.network_profiles | python | def network_profiles(self, obj):
profile_name_list = self.network_profile_name_list(obj)
profile_list = []
for profile_name in profile_name_list:
profile = Profile()
flags = DWORD()
access = DWORD()
xml = LPWSTR()
self._wlan_get_profile(self._handle, obj['guid'],
profile_name, byref(xml), byref(flags),
byref(access))
# fill profile info
profile.ssid = re.search(r'<name>(.*)</name>', xml.value).group(1)
auth = re.search(r'<authentication>(.*)</authentication>',
xml.value).group(1).upper()
profile.akm = []
if auth not in akm_str_to_value_dict:
if auth not in auth_str_to_value_dict:
profile.auth = AUTH_ALG_OPEN
else:
profile.auth = auth_str_to_value_dict[auth]
profile.akm.append(AKM_TYPE_NONE)
else:
profile.auth = AUTH_ALG_OPEN
profile.akm.append(akm_str_to_value_dict[auth])
profile_list.append(profile)
return profile_list | Get AP profiles. | train | https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/_wifiutil_win.py#L418-L450 | [
"def network_profile_name_list(self, obj):\n \"\"\"Get AP profile names.\"\"\"\n\n profile_list = pointer(WLAN_PROFILE_INFO_LIST())\n self._wlan_get_profile_list(self._handle,\n byref(obj['guid']),\n byref(profile_list))\n profiles = cast(pro... | class WifiUtil():
"""WifiUtil implements the wifi functions in Windows."""
_nego_version = DWORD()
_handle = HANDLE()
_ifaces = pointer(WLAN_INTERFACE_INFO_LIST())
_logger = logging.getLogger('pywifi')
def scan(self, obj):
"""Trigger the wifi interface to scan."""
self._wlan_scan(self._handle, byref(obj['guid']))
def scan_results(self, obj):
"""Get the AP list after scanning."""
avail_network_list = pointer(WLAN_AVAILABLE_NETWORK_LIST())
self._wlan_get_available_network_list(self._handle,
byref(obj['guid']), byref(avail_network_list))
networks = cast(avail_network_list.contents.Network,
POINTER(WLAN_AVAILABLE_NETWORK))
self._logger.debug("Scan found %d networks.",
avail_network_list.contents.dwNumberOfItems)
network_list = []
for i in range(avail_network_list.contents.dwNumberOfItems):
if networks[i].dot11BssType == 1 and networks[i].bNetworkConnectable :
ssid = ''
for j in range(networks[i].dot11Ssid.uSSIDLength):
if networks[i].dot11Ssid.ucSSID != b'':
ssid += "%c" % networks[i].dot11Ssid.ucSSID[j]
bss_list = pointer(WLAN_BSS_LIST())
self._wlan_get_network_bss_list(self._handle,
byref(obj['guid']), byref(bss_list), networks[i].dot11Ssid, networks[i].bSecurityEnabled)
bsses = cast(bss_list.contents.wlanBssEntries,
POINTER(WLAN_BSS_ENTRY))
if networks[i].bSecurityEnabled:
akm = self._get_akm(networks[i].dot11DefaultCipherAlgorithm)
auth_alg = self._get_auth_alg(networks[i].dot11DefaultAuthAlgorithm)
else:
akm = [AKM_TYPE_NONE]
auth_alg = [AUTH_ALG_OPEN]
for j in range(bss_list.contents.dwNumberOfItems):
network = Profile()
network.ssid = ssid
network.bssid = ''
for k in range(6):
network.bssid += "%02x:" % bsses[j].dot11Bssid[k]
network.signal = bsses[j].lRssi
network.freq = bsses[j].ulChCenterFrequency
network.auth = auth_alg
network.akm = akm
network_list.append(network)
return network_list
def connect(self, obj, params):
"""Connect to the specified AP."""
connect_params = WLAN_CONNECTION_PARAMETERS()
connect_params.wlanConnectionMode = 0 # Profile
connect_params.dot11BssType = 1 # infra
profile_name = create_unicode_buffer(params.ssid)
connect_params.strProfile = profile_name.value
ret = self._wlan_connect(
self._handle, obj['guid'], byref(connect_params))
self._logger.debug('connect result: %d', ret)
def disconnect(self, obj):
"""Disconnect to the specified AP."""
self._wlan_disconnect(self._handle, obj['guid'])
def add_network_profile(self, obj, params):
"""Add an AP profile for connecting to afterward."""
reason_code = DWORD()
params.process_akm()
profile_data = {}
profile_data['ssid'] = params.ssid
if AKM_TYPE_NONE in params.akm:
profile_data['auth'] = auth_value_to_str_dict[params.auth]
profile_data['encrypt'] = "none"
else:
profile_data['auth'] = akm_value_to_str_dict[params.akm[-1]]
profile_data['encrypt'] = cipher_value_to_str_dict[params.cipher]
profile_data['key'] = params.key
profile_data['protected'] = 'false'
profile_data['profile_name'] = params.ssid
xml = """<?xml version="1.0"?>
<WLANProfile xmlns="http://www.microsoft.com/networking/WLAN/profile/v1">
<name>{profile_name}</name>
<SSIDConfig>
<SSID>
<name>{ssid}</name>
</SSID>
</SSIDConfig>
<connectionType>ESS</connectionType>
<connectionMode>manual</connectionMode>
<MSM>
<security>
<authEncryption>
<authentication>{auth}</authentication>
<encryption>{encrypt}</encryption>
<useOneX>false</useOneX>
</authEncryption>
"""
if AKM_TYPE_NONE not in params.akm:
xml += """<sharedKey>
<keyType>passPhrase</keyType>
<protected>{protected}</protected>
<keyMaterial>{key}</keyMaterial>
</sharedKey>"""
xml += """
</security>
</MSM>"""
xml += """<MacRandomization xmlns="http://www.microsoft.com/networking/WLAN/profile/v3">
<enableRandomization>false</enableRandomization>
</MacRandomization>
</WLANProfile>
"""
xml = xml.format(**profile_data)
status = self._wlan_set_profile(self._handle, obj['guid'], xml,
True, byref(reason_code))
if status != ERROR_SUCCESS:
self._logger.debug("Status %d: Add profile failed", status)
buf_size = DWORD(64)
buf = create_unicode_buffer(64)
self._wlan_reason_code_to_str(reason_code, buf_size, buf)
return params
def network_profile_name_list(self, obj):
"""Get AP profile names."""
profile_list = pointer(WLAN_PROFILE_INFO_LIST())
self._wlan_get_profile_list(self._handle,
byref(obj['guid']),
byref(profile_list))
profiles = cast(profile_list.contents.ProfileInfo,
POINTER(WLAN_PROFILE_INFO))
profile_name_list = []
for i in range(profile_list.contents.dwNumberOfItems):
profile_name = ''
for j in range(len(profiles[i].strProfileName)):
profile_name += profiles[i].strProfileName[j]
profile_name_list.append(profile_name)
return profile_name_list
def remove_network_profile(self, obj, params):
"""Remove the specified AP profile."""
self._logger.debug("delete profile: %s", params.ssid)
str_buf = create_unicode_buffer(params.ssid)
ret = self._wlan_delete_profile(self._handle, obj['guid'], str_buf)
self._logger.debug("delete result %d", ret)
def remove_all_network_profiles(self, obj):
"""Remove all the AP profiles."""
profile_name_list = self.network_profile_name_list(obj)
for profile_name in profile_name_list:
self._logger.debug("delete profile: %s", profile_name)
str_buf = create_unicode_buffer(profile_name)
ret = self._wlan_delete_profile(self._handle, obj['guid'], str_buf)
self._logger.debug("delete result %d", ret)
def status(self, obj):
"""Get the wifi interface status."""
data_size = DWORD()
data = PDWORD()
opcode_value_type = DWORD()
self._wlan_query_interface(self._handle, obj['guid'], 6,
byref(data_size), byref(data),
byref(opcode_value_type))
return status_dict[data.contents.value]
def interfaces(self):
"""Get the wifi interface lists."""
ifaces = []
if self._wlan_open_handle(CLIENT_VERSION,
byref(self._nego_version),
byref(self._handle)) \
is not ERROR_SUCCESS:
self._logger.error("Open handle failed!")
if self._wlan_enum_interfaces(self._handle, byref(self._ifaces)) \
is not ERROR_SUCCESS:
self._logger.error("Enum interface failed!")
interfaces = cast(self._ifaces.contents.InterfaceInfo,
POINTER(WLAN_INTERFACE_INFO))
for i in range(0, self._ifaces.contents.dwNumberOfItems):
iface = {}
iface['guid'] = interfaces[i].InterfaceGuid
iface['name'] = interfaces[i].strInterfaceDescription
ifaces.append(iface)
return ifaces
def _wlan_open_handle(self, client_version, _nego_version, handle):
func = native_wifi.WlanOpenHandle
func.argtypes = [DWORD, c_void_p, POINTER(DWORD), POINTER(HANDLE)]
func.restypes = [DWORD]
return func(client_version, None, _nego_version, handle)
def _wlan_close_handle(self, handle):
func = native_wifi.WlanCloseHandle
func.argtypes = [HANDLE, c_void_p]
func.restypes = [DWORD]
return func(handle, None)
def _wlan_enum_interfaces(self, handle, ifaces):
func = native_wifi.WlanEnumInterfaces
func.argtypes = [HANDLE, c_void_p, POINTER(
POINTER(WLAN_INTERFACE_INFO_LIST))]
func.restypes = [DWORD]
return func(handle, None, ifaces)
def _wlan_get_available_network_list(self, handle,
iface_guid,
network_list):
func = native_wifi.WlanGetAvailableNetworkList
func.argtypes = [HANDLE, POINTER(GUID), DWORD, c_void_p, POINTER(
POINTER(WLAN_AVAILABLE_NETWORK_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, 2, None, network_list)
def _wlan_get_network_bss_list(self, handle, iface_guid, bss_list, ssid = None, security = False):
func = native_wifi.WlanGetNetworkBssList
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
DOT11_SSID), c_uint, c_bool, c_void_p, POINTER(POINTER(WLAN_BSS_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, ssid, 1, security, None, bss_list)
def _wlan_scan(self, handle, iface_guid):
func = native_wifi.WlanScan
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
DOT11_SSID), POINTER(WLAN_RAW_DATA), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, None, None, None)
def _wlan_connect(self, handle, iface_guid, params):
func = native_wifi.WlanConnect
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
WLAN_CONNECTION_PARAMETERS), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, params, None)
def _wlan_set_profile(self, handle, iface_guid, xml, overwrite, reason_code):
func = native_wifi.WlanSetProfile
func.argtypes = [HANDLE, POINTER(
GUID), DWORD, c_wchar_p, c_wchar_p, c_bool, c_void_p, POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, 2, xml, None, overwrite, None, reason_code)
def _wlan_reason_code_to_str(self, reason_code, buf_size, buf):
func = native_wifi.WlanReasonCodeToString
func.argtypes = [DWORD, DWORD, PWCHAR, c_void_p]
func.restypes = [DWORD]
return func(reason_code, buf_size, buf, None)
def _wlan_get_profile_list(self, handle, iface_guid, profile_list):
func = native_wifi.WlanGetProfileList
func.argtypes = [HANDLE, POINTER(GUID), c_void_p, POINTER(
POINTER(WLAN_PROFILE_INFO_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, None, profile_list)
def _wlan_get_profile(self, handle, iface_guid, profile_name, xml, flags, access):
func = native_wifi.WlanGetProfile
func.argtypes = [HANDLE, POINTER(GUID), c_wchar_p, c_void_p, POINTER(
c_wchar_p), POINTER(DWORD), POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, profile_name, None, xml, flags, access)
def _wlan_delete_profile(self, handle, iface_guid, profile_name):
func = native_wifi.WlanDeleteProfile
func.argtypes = [HANDLE, POINTER(GUID), c_wchar_p, c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, profile_name, None)
def _wlan_query_interface(self, handle, iface_guid, opcode, data_size, data, opcode_value_type):
func = native_wifi.WlanQueryInterface
func.argtypes = [HANDLE, POINTER(GUID), DWORD, c_void_p, POINTER(
DWORD), POINTER(POINTER(DWORD)), POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, opcode, None, data_size, data, opcode_value_type)
def _wlan_disconnect(self, handle, iface_guid):
func = native_wifi.WlanDisconnect
func.argtypes = [HANDLE, POINTER(GUID), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, None)
def _get_auth_alg(self, auth_val):
auth_alg = []
if auth_val in [1, 3, 4, 6, 7]:
auth_alg.append(AUTH_ALG_OPEN)
elif auth_val == 2:
auth_alg.append(AUTH_ALG_SHARED)
return auth_alg
def _get_akm(self, akm_val):
akm = []
if akm_val == 2:
akm.append(AKM_TYPE_WPAPSK)
elif akm_val == 4:
akm.append(AKM_TYPE_WPA2PSK)
return akm
|
awkman/pywifi | pywifi/_wifiutil_win.py | WifiUtil.remove_network_profile | python | def remove_network_profile(self, obj, params):
self._logger.debug("delete profile: %s", params.ssid)
str_buf = create_unicode_buffer(params.ssid)
ret = self._wlan_delete_profile(self._handle, obj['guid'], str_buf)
self._logger.debug("delete result %d", ret) | Remove the specified AP profile. | train | https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/_wifiutil_win.py#L452-L458 | [
"def _wlan_delete_profile(self, handle, iface_guid, profile_name):\n\n func = native_wifi.WlanDeleteProfile\n func.argtypes = [HANDLE, POINTER(GUID), c_wchar_p, c_void_p]\n func.restypes = [DWORD]\n return func(handle, iface_guid, profile_name, None)\n"
] | class WifiUtil():
"""WifiUtil implements the wifi functions in Windows."""
_nego_version = DWORD()
_handle = HANDLE()
_ifaces = pointer(WLAN_INTERFACE_INFO_LIST())
_logger = logging.getLogger('pywifi')
def scan(self, obj):
"""Trigger the wifi interface to scan."""
self._wlan_scan(self._handle, byref(obj['guid']))
def scan_results(self, obj):
"""Get the AP list after scanning."""
avail_network_list = pointer(WLAN_AVAILABLE_NETWORK_LIST())
self._wlan_get_available_network_list(self._handle,
byref(obj['guid']), byref(avail_network_list))
networks = cast(avail_network_list.contents.Network,
POINTER(WLAN_AVAILABLE_NETWORK))
self._logger.debug("Scan found %d networks.",
avail_network_list.contents.dwNumberOfItems)
network_list = []
for i in range(avail_network_list.contents.dwNumberOfItems):
if networks[i].dot11BssType == 1 and networks[i].bNetworkConnectable :
ssid = ''
for j in range(networks[i].dot11Ssid.uSSIDLength):
if networks[i].dot11Ssid.ucSSID != b'':
ssid += "%c" % networks[i].dot11Ssid.ucSSID[j]
bss_list = pointer(WLAN_BSS_LIST())
self._wlan_get_network_bss_list(self._handle,
byref(obj['guid']), byref(bss_list), networks[i].dot11Ssid, networks[i].bSecurityEnabled)
bsses = cast(bss_list.contents.wlanBssEntries,
POINTER(WLAN_BSS_ENTRY))
if networks[i].bSecurityEnabled:
akm = self._get_akm(networks[i].dot11DefaultCipherAlgorithm)
auth_alg = self._get_auth_alg(networks[i].dot11DefaultAuthAlgorithm)
else:
akm = [AKM_TYPE_NONE]
auth_alg = [AUTH_ALG_OPEN]
for j in range(bss_list.contents.dwNumberOfItems):
network = Profile()
network.ssid = ssid
network.bssid = ''
for k in range(6):
network.bssid += "%02x:" % bsses[j].dot11Bssid[k]
network.signal = bsses[j].lRssi
network.freq = bsses[j].ulChCenterFrequency
network.auth = auth_alg
network.akm = akm
network_list.append(network)
return network_list
def connect(self, obj, params):
"""Connect to the specified AP."""
connect_params = WLAN_CONNECTION_PARAMETERS()
connect_params.wlanConnectionMode = 0 # Profile
connect_params.dot11BssType = 1 # infra
profile_name = create_unicode_buffer(params.ssid)
connect_params.strProfile = profile_name.value
ret = self._wlan_connect(
self._handle, obj['guid'], byref(connect_params))
self._logger.debug('connect result: %d', ret)
def disconnect(self, obj):
"""Disconnect to the specified AP."""
self._wlan_disconnect(self._handle, obj['guid'])
def add_network_profile(self, obj, params):
"""Add an AP profile for connecting to afterward."""
reason_code = DWORD()
params.process_akm()
profile_data = {}
profile_data['ssid'] = params.ssid
if AKM_TYPE_NONE in params.akm:
profile_data['auth'] = auth_value_to_str_dict[params.auth]
profile_data['encrypt'] = "none"
else:
profile_data['auth'] = akm_value_to_str_dict[params.akm[-1]]
profile_data['encrypt'] = cipher_value_to_str_dict[params.cipher]
profile_data['key'] = params.key
profile_data['protected'] = 'false'
profile_data['profile_name'] = params.ssid
xml = """<?xml version="1.0"?>
<WLANProfile xmlns="http://www.microsoft.com/networking/WLAN/profile/v1">
<name>{profile_name}</name>
<SSIDConfig>
<SSID>
<name>{ssid}</name>
</SSID>
</SSIDConfig>
<connectionType>ESS</connectionType>
<connectionMode>manual</connectionMode>
<MSM>
<security>
<authEncryption>
<authentication>{auth}</authentication>
<encryption>{encrypt}</encryption>
<useOneX>false</useOneX>
</authEncryption>
"""
if AKM_TYPE_NONE not in params.akm:
xml += """<sharedKey>
<keyType>passPhrase</keyType>
<protected>{protected}</protected>
<keyMaterial>{key}</keyMaterial>
</sharedKey>"""
xml += """
</security>
</MSM>"""
xml += """<MacRandomization xmlns="http://www.microsoft.com/networking/WLAN/profile/v3">
<enableRandomization>false</enableRandomization>
</MacRandomization>
</WLANProfile>
"""
xml = xml.format(**profile_data)
status = self._wlan_set_profile(self._handle, obj['guid'], xml,
True, byref(reason_code))
if status != ERROR_SUCCESS:
self._logger.debug("Status %d: Add profile failed", status)
buf_size = DWORD(64)
buf = create_unicode_buffer(64)
self._wlan_reason_code_to_str(reason_code, buf_size, buf)
return params
def network_profile_name_list(self, obj):
"""Get AP profile names."""
profile_list = pointer(WLAN_PROFILE_INFO_LIST())
self._wlan_get_profile_list(self._handle,
byref(obj['guid']),
byref(profile_list))
profiles = cast(profile_list.contents.ProfileInfo,
POINTER(WLAN_PROFILE_INFO))
profile_name_list = []
for i in range(profile_list.contents.dwNumberOfItems):
profile_name = ''
for j in range(len(profiles[i].strProfileName)):
profile_name += profiles[i].strProfileName[j]
profile_name_list.append(profile_name)
return profile_name_list
def network_profiles(self, obj):
"""Get AP profiles."""
profile_name_list = self.network_profile_name_list(obj)
profile_list = []
for profile_name in profile_name_list:
profile = Profile()
flags = DWORD()
access = DWORD()
xml = LPWSTR()
self._wlan_get_profile(self._handle, obj['guid'],
profile_name, byref(xml), byref(flags),
byref(access))
# fill profile info
profile.ssid = re.search(r'<name>(.*)</name>', xml.value).group(1)
auth = re.search(r'<authentication>(.*)</authentication>',
xml.value).group(1).upper()
profile.akm = []
if auth not in akm_str_to_value_dict:
if auth not in auth_str_to_value_dict:
profile.auth = AUTH_ALG_OPEN
else:
profile.auth = auth_str_to_value_dict[auth]
profile.akm.append(AKM_TYPE_NONE)
else:
profile.auth = AUTH_ALG_OPEN
profile.akm.append(akm_str_to_value_dict[auth])
profile_list.append(profile)
return profile_list
def remove_all_network_profiles(self, obj):
"""Remove all the AP profiles."""
profile_name_list = self.network_profile_name_list(obj)
for profile_name in profile_name_list:
self._logger.debug("delete profile: %s", profile_name)
str_buf = create_unicode_buffer(profile_name)
ret = self._wlan_delete_profile(self._handle, obj['guid'], str_buf)
self._logger.debug("delete result %d", ret)
def status(self, obj):
"""Get the wifi interface status."""
data_size = DWORD()
data = PDWORD()
opcode_value_type = DWORD()
self._wlan_query_interface(self._handle, obj['guid'], 6,
byref(data_size), byref(data),
byref(opcode_value_type))
return status_dict[data.contents.value]
def interfaces(self):
"""Get the wifi interface lists."""
ifaces = []
if self._wlan_open_handle(CLIENT_VERSION,
byref(self._nego_version),
byref(self._handle)) \
is not ERROR_SUCCESS:
self._logger.error("Open handle failed!")
if self._wlan_enum_interfaces(self._handle, byref(self._ifaces)) \
is not ERROR_SUCCESS:
self._logger.error("Enum interface failed!")
interfaces = cast(self._ifaces.contents.InterfaceInfo,
POINTER(WLAN_INTERFACE_INFO))
for i in range(0, self._ifaces.contents.dwNumberOfItems):
iface = {}
iface['guid'] = interfaces[i].InterfaceGuid
iface['name'] = interfaces[i].strInterfaceDescription
ifaces.append(iface)
return ifaces
def _wlan_open_handle(self, client_version, _nego_version, handle):
func = native_wifi.WlanOpenHandle
func.argtypes = [DWORD, c_void_p, POINTER(DWORD), POINTER(HANDLE)]
func.restypes = [DWORD]
return func(client_version, None, _nego_version, handle)
def _wlan_close_handle(self, handle):
func = native_wifi.WlanCloseHandle
func.argtypes = [HANDLE, c_void_p]
func.restypes = [DWORD]
return func(handle, None)
def _wlan_enum_interfaces(self, handle, ifaces):
func = native_wifi.WlanEnumInterfaces
func.argtypes = [HANDLE, c_void_p, POINTER(
POINTER(WLAN_INTERFACE_INFO_LIST))]
func.restypes = [DWORD]
return func(handle, None, ifaces)
def _wlan_get_available_network_list(self, handle,
iface_guid,
network_list):
func = native_wifi.WlanGetAvailableNetworkList
func.argtypes = [HANDLE, POINTER(GUID), DWORD, c_void_p, POINTER(
POINTER(WLAN_AVAILABLE_NETWORK_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, 2, None, network_list)
def _wlan_get_network_bss_list(self, handle, iface_guid, bss_list, ssid = None, security = False):
func = native_wifi.WlanGetNetworkBssList
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
DOT11_SSID), c_uint, c_bool, c_void_p, POINTER(POINTER(WLAN_BSS_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, ssid, 1, security, None, bss_list)
def _wlan_scan(self, handle, iface_guid):
func = native_wifi.WlanScan
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
DOT11_SSID), POINTER(WLAN_RAW_DATA), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, None, None, None)
def _wlan_connect(self, handle, iface_guid, params):
func = native_wifi.WlanConnect
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
WLAN_CONNECTION_PARAMETERS), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, params, None)
def _wlan_set_profile(self, handle, iface_guid, xml, overwrite, reason_code):
func = native_wifi.WlanSetProfile
func.argtypes = [HANDLE, POINTER(
GUID), DWORD, c_wchar_p, c_wchar_p, c_bool, c_void_p, POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, 2, xml, None, overwrite, None, reason_code)
def _wlan_reason_code_to_str(self, reason_code, buf_size, buf):
func = native_wifi.WlanReasonCodeToString
func.argtypes = [DWORD, DWORD, PWCHAR, c_void_p]
func.restypes = [DWORD]
return func(reason_code, buf_size, buf, None)
def _wlan_get_profile_list(self, handle, iface_guid, profile_list):
func = native_wifi.WlanGetProfileList
func.argtypes = [HANDLE, POINTER(GUID), c_void_p, POINTER(
POINTER(WLAN_PROFILE_INFO_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, None, profile_list)
def _wlan_get_profile(self, handle, iface_guid, profile_name, xml, flags, access):
func = native_wifi.WlanGetProfile
func.argtypes = [HANDLE, POINTER(GUID), c_wchar_p, c_void_p, POINTER(
c_wchar_p), POINTER(DWORD), POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, profile_name, None, xml, flags, access)
def _wlan_delete_profile(self, handle, iface_guid, profile_name):
func = native_wifi.WlanDeleteProfile
func.argtypes = [HANDLE, POINTER(GUID), c_wchar_p, c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, profile_name, None)
def _wlan_query_interface(self, handle, iface_guid, opcode, data_size, data, opcode_value_type):
func = native_wifi.WlanQueryInterface
func.argtypes = [HANDLE, POINTER(GUID), DWORD, c_void_p, POINTER(
DWORD), POINTER(POINTER(DWORD)), POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, opcode, None, data_size, data, opcode_value_type)
def _wlan_disconnect(self, handle, iface_guid):
func = native_wifi.WlanDisconnect
func.argtypes = [HANDLE, POINTER(GUID), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, None)
def _get_auth_alg(self, auth_val):
auth_alg = []
if auth_val in [1, 3, 4, 6, 7]:
auth_alg.append(AUTH_ALG_OPEN)
elif auth_val == 2:
auth_alg.append(AUTH_ALG_SHARED)
return auth_alg
def _get_akm(self, akm_val):
akm = []
if akm_val == 2:
akm.append(AKM_TYPE_WPAPSK)
elif akm_val == 4:
akm.append(AKM_TYPE_WPA2PSK)
return akm
|
awkman/pywifi | pywifi/_wifiutil_win.py | WifiUtil.remove_all_network_profiles | python | def remove_all_network_profiles(self, obj):
profile_name_list = self.network_profile_name_list(obj)
for profile_name in profile_name_list:
self._logger.debug("delete profile: %s", profile_name)
str_buf = create_unicode_buffer(profile_name)
ret = self._wlan_delete_profile(self._handle, obj['guid'], str_buf)
self._logger.debug("delete result %d", ret) | Remove all the AP profiles. | train | https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/_wifiutil_win.py#L460-L469 | [
"def network_profile_name_list(self, obj):\n \"\"\"Get AP profile names.\"\"\"\n\n profile_list = pointer(WLAN_PROFILE_INFO_LIST())\n self._wlan_get_profile_list(self._handle,\n byref(obj['guid']),\n byref(profile_list))\n profiles = cast(pro... | class WifiUtil():
"""WifiUtil implements the wifi functions in Windows."""
_nego_version = DWORD()
_handle = HANDLE()
_ifaces = pointer(WLAN_INTERFACE_INFO_LIST())
_logger = logging.getLogger('pywifi')
def scan(self, obj):
"""Trigger the wifi interface to scan."""
self._wlan_scan(self._handle, byref(obj['guid']))
def scan_results(self, obj):
"""Get the AP list after scanning."""
avail_network_list = pointer(WLAN_AVAILABLE_NETWORK_LIST())
self._wlan_get_available_network_list(self._handle,
byref(obj['guid']), byref(avail_network_list))
networks = cast(avail_network_list.contents.Network,
POINTER(WLAN_AVAILABLE_NETWORK))
self._logger.debug("Scan found %d networks.",
avail_network_list.contents.dwNumberOfItems)
network_list = []
for i in range(avail_network_list.contents.dwNumberOfItems):
if networks[i].dot11BssType == 1 and networks[i].bNetworkConnectable :
ssid = ''
for j in range(networks[i].dot11Ssid.uSSIDLength):
if networks[i].dot11Ssid.ucSSID != b'':
ssid += "%c" % networks[i].dot11Ssid.ucSSID[j]
bss_list = pointer(WLAN_BSS_LIST())
self._wlan_get_network_bss_list(self._handle,
byref(obj['guid']), byref(bss_list), networks[i].dot11Ssid, networks[i].bSecurityEnabled)
bsses = cast(bss_list.contents.wlanBssEntries,
POINTER(WLAN_BSS_ENTRY))
if networks[i].bSecurityEnabled:
akm = self._get_akm(networks[i].dot11DefaultCipherAlgorithm)
auth_alg = self._get_auth_alg(networks[i].dot11DefaultAuthAlgorithm)
else:
akm = [AKM_TYPE_NONE]
auth_alg = [AUTH_ALG_OPEN]
for j in range(bss_list.contents.dwNumberOfItems):
network = Profile()
network.ssid = ssid
network.bssid = ''
for k in range(6):
network.bssid += "%02x:" % bsses[j].dot11Bssid[k]
network.signal = bsses[j].lRssi
network.freq = bsses[j].ulChCenterFrequency
network.auth = auth_alg
network.akm = akm
network_list.append(network)
return network_list
def connect(self, obj, params):
"""Connect to the specified AP."""
connect_params = WLAN_CONNECTION_PARAMETERS()
connect_params.wlanConnectionMode = 0 # Profile
connect_params.dot11BssType = 1 # infra
profile_name = create_unicode_buffer(params.ssid)
connect_params.strProfile = profile_name.value
ret = self._wlan_connect(
self._handle, obj['guid'], byref(connect_params))
self._logger.debug('connect result: %d', ret)
def disconnect(self, obj):
"""Disconnect to the specified AP."""
self._wlan_disconnect(self._handle, obj['guid'])
def add_network_profile(self, obj, params):
    """Add an AP profile for connecting to afterward.

    Builds the Windows WLAN profile XML from *params* and registers it
    with WlanSetProfile on the interface identified by ``obj['guid']``.
    Returns *params* unchanged.
    """
    reason_code = DWORD()
    # Normalise params.akm before reading it below.
    params.process_akm()
    profile_data = {}
    profile_data['ssid'] = params.ssid
    if AKM_TYPE_NONE in params.akm:
        # No WPA key management: auth comes from params.auth, no cipher.
        profile_data['auth'] = auth_value_to_str_dict[params.auth]
        profile_data['encrypt'] = "none"
    else:
        # Use the last AKM suite requested plus the configured cipher.
        profile_data['auth'] = akm_value_to_str_dict[params.akm[-1]]
        profile_data['encrypt'] = cipher_value_to_str_dict[params.cipher]
    profile_data['key'] = params.key
    # 'false' => the key material in the XML is stored unprotected.
    profile_data['protected'] = 'false'
    profile_data['profile_name'] = params.ssid
    xml = """<?xml version="1.0"?>
<WLANProfile xmlns="http://www.microsoft.com/networking/WLAN/profile/v1">
<name>{profile_name}</name>
<SSIDConfig>
<SSID>
<name>{ssid}</name>
</SSID>
</SSIDConfig>
<connectionType>ESS</connectionType>
<connectionMode>manual</connectionMode>
<MSM>
<security>
<authEncryption>
<authentication>{auth}</authentication>
<encryption>{encrypt}</encryption>
<useOneX>false</useOneX>
</authEncryption>
"""
    # The sharedKey element is only valid for networks with a key.
    if AKM_TYPE_NONE not in params.akm:
        xml += """<sharedKey>
<keyType>passPhrase</keyType>
<protected>{protected}</protected>
<keyMaterial>{key}</keyMaterial>
</sharedKey>"""
    xml += """
</security>
</MSM>"""
    xml += """<MacRandomization xmlns="http://www.microsoft.com/networking/WLAN/profile/v3">
<enableRandomization>false</enableRandomization>
</MacRandomization>
</WLANProfile>
"""
    # NOTE(review): ssid/key are substituted without XML escaping, and a
    # literal '{' or '}' in them would break str.format — confirm inputs
    # are constrained upstream.
    xml = xml.format(**profile_data)
    status = self._wlan_set_profile(self._handle, obj['guid'], xml,
                                    True, byref(reason_code))
    if status != ERROR_SUCCESS:
        self._logger.debug("Status %d: Add profile failed", status)
        buf_size = DWORD(64)
        buf = create_unicode_buffer(64)
        # Translates the reason code into text in 'buf'; the text itself
        # is not logged here.
        self._wlan_reason_code_to_str(reason_code, buf_size, buf)
    return params
def network_profile_name_list(self, obj):
    """Return the saved AP profile names for interface *obj*.

    obj: dict with a 'guid' key identifying the wifi interface.
    Returns a list of profile-name strings.
    """
    profile_list = pointer(WLAN_PROFILE_INFO_LIST())
    self._wlan_get_profile_list(self._handle,
                                byref(obj['guid']),
                                byref(profile_list))
    profiles = cast(profile_list.contents.ProfileInfo,
                    POINTER(WLAN_PROFILE_INFO))
    # ctypes already converts the fixed-size wchar array field to a
    # Python str (NUL-truncated), so copy it directly instead of the
    # old O(n^2) character-by-character concatenation loop.
    return [str(profiles[i].strProfileName)
            for i in range(profile_list.contents.dwNumberOfItems)]
def network_profiles(self, obj):
    """Get AP profiles.

    Fetches each saved profile's XML via WlanGetProfile and extracts
    the ssid and authentication scheme with regexes.  Returns a list
    of Profile objects.
    """
    profile_name_list = self.network_profile_name_list(obj)
    profile_list = []
    for profile_name in profile_name_list:
        profile = Profile()
        flags = DWORD()
        access = DWORD()
        xml = LPWSTR()  # receives a pointer to the profile XML text
        self._wlan_get_profile(self._handle, obj['guid'],
                               profile_name, byref(xml), byref(flags),
                               byref(access))
        # fill profile info
        profile.ssid = re.search(r'<name>(.*)</name>', xml.value).group(1)
        auth = re.search(r'<authentication>(.*)</authentication>',
                         xml.value).group(1).upper()
        profile.akm = []
        if auth not in akm_str_to_value_dict:
            # Not a WPA/WPA2 scheme: AKM is NONE, auth is taken from the
            # auth table (falling back to open for unknown values).
            if auth not in auth_str_to_value_dict:
                profile.auth = AUTH_ALG_OPEN
            else:
                profile.auth = auth_str_to_value_dict[auth]
            profile.akm.append(AKM_TYPE_NONE)
        else:
            # WPA/WPA2 style: auth algorithm is open, AKM carries the type.
            profile.auth = AUTH_ALG_OPEN
            profile.akm.append(akm_str_to_value_dict[auth])
        profile_list.append(profile)
    return profile_list
def remove_network_profile(self, obj, params):
    """Remove the saved profile whose name equals ``params.ssid``."""
    self._logger.debug("delete profile: %s", params.ssid)
    name_buf = create_unicode_buffer(params.ssid)
    result = self._wlan_delete_profile(self._handle, obj['guid'], name_buf)
    self._logger.debug("delete result %d", result)
def status(self, obj):
    """Get the wifi interface status.

    Returns one of the pywifi status constants via ``status_dict``.
    """
    data_size = DWORD()
    data = PDWORD()
    opcode_value_type = DWORD()
    # Opcode 6 — presumably wlan_intf_opcode_interface_state; the query
    # yields a pointer to a DWORD holding the state (TODO confirm
    # against the wlanapi headers).
    self._wlan_query_interface(self._handle, obj['guid'], 6,
                               byref(data_size), byref(data),
                               byref(opcode_value_type))
    # Map the numeric interface state onto pywifi's status values.
    return status_dict[data.contents.value]
def interfaces(self):
    """Get the wifi interface list.

    Opens the WLAN handle (stored on the instance) and enumerates the
    wireless interfaces.

    Returns:
        list of dicts with 'guid' and 'name' keys, one per interface.
    """
    ifaces = []
    # Use != rather than 'is not': these are plain ints returned from
    # ctypes calls, and identity comparison against an int literal only
    # works by accident of CPython's small-int caching.
    if self._wlan_open_handle(CLIENT_VERSION,
                              byref(self._nego_version),
                              byref(self._handle)) != ERROR_SUCCESS:
        self._logger.error("Open handle failed!")
    if self._wlan_enum_interfaces(self._handle,
                                  byref(self._ifaces)) != ERROR_SUCCESS:
        self._logger.error("Enum interface failed!")
    interfaces = cast(self._ifaces.contents.InterfaceInfo,
                      POINTER(WLAN_INTERFACE_INFO))
    for i in range(self._ifaces.contents.dwNumberOfItems):
        ifaces.append({
            'guid': interfaces[i].InterfaceGuid,
            'name': interfaces[i].strInterfaceDescription,
        })
    return ifaces
def _wlan_open_handle(self, client_version, _nego_version, handle):
    """ctypes wrapper for WlanOpenHandle; returns a Win32 status DWORD."""
    func = native_wifi.WlanOpenHandle
    func.argtypes = [DWORD, c_void_p, POINTER(DWORD), POINTER(HANDLE)]
    # Fix: ctypes uses 'restype' (a single type object), not 'restypes';
    # the original assignment created an inert attribute and the return
    # type was never actually set.
    func.restype = DWORD
    return func(client_version, None, _nego_version, handle)
def _wlan_close_handle(self, handle):
    """ctypes wrapper for WlanCloseHandle; returns a Win32 status DWORD."""
    func = native_wifi.WlanCloseHandle
    func.argtypes = [HANDLE, c_void_p]
    # Fix: 'restype' (singular), not 'restypes' — the original was a no-op.
    func.restype = DWORD
    return func(handle, None)
def _wlan_enum_interfaces(self, handle, ifaces):
    """ctypes wrapper for WlanEnumInterfaces; returns a Win32 status DWORD."""
    func = native_wifi.WlanEnumInterfaces
    func.argtypes = [HANDLE, c_void_p, POINTER(
        POINTER(WLAN_INTERFACE_INFO_LIST))]
    # Fix: 'restype' (singular), not 'restypes' — the original was a no-op.
    func.restype = DWORD
    return func(handle, None, ifaces)
def _wlan_get_available_network_list(self, handle,
                                     iface_guid,
                                     network_list):
    """ctypes wrapper for WlanGetAvailableNetworkList.

    Flags value 2 is passed through; returns a Win32 status DWORD.
    """
    func = native_wifi.WlanGetAvailableNetworkList
    func.argtypes = [HANDLE, POINTER(GUID), DWORD, c_void_p, POINTER(
        POINTER(WLAN_AVAILABLE_NETWORK_LIST))]
    # Fix: 'restype' (singular), not 'restypes' — the original was a no-op.
    func.restype = DWORD
    return func(handle, iface_guid, 2, None, network_list)
def _wlan_get_network_bss_list(self, handle, iface_guid, bss_list,
                               ssid=None, security=False):
    """ctypes wrapper for WlanGetNetworkBssList; returns a Win32 status DWORD."""
    func = native_wifi.WlanGetNetworkBssList
    func.argtypes = [HANDLE, POINTER(GUID), POINTER(
        DOT11_SSID), c_uint, c_bool, c_void_p, POINTER(POINTER(WLAN_BSS_LIST))]
    # Fix: 'restype' (singular), not 'restypes' — the original was a no-op.
    func.restype = DWORD
    return func(handle, iface_guid, ssid, 1, security, None, bss_list)
def _wlan_scan(self, handle, iface_guid):
    """ctypes wrapper for WlanScan; returns a Win32 status DWORD."""
    func = native_wifi.WlanScan
    func.argtypes = [HANDLE, POINTER(GUID), POINTER(
        DOT11_SSID), POINTER(WLAN_RAW_DATA), c_void_p]
    # Fix: 'restype' (singular), not 'restypes' — the original was a no-op.
    func.restype = DWORD
    return func(handle, iface_guid, None, None, None)
def _wlan_connect(self, handle, iface_guid, params):
    """ctypes wrapper for WlanConnect; returns a Win32 status DWORD."""
    func = native_wifi.WlanConnect
    func.argtypes = [HANDLE, POINTER(GUID), POINTER(
        WLAN_CONNECTION_PARAMETERS), c_void_p]
    # Fix: 'restype' (singular), not 'restypes' — the original was a no-op.
    func.restype = DWORD
    return func(handle, iface_guid, params, None)
def _wlan_set_profile(self, handle, iface_guid, xml, overwrite, reason_code):
    """ctypes wrapper for WlanSetProfile.

    Flags value 2 is passed through; returns a Win32 status DWORD.
    """
    func = native_wifi.WlanSetProfile
    func.argtypes = [HANDLE, POINTER(
        GUID), DWORD, c_wchar_p, c_wchar_p, c_bool, c_void_p, POINTER(DWORD)]
    # Fix: 'restype' (singular), not 'restypes' — the original was a no-op.
    func.restype = DWORD
    return func(handle, iface_guid, 2, xml, None, overwrite, None, reason_code)
def _wlan_reason_code_to_str(self, reason_code, buf_size, buf):
    """ctypes wrapper for WlanReasonCodeToString; returns a Win32 status DWORD."""
    func = native_wifi.WlanReasonCodeToString
    func.argtypes = [DWORD, DWORD, PWCHAR, c_void_p]
    # Fix: 'restype' (singular), not 'restypes' — the original was a no-op.
    func.restype = DWORD
    return func(reason_code, buf_size, buf, None)
def _wlan_get_profile_list(self, handle, iface_guid, profile_list):
    """ctypes wrapper for WlanGetProfileList; returns a Win32 status DWORD."""
    func = native_wifi.WlanGetProfileList
    func.argtypes = [HANDLE, POINTER(GUID), c_void_p, POINTER(
        POINTER(WLAN_PROFILE_INFO_LIST))]
    # Fix: 'restype' (singular), not 'restypes' — the original was a no-op.
    func.restype = DWORD
    return func(handle, iface_guid, None, profile_list)
def _wlan_get_profile(self, handle, iface_guid, profile_name, xml, flags, access):
    """ctypes wrapper for WlanGetProfile; returns a Win32 status DWORD."""
    func = native_wifi.WlanGetProfile
    func.argtypes = [HANDLE, POINTER(GUID), c_wchar_p, c_void_p, POINTER(
        c_wchar_p), POINTER(DWORD), POINTER(DWORD)]
    # Fix: 'restype' (singular), not 'restypes' — the original was a no-op.
    func.restype = DWORD
    return func(handle, iface_guid, profile_name, None, xml, flags, access)
def _wlan_delete_profile(self, handle, iface_guid, profile_name):
    """ctypes wrapper for WlanDeleteProfile; returns a Win32 status DWORD."""
    func = native_wifi.WlanDeleteProfile
    func.argtypes = [HANDLE, POINTER(GUID), c_wchar_p, c_void_p]
    # Fix: 'restype' (singular), not 'restypes' — the original was a no-op.
    func.restype = DWORD
    return func(handle, iface_guid, profile_name, None)
def _wlan_query_interface(self, handle, iface_guid, opcode, data_size, data, opcode_value_type):
    """ctypes wrapper for WlanQueryInterface; returns a Win32 status DWORD."""
    func = native_wifi.WlanQueryInterface
    func.argtypes = [HANDLE, POINTER(GUID), DWORD, c_void_p, POINTER(
        DWORD), POINTER(POINTER(DWORD)), POINTER(DWORD)]
    # Fix: 'restype' (singular), not 'restypes' — the original was a no-op.
    func.restype = DWORD
    return func(handle, iface_guid, opcode, None, data_size, data, opcode_value_type)
def _wlan_disconnect(self, handle, iface_guid):
    """ctypes wrapper for WlanDisconnect; returns a Win32 status DWORD."""
    func = native_wifi.WlanDisconnect
    func.argtypes = [HANDLE, POINTER(GUID), c_void_p]
    # Fix: 'restype' (singular), not 'restypes' — the original was a no-op.
    func.restype = DWORD
    return func(handle, iface_guid, None)
def _get_auth_alg(self, auth_val):
    """Map a dot11 auth-algorithm value onto pywifi auth constants.

    Returns a one-element list, or an empty list for unknown values.
    """
    if auth_val == 2:
        return [AUTH_ALG_SHARED]
    if auth_val in (1, 3, 4, 6, 7):
        return [AUTH_ALG_OPEN]
    return []
def _get_akm(self, akm_val):
    """Map a dot11 cipher value onto pywifi AKM constants.

    Returns a one-element list, or an empty list for unknown values.
    """
    mapping = {2: AKM_TYPE_WPAPSK, 4: AKM_TYPE_WPA2PSK}
    return [mapping[akm_val]] if akm_val in mapping else []
|
awkman/pywifi | pywifi/_wifiutil_win.py | WifiUtil.status | python | def status(self, obj):
data_size = DWORD()
data = PDWORD()
opcode_value_type = DWORD()
self._wlan_query_interface(self._handle, obj['guid'], 6,
byref(data_size), byref(data),
byref(opcode_value_type))
return status_dict[data.contents.value] | Get the wifi interface status. | train | https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/_wifiutil_win.py#L471-L481 | [
"def _wlan_query_interface(self, handle, iface_guid, opcode, data_size, data, opcode_value_type):\n\n func = native_wifi.WlanQueryInterface\n func.argtypes = [HANDLE, POINTER(GUID), DWORD, c_void_p, POINTER(\n DWORD), POINTER(POINTER(DWORD)), POINTER(DWORD)]\n func.restypes = [DWORD]\n return fun... | class WifiUtil():
"""WifiUtil implements the wifi functions in Windows."""
_nego_version = DWORD()
_handle = HANDLE()
_ifaces = pointer(WLAN_INTERFACE_INFO_LIST())
_logger = logging.getLogger('pywifi')
def scan(self, obj):
"""Trigger the wifi interface to scan."""
self._wlan_scan(self._handle, byref(obj['guid']))
def scan_results(self, obj):
"""Get the AP list after scanning."""
avail_network_list = pointer(WLAN_AVAILABLE_NETWORK_LIST())
self._wlan_get_available_network_list(self._handle,
byref(obj['guid']), byref(avail_network_list))
networks = cast(avail_network_list.contents.Network,
POINTER(WLAN_AVAILABLE_NETWORK))
self._logger.debug("Scan found %d networks.",
avail_network_list.contents.dwNumberOfItems)
network_list = []
for i in range(avail_network_list.contents.dwNumberOfItems):
if networks[i].dot11BssType == 1 and networks[i].bNetworkConnectable :
ssid = ''
for j in range(networks[i].dot11Ssid.uSSIDLength):
if networks[i].dot11Ssid.ucSSID != b'':
ssid += "%c" % networks[i].dot11Ssid.ucSSID[j]
bss_list = pointer(WLAN_BSS_LIST())
self._wlan_get_network_bss_list(self._handle,
byref(obj['guid']), byref(bss_list), networks[i].dot11Ssid, networks[i].bSecurityEnabled)
bsses = cast(bss_list.contents.wlanBssEntries,
POINTER(WLAN_BSS_ENTRY))
if networks[i].bSecurityEnabled:
akm = self._get_akm(networks[i].dot11DefaultCipherAlgorithm)
auth_alg = self._get_auth_alg(networks[i].dot11DefaultAuthAlgorithm)
else:
akm = [AKM_TYPE_NONE]
auth_alg = [AUTH_ALG_OPEN]
for j in range(bss_list.contents.dwNumberOfItems):
network = Profile()
network.ssid = ssid
network.bssid = ''
for k in range(6):
network.bssid += "%02x:" % bsses[j].dot11Bssid[k]
network.signal = bsses[j].lRssi
network.freq = bsses[j].ulChCenterFrequency
network.auth = auth_alg
network.akm = akm
network_list.append(network)
return network_list
def connect(self, obj, params):
"""Connect to the specified AP."""
connect_params = WLAN_CONNECTION_PARAMETERS()
connect_params.wlanConnectionMode = 0 # Profile
connect_params.dot11BssType = 1 # infra
profile_name = create_unicode_buffer(params.ssid)
connect_params.strProfile = profile_name.value
ret = self._wlan_connect(
self._handle, obj['guid'], byref(connect_params))
self._logger.debug('connect result: %d', ret)
def disconnect(self, obj):
"""Disconnect to the specified AP."""
self._wlan_disconnect(self._handle, obj['guid'])
def add_network_profile(self, obj, params):
"""Add an AP profile for connecting to afterward."""
reason_code = DWORD()
params.process_akm()
profile_data = {}
profile_data['ssid'] = params.ssid
if AKM_TYPE_NONE in params.akm:
profile_data['auth'] = auth_value_to_str_dict[params.auth]
profile_data['encrypt'] = "none"
else:
profile_data['auth'] = akm_value_to_str_dict[params.akm[-1]]
profile_data['encrypt'] = cipher_value_to_str_dict[params.cipher]
profile_data['key'] = params.key
profile_data['protected'] = 'false'
profile_data['profile_name'] = params.ssid
xml = """<?xml version="1.0"?>
<WLANProfile xmlns="http://www.microsoft.com/networking/WLAN/profile/v1">
<name>{profile_name}</name>
<SSIDConfig>
<SSID>
<name>{ssid}</name>
</SSID>
</SSIDConfig>
<connectionType>ESS</connectionType>
<connectionMode>manual</connectionMode>
<MSM>
<security>
<authEncryption>
<authentication>{auth}</authentication>
<encryption>{encrypt}</encryption>
<useOneX>false</useOneX>
</authEncryption>
"""
if AKM_TYPE_NONE not in params.akm:
xml += """<sharedKey>
<keyType>passPhrase</keyType>
<protected>{protected}</protected>
<keyMaterial>{key}</keyMaterial>
</sharedKey>"""
xml += """
</security>
</MSM>"""
xml += """<MacRandomization xmlns="http://www.microsoft.com/networking/WLAN/profile/v3">
<enableRandomization>false</enableRandomization>
</MacRandomization>
</WLANProfile>
"""
xml = xml.format(**profile_data)
status = self._wlan_set_profile(self._handle, obj['guid'], xml,
True, byref(reason_code))
if status != ERROR_SUCCESS:
self._logger.debug("Status %d: Add profile failed", status)
buf_size = DWORD(64)
buf = create_unicode_buffer(64)
self._wlan_reason_code_to_str(reason_code, buf_size, buf)
return params
def network_profile_name_list(self, obj):
"""Get AP profile names."""
profile_list = pointer(WLAN_PROFILE_INFO_LIST())
self._wlan_get_profile_list(self._handle,
byref(obj['guid']),
byref(profile_list))
profiles = cast(profile_list.contents.ProfileInfo,
POINTER(WLAN_PROFILE_INFO))
profile_name_list = []
for i in range(profile_list.contents.dwNumberOfItems):
profile_name = ''
for j in range(len(profiles[i].strProfileName)):
profile_name += profiles[i].strProfileName[j]
profile_name_list.append(profile_name)
return profile_name_list
def network_profiles(self, obj):
"""Get AP profiles."""
profile_name_list = self.network_profile_name_list(obj)
profile_list = []
for profile_name in profile_name_list:
profile = Profile()
flags = DWORD()
access = DWORD()
xml = LPWSTR()
self._wlan_get_profile(self._handle, obj['guid'],
profile_name, byref(xml), byref(flags),
byref(access))
# fill profile info
profile.ssid = re.search(r'<name>(.*)</name>', xml.value).group(1)
auth = re.search(r'<authentication>(.*)</authentication>',
xml.value).group(1).upper()
profile.akm = []
if auth not in akm_str_to_value_dict:
if auth not in auth_str_to_value_dict:
profile.auth = AUTH_ALG_OPEN
else:
profile.auth = auth_str_to_value_dict[auth]
profile.akm.append(AKM_TYPE_NONE)
else:
profile.auth = AUTH_ALG_OPEN
profile.akm.append(akm_str_to_value_dict[auth])
profile_list.append(profile)
return profile_list
def remove_network_profile(self, obj, params):
"""Remove the specified AP profile."""
self._logger.debug("delete profile: %s", params.ssid)
str_buf = create_unicode_buffer(params.ssid)
ret = self._wlan_delete_profile(self._handle, obj['guid'], str_buf)
self._logger.debug("delete result %d", ret)
def remove_all_network_profiles(self, obj):
    """Remove every saved AP profile on interface *obj*."""
    for name in self.network_profile_name_list(obj):
        self._logger.debug("delete profile: %s", name)
        buf = create_unicode_buffer(name)
        result = self._wlan_delete_profile(self._handle, obj['guid'], buf)
        self._logger.debug("delete result %d", result)
def interfaces(self):
"""Get the wifi interface lists."""
ifaces = []
if self._wlan_open_handle(CLIENT_VERSION,
byref(self._nego_version),
byref(self._handle)) \
is not ERROR_SUCCESS:
self._logger.error("Open handle failed!")
if self._wlan_enum_interfaces(self._handle, byref(self._ifaces)) \
is not ERROR_SUCCESS:
self._logger.error("Enum interface failed!")
interfaces = cast(self._ifaces.contents.InterfaceInfo,
POINTER(WLAN_INTERFACE_INFO))
for i in range(0, self._ifaces.contents.dwNumberOfItems):
iface = {}
iface['guid'] = interfaces[i].InterfaceGuid
iface['name'] = interfaces[i].strInterfaceDescription
ifaces.append(iface)
return ifaces
def _wlan_open_handle(self, client_version, _nego_version, handle):
func = native_wifi.WlanOpenHandle
func.argtypes = [DWORD, c_void_p, POINTER(DWORD), POINTER(HANDLE)]
func.restypes = [DWORD]
return func(client_version, None, _nego_version, handle)
def _wlan_close_handle(self, handle):
func = native_wifi.WlanCloseHandle
func.argtypes = [HANDLE, c_void_p]
func.restypes = [DWORD]
return func(handle, None)
def _wlan_enum_interfaces(self, handle, ifaces):
func = native_wifi.WlanEnumInterfaces
func.argtypes = [HANDLE, c_void_p, POINTER(
POINTER(WLAN_INTERFACE_INFO_LIST))]
func.restypes = [DWORD]
return func(handle, None, ifaces)
def _wlan_get_available_network_list(self, handle,
iface_guid,
network_list):
func = native_wifi.WlanGetAvailableNetworkList
func.argtypes = [HANDLE, POINTER(GUID), DWORD, c_void_p, POINTER(
POINTER(WLAN_AVAILABLE_NETWORK_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, 2, None, network_list)
def _wlan_get_network_bss_list(self, handle, iface_guid, bss_list, ssid = None, security = False):
func = native_wifi.WlanGetNetworkBssList
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
DOT11_SSID), c_uint, c_bool, c_void_p, POINTER(POINTER(WLAN_BSS_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, ssid, 1, security, None, bss_list)
def _wlan_scan(self, handle, iface_guid):
func = native_wifi.WlanScan
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
DOT11_SSID), POINTER(WLAN_RAW_DATA), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, None, None, None)
def _wlan_connect(self, handle, iface_guid, params):
func = native_wifi.WlanConnect
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
WLAN_CONNECTION_PARAMETERS), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, params, None)
def _wlan_set_profile(self, handle, iface_guid, xml, overwrite, reason_code):
func = native_wifi.WlanSetProfile
func.argtypes = [HANDLE, POINTER(
GUID), DWORD, c_wchar_p, c_wchar_p, c_bool, c_void_p, POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, 2, xml, None, overwrite, None, reason_code)
def _wlan_reason_code_to_str(self, reason_code, buf_size, buf):
func = native_wifi.WlanReasonCodeToString
func.argtypes = [DWORD, DWORD, PWCHAR, c_void_p]
func.restypes = [DWORD]
return func(reason_code, buf_size, buf, None)
def _wlan_get_profile_list(self, handle, iface_guid, profile_list):
func = native_wifi.WlanGetProfileList
func.argtypes = [HANDLE, POINTER(GUID), c_void_p, POINTER(
POINTER(WLAN_PROFILE_INFO_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, None, profile_list)
def _wlan_get_profile(self, handle, iface_guid, profile_name, xml, flags, access):
func = native_wifi.WlanGetProfile
func.argtypes = [HANDLE, POINTER(GUID), c_wchar_p, c_void_p, POINTER(
c_wchar_p), POINTER(DWORD), POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, profile_name, None, xml, flags, access)
def _wlan_delete_profile(self, handle, iface_guid, profile_name):
func = native_wifi.WlanDeleteProfile
func.argtypes = [HANDLE, POINTER(GUID), c_wchar_p, c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, profile_name, None)
def _wlan_query_interface(self, handle, iface_guid, opcode, data_size, data, opcode_value_type):
func = native_wifi.WlanQueryInterface
func.argtypes = [HANDLE, POINTER(GUID), DWORD, c_void_p, POINTER(
DWORD), POINTER(POINTER(DWORD)), POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, opcode, None, data_size, data, opcode_value_type)
def _wlan_disconnect(self, handle, iface_guid):
func = native_wifi.WlanDisconnect
func.argtypes = [HANDLE, POINTER(GUID), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, None)
def _get_auth_alg(self, auth_val):
auth_alg = []
if auth_val in [1, 3, 4, 6, 7]:
auth_alg.append(AUTH_ALG_OPEN)
elif auth_val == 2:
auth_alg.append(AUTH_ALG_SHARED)
return auth_alg
def _get_akm(self, akm_val):
akm = []
if akm_val == 2:
akm.append(AKM_TYPE_WPAPSK)
elif akm_val == 4:
akm.append(AKM_TYPE_WPA2PSK)
return akm
|
awkman/pywifi | pywifi/_wifiutil_win.py | WifiUtil.interfaces | python | def interfaces(self):
ifaces = []
if self._wlan_open_handle(CLIENT_VERSION,
byref(self._nego_version),
byref(self._handle)) \
is not ERROR_SUCCESS:
self._logger.error("Open handle failed!")
if self._wlan_enum_interfaces(self._handle, byref(self._ifaces)) \
is not ERROR_SUCCESS:
self._logger.error("Enum interface failed!")
interfaces = cast(self._ifaces.contents.InterfaceInfo,
POINTER(WLAN_INTERFACE_INFO))
for i in range(0, self._ifaces.contents.dwNumberOfItems):
iface = {}
iface['guid'] = interfaces[i].InterfaceGuid
iface['name'] = interfaces[i].strInterfaceDescription
ifaces.append(iface)
return ifaces | Get the wifi interface lists. | train | https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/_wifiutil_win.py#L483-L506 | [
"def _wlan_open_handle(self, client_version, _nego_version, handle):\n\n func = native_wifi.WlanOpenHandle\n func.argtypes = [DWORD, c_void_p, POINTER(DWORD), POINTER(HANDLE)]\n func.restypes = [DWORD]\n return func(client_version, None, _nego_version, handle)\n",
"def _wlan_enum_interfaces(self, hand... | class WifiUtil():
"""WifiUtil implements the wifi functions in Windows."""
_nego_version = DWORD()
_handle = HANDLE()
_ifaces = pointer(WLAN_INTERFACE_INFO_LIST())
_logger = logging.getLogger('pywifi')
def scan(self, obj):
"""Trigger the wifi interface to scan."""
self._wlan_scan(self._handle, byref(obj['guid']))
def scan_results(self, obj):
"""Get the AP list after scanning."""
avail_network_list = pointer(WLAN_AVAILABLE_NETWORK_LIST())
self._wlan_get_available_network_list(self._handle,
byref(obj['guid']), byref(avail_network_list))
networks = cast(avail_network_list.contents.Network,
POINTER(WLAN_AVAILABLE_NETWORK))
self._logger.debug("Scan found %d networks.",
avail_network_list.contents.dwNumberOfItems)
network_list = []
for i in range(avail_network_list.contents.dwNumberOfItems):
if networks[i].dot11BssType == 1 and networks[i].bNetworkConnectable :
ssid = ''
for j in range(networks[i].dot11Ssid.uSSIDLength):
if networks[i].dot11Ssid.ucSSID != b'':
ssid += "%c" % networks[i].dot11Ssid.ucSSID[j]
bss_list = pointer(WLAN_BSS_LIST())
self._wlan_get_network_bss_list(self._handle,
byref(obj['guid']), byref(bss_list), networks[i].dot11Ssid, networks[i].bSecurityEnabled)
bsses = cast(bss_list.contents.wlanBssEntries,
POINTER(WLAN_BSS_ENTRY))
if networks[i].bSecurityEnabled:
akm = self._get_akm(networks[i].dot11DefaultCipherAlgorithm)
auth_alg = self._get_auth_alg(networks[i].dot11DefaultAuthAlgorithm)
else:
akm = [AKM_TYPE_NONE]
auth_alg = [AUTH_ALG_OPEN]
for j in range(bss_list.contents.dwNumberOfItems):
network = Profile()
network.ssid = ssid
network.bssid = ''
for k in range(6):
network.bssid += "%02x:" % bsses[j].dot11Bssid[k]
network.signal = bsses[j].lRssi
network.freq = bsses[j].ulChCenterFrequency
network.auth = auth_alg
network.akm = akm
network_list.append(network)
return network_list
def connect(self, obj, params):
"""Connect to the specified AP."""
connect_params = WLAN_CONNECTION_PARAMETERS()
connect_params.wlanConnectionMode = 0 # Profile
connect_params.dot11BssType = 1 # infra
profile_name = create_unicode_buffer(params.ssid)
connect_params.strProfile = profile_name.value
ret = self._wlan_connect(
self._handle, obj['guid'], byref(connect_params))
self._logger.debug('connect result: %d', ret)
def disconnect(self, obj):
"""Disconnect to the specified AP."""
self._wlan_disconnect(self._handle, obj['guid'])
def add_network_profile(self, obj, params):
"""Add an AP profile for connecting to afterward."""
reason_code = DWORD()
params.process_akm()
profile_data = {}
profile_data['ssid'] = params.ssid
if AKM_TYPE_NONE in params.akm:
profile_data['auth'] = auth_value_to_str_dict[params.auth]
profile_data['encrypt'] = "none"
else:
profile_data['auth'] = akm_value_to_str_dict[params.akm[-1]]
profile_data['encrypt'] = cipher_value_to_str_dict[params.cipher]
profile_data['key'] = params.key
profile_data['protected'] = 'false'
profile_data['profile_name'] = params.ssid
xml = """<?xml version="1.0"?>
<WLANProfile xmlns="http://www.microsoft.com/networking/WLAN/profile/v1">
<name>{profile_name}</name>
<SSIDConfig>
<SSID>
<name>{ssid}</name>
</SSID>
</SSIDConfig>
<connectionType>ESS</connectionType>
<connectionMode>manual</connectionMode>
<MSM>
<security>
<authEncryption>
<authentication>{auth}</authentication>
<encryption>{encrypt}</encryption>
<useOneX>false</useOneX>
</authEncryption>
"""
if AKM_TYPE_NONE not in params.akm:
xml += """<sharedKey>
<keyType>passPhrase</keyType>
<protected>{protected}</protected>
<keyMaterial>{key}</keyMaterial>
</sharedKey>"""
xml += """
</security>
</MSM>"""
xml += """<MacRandomization xmlns="http://www.microsoft.com/networking/WLAN/profile/v3">
<enableRandomization>false</enableRandomization>
</MacRandomization>
</WLANProfile>
"""
xml = xml.format(**profile_data)
status = self._wlan_set_profile(self._handle, obj['guid'], xml,
True, byref(reason_code))
if status != ERROR_SUCCESS:
self._logger.debug("Status %d: Add profile failed", status)
buf_size = DWORD(64)
buf = create_unicode_buffer(64)
self._wlan_reason_code_to_str(reason_code, buf_size, buf)
return params
def network_profile_name_list(self, obj):
"""Get AP profile names."""
profile_list = pointer(WLAN_PROFILE_INFO_LIST())
self._wlan_get_profile_list(self._handle,
byref(obj['guid']),
byref(profile_list))
profiles = cast(profile_list.contents.ProfileInfo,
POINTER(WLAN_PROFILE_INFO))
profile_name_list = []
for i in range(profile_list.contents.dwNumberOfItems):
profile_name = ''
for j in range(len(profiles[i].strProfileName)):
profile_name += profiles[i].strProfileName[j]
profile_name_list.append(profile_name)
return profile_name_list
def network_profiles(self, obj):
"""Get AP profiles."""
profile_name_list = self.network_profile_name_list(obj)
profile_list = []
for profile_name in profile_name_list:
profile = Profile()
flags = DWORD()
access = DWORD()
xml = LPWSTR()
self._wlan_get_profile(self._handle, obj['guid'],
profile_name, byref(xml), byref(flags),
byref(access))
# fill profile info
profile.ssid = re.search(r'<name>(.*)</name>', xml.value).group(1)
auth = re.search(r'<authentication>(.*)</authentication>',
xml.value).group(1).upper()
profile.akm = []
if auth not in akm_str_to_value_dict:
if auth not in auth_str_to_value_dict:
profile.auth = AUTH_ALG_OPEN
else:
profile.auth = auth_str_to_value_dict[auth]
profile.akm.append(AKM_TYPE_NONE)
else:
profile.auth = AUTH_ALG_OPEN
profile.akm.append(akm_str_to_value_dict[auth])
profile_list.append(profile)
return profile_list
def remove_network_profile(self, obj, params):
"""Remove the specified AP profile."""
self._logger.debug("delete profile: %s", params.ssid)
str_buf = create_unicode_buffer(params.ssid)
ret = self._wlan_delete_profile(self._handle, obj['guid'], str_buf)
self._logger.debug("delete result %d", ret)
def remove_all_network_profiles(self, obj):
"""Remove all the AP profiles."""
profile_name_list = self.network_profile_name_list(obj)
for profile_name in profile_name_list:
self._logger.debug("delete profile: %s", profile_name)
str_buf = create_unicode_buffer(profile_name)
ret = self._wlan_delete_profile(self._handle, obj['guid'], str_buf)
self._logger.debug("delete result %d", ret)
def status(self, obj):
"""Get the wifi interface status."""
data_size = DWORD()
data = PDWORD()
opcode_value_type = DWORD()
self._wlan_query_interface(self._handle, obj['guid'], 6,
byref(data_size), byref(data),
byref(opcode_value_type))
return status_dict[data.contents.value]
def _wlan_open_handle(self, client_version, _nego_version, handle):
func = native_wifi.WlanOpenHandle
func.argtypes = [DWORD, c_void_p, POINTER(DWORD), POINTER(HANDLE)]
func.restypes = [DWORD]
return func(client_version, None, _nego_version, handle)
def _wlan_close_handle(self, handle):
func = native_wifi.WlanCloseHandle
func.argtypes = [HANDLE, c_void_p]
func.restypes = [DWORD]
return func(handle, None)
def _wlan_enum_interfaces(self, handle, ifaces):
func = native_wifi.WlanEnumInterfaces
func.argtypes = [HANDLE, c_void_p, POINTER(
POINTER(WLAN_INTERFACE_INFO_LIST))]
func.restypes = [DWORD]
return func(handle, None, ifaces)
def _wlan_get_available_network_list(self, handle,
iface_guid,
network_list):
func = native_wifi.WlanGetAvailableNetworkList
func.argtypes = [HANDLE, POINTER(GUID), DWORD, c_void_p, POINTER(
POINTER(WLAN_AVAILABLE_NETWORK_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, 2, None, network_list)
def _wlan_get_network_bss_list(self, handle, iface_guid, bss_list, ssid = None, security = False):
func = native_wifi.WlanGetNetworkBssList
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
DOT11_SSID), c_uint, c_bool, c_void_p, POINTER(POINTER(WLAN_BSS_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, ssid, 1, security, None, bss_list)
def _wlan_scan(self, handle, iface_guid):
func = native_wifi.WlanScan
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
DOT11_SSID), POINTER(WLAN_RAW_DATA), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, None, None, None)
def _wlan_connect(self, handle, iface_guid, params):
func = native_wifi.WlanConnect
func.argtypes = [HANDLE, POINTER(GUID), POINTER(
WLAN_CONNECTION_PARAMETERS), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, params, None)
def _wlan_set_profile(self, handle, iface_guid, xml, overwrite, reason_code):
func = native_wifi.WlanSetProfile
func.argtypes = [HANDLE, POINTER(
GUID), DWORD, c_wchar_p, c_wchar_p, c_bool, c_void_p, POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, 2, xml, None, overwrite, None, reason_code)
def _wlan_reason_code_to_str(self, reason_code, buf_size, buf):
func = native_wifi.WlanReasonCodeToString
func.argtypes = [DWORD, DWORD, PWCHAR, c_void_p]
func.restypes = [DWORD]
return func(reason_code, buf_size, buf, None)
def _wlan_get_profile_list(self, handle, iface_guid, profile_list):
func = native_wifi.WlanGetProfileList
func.argtypes = [HANDLE, POINTER(GUID), c_void_p, POINTER(
POINTER(WLAN_PROFILE_INFO_LIST))]
func.restypes = [DWORD]
return func(handle, iface_guid, None, profile_list)
def _wlan_get_profile(self, handle, iface_guid, profile_name, xml, flags, access):
func = native_wifi.WlanGetProfile
func.argtypes = [HANDLE, POINTER(GUID), c_wchar_p, c_void_p, POINTER(
c_wchar_p), POINTER(DWORD), POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, profile_name, None, xml, flags, access)
def _wlan_delete_profile(self, handle, iface_guid, profile_name):
func = native_wifi.WlanDeleteProfile
func.argtypes = [HANDLE, POINTER(GUID), c_wchar_p, c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, profile_name, None)
def _wlan_query_interface(self, handle, iface_guid, opcode, data_size, data, opcode_value_type):
func = native_wifi.WlanQueryInterface
func.argtypes = [HANDLE, POINTER(GUID), DWORD, c_void_p, POINTER(
DWORD), POINTER(POINTER(DWORD)), POINTER(DWORD)]
func.restypes = [DWORD]
return func(handle, iface_guid, opcode, None, data_size, data, opcode_value_type)
def _wlan_disconnect(self, handle, iface_guid):
func = native_wifi.WlanDisconnect
func.argtypes = [HANDLE, POINTER(GUID), c_void_p]
func.restypes = [DWORD]
return func(handle, iface_guid, None)
def _get_auth_alg(self, auth_val):
auth_alg = []
if auth_val in [1, 3, 4, 6, 7]:
auth_alg.append(AUTH_ALG_OPEN)
elif auth_val == 2:
auth_alg.append(AUTH_ALG_SHARED)
return auth_alg
def _get_akm(self, akm_val):
akm = []
if akm_val == 2:
akm.append(AKM_TYPE_WPAPSK)
elif akm_val == 4:
akm.append(AKM_TYPE_WPA2PSK)
return akm
|
awkman/pywifi | pywifi/_wifiutil_linux.py | WifiUtil.scan_results | python | def scan_results(self, obj):
bsses = []
bsses_summary = self._send_cmd_to_wpas(obj['name'], 'SCAN_RESULTS', True)
bsses_summary = bsses_summary[:-1].split('\n')
if len(bsses_summary) == 1:
return bsses
for l in bsses_summary[1:]:
values = l.split('\t')
bss = Profile()
bss.bssid = values[0]
bss.freq = int(values[1])
bss.signal = int(values[2])
bss.ssid = values[4]
bss.akm = []
if 'WPA-PSK' in values[3]:
bss.akm.append(AKM_TYPE_WPAPSK)
if 'WPA2-PSK' in values[3]:
bss.akm.append(AKM_TYPE_WPA2PSK)
if 'WPA-EAP' in values[3]:
bss.akm.append(AKM_TYPE_WPA)
if 'WPA2-EAP' in values[3]:
bss.akm.append(AKM_TYPE_WPA2)
bss.auth = AUTH_ALG_OPEN
bsses.append(bss)
return bsses | Get the AP list after scanning. | train | https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/_wifiutil_linux.py#L66-L96 | [
"def _send_cmd_to_wpas(self, iface, cmd, get_reply=False):\n\n if 'psk' not in cmd:\n self._logger.info(\"Send cmd '%s' to wpa_s\", cmd)\n sock = self._connections[iface]['sock']\n\n sock.send(bytearray(cmd, 'utf-8'))\n reply = sock.recv(REPLY_SIZE)\n if get_reply:\n return reply.decode... | class WifiUtil():
"""WifiUtil implements the wifi functions in Linux."""
_connections = {}
_logger = logging.getLogger('pywifi')
def scan(self, obj):
"""Trigger the wifi interface to scan."""
self._send_cmd_to_wpas(obj['name'], 'SCAN')
def connect(self, obj, network):
"""Connect to the specified AP."""
network_summary = self._send_cmd_to_wpas(
obj['name'],
'LIST_NETWORKS',
True)
network_summary = network_summary[:-1].split('\n')
if len(network_summary) == 1:
return networks
for l in network_summary[1:]:
values = l.split('\t')
if values[1] == network.ssid:
network_summary = self._send_cmd_to_wpas(
obj['name'],
'SELECT_NETWORK {}'.format(values[0]),
True)
def disconnect(self, obj):
"""Disconnect to the specified AP."""
self._send_cmd_to_wpas(obj['name'], 'DISCONNECT')
def add_network_profile(self, obj, params):
"""Add an AP profile for connecting to afterward."""
network_id = self._send_cmd_to_wpas(obj['name'], 'ADD_NETWORK', True)
network_id = network_id.strip()
params.process_akm()
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} ssid \"{}\"'.format(network_id, params.ssid))
key_mgmt = ''
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA2PSK]:
key_mgmt = 'WPA-PSK'
elif params.akm[-1] in [AKM_TYPE_WPA, AKM_TYPE_WPA2]:
key_mgmt = 'WPA-EAP'
else:
key_mgmt = 'NONE'
if key_mgmt:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} key_mgmt {}'.format(
network_id,
key_mgmt))
proto = ''
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA]:
proto = 'WPA'
elif params.akm[-1] in [AKM_TYPE_WPA2PSK, AKM_TYPE_WPA2]:
proto = 'RSN'
if proto:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} proto {}'.format(
network_id,
proto))
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA2PSK]:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} psk \"{}\"'.format(network_id, params.key))
return params
def network_profiles(self, obj):
"""Get AP profiles."""
networks = []
network_ids = []
network_summary = self._send_cmd_to_wpas(
obj['name'],
'LIST_NETWORKS',
True)
network_summary = network_summary[:-1].split('\n')
if len(network_summary) == 1:
return networks
for l in network_summary[1:]:
network_ids.append(l.split()[0])
for network_id in network_ids:
network = Profile()
network.id = network_id
ssid = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} ssid'.format(network_id), True)
if ssid.upper().startswith('FAIL'):
continue
else:
network.ssid = ssid[1:-1]
key_mgmt = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} key_mgmt'.format(network_id),
True)
network.akm = []
if key_mgmt.upper().startswith('FAIL'):
continue
else:
if key_mgmt.upper() in ['WPA-PSK']:
proto = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} proto'.format(network_id),
True)
if proto.upper() == 'RSN':
network.akm.append(AKM_TYPE_WPA2PSK)
else:
network.akm.append(AKM_TYPE_WPAPSK)
elif key_mgmt.upper() in ['WPA-EAP']:
proto = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} proto'.format(network_id),
True)
if proto.upper() == 'RSN':
network.akm.append(AKM_TYPE_WPA2)
else:
network.akm.append(AKM_TYPE_WPA)
ciphers = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} pairwise'.format(network_id),
True).split(' ')
if ciphers[0].upper().startswith('FAIL'):
continue
else:
# Assume the possible ciphers TKIP and CCMP
if len(ciphers) == 1:
network.cipher = cipher_str_to_value(ciphers[0].upper())
elif 'CCMP' in ciphers:
network.cipher = CIPHER_TYPE_CCMP
networks.append(network)
return networks
def remove_network_profile(self, obj, params):
"""Remove the specified AP profiles"""
network_id = -1
profiles = self.network_profiles(obj)
for profile in profiles:
if profile == params:
network_id = profile.id
if network_id != -1:
self._send_cmd_to_wpas(obj['name'],
'REMOVE_NETWORK {}'.format(network_id))
def remove_all_network_profiles(self, obj):
"""Remove all the AP profiles."""
self._send_cmd_to_wpas(obj['name'], 'REMOVE_NETWORK all')
def status(self, obj):
"""Get the wifi interface status."""
reply = self._send_cmd_to_wpas(obj['name'], 'STATUS', True)
result = reply.split('\n')
status = ''
for l in result:
if l.startswith('wpa_state='):
status = l[10:]
return status_dict[status.lower()]
def interfaces(self):
"""Get the wifi interface lists."""
ifaces = []
for f in sorted(os.listdir(CTRL_IFACE_DIR)):
sock_file = '/'.join([CTRL_IFACE_DIR, f])
mode = os.stat(sock_file).st_mode
if stat.S_ISSOCK(mode):
iface = {}
iface['name'] = f
ifaces.append(iface)
self._connect_to_wpa_s(f)
return ifaces
def _connect_to_wpa_s(self, iface):
ctrl_iface = '/'.join([CTRL_IFACE_DIR, iface])
if ctrl_iface in self._connections:
self._logger.info(
"Connection for iface '%s' aleady existed!",
iface)
sock_file = '{}/{}_{}'.format('/tmp', 'pywifi', iface)
self._remove_existed_sock(sock_file)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(sock_file)
sock.connect(ctrl_iface)
send_len = sock.send(b'PING')
retry = CTRL_IFACE_RETRY
while retry >= 0:
reply = sock.recv(REPLY_SIZE)
if reply == b'':
self._logger.error("Connection to '%s' is broken!", iface_ctrl)
break
if reply.startswith(b'PONG'):
self._logger.info(
"Connect to sock '%s' successfully!", ctrl_iface)
self._connections[iface] = {
'sock': sock,
'sock_file': sock_file,
'ctrl_iface': ctrl_iface
}
break
retry -= 1
def _remove_existed_sock(self, sock_file):
if os.path.exists(sock_file):
mode = os.stat(sock_file).st_mode
if stat.S_ISSOCK(mode):
os.remove(sock_file)
def _send_cmd_to_wpas(self, iface, cmd, get_reply=False):
if 'psk' not in cmd:
self._logger.info("Send cmd '%s' to wpa_s", cmd)
sock = self._connections[iface]['sock']
sock.send(bytearray(cmd, 'utf-8'))
reply = sock.recv(REPLY_SIZE)
if get_reply:
return reply.decode('utf-8')
if reply != b'OK\n':
self._logger.error(
"Unexpected resp '%s' for Command '%s'",
reply.decode('utf-8'),
cmd)
|
awkman/pywifi | pywifi/_wifiutil_linux.py | WifiUtil.connect | python | def connect(self, obj, network):
network_summary = self._send_cmd_to_wpas(
obj['name'],
'LIST_NETWORKS',
True)
network_summary = network_summary[:-1].split('\n')
if len(network_summary) == 1:
return networks
for l in network_summary[1:]:
values = l.split('\t')
if values[1] == network.ssid:
network_summary = self._send_cmd_to_wpas(
obj['name'],
'SELECT_NETWORK {}'.format(values[0]),
True) | Connect to the specified AP. | train | https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/_wifiutil_linux.py#L98-L115 | [
"def _send_cmd_to_wpas(self, iface, cmd, get_reply=False):\n\n if 'psk' not in cmd:\n self._logger.info(\"Send cmd '%s' to wpa_s\", cmd)\n sock = self._connections[iface]['sock']\n\n sock.send(bytearray(cmd, 'utf-8'))\n reply = sock.recv(REPLY_SIZE)\n if get_reply:\n return reply.decode... | class WifiUtil():
"""WifiUtil implements the wifi functions in Linux."""
_connections = {}
_logger = logging.getLogger('pywifi')
def scan(self, obj):
"""Trigger the wifi interface to scan."""
self._send_cmd_to_wpas(obj['name'], 'SCAN')
def scan_results(self, obj):
"""Get the AP list after scanning."""
bsses = []
bsses_summary = self._send_cmd_to_wpas(obj['name'], 'SCAN_RESULTS', True)
bsses_summary = bsses_summary[:-1].split('\n')
if len(bsses_summary) == 1:
return bsses
for l in bsses_summary[1:]:
values = l.split('\t')
bss = Profile()
bss.bssid = values[0]
bss.freq = int(values[1])
bss.signal = int(values[2])
bss.ssid = values[4]
bss.akm = []
if 'WPA-PSK' in values[3]:
bss.akm.append(AKM_TYPE_WPAPSK)
if 'WPA2-PSK' in values[3]:
bss.akm.append(AKM_TYPE_WPA2PSK)
if 'WPA-EAP' in values[3]:
bss.akm.append(AKM_TYPE_WPA)
if 'WPA2-EAP' in values[3]:
bss.akm.append(AKM_TYPE_WPA2)
bss.auth = AUTH_ALG_OPEN
bsses.append(bss)
return bsses
def disconnect(self, obj):
"""Disconnect to the specified AP."""
self._send_cmd_to_wpas(obj['name'], 'DISCONNECT')
def add_network_profile(self, obj, params):
"""Add an AP profile for connecting to afterward."""
network_id = self._send_cmd_to_wpas(obj['name'], 'ADD_NETWORK', True)
network_id = network_id.strip()
params.process_akm()
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} ssid \"{}\"'.format(network_id, params.ssid))
key_mgmt = ''
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA2PSK]:
key_mgmt = 'WPA-PSK'
elif params.akm[-1] in [AKM_TYPE_WPA, AKM_TYPE_WPA2]:
key_mgmt = 'WPA-EAP'
else:
key_mgmt = 'NONE'
if key_mgmt:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} key_mgmt {}'.format(
network_id,
key_mgmt))
proto = ''
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA]:
proto = 'WPA'
elif params.akm[-1] in [AKM_TYPE_WPA2PSK, AKM_TYPE_WPA2]:
proto = 'RSN'
if proto:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} proto {}'.format(
network_id,
proto))
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA2PSK]:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} psk \"{}\"'.format(network_id, params.key))
return params
def network_profiles(self, obj):
"""Get AP profiles."""
networks = []
network_ids = []
network_summary = self._send_cmd_to_wpas(
obj['name'],
'LIST_NETWORKS',
True)
network_summary = network_summary[:-1].split('\n')
if len(network_summary) == 1:
return networks
for l in network_summary[1:]:
network_ids.append(l.split()[0])
for network_id in network_ids:
network = Profile()
network.id = network_id
ssid = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} ssid'.format(network_id), True)
if ssid.upper().startswith('FAIL'):
continue
else:
network.ssid = ssid[1:-1]
key_mgmt = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} key_mgmt'.format(network_id),
True)
network.akm = []
if key_mgmt.upper().startswith('FAIL'):
continue
else:
if key_mgmt.upper() in ['WPA-PSK']:
proto = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} proto'.format(network_id),
True)
if proto.upper() == 'RSN':
network.akm.append(AKM_TYPE_WPA2PSK)
else:
network.akm.append(AKM_TYPE_WPAPSK)
elif key_mgmt.upper() in ['WPA-EAP']:
proto = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} proto'.format(network_id),
True)
if proto.upper() == 'RSN':
network.akm.append(AKM_TYPE_WPA2)
else:
network.akm.append(AKM_TYPE_WPA)
ciphers = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} pairwise'.format(network_id),
True).split(' ')
if ciphers[0].upper().startswith('FAIL'):
continue
else:
# Assume the possible ciphers TKIP and CCMP
if len(ciphers) == 1:
network.cipher = cipher_str_to_value(ciphers[0].upper())
elif 'CCMP' in ciphers:
network.cipher = CIPHER_TYPE_CCMP
networks.append(network)
return networks
def remove_network_profile(self, obj, params):
"""Remove the specified AP profiles"""
network_id = -1
profiles = self.network_profiles(obj)
for profile in profiles:
if profile == params:
network_id = profile.id
if network_id != -1:
self._send_cmd_to_wpas(obj['name'],
'REMOVE_NETWORK {}'.format(network_id))
def remove_all_network_profiles(self, obj):
"""Remove all the AP profiles."""
self._send_cmd_to_wpas(obj['name'], 'REMOVE_NETWORK all')
def status(self, obj):
"""Get the wifi interface status."""
reply = self._send_cmd_to_wpas(obj['name'], 'STATUS', True)
result = reply.split('\n')
status = ''
for l in result:
if l.startswith('wpa_state='):
status = l[10:]
return status_dict[status.lower()]
def interfaces(self):
"""Get the wifi interface lists."""
ifaces = []
for f in sorted(os.listdir(CTRL_IFACE_DIR)):
sock_file = '/'.join([CTRL_IFACE_DIR, f])
mode = os.stat(sock_file).st_mode
if stat.S_ISSOCK(mode):
iface = {}
iface['name'] = f
ifaces.append(iface)
self._connect_to_wpa_s(f)
return ifaces
def _connect_to_wpa_s(self, iface):
ctrl_iface = '/'.join([CTRL_IFACE_DIR, iface])
if ctrl_iface in self._connections:
self._logger.info(
"Connection for iface '%s' aleady existed!",
iface)
sock_file = '{}/{}_{}'.format('/tmp', 'pywifi', iface)
self._remove_existed_sock(sock_file)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(sock_file)
sock.connect(ctrl_iface)
send_len = sock.send(b'PING')
retry = CTRL_IFACE_RETRY
while retry >= 0:
reply = sock.recv(REPLY_SIZE)
if reply == b'':
self._logger.error("Connection to '%s' is broken!", iface_ctrl)
break
if reply.startswith(b'PONG'):
self._logger.info(
"Connect to sock '%s' successfully!", ctrl_iface)
self._connections[iface] = {
'sock': sock,
'sock_file': sock_file,
'ctrl_iface': ctrl_iface
}
break
retry -= 1
def _remove_existed_sock(self, sock_file):
if os.path.exists(sock_file):
mode = os.stat(sock_file).st_mode
if stat.S_ISSOCK(mode):
os.remove(sock_file)
def _send_cmd_to_wpas(self, iface, cmd, get_reply=False):
if 'psk' not in cmd:
self._logger.info("Send cmd '%s' to wpa_s", cmd)
sock = self._connections[iface]['sock']
sock.send(bytearray(cmd, 'utf-8'))
reply = sock.recv(REPLY_SIZE)
if get_reply:
return reply.decode('utf-8')
if reply != b'OK\n':
self._logger.error(
"Unexpected resp '%s' for Command '%s'",
reply.decode('utf-8'),
cmd)
|
awkman/pywifi | pywifi/_wifiutil_linux.py | WifiUtil.add_network_profile | python | def add_network_profile(self, obj, params):
network_id = self._send_cmd_to_wpas(obj['name'], 'ADD_NETWORK', True)
network_id = network_id.strip()
params.process_akm()
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} ssid \"{}\"'.format(network_id, params.ssid))
key_mgmt = ''
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA2PSK]:
key_mgmt = 'WPA-PSK'
elif params.akm[-1] in [AKM_TYPE_WPA, AKM_TYPE_WPA2]:
key_mgmt = 'WPA-EAP'
else:
key_mgmt = 'NONE'
if key_mgmt:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} key_mgmt {}'.format(
network_id,
key_mgmt))
proto = ''
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA]:
proto = 'WPA'
elif params.akm[-1] in [AKM_TYPE_WPA2PSK, AKM_TYPE_WPA2]:
proto = 'RSN'
if proto:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} proto {}'.format(
network_id,
proto))
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA2PSK]:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} psk \"{}\"'.format(network_id, params.key))
return params | Add an AP profile for connecting to afterward. | train | https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/_wifiutil_linux.py#L122-L167 | [
"def _send_cmd_to_wpas(self, iface, cmd, get_reply=False):\n\n if 'psk' not in cmd:\n self._logger.info(\"Send cmd '%s' to wpa_s\", cmd)\n sock = self._connections[iface]['sock']\n\n sock.send(bytearray(cmd, 'utf-8'))\n reply = sock.recv(REPLY_SIZE)\n if get_reply:\n return reply.decode... | class WifiUtil():
"""WifiUtil implements the wifi functions in Linux."""
_connections = {}
_logger = logging.getLogger('pywifi')
def scan(self, obj):
"""Trigger the wifi interface to scan."""
self._send_cmd_to_wpas(obj['name'], 'SCAN')
def scan_results(self, obj):
"""Get the AP list after scanning."""
bsses = []
bsses_summary = self._send_cmd_to_wpas(obj['name'], 'SCAN_RESULTS', True)
bsses_summary = bsses_summary[:-1].split('\n')
if len(bsses_summary) == 1:
return bsses
for l in bsses_summary[1:]:
values = l.split('\t')
bss = Profile()
bss.bssid = values[0]
bss.freq = int(values[1])
bss.signal = int(values[2])
bss.ssid = values[4]
bss.akm = []
if 'WPA-PSK' in values[3]:
bss.akm.append(AKM_TYPE_WPAPSK)
if 'WPA2-PSK' in values[3]:
bss.akm.append(AKM_TYPE_WPA2PSK)
if 'WPA-EAP' in values[3]:
bss.akm.append(AKM_TYPE_WPA)
if 'WPA2-EAP' in values[3]:
bss.akm.append(AKM_TYPE_WPA2)
bss.auth = AUTH_ALG_OPEN
bsses.append(bss)
return bsses
def connect(self, obj, network):
"""Connect to the specified AP."""
network_summary = self._send_cmd_to_wpas(
obj['name'],
'LIST_NETWORKS',
True)
network_summary = network_summary[:-1].split('\n')
if len(network_summary) == 1:
return networks
for l in network_summary[1:]:
values = l.split('\t')
if values[1] == network.ssid:
network_summary = self._send_cmd_to_wpas(
obj['name'],
'SELECT_NETWORK {}'.format(values[0]),
True)
def disconnect(self, obj):
"""Disconnect to the specified AP."""
self._send_cmd_to_wpas(obj['name'], 'DISCONNECT')
def network_profiles(self, obj):
"""Get AP profiles."""
networks = []
network_ids = []
network_summary = self._send_cmd_to_wpas(
obj['name'],
'LIST_NETWORKS',
True)
network_summary = network_summary[:-1].split('\n')
if len(network_summary) == 1:
return networks
for l in network_summary[1:]:
network_ids.append(l.split()[0])
for network_id in network_ids:
network = Profile()
network.id = network_id
ssid = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} ssid'.format(network_id), True)
if ssid.upper().startswith('FAIL'):
continue
else:
network.ssid = ssid[1:-1]
key_mgmt = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} key_mgmt'.format(network_id),
True)
network.akm = []
if key_mgmt.upper().startswith('FAIL'):
continue
else:
if key_mgmt.upper() in ['WPA-PSK']:
proto = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} proto'.format(network_id),
True)
if proto.upper() == 'RSN':
network.akm.append(AKM_TYPE_WPA2PSK)
else:
network.akm.append(AKM_TYPE_WPAPSK)
elif key_mgmt.upper() in ['WPA-EAP']:
proto = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} proto'.format(network_id),
True)
if proto.upper() == 'RSN':
network.akm.append(AKM_TYPE_WPA2)
else:
network.akm.append(AKM_TYPE_WPA)
ciphers = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} pairwise'.format(network_id),
True).split(' ')
if ciphers[0].upper().startswith('FAIL'):
continue
else:
# Assume the possible ciphers TKIP and CCMP
if len(ciphers) == 1:
network.cipher = cipher_str_to_value(ciphers[0].upper())
elif 'CCMP' in ciphers:
network.cipher = CIPHER_TYPE_CCMP
networks.append(network)
return networks
def remove_network_profile(self, obj, params):
"""Remove the specified AP profiles"""
network_id = -1
profiles = self.network_profiles(obj)
for profile in profiles:
if profile == params:
network_id = profile.id
if network_id != -1:
self._send_cmd_to_wpas(obj['name'],
'REMOVE_NETWORK {}'.format(network_id))
def remove_all_network_profiles(self, obj):
"""Remove all the AP profiles."""
self._send_cmd_to_wpas(obj['name'], 'REMOVE_NETWORK all')
def status(self, obj):
"""Get the wifi interface status."""
reply = self._send_cmd_to_wpas(obj['name'], 'STATUS', True)
result = reply.split('\n')
status = ''
for l in result:
if l.startswith('wpa_state='):
status = l[10:]
return status_dict[status.lower()]
def interfaces(self):
"""Get the wifi interface lists."""
ifaces = []
for f in sorted(os.listdir(CTRL_IFACE_DIR)):
sock_file = '/'.join([CTRL_IFACE_DIR, f])
mode = os.stat(sock_file).st_mode
if stat.S_ISSOCK(mode):
iface = {}
iface['name'] = f
ifaces.append(iface)
self._connect_to_wpa_s(f)
return ifaces
def _connect_to_wpa_s(self, iface):
ctrl_iface = '/'.join([CTRL_IFACE_DIR, iface])
if ctrl_iface in self._connections:
self._logger.info(
"Connection for iface '%s' aleady existed!",
iface)
sock_file = '{}/{}_{}'.format('/tmp', 'pywifi', iface)
self._remove_existed_sock(sock_file)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(sock_file)
sock.connect(ctrl_iface)
send_len = sock.send(b'PING')
retry = CTRL_IFACE_RETRY
while retry >= 0:
reply = sock.recv(REPLY_SIZE)
if reply == b'':
self._logger.error("Connection to '%s' is broken!", iface_ctrl)
break
if reply.startswith(b'PONG'):
self._logger.info(
"Connect to sock '%s' successfully!", ctrl_iface)
self._connections[iface] = {
'sock': sock,
'sock_file': sock_file,
'ctrl_iface': ctrl_iface
}
break
retry -= 1
def _remove_existed_sock(self, sock_file):
if os.path.exists(sock_file):
mode = os.stat(sock_file).st_mode
if stat.S_ISSOCK(mode):
os.remove(sock_file)
def _send_cmd_to_wpas(self, iface, cmd, get_reply=False):
if 'psk' not in cmd:
self._logger.info("Send cmd '%s' to wpa_s", cmd)
sock = self._connections[iface]['sock']
sock.send(bytearray(cmd, 'utf-8'))
reply = sock.recv(REPLY_SIZE)
if get_reply:
return reply.decode('utf-8')
if reply != b'OK\n':
self._logger.error(
"Unexpected resp '%s' for Command '%s'",
reply.decode('utf-8'),
cmd)
|
awkman/pywifi | pywifi/_wifiutil_linux.py | WifiUtil.network_profiles | python | def network_profiles(self, obj):
networks = []
network_ids = []
network_summary = self._send_cmd_to_wpas(
obj['name'],
'LIST_NETWORKS',
True)
network_summary = network_summary[:-1].split('\n')
if len(network_summary) == 1:
return networks
for l in network_summary[1:]:
network_ids.append(l.split()[0])
for network_id in network_ids:
network = Profile()
network.id = network_id
ssid = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} ssid'.format(network_id), True)
if ssid.upper().startswith('FAIL'):
continue
else:
network.ssid = ssid[1:-1]
key_mgmt = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} key_mgmt'.format(network_id),
True)
network.akm = []
if key_mgmt.upper().startswith('FAIL'):
continue
else:
if key_mgmt.upper() in ['WPA-PSK']:
proto = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} proto'.format(network_id),
True)
if proto.upper() == 'RSN':
network.akm.append(AKM_TYPE_WPA2PSK)
else:
network.akm.append(AKM_TYPE_WPAPSK)
elif key_mgmt.upper() in ['WPA-EAP']:
proto = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} proto'.format(network_id),
True)
if proto.upper() == 'RSN':
network.akm.append(AKM_TYPE_WPA2)
else:
network.akm.append(AKM_TYPE_WPA)
ciphers = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} pairwise'.format(network_id),
True).split(' ')
if ciphers[0].upper().startswith('FAIL'):
continue
else:
# Assume the possible ciphers TKIP and CCMP
if len(ciphers) == 1:
network.cipher = cipher_str_to_value(ciphers[0].upper())
elif 'CCMP' in ciphers:
network.cipher = CIPHER_TYPE_CCMP
networks.append(network)
return networks | Get AP profiles. | train | https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/_wifiutil_linux.py#L169-L244 | [
"def _send_cmd_to_wpas(self, iface, cmd, get_reply=False):\n\n if 'psk' not in cmd:\n self._logger.info(\"Send cmd '%s' to wpa_s\", cmd)\n sock = self._connections[iface]['sock']\n\n sock.send(bytearray(cmd, 'utf-8'))\n reply = sock.recv(REPLY_SIZE)\n if get_reply:\n return reply.decode... | class WifiUtil():
"""WifiUtil implements the wifi functions in Linux."""
_connections = {}
_logger = logging.getLogger('pywifi')
def scan(self, obj):
"""Trigger the wifi interface to scan."""
self._send_cmd_to_wpas(obj['name'], 'SCAN')
def scan_results(self, obj):
"""Get the AP list after scanning."""
bsses = []
bsses_summary = self._send_cmd_to_wpas(obj['name'], 'SCAN_RESULTS', True)
bsses_summary = bsses_summary[:-1].split('\n')
if len(bsses_summary) == 1:
return bsses
for l in bsses_summary[1:]:
values = l.split('\t')
bss = Profile()
bss.bssid = values[0]
bss.freq = int(values[1])
bss.signal = int(values[2])
bss.ssid = values[4]
bss.akm = []
if 'WPA-PSK' in values[3]:
bss.akm.append(AKM_TYPE_WPAPSK)
if 'WPA2-PSK' in values[3]:
bss.akm.append(AKM_TYPE_WPA2PSK)
if 'WPA-EAP' in values[3]:
bss.akm.append(AKM_TYPE_WPA)
if 'WPA2-EAP' in values[3]:
bss.akm.append(AKM_TYPE_WPA2)
bss.auth = AUTH_ALG_OPEN
bsses.append(bss)
return bsses
def connect(self, obj, network):
"""Connect to the specified AP."""
network_summary = self._send_cmd_to_wpas(
obj['name'],
'LIST_NETWORKS',
True)
network_summary = network_summary[:-1].split('\n')
if len(network_summary) == 1:
return networks
for l in network_summary[1:]:
values = l.split('\t')
if values[1] == network.ssid:
network_summary = self._send_cmd_to_wpas(
obj['name'],
'SELECT_NETWORK {}'.format(values[0]),
True)
def disconnect(self, obj):
"""Disconnect to the specified AP."""
self._send_cmd_to_wpas(obj['name'], 'DISCONNECT')
def add_network_profile(self, obj, params):
"""Add an AP profile for connecting to afterward."""
network_id = self._send_cmd_to_wpas(obj['name'], 'ADD_NETWORK', True)
network_id = network_id.strip()
params.process_akm()
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} ssid \"{}\"'.format(network_id, params.ssid))
key_mgmt = ''
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA2PSK]:
key_mgmt = 'WPA-PSK'
elif params.akm[-1] in [AKM_TYPE_WPA, AKM_TYPE_WPA2]:
key_mgmt = 'WPA-EAP'
else:
key_mgmt = 'NONE'
if key_mgmt:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} key_mgmt {}'.format(
network_id,
key_mgmt))
proto = ''
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA]:
proto = 'WPA'
elif params.akm[-1] in [AKM_TYPE_WPA2PSK, AKM_TYPE_WPA2]:
proto = 'RSN'
if proto:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} proto {}'.format(
network_id,
proto))
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA2PSK]:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} psk \"{}\"'.format(network_id, params.key))
return params
def remove_network_profile(self, obj, params):
"""Remove the specified AP profiles"""
network_id = -1
profiles = self.network_profiles(obj)
for profile in profiles:
if profile == params:
network_id = profile.id
if network_id != -1:
self._send_cmd_to_wpas(obj['name'],
'REMOVE_NETWORK {}'.format(network_id))
def remove_all_network_profiles(self, obj):
"""Remove all the AP profiles."""
self._send_cmd_to_wpas(obj['name'], 'REMOVE_NETWORK all')
def status(self, obj):
"""Get the wifi interface status."""
reply = self._send_cmd_to_wpas(obj['name'], 'STATUS', True)
result = reply.split('\n')
status = ''
for l in result:
if l.startswith('wpa_state='):
status = l[10:]
return status_dict[status.lower()]
def interfaces(self):
"""Get the wifi interface lists."""
ifaces = []
for f in sorted(os.listdir(CTRL_IFACE_DIR)):
sock_file = '/'.join([CTRL_IFACE_DIR, f])
mode = os.stat(sock_file).st_mode
if stat.S_ISSOCK(mode):
iface = {}
iface['name'] = f
ifaces.append(iface)
self._connect_to_wpa_s(f)
return ifaces
def _connect_to_wpa_s(self, iface):
ctrl_iface = '/'.join([CTRL_IFACE_DIR, iface])
if ctrl_iface in self._connections:
self._logger.info(
"Connection for iface '%s' aleady existed!",
iface)
sock_file = '{}/{}_{}'.format('/tmp', 'pywifi', iface)
self._remove_existed_sock(sock_file)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(sock_file)
sock.connect(ctrl_iface)
send_len = sock.send(b'PING')
retry = CTRL_IFACE_RETRY
while retry >= 0:
reply = sock.recv(REPLY_SIZE)
if reply == b'':
self._logger.error("Connection to '%s' is broken!", iface_ctrl)
break
if reply.startswith(b'PONG'):
self._logger.info(
"Connect to sock '%s' successfully!", ctrl_iface)
self._connections[iface] = {
'sock': sock,
'sock_file': sock_file,
'ctrl_iface': ctrl_iface
}
break
retry -= 1
def _remove_existed_sock(self, sock_file):
if os.path.exists(sock_file):
mode = os.stat(sock_file).st_mode
if stat.S_ISSOCK(mode):
os.remove(sock_file)
def _send_cmd_to_wpas(self, iface, cmd, get_reply=False):
if 'psk' not in cmd:
self._logger.info("Send cmd '%s' to wpa_s", cmd)
sock = self._connections[iface]['sock']
sock.send(bytearray(cmd, 'utf-8'))
reply = sock.recv(REPLY_SIZE)
if get_reply:
return reply.decode('utf-8')
if reply != b'OK\n':
self._logger.error(
"Unexpected resp '%s' for Command '%s'",
reply.decode('utf-8'),
cmd)
|
awkman/pywifi | pywifi/_wifiutil_linux.py | WifiUtil.remove_network_profile | python | def remove_network_profile(self, obj, params):
network_id = -1
profiles = self.network_profiles(obj)
for profile in profiles:
if profile == params:
network_id = profile.id
if network_id != -1:
self._send_cmd_to_wpas(obj['name'],
'REMOVE_NETWORK {}'.format(network_id)) | Remove the specified AP profiles | train | https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/_wifiutil_linux.py#L246-L258 | [
"def network_profiles(self, obj):\n \"\"\"Get AP profiles.\"\"\"\n\n networks = []\n network_ids = []\n network_summary = self._send_cmd_to_wpas(\n obj['name'],\n 'LIST_NETWORKS',\n True)\n network_summary = network_summary[:-1].split('\\n')\n if len(network_summary) == 1:\n ... | class WifiUtil():
"""WifiUtil implements the wifi functions in Linux."""
_connections = {}
_logger = logging.getLogger('pywifi')
def scan(self, obj):
"""Trigger the wifi interface to scan."""
self._send_cmd_to_wpas(obj['name'], 'SCAN')
def scan_results(self, obj):
"""Get the AP list after scanning."""
bsses = []
bsses_summary = self._send_cmd_to_wpas(obj['name'], 'SCAN_RESULTS', True)
bsses_summary = bsses_summary[:-1].split('\n')
if len(bsses_summary) == 1:
return bsses
for l in bsses_summary[1:]:
values = l.split('\t')
bss = Profile()
bss.bssid = values[0]
bss.freq = int(values[1])
bss.signal = int(values[2])
bss.ssid = values[4]
bss.akm = []
if 'WPA-PSK' in values[3]:
bss.akm.append(AKM_TYPE_WPAPSK)
if 'WPA2-PSK' in values[3]:
bss.akm.append(AKM_TYPE_WPA2PSK)
if 'WPA-EAP' in values[3]:
bss.akm.append(AKM_TYPE_WPA)
if 'WPA2-EAP' in values[3]:
bss.akm.append(AKM_TYPE_WPA2)
bss.auth = AUTH_ALG_OPEN
bsses.append(bss)
return bsses
def connect(self, obj, network):
"""Connect to the specified AP."""
network_summary = self._send_cmd_to_wpas(
obj['name'],
'LIST_NETWORKS',
True)
network_summary = network_summary[:-1].split('\n')
if len(network_summary) == 1:
return networks
for l in network_summary[1:]:
values = l.split('\t')
if values[1] == network.ssid:
network_summary = self._send_cmd_to_wpas(
obj['name'],
'SELECT_NETWORK {}'.format(values[0]),
True)
def disconnect(self, obj):
"""Disconnect to the specified AP."""
self._send_cmd_to_wpas(obj['name'], 'DISCONNECT')
def add_network_profile(self, obj, params):
"""Add an AP profile for connecting to afterward."""
network_id = self._send_cmd_to_wpas(obj['name'], 'ADD_NETWORK', True)
network_id = network_id.strip()
params.process_akm()
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} ssid \"{}\"'.format(network_id, params.ssid))
key_mgmt = ''
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA2PSK]:
key_mgmt = 'WPA-PSK'
elif params.akm[-1] in [AKM_TYPE_WPA, AKM_TYPE_WPA2]:
key_mgmt = 'WPA-EAP'
else:
key_mgmt = 'NONE'
if key_mgmt:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} key_mgmt {}'.format(
network_id,
key_mgmt))
proto = ''
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA]:
proto = 'WPA'
elif params.akm[-1] in [AKM_TYPE_WPA2PSK, AKM_TYPE_WPA2]:
proto = 'RSN'
if proto:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} proto {}'.format(
network_id,
proto))
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA2PSK]:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} psk \"{}\"'.format(network_id, params.key))
return params
def network_profiles(self, obj):
"""Get AP profiles."""
networks = []
network_ids = []
network_summary = self._send_cmd_to_wpas(
obj['name'],
'LIST_NETWORKS',
True)
network_summary = network_summary[:-1].split('\n')
if len(network_summary) == 1:
return networks
for l in network_summary[1:]:
network_ids.append(l.split()[0])
for network_id in network_ids:
network = Profile()
network.id = network_id
ssid = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} ssid'.format(network_id), True)
if ssid.upper().startswith('FAIL'):
continue
else:
network.ssid = ssid[1:-1]
key_mgmt = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} key_mgmt'.format(network_id),
True)
network.akm = []
if key_mgmt.upper().startswith('FAIL'):
continue
else:
if key_mgmt.upper() in ['WPA-PSK']:
proto = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} proto'.format(network_id),
True)
if proto.upper() == 'RSN':
network.akm.append(AKM_TYPE_WPA2PSK)
else:
network.akm.append(AKM_TYPE_WPAPSK)
elif key_mgmt.upper() in ['WPA-EAP']:
proto = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} proto'.format(network_id),
True)
if proto.upper() == 'RSN':
network.akm.append(AKM_TYPE_WPA2)
else:
network.akm.append(AKM_TYPE_WPA)
ciphers = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} pairwise'.format(network_id),
True).split(' ')
if ciphers[0].upper().startswith('FAIL'):
continue
else:
# Assume the possible ciphers TKIP and CCMP
if len(ciphers) == 1:
network.cipher = cipher_str_to_value(ciphers[0].upper())
elif 'CCMP' in ciphers:
network.cipher = CIPHER_TYPE_CCMP
networks.append(network)
return networks
def remove_all_network_profiles(self, obj):
"""Remove all the AP profiles."""
self._send_cmd_to_wpas(obj['name'], 'REMOVE_NETWORK all')
def status(self, obj):
"""Get the wifi interface status."""
reply = self._send_cmd_to_wpas(obj['name'], 'STATUS', True)
result = reply.split('\n')
status = ''
for l in result:
if l.startswith('wpa_state='):
status = l[10:]
return status_dict[status.lower()]
def interfaces(self):
"""Get the wifi interface lists."""
ifaces = []
for f in sorted(os.listdir(CTRL_IFACE_DIR)):
sock_file = '/'.join([CTRL_IFACE_DIR, f])
mode = os.stat(sock_file).st_mode
if stat.S_ISSOCK(mode):
iface = {}
iface['name'] = f
ifaces.append(iface)
self._connect_to_wpa_s(f)
return ifaces
def _connect_to_wpa_s(self, iface):
ctrl_iface = '/'.join([CTRL_IFACE_DIR, iface])
if ctrl_iface in self._connections:
self._logger.info(
"Connection for iface '%s' aleady existed!",
iface)
sock_file = '{}/{}_{}'.format('/tmp', 'pywifi', iface)
self._remove_existed_sock(sock_file)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(sock_file)
sock.connect(ctrl_iface)
send_len = sock.send(b'PING')
retry = CTRL_IFACE_RETRY
while retry >= 0:
reply = sock.recv(REPLY_SIZE)
if reply == b'':
self._logger.error("Connection to '%s' is broken!", iface_ctrl)
break
if reply.startswith(b'PONG'):
self._logger.info(
"Connect to sock '%s' successfully!", ctrl_iface)
self._connections[iface] = {
'sock': sock,
'sock_file': sock_file,
'ctrl_iface': ctrl_iface
}
break
retry -= 1
def _remove_existed_sock(self, sock_file):
if os.path.exists(sock_file):
mode = os.stat(sock_file).st_mode
if stat.S_ISSOCK(mode):
os.remove(sock_file)
def _send_cmd_to_wpas(self, iface, cmd, get_reply=False):
if 'psk' not in cmd:
self._logger.info("Send cmd '%s' to wpa_s", cmd)
sock = self._connections[iface]['sock']
sock.send(bytearray(cmd, 'utf-8'))
reply = sock.recv(REPLY_SIZE)
if get_reply:
return reply.decode('utf-8')
if reply != b'OK\n':
self._logger.error(
"Unexpected resp '%s' for Command '%s'",
reply.decode('utf-8'),
cmd)
|
awkman/pywifi | pywifi/_wifiutil_linux.py | WifiUtil.status | python | def status(self, obj):
reply = self._send_cmd_to_wpas(obj['name'], 'STATUS', True)
result = reply.split('\n')
status = ''
for l in result:
if l.startswith('wpa_state='):
status = l[10:]
return status_dict[status.lower()] | Get the wifi interface status. | train | https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/_wifiutil_linux.py#L265-L275 | [
"def _send_cmd_to_wpas(self, iface, cmd, get_reply=False):\n\n if 'psk' not in cmd:\n self._logger.info(\"Send cmd '%s' to wpa_s\", cmd)\n sock = self._connections[iface]['sock']\n\n sock.send(bytearray(cmd, 'utf-8'))\n reply = sock.recv(REPLY_SIZE)\n if get_reply:\n return reply.decode... | class WifiUtil():
"""WifiUtil implements the wifi functions in Linux."""
_connections = {}
_logger = logging.getLogger('pywifi')
def scan(self, obj):
"""Trigger the wifi interface to scan."""
self._send_cmd_to_wpas(obj['name'], 'SCAN')
def scan_results(self, obj):
"""Get the AP list after scanning."""
bsses = []
bsses_summary = self._send_cmd_to_wpas(obj['name'], 'SCAN_RESULTS', True)
bsses_summary = bsses_summary[:-1].split('\n')
if len(bsses_summary) == 1:
return bsses
for l in bsses_summary[1:]:
values = l.split('\t')
bss = Profile()
bss.bssid = values[0]
bss.freq = int(values[1])
bss.signal = int(values[2])
bss.ssid = values[4]
bss.akm = []
if 'WPA-PSK' in values[3]:
bss.akm.append(AKM_TYPE_WPAPSK)
if 'WPA2-PSK' in values[3]:
bss.akm.append(AKM_TYPE_WPA2PSK)
if 'WPA-EAP' in values[3]:
bss.akm.append(AKM_TYPE_WPA)
if 'WPA2-EAP' in values[3]:
bss.akm.append(AKM_TYPE_WPA2)
bss.auth = AUTH_ALG_OPEN
bsses.append(bss)
return bsses
def connect(self, obj, network):
"""Connect to the specified AP."""
network_summary = self._send_cmd_to_wpas(
obj['name'],
'LIST_NETWORKS',
True)
network_summary = network_summary[:-1].split('\n')
if len(network_summary) == 1:
return networks
for l in network_summary[1:]:
values = l.split('\t')
if values[1] == network.ssid:
network_summary = self._send_cmd_to_wpas(
obj['name'],
'SELECT_NETWORK {}'.format(values[0]),
True)
def disconnect(self, obj):
"""Disconnect to the specified AP."""
self._send_cmd_to_wpas(obj['name'], 'DISCONNECT')
def add_network_profile(self, obj, params):
"""Add an AP profile for connecting to afterward."""
network_id = self._send_cmd_to_wpas(obj['name'], 'ADD_NETWORK', True)
network_id = network_id.strip()
params.process_akm()
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} ssid \"{}\"'.format(network_id, params.ssid))
key_mgmt = ''
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA2PSK]:
key_mgmt = 'WPA-PSK'
elif params.akm[-1] in [AKM_TYPE_WPA, AKM_TYPE_WPA2]:
key_mgmt = 'WPA-EAP'
else:
key_mgmt = 'NONE'
if key_mgmt:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} key_mgmt {}'.format(
network_id,
key_mgmt))
proto = ''
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA]:
proto = 'WPA'
elif params.akm[-1] in [AKM_TYPE_WPA2PSK, AKM_TYPE_WPA2]:
proto = 'RSN'
if proto:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} proto {}'.format(
network_id,
proto))
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA2PSK]:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} psk \"{}\"'.format(network_id, params.key))
return params
def network_profiles(self, obj):
"""Get AP profiles."""
networks = []
network_ids = []
network_summary = self._send_cmd_to_wpas(
obj['name'],
'LIST_NETWORKS',
True)
network_summary = network_summary[:-1].split('\n')
if len(network_summary) == 1:
return networks
for l in network_summary[1:]:
network_ids.append(l.split()[0])
for network_id in network_ids:
network = Profile()
network.id = network_id
ssid = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} ssid'.format(network_id), True)
if ssid.upper().startswith('FAIL'):
continue
else:
network.ssid = ssid[1:-1]
key_mgmt = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} key_mgmt'.format(network_id),
True)
network.akm = []
if key_mgmt.upper().startswith('FAIL'):
continue
else:
if key_mgmt.upper() in ['WPA-PSK']:
proto = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} proto'.format(network_id),
True)
if proto.upper() == 'RSN':
network.akm.append(AKM_TYPE_WPA2PSK)
else:
network.akm.append(AKM_TYPE_WPAPSK)
elif key_mgmt.upper() in ['WPA-EAP']:
proto = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} proto'.format(network_id),
True)
if proto.upper() == 'RSN':
network.akm.append(AKM_TYPE_WPA2)
else:
network.akm.append(AKM_TYPE_WPA)
ciphers = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} pairwise'.format(network_id),
True).split(' ')
if ciphers[0].upper().startswith('FAIL'):
continue
else:
# Assume the possible ciphers TKIP and CCMP
if len(ciphers) == 1:
network.cipher = cipher_str_to_value(ciphers[0].upper())
elif 'CCMP' in ciphers:
network.cipher = CIPHER_TYPE_CCMP
networks.append(network)
return networks
def remove_network_profile(self, obj, params):
"""Remove the specified AP profiles"""
network_id = -1
profiles = self.network_profiles(obj)
for profile in profiles:
if profile == params:
network_id = profile.id
if network_id != -1:
self._send_cmd_to_wpas(obj['name'],
'REMOVE_NETWORK {}'.format(network_id))
def remove_all_network_profiles(self, obj):
"""Remove all the AP profiles."""
self._send_cmd_to_wpas(obj['name'], 'REMOVE_NETWORK all')
def interfaces(self):
"""Get the wifi interface lists."""
ifaces = []
for f in sorted(os.listdir(CTRL_IFACE_DIR)):
sock_file = '/'.join([CTRL_IFACE_DIR, f])
mode = os.stat(sock_file).st_mode
if stat.S_ISSOCK(mode):
iface = {}
iface['name'] = f
ifaces.append(iface)
self._connect_to_wpa_s(f)
return ifaces
def _connect_to_wpa_s(self, iface):
ctrl_iface = '/'.join([CTRL_IFACE_DIR, iface])
if ctrl_iface in self._connections:
self._logger.info(
"Connection for iface '%s' aleady existed!",
iface)
sock_file = '{}/{}_{}'.format('/tmp', 'pywifi', iface)
self._remove_existed_sock(sock_file)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(sock_file)
sock.connect(ctrl_iface)
send_len = sock.send(b'PING')
retry = CTRL_IFACE_RETRY
while retry >= 0:
reply = sock.recv(REPLY_SIZE)
if reply == b'':
self._logger.error("Connection to '%s' is broken!", iface_ctrl)
break
if reply.startswith(b'PONG'):
self._logger.info(
"Connect to sock '%s' successfully!", ctrl_iface)
self._connections[iface] = {
'sock': sock,
'sock_file': sock_file,
'ctrl_iface': ctrl_iface
}
break
retry -= 1
def _remove_existed_sock(self, sock_file):
if os.path.exists(sock_file):
mode = os.stat(sock_file).st_mode
if stat.S_ISSOCK(mode):
os.remove(sock_file)
def _send_cmd_to_wpas(self, iface, cmd, get_reply=False):
if 'psk' not in cmd:
self._logger.info("Send cmd '%s' to wpa_s", cmd)
sock = self._connections[iface]['sock']
sock.send(bytearray(cmd, 'utf-8'))
reply = sock.recv(REPLY_SIZE)
if get_reply:
return reply.decode('utf-8')
if reply != b'OK\n':
self._logger.error(
"Unexpected resp '%s' for Command '%s'",
reply.decode('utf-8'),
cmd)
|
awkman/pywifi | pywifi/_wifiutil_linux.py | WifiUtil.interfaces | python | def interfaces(self):
ifaces = []
for f in sorted(os.listdir(CTRL_IFACE_DIR)):
sock_file = '/'.join([CTRL_IFACE_DIR, f])
mode = os.stat(sock_file).st_mode
if stat.S_ISSOCK(mode):
iface = {}
iface['name'] = f
ifaces.append(iface)
self._connect_to_wpa_s(f)
return ifaces | Get the wifi interface lists. | train | https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/_wifiutil_linux.py#L277-L290 | [
"def _connect_to_wpa_s(self, iface):\n\n ctrl_iface = '/'.join([CTRL_IFACE_DIR, iface])\n if ctrl_iface in self._connections:\n self._logger.info(\n \"Connection for iface '%s' aleady existed!\",\n iface)\n\n sock_file = '{}/{}_{}'.format('/tmp', 'pywifi', iface)\n self._rem... | class WifiUtil():
"""WifiUtil implements the wifi functions in Linux."""
_connections = {}
_logger = logging.getLogger('pywifi')
def scan(self, obj):
"""Trigger the wifi interface to scan."""
self._send_cmd_to_wpas(obj['name'], 'SCAN')
def scan_results(self, obj):
"""Get the AP list after scanning."""
bsses = []
bsses_summary = self._send_cmd_to_wpas(obj['name'], 'SCAN_RESULTS', True)
bsses_summary = bsses_summary[:-1].split('\n')
if len(bsses_summary) == 1:
return bsses
for l in bsses_summary[1:]:
values = l.split('\t')
bss = Profile()
bss.bssid = values[0]
bss.freq = int(values[1])
bss.signal = int(values[2])
bss.ssid = values[4]
bss.akm = []
if 'WPA-PSK' in values[3]:
bss.akm.append(AKM_TYPE_WPAPSK)
if 'WPA2-PSK' in values[3]:
bss.akm.append(AKM_TYPE_WPA2PSK)
if 'WPA-EAP' in values[3]:
bss.akm.append(AKM_TYPE_WPA)
if 'WPA2-EAP' in values[3]:
bss.akm.append(AKM_TYPE_WPA2)
bss.auth = AUTH_ALG_OPEN
bsses.append(bss)
return bsses
def connect(self, obj, network):
"""Connect to the specified AP."""
network_summary = self._send_cmd_to_wpas(
obj['name'],
'LIST_NETWORKS',
True)
network_summary = network_summary[:-1].split('\n')
if len(network_summary) == 1:
return networks
for l in network_summary[1:]:
values = l.split('\t')
if values[1] == network.ssid:
network_summary = self._send_cmd_to_wpas(
obj['name'],
'SELECT_NETWORK {}'.format(values[0]),
True)
def disconnect(self, obj):
"""Disconnect to the specified AP."""
self._send_cmd_to_wpas(obj['name'], 'DISCONNECT')
def add_network_profile(self, obj, params):
"""Add an AP profile for connecting to afterward."""
network_id = self._send_cmd_to_wpas(obj['name'], 'ADD_NETWORK', True)
network_id = network_id.strip()
params.process_akm()
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} ssid \"{}\"'.format(network_id, params.ssid))
key_mgmt = ''
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA2PSK]:
key_mgmt = 'WPA-PSK'
elif params.akm[-1] in [AKM_TYPE_WPA, AKM_TYPE_WPA2]:
key_mgmt = 'WPA-EAP'
else:
key_mgmt = 'NONE'
if key_mgmt:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} key_mgmt {}'.format(
network_id,
key_mgmt))
proto = ''
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA]:
proto = 'WPA'
elif params.akm[-1] in [AKM_TYPE_WPA2PSK, AKM_TYPE_WPA2]:
proto = 'RSN'
if proto:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} proto {}'.format(
network_id,
proto))
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA2PSK]:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} psk \"{}\"'.format(network_id, params.key))
return params
def network_profiles(self, obj):
"""Get AP profiles."""
networks = []
network_ids = []
network_summary = self._send_cmd_to_wpas(
obj['name'],
'LIST_NETWORKS',
True)
network_summary = network_summary[:-1].split('\n')
if len(network_summary) == 1:
return networks
for l in network_summary[1:]:
network_ids.append(l.split()[0])
for network_id in network_ids:
network = Profile()
network.id = network_id
ssid = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} ssid'.format(network_id), True)
if ssid.upper().startswith('FAIL'):
continue
else:
network.ssid = ssid[1:-1]
key_mgmt = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} key_mgmt'.format(network_id),
True)
network.akm = []
if key_mgmt.upper().startswith('FAIL'):
continue
else:
if key_mgmt.upper() in ['WPA-PSK']:
proto = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} proto'.format(network_id),
True)
if proto.upper() == 'RSN':
network.akm.append(AKM_TYPE_WPA2PSK)
else:
network.akm.append(AKM_TYPE_WPAPSK)
elif key_mgmt.upper() in ['WPA-EAP']:
proto = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} proto'.format(network_id),
True)
if proto.upper() == 'RSN':
network.akm.append(AKM_TYPE_WPA2)
else:
network.akm.append(AKM_TYPE_WPA)
ciphers = self._send_cmd_to_wpas(
obj['name'],
'GET_NETWORK {} pairwise'.format(network_id),
True).split(' ')
if ciphers[0].upper().startswith('FAIL'):
continue
else:
# Assume the possible ciphers TKIP and CCMP
if len(ciphers) == 1:
network.cipher = cipher_str_to_value(ciphers[0].upper())
elif 'CCMP' in ciphers:
network.cipher = CIPHER_TYPE_CCMP
networks.append(network)
return networks
def remove_network_profile(self, obj, params):
"""Remove the specified AP profiles"""
network_id = -1
profiles = self.network_profiles(obj)
for profile in profiles:
if profile == params:
network_id = profile.id
if network_id != -1:
self._send_cmd_to_wpas(obj['name'],
'REMOVE_NETWORK {}'.format(network_id))
def remove_all_network_profiles(self, obj):
"""Remove all the AP profiles."""
self._send_cmd_to_wpas(obj['name'], 'REMOVE_NETWORK all')
def status(self, obj):
"""Get the wifi interface status."""
reply = self._send_cmd_to_wpas(obj['name'], 'STATUS', True)
result = reply.split('\n')
status = ''
for l in result:
if l.startswith('wpa_state='):
status = l[10:]
return status_dict[status.lower()]
def interfaces(self):
"""Get the wifi interface lists."""
ifaces = []
for f in sorted(os.listdir(CTRL_IFACE_DIR)):
sock_file = '/'.join([CTRL_IFACE_DIR, f])
mode = os.stat(sock_file).st_mode
if stat.S_ISSOCK(mode):
iface = {}
iface['name'] = f
ifaces.append(iface)
self._connect_to_wpa_s(f)
return ifaces
def _connect_to_wpa_s(self, iface):
ctrl_iface = '/'.join([CTRL_IFACE_DIR, iface])
if ctrl_iface in self._connections:
self._logger.info(
"Connection for iface '%s' aleady existed!",
iface)
sock_file = '{}/{}_{}'.format('/tmp', 'pywifi', iface)
self._remove_existed_sock(sock_file)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(sock_file)
sock.connect(ctrl_iface)
send_len = sock.send(b'PING')
retry = CTRL_IFACE_RETRY
while retry >= 0:
reply = sock.recv(REPLY_SIZE)
if reply == b'':
self._logger.error("Connection to '%s' is broken!", iface_ctrl)
break
if reply.startswith(b'PONG'):
self._logger.info(
"Connect to sock '%s' successfully!", ctrl_iface)
self._connections[iface] = {
'sock': sock,
'sock_file': sock_file,
'ctrl_iface': ctrl_iface
}
break
retry -= 1
def _remove_existed_sock(self, sock_file):
if os.path.exists(sock_file):
mode = os.stat(sock_file).st_mode
if stat.S_ISSOCK(mode):
os.remove(sock_file)
def _send_cmd_to_wpas(self, iface, cmd, get_reply=False):
if 'psk' not in cmd:
self._logger.info("Send cmd '%s' to wpa_s", cmd)
sock = self._connections[iface]['sock']
sock.send(bytearray(cmd, 'utf-8'))
reply = sock.recv(REPLY_SIZE)
if get_reply:
return reply.decode('utf-8')
if reply != b'OK\n':
self._logger.error(
"Unexpected resp '%s' for Command '%s'",
reply.decode('utf-8'),
cmd)
|
awkman/pywifi | pywifi/iface.py | Interface.scan | python | def scan(self):
self._logger.info("iface '%s' scans", self.name())
self._wifi_ctrl.scan(self._raw_obj) | Trigger the wifi interface to scan. | train | https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/iface.py#L41-L46 | [
"def name(self):\n \"\"\"\"Get the name of the wifi interfacce.\"\"\"\n\n return self._raw_obj['name']\n"
] | class Interface:
"""Interface provides methods for manipulating wifi devices."""
"""
For encapsulating OS dependent behavior, we declare _raw_obj here for
storing some common attribute (e.g. name) and os attributes (e.g. dbus
objects for linux)
"""
_raw_obj = {}
_wifi_ctrl = {}
_logger = None
def __init__(self, raw_obj):
self._raw_obj = raw_obj
self._wifi_ctrl = wifiutil.WifiUtil()
self._logger = logging.getLogger('pywifi')
def name(self):
""""Get the name of the wifi interfacce."""
return self._raw_obj['name']
def scan_results(self):
"""Return the scan result."""
bsses = self._wifi_ctrl.scan_results(self._raw_obj)
if self._logger.isEnabledFor(logging.INFO):
for bss in bsses:
self._logger.info("Find bss:")
self._logger.info("\tbssid: %s", bss.bssid)
self._logger.info("\tssid: %s", bss.ssid)
self._logger.info("\tfreq: %d", bss.freq)
self._logger.info("\tauth: %s", bss.auth)
self._logger.info("\takm: %s", bss.akm)
self._logger.info("\tsignal: %d", bss.signal)
return bsses
def add_network_profile(self, params):
"""Add the info of the AP for connecting afterward."""
return self._wifi_ctrl.add_network_profile(self._raw_obj, params)
def remove_network_profile(self, params):
"""Remove the specified AP settings."""
self._wifi_ctrl.remove_network_profile(self._raw_obj, params)
def remove_all_network_profiles(self):
"""Remove all the AP settings."""
self._wifi_ctrl.remove_all_network_profiles(self._raw_obj)
def network_profiles(self):
"""Get all the AP profiles."""
profiles = self._wifi_ctrl.network_profiles(self._raw_obj)
if self._logger.isEnabledFor(logging.INFO):
for profile in profiles:
self._logger.info("Get profile:")
self._logger.info("\tssid: %s", profile.ssid)
self._logger.info("\tauth: %s", profile.auth)
self._logger.info("\takm: %s", profile.akm)
self._logger.info("\tcipher: %s", profile.cipher)
return profiles
def connect(self, params):
"""Connect to the specified AP."""
self._logger.info("iface '%s' connects to AP: '%s'",
self.name(), params.ssid)
self._wifi_ctrl.connect(self._raw_obj, params)
def disconnect(self):
"""Disconnect from the specified AP."""
self._logger.info("iface '%s' disconnects", self.name())
self._wifi_ctrl.disconnect(self._raw_obj)
def status(self):
"""Get the status of the wifi interface."""
return self._wifi_ctrl.status(self._raw_obj)
|
awkman/pywifi | pywifi/iface.py | Interface.scan_results | python | def scan_results(self):
bsses = self._wifi_ctrl.scan_results(self._raw_obj)
if self._logger.isEnabledFor(logging.INFO):
for bss in bsses:
self._logger.info("Find bss:")
self._logger.info("\tbssid: %s", bss.bssid)
self._logger.info("\tssid: %s", bss.ssid)
self._logger.info("\tfreq: %d", bss.freq)
self._logger.info("\tauth: %s", bss.auth)
self._logger.info("\takm: %s", bss.akm)
self._logger.info("\tsignal: %d", bss.signal)
return bsses | Return the scan result. | train | https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/iface.py#L48-L63 | null | class Interface:
"""Interface provides methods for manipulating wifi devices."""
"""
For encapsulating OS dependent behavior, we declare _raw_obj here for
storing some common attribute (e.g. name) and os attributes (e.g. dbus
objects for linux)
"""
_raw_obj = {}
_wifi_ctrl = {}
_logger = None
def __init__(self, raw_obj):
self._raw_obj = raw_obj
self._wifi_ctrl = wifiutil.WifiUtil()
self._logger = logging.getLogger('pywifi')
def name(self):
""""Get the name of the wifi interfacce."""
return self._raw_obj['name']
def scan(self):
"""Trigger the wifi interface to scan."""
self._logger.info("iface '%s' scans", self.name())
self._wifi_ctrl.scan(self._raw_obj)
def scan_results(self):
"""Return the scan result."""
bsses = self._wifi_ctrl.scan_results(self._raw_obj)
if self._logger.isEnabledFor(logging.INFO):
for bss in bsses:
self._logger.info("Find bss:")
self._logger.info("\tbssid: %s", bss.bssid)
self._logger.info("\tssid: %s", bss.ssid)
self._logger.info("\tfreq: %d", bss.freq)
self._logger.info("\tauth: %s", bss.auth)
self._logger.info("\takm: %s", bss.akm)
self._logger.info("\tsignal: %d", bss.signal)
return bsses
def add_network_profile(self, params):
"""Add the info of the AP for connecting afterward."""
return self._wifi_ctrl.add_network_profile(self._raw_obj, params)
def remove_network_profile(self, params):
"""Remove the specified AP settings."""
self._wifi_ctrl.remove_network_profile(self._raw_obj, params)
def remove_all_network_profiles(self):
"""Remove all the AP settings."""
self._wifi_ctrl.remove_all_network_profiles(self._raw_obj)
def network_profiles(self):
"""Get all the AP profiles."""
profiles = self._wifi_ctrl.network_profiles(self._raw_obj)
if self._logger.isEnabledFor(logging.INFO):
for profile in profiles:
self._logger.info("Get profile:")
self._logger.info("\tssid: %s", profile.ssid)
self._logger.info("\tauth: %s", profile.auth)
self._logger.info("\takm: %s", profile.akm)
self._logger.info("\tcipher: %s", profile.cipher)
return profiles
def connect(self, params):
"""Connect to the specified AP."""
self._logger.info("iface '%s' connects to AP: '%s'",
self.name(), params.ssid)
self._wifi_ctrl.connect(self._raw_obj, params)
def disconnect(self):
"""Disconnect from the specified AP."""
self._logger.info("iface '%s' disconnects", self.name())
self._wifi_ctrl.disconnect(self._raw_obj)
def status(self):
"""Get the status of the wifi interface."""
return self._wifi_ctrl.status(self._raw_obj)
|
awkman/pywifi | pywifi/iface.py | Interface.network_profiles | python | def network_profiles(self):
profiles = self._wifi_ctrl.network_profiles(self._raw_obj)
if self._logger.isEnabledFor(logging.INFO):
for profile in profiles:
self._logger.info("Get profile:")
self._logger.info("\tssid: %s", profile.ssid)
self._logger.info("\tauth: %s", profile.auth)
self._logger.info("\takm: %s", profile.akm)
self._logger.info("\tcipher: %s", profile.cipher)
return profiles | Get all the AP profiles. | train | https://github.com/awkman/pywifi/blob/719baf73d8d32c623dbaf5e9de5d973face152a4/pywifi/iface.py#L80-L93 | null | class Interface:
"""Interface provides methods for manipulating wifi devices."""
"""
For encapsulating OS dependent behavior, we declare _raw_obj here for
storing some common attribute (e.g. name) and os attributes (e.g. dbus
objects for linux)
"""
_raw_obj = {}
_wifi_ctrl = {}
_logger = None
def __init__(self, raw_obj):
self._raw_obj = raw_obj
self._wifi_ctrl = wifiutil.WifiUtil()
self._logger = logging.getLogger('pywifi')
def name(self):
""""Get the name of the wifi interfacce."""
return self._raw_obj['name']
def scan(self):
"""Trigger the wifi interface to scan."""
self._logger.info("iface '%s' scans", self.name())
self._wifi_ctrl.scan(self._raw_obj)
def scan_results(self):
"""Return the scan result."""
bsses = self._wifi_ctrl.scan_results(self._raw_obj)
if self._logger.isEnabledFor(logging.INFO):
for bss in bsses:
self._logger.info("Find bss:")
self._logger.info("\tbssid: %s", bss.bssid)
self._logger.info("\tssid: %s", bss.ssid)
self._logger.info("\tfreq: %d", bss.freq)
self._logger.info("\tauth: %s", bss.auth)
self._logger.info("\takm: %s", bss.akm)
self._logger.info("\tsignal: %d", bss.signal)
return bsses
def add_network_profile(self, params):
"""Add the info of the AP for connecting afterward."""
return self._wifi_ctrl.add_network_profile(self._raw_obj, params)
def remove_network_profile(self, params):
"""Remove the specified AP settings."""
self._wifi_ctrl.remove_network_profile(self._raw_obj, params)
def remove_all_network_profiles(self):
"""Remove all the AP settings."""
self._wifi_ctrl.remove_all_network_profiles(self._raw_obj)
def connect(self, params):
"""Connect to the specified AP."""
self._logger.info("iface '%s' connects to AP: '%s'",
self.name(), params.ssid)
self._wifi_ctrl.connect(self._raw_obj, params)
def disconnect(self):
"""Disconnect from the specified AP."""
self._logger.info("iface '%s' disconnects", self.name())
self._wifi_ctrl.disconnect(self._raw_obj)
def status(self):
"""Get the status of the wifi interface."""
return self._wifi_ctrl.status(self._raw_obj)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.