id int32 0 252k | repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 51 19.8k | code_tokens list | docstring stringlengths 3 17.3k | docstring_tokens list | sha stringlengths 40 40 | url stringlengths 87 242 |
|---|---|---|---|---|---|---|---|---|---|---|---|
226,600 | adamzap/landslide | landslide/generator.py | Generator.add_toc_entry | def add_toc_entry(self, title, level, slide_number):
""" Adds a new entry to current presentation Table of Contents.
"""
self.__toc.append({'title': title, 'number': slide_number,
'level': level}) | python | def add_toc_entry(self, title, level, slide_number):
self.__toc.append({'title': title, 'number': slide_number,
'level': level}) | [
"def",
"add_toc_entry",
"(",
"self",
",",
"title",
",",
"level",
",",
"slide_number",
")",
":",
"self",
".",
"__toc",
".",
"append",
"(",
"{",
"'title'",
":",
"title",
",",
"'number'",
":",
"slide_number",
",",
"'level'",
":",
"level",
"}",
")"
] | Adds a new entry to current presentation Table of Contents. | [
"Adds",
"a",
"new",
"entry",
"to",
"current",
"presentation",
"Table",
"of",
"Contents",
"."
] | 59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832 | https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/generator.py#L177-L181 |
226,601 | adamzap/landslide | landslide/generator.py | Generator.toc | def toc(self):
""" Smart getter for Table of Content list.
"""
toc = []
stack = [toc]
for entry in self.__toc:
entry['sub'] = []
while entry['level'] < len(stack):
stack.pop()
while entry['level'] > len(stack):
stack.append(stack[-1][-1]['sub'])
stack[-1].append(entry)
return toc | python | def toc(self):
toc = []
stack = [toc]
for entry in self.__toc:
entry['sub'] = []
while entry['level'] < len(stack):
stack.pop()
while entry['level'] > len(stack):
stack.append(stack[-1][-1]['sub'])
stack[-1].append(entry)
return toc | [
"def",
"toc",
"(",
"self",
")",
":",
"toc",
"=",
"[",
"]",
"stack",
"=",
"[",
"toc",
"]",
"for",
"entry",
"in",
"self",
".",
"__toc",
":",
"entry",
"[",
"'sub'",
"]",
"=",
"[",
"]",
"while",
"entry",
"[",
"'level'",
"]",
"<",
"len",
"(",
"sta... | Smart getter for Table of Content list. | [
"Smart",
"getter",
"for",
"Table",
"of",
"Content",
"list",
"."
] | 59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832 | https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/generator.py#L184-L196 |
226,602 | adamzap/landslide | landslide/generator.py | Generator.execute | def execute(self):
""" Execute this generator regarding its current configuration.
"""
if self.direct:
if self.file_type == 'pdf':
raise IOError(u"Direct output mode is not available for PDF "
"export")
else:
print(self.render().encode(self.encoding))
else:
self.write_and_log()
if self.watch:
from landslide.watcher import watch
self.log(u"Watching %s\n" % self.watch_dir)
watch(self.watch_dir, self.write_and_log) | python | def execute(self):
if self.direct:
if self.file_type == 'pdf':
raise IOError(u"Direct output mode is not available for PDF "
"export")
else:
print(self.render().encode(self.encoding))
else:
self.write_and_log()
if self.watch:
from landslide.watcher import watch
self.log(u"Watching %s\n" % self.watch_dir)
watch(self.watch_dir, self.write_and_log) | [
"def",
"execute",
"(",
"self",
")",
":",
"if",
"self",
".",
"direct",
":",
"if",
"self",
".",
"file_type",
"==",
"'pdf'",
":",
"raise",
"IOError",
"(",
"u\"Direct output mode is not available for PDF \"",
"\"export\"",
")",
"else",
":",
"print",
"(",
"self",
... | Execute this generator regarding its current configuration. | [
"Execute",
"this",
"generator",
"regarding",
"its",
"current",
"configuration",
"."
] | 59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832 | https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/generator.py#L198-L215 |
226,603 | adamzap/landslide | landslide/generator.py | Generator.get_template_file | def get_template_file(self):
""" Retrieves Jinja2 template file path.
"""
if os.path.exists(os.path.join(self.theme_dir, 'base.html')):
return os.path.join(self.theme_dir, 'base.html')
default_dir = os.path.join(THEMES_DIR, 'default')
if not os.path.exists(os.path.join(default_dir, 'base.html')):
raise IOError(u"Cannot find base.html in default theme")
return os.path.join(default_dir, 'base.html') | python | def get_template_file(self):
if os.path.exists(os.path.join(self.theme_dir, 'base.html')):
return os.path.join(self.theme_dir, 'base.html')
default_dir = os.path.join(THEMES_DIR, 'default')
if not os.path.exists(os.path.join(default_dir, 'base.html')):
raise IOError(u"Cannot find base.html in default theme")
return os.path.join(default_dir, 'base.html') | [
"def",
"get_template_file",
"(",
"self",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"theme_dir",
",",
"'base.html'",
")",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"self",... | Retrieves Jinja2 template file path. | [
"Retrieves",
"Jinja2",
"template",
"file",
"path",
"."
] | 59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832 | https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/generator.py#L224-L232 |
226,604 | adamzap/landslide | landslide/generator.py | Generator.fetch_contents | def fetch_contents(self, source):
""" Recursively fetches Markdown contents from a single file or
directory containing itself Markdown files.
"""
slides = []
if type(source) is list:
for entry in source:
slides.extend(self.fetch_contents(entry))
elif os.path.isdir(source):
self.log(u"Entering %s" % source)
entries = os.listdir(source)
entries.sort()
for entry in entries:
slides.extend(self.fetch_contents(os.path.join(source, entry)))
else:
try:
parser = Parser(os.path.splitext(source)[1], self.encoding,
self.extensions)
except NotImplementedError:
return slides
self.log(u"Adding %s (%s)" % (source, parser.format))
try:
with codecs.open(source, encoding=self.encoding) as file:
file_contents = file.read()
except UnicodeDecodeError:
self.log(u"Unable to decode source %s: skipping" % source,
'warning')
else:
inner_slides = re.split(r'<hr.+>', parser.parse(file_contents))
for inner_slide in inner_slides:
slides.append(self.get_slide_vars(inner_slide, source))
if not slides:
self.log(u"Exiting %s: no contents found" % source, 'notice')
return slides | python | def fetch_contents(self, source):
slides = []
if type(source) is list:
for entry in source:
slides.extend(self.fetch_contents(entry))
elif os.path.isdir(source):
self.log(u"Entering %s" % source)
entries = os.listdir(source)
entries.sort()
for entry in entries:
slides.extend(self.fetch_contents(os.path.join(source, entry)))
else:
try:
parser = Parser(os.path.splitext(source)[1], self.encoding,
self.extensions)
except NotImplementedError:
return slides
self.log(u"Adding %s (%s)" % (source, parser.format))
try:
with codecs.open(source, encoding=self.encoding) as file:
file_contents = file.read()
except UnicodeDecodeError:
self.log(u"Unable to decode source %s: skipping" % source,
'warning')
else:
inner_slides = re.split(r'<hr.+>', parser.parse(file_contents))
for inner_slide in inner_slides:
slides.append(self.get_slide_vars(inner_slide, source))
if not slides:
self.log(u"Exiting %s: no contents found" % source, 'notice')
return slides | [
"def",
"fetch_contents",
"(",
"self",
",",
"source",
")",
":",
"slides",
"=",
"[",
"]",
"if",
"type",
"(",
"source",
")",
"is",
"list",
":",
"for",
"entry",
"in",
"source",
":",
"slides",
".",
"extend",
"(",
"self",
".",
"fetch_contents",
"(",
"entry... | Recursively fetches Markdown contents from a single file or
directory containing itself Markdown files. | [
"Recursively",
"fetches",
"Markdown",
"contents",
"from",
"a",
"single",
"file",
"or",
"directory",
"containing",
"itself",
"Markdown",
"files",
"."
] | 59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832 | https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/generator.py#L234-L272 |
226,605 | adamzap/landslide | landslide/generator.py | Generator.find_theme_dir | def find_theme_dir(self, theme, copy_theme=False):
""" Finds them dir path from its name.
"""
if os.path.exists(theme):
self.theme_dir = theme
elif os.path.exists(os.path.join(THEMES_DIR, theme)):
self.theme_dir = os.path.join(THEMES_DIR, theme)
else:
raise IOError(u"Theme %s not found or invalid" % theme)
target_theme_dir = os.path.join(os.getcwd(), 'theme')
if copy_theme or os.path.exists(target_theme_dir):
self.log(u'Copying %s theme directory to %s'
% (theme, target_theme_dir))
if not os.path.exists(target_theme_dir):
try:
shutil.copytree(self.theme_dir, target_theme_dir)
except Exception as e:
self.log(u"Skipped copy of theme folder: %s" % e)
pass
self.theme_dir = target_theme_dir
return self.theme_dir | python | def find_theme_dir(self, theme, copy_theme=False):
if os.path.exists(theme):
self.theme_dir = theme
elif os.path.exists(os.path.join(THEMES_DIR, theme)):
self.theme_dir = os.path.join(THEMES_DIR, theme)
else:
raise IOError(u"Theme %s not found or invalid" % theme)
target_theme_dir = os.path.join(os.getcwd(), 'theme')
if copy_theme or os.path.exists(target_theme_dir):
self.log(u'Copying %s theme directory to %s'
% (theme, target_theme_dir))
if not os.path.exists(target_theme_dir):
try:
shutil.copytree(self.theme_dir, target_theme_dir)
except Exception as e:
self.log(u"Skipped copy of theme folder: %s" % e)
pass
self.theme_dir = target_theme_dir
return self.theme_dir | [
"def",
"find_theme_dir",
"(",
"self",
",",
"theme",
",",
"copy_theme",
"=",
"False",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"theme",
")",
":",
"self",
".",
"theme_dir",
"=",
"theme",
"elif",
"os",
".",
"path",
".",
"exists",
"(",
"o... | Finds them dir path from its name. | [
"Finds",
"them",
"dir",
"path",
"from",
"its",
"name",
"."
] | 59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832 | https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/generator.py#L274-L294 |
226,606 | adamzap/landslide | landslide/generator.py | Generator.get_css | def get_css(self):
""" Fetches and returns stylesheet file path or contents, for both
print and screen contexts, depending if we want a standalone
presentation or not.
"""
css = {}
print_css = os.path.join(self.theme_dir, 'css', 'print.css')
if not os.path.exists(print_css):
# Fall back to default theme
print_css = os.path.join(THEMES_DIR, 'default', 'css', 'print.css')
if not os.path.exists(print_css):
raise IOError(u"Cannot find css/print.css in default theme")
with codecs.open(print_css, encoding=self.encoding) as css_file:
css['print'] = {
'path_url': utils.get_path_url(print_css, self.relative),
'contents': css_file.read(),
}
screen_css = os.path.join(self.theme_dir, 'css', 'screen.css')
if (os.path.exists(screen_css)):
with codecs.open(screen_css, encoding=self.encoding) as css_file:
css['screen'] = {
'path_url': utils.get_path_url(screen_css, self.relative),
'contents': css_file.read(),
}
else:
self.log(u"No screen stylesheet provided in current theme",
'warning')
return css | python | def get_css(self):
css = {}
print_css = os.path.join(self.theme_dir, 'css', 'print.css')
if not os.path.exists(print_css):
# Fall back to default theme
print_css = os.path.join(THEMES_DIR, 'default', 'css', 'print.css')
if not os.path.exists(print_css):
raise IOError(u"Cannot find css/print.css in default theme")
with codecs.open(print_css, encoding=self.encoding) as css_file:
css['print'] = {
'path_url': utils.get_path_url(print_css, self.relative),
'contents': css_file.read(),
}
screen_css = os.path.join(self.theme_dir, 'css', 'screen.css')
if (os.path.exists(screen_css)):
with codecs.open(screen_css, encoding=self.encoding) as css_file:
css['screen'] = {
'path_url': utils.get_path_url(screen_css, self.relative),
'contents': css_file.read(),
}
else:
self.log(u"No screen stylesheet provided in current theme",
'warning')
return css | [
"def",
"get_css",
"(",
"self",
")",
":",
"css",
"=",
"{",
"}",
"print_css",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"theme_dir",
",",
"'css'",
",",
"'print.css'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"print_css",
... | Fetches and returns stylesheet file path or contents, for both
print and screen contexts, depending if we want a standalone
presentation or not. | [
"Fetches",
"and",
"returns",
"stylesheet",
"file",
"path",
"or",
"contents",
"for",
"both",
"print",
"and",
"screen",
"contexts",
"depending",
"if",
"we",
"want",
"a",
"standalone",
"presentation",
"or",
"not",
"."
] | 59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832 | https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/generator.py#L296-L330 |
226,607 | adamzap/landslide | landslide/generator.py | Generator.get_js | def get_js(self):
""" Fetches and returns javascript file path or contents, depending if
we want a standalone presentation or not.
"""
js_file = os.path.join(self.theme_dir, 'js', 'slides.js')
if not os.path.exists(js_file):
js_file = os.path.join(THEMES_DIR, 'default', 'js', 'slides.js')
if not os.path.exists(js_file):
raise IOError(u"Cannot find slides.js in default theme")
with codecs.open(js_file, encoding=self.encoding) as js_file_obj:
return {
'path_url': utils.get_path_url(js_file, self.relative),
'contents': js_file_obj.read(),
} | python | def get_js(self):
js_file = os.path.join(self.theme_dir, 'js', 'slides.js')
if not os.path.exists(js_file):
js_file = os.path.join(THEMES_DIR, 'default', 'js', 'slides.js')
if not os.path.exists(js_file):
raise IOError(u"Cannot find slides.js in default theme")
with codecs.open(js_file, encoding=self.encoding) as js_file_obj:
return {
'path_url': utils.get_path_url(js_file, self.relative),
'contents': js_file_obj.read(),
} | [
"def",
"get_js",
"(",
"self",
")",
":",
"js_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"theme_dir",
",",
"'js'",
",",
"'slides.js'",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"js_file",
")",
":",
"js_file",
"=",
... | Fetches and returns javascript file path or contents, depending if
we want a standalone presentation or not. | [
"Fetches",
"and",
"returns",
"javascript",
"file",
"path",
"or",
"contents",
"depending",
"if",
"we",
"want",
"a",
"standalone",
"presentation",
"or",
"not",
"."
] | 59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832 | https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/generator.py#L332-L347 |
226,608 | adamzap/landslide | landslide/generator.py | Generator.get_slide_vars | def get_slide_vars(self, slide_src, source=None):
""" Computes a single slide template vars from its html source code.
Also extracts slide informations for the table of contents.
"""
presenter_notes = None
find = re.search(r'<h\d[^>]*>presenter notes</h\d>', slide_src,
re.DOTALL | re.UNICODE | re.IGNORECASE)
if find:
if self.presenter_notes:
presenter_notes = slide_src[find.end():].strip()
slide_src = slide_src[:find.start()]
find = re.search(r'(<h(\d+?).*?>(.+?)</h\d>)\s?(.+)?', slide_src,
re.DOTALL | re.UNICODE)
if not find:
header = level = title = None
content = slide_src.strip()
else:
header = find.group(1)
level = int(find.group(2))
title = find.group(3)
content = find.group(4).strip() if find.group(4) else find.group(4)
slide_classes = []
if header:
header, _ = self.process_macros(header, source)
if content:
content, slide_classes = self.process_macros(content, source)
source_dict = {}
if source:
source_dict = {'rel_path': source,
'abs_path': os.path.abspath(source)}
if header or content:
return {'header': header, 'title': title, 'level': level,
'content': content, 'classes': slide_classes,
'source': source_dict, 'presenter_notes': presenter_notes,
'math_output': self.math_output} | python | def get_slide_vars(self, slide_src, source=None):
presenter_notes = None
find = re.search(r'<h\d[^>]*>presenter notes</h\d>', slide_src,
re.DOTALL | re.UNICODE | re.IGNORECASE)
if find:
if self.presenter_notes:
presenter_notes = slide_src[find.end():].strip()
slide_src = slide_src[:find.start()]
find = re.search(r'(<h(\d+?).*?>(.+?)</h\d>)\s?(.+)?', slide_src,
re.DOTALL | re.UNICODE)
if not find:
header = level = title = None
content = slide_src.strip()
else:
header = find.group(1)
level = int(find.group(2))
title = find.group(3)
content = find.group(4).strip() if find.group(4) else find.group(4)
slide_classes = []
if header:
header, _ = self.process_macros(header, source)
if content:
content, slide_classes = self.process_macros(content, source)
source_dict = {}
if source:
source_dict = {'rel_path': source,
'abs_path': os.path.abspath(source)}
if header or content:
return {'header': header, 'title': title, 'level': level,
'content': content, 'classes': slide_classes,
'source': source_dict, 'presenter_notes': presenter_notes,
'math_output': self.math_output} | [
"def",
"get_slide_vars",
"(",
"self",
",",
"slide_src",
",",
"source",
"=",
"None",
")",
":",
"presenter_notes",
"=",
"None",
"find",
"=",
"re",
".",
"search",
"(",
"r'<h\\d[^>]*>presenter notes</h\\d>'",
",",
"slide_src",
",",
"re",
".",
"DOTALL",
"|",
"re"... | Computes a single slide template vars from its html source code.
Also extracts slide informations for the table of contents. | [
"Computes",
"a",
"single",
"slide",
"template",
"vars",
"from",
"its",
"html",
"source",
"code",
".",
"Also",
"extracts",
"slide",
"informations",
"for",
"the",
"table",
"of",
"contents",
"."
] | 59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832 | https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/generator.py#L349-L394 |
226,609 | adamzap/landslide | landslide/generator.py | Generator.get_template_vars | def get_template_vars(self, slides):
""" Computes template vars from slides html source code.
"""
try:
head_title = slides[0]['title']
except (IndexError, TypeError):
head_title = "Untitled Presentation"
for slide_index, slide_vars in enumerate(slides):
if not slide_vars:
continue
self.num_slides += 1
slide_number = slide_vars['number'] = self.num_slides
if slide_vars['level'] and slide_vars['level'] <= TOC_MAX_LEVEL:
self.add_toc_entry(slide_vars['title'], slide_vars['level'],
slide_number)
else:
# Put something in the TOC even if it doesn't have a title or level
self.add_toc_entry(u"-", 1, slide_number)
return {'head_title': head_title, 'num_slides': str(self.num_slides),
'slides': slides, 'toc': self.toc, 'embed': self.embed,
'css': self.get_css(), 'js': self.get_js(),
'user_css': self.user_css, 'user_js': self.user_js,
'math_output': self.math_output} | python | def get_template_vars(self, slides):
try:
head_title = slides[0]['title']
except (IndexError, TypeError):
head_title = "Untitled Presentation"
for slide_index, slide_vars in enumerate(slides):
if not slide_vars:
continue
self.num_slides += 1
slide_number = slide_vars['number'] = self.num_slides
if slide_vars['level'] and slide_vars['level'] <= TOC_MAX_LEVEL:
self.add_toc_entry(slide_vars['title'], slide_vars['level'],
slide_number)
else:
# Put something in the TOC even if it doesn't have a title or level
self.add_toc_entry(u"-", 1, slide_number)
return {'head_title': head_title, 'num_slides': str(self.num_slides),
'slides': slides, 'toc': self.toc, 'embed': self.embed,
'css': self.get_css(), 'js': self.get_js(),
'user_css': self.user_css, 'user_js': self.user_js,
'math_output': self.math_output} | [
"def",
"get_template_vars",
"(",
"self",
",",
"slides",
")",
":",
"try",
":",
"head_title",
"=",
"slides",
"[",
"0",
"]",
"[",
"'title'",
"]",
"except",
"(",
"IndexError",
",",
"TypeError",
")",
":",
"head_title",
"=",
"\"Untitled Presentation\"",
"for",
"... | Computes template vars from slides html source code. | [
"Computes",
"template",
"vars",
"from",
"slides",
"html",
"source",
"code",
"."
] | 59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832 | https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/generator.py#L396-L420 |
226,610 | adamzap/landslide | landslide/generator.py | Generator.parse_config | def parse_config(self, config_source):
""" Parses a landslide configuration file and returns a normalized
python dict.
"""
self.log(u"Config %s" % config_source)
try:
raw_config = configparser.RawConfigParser()
raw_config.read(config_source)
except Exception as e:
raise RuntimeError(u"Invalid configuration file: %s" % e)
config = {}
config['source'] = raw_config.get('landslide', 'source')\
.replace('\r', '').split('\n')
if raw_config.has_option('landslide', 'theme'):
config['theme'] = raw_config.get('landslide', 'theme')
self.log(u"Using configured theme %s" % config['theme'])
if raw_config.has_option('landslide', 'destination'):
config['destination'] = raw_config.get('landslide', 'destination')
if raw_config.has_option('landslide', 'linenos'):
config['linenos'] = raw_config.get('landslide', 'linenos')
for boolopt in ('embed', 'relative', 'copy_theme'):
if raw_config.has_option('landslide', boolopt):
config[boolopt] = raw_config.getboolean('landslide', boolopt)
if raw_config.has_option('landslide', 'extensions'):
config['extensions'] = ",".join(raw_config.get('landslide', 'extensions')\
.replace('\r', '').split('\n'))
if raw_config.has_option('landslide', 'css'):
config['css'] = raw_config.get('landslide', 'css')\
.replace('\r', '').split('\n')
if raw_config.has_option('landslide', 'js'):
config['js'] = raw_config.get('landslide', 'js')\
.replace('\r', '').split('\n')
return config | python | def parse_config(self, config_source):
self.log(u"Config %s" % config_source)
try:
raw_config = configparser.RawConfigParser()
raw_config.read(config_source)
except Exception as e:
raise RuntimeError(u"Invalid configuration file: %s" % e)
config = {}
config['source'] = raw_config.get('landslide', 'source')\
.replace('\r', '').split('\n')
if raw_config.has_option('landslide', 'theme'):
config['theme'] = raw_config.get('landslide', 'theme')
self.log(u"Using configured theme %s" % config['theme'])
if raw_config.has_option('landslide', 'destination'):
config['destination'] = raw_config.get('landslide', 'destination')
if raw_config.has_option('landslide', 'linenos'):
config['linenos'] = raw_config.get('landslide', 'linenos')
for boolopt in ('embed', 'relative', 'copy_theme'):
if raw_config.has_option('landslide', boolopt):
config[boolopt] = raw_config.getboolean('landslide', boolopt)
if raw_config.has_option('landslide', 'extensions'):
config['extensions'] = ",".join(raw_config.get('landslide', 'extensions')\
.replace('\r', '').split('\n'))
if raw_config.has_option('landslide', 'css'):
config['css'] = raw_config.get('landslide', 'css')\
.replace('\r', '').split('\n')
if raw_config.has_option('landslide', 'js'):
config['js'] = raw_config.get('landslide', 'js')\
.replace('\r', '').split('\n')
return config | [
"def",
"parse_config",
"(",
"self",
",",
"config_source",
")",
":",
"self",
".",
"log",
"(",
"u\"Config %s\"",
"%",
"config_source",
")",
"try",
":",
"raw_config",
"=",
"configparser",
".",
"RawConfigParser",
"(",
")",
"raw_config",
".",
"read",
"(",
"conf... | Parses a landslide configuration file and returns a normalized
python dict. | [
"Parses",
"a",
"landslide",
"configuration",
"file",
"and",
"returns",
"a",
"normalized",
"python",
"dict",
"."
] | 59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832 | https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/generator.py#L435-L467 |
226,611 | adamzap/landslide | landslide/generator.py | Generator.process_macros | def process_macros(self, content, source=None):
""" Processed all macros.
"""
macro_options = {'relative': self.relative, 'linenos': self.linenos}
classes = []
for macro_class in self.macros:
try:
macro = macro_class(logger=self.logger, embed=self.embed,
options=macro_options)
content, add_classes = macro.process(content, source)
if add_classes:
classes += add_classes
except Exception as e:
self.log(u"%s processing failed in %s: %s"
% (macro, source, e))
return content, classes | python | def process_macros(self, content, source=None):
macro_options = {'relative': self.relative, 'linenos': self.linenos}
classes = []
for macro_class in self.macros:
try:
macro = macro_class(logger=self.logger, embed=self.embed,
options=macro_options)
content, add_classes = macro.process(content, source)
if add_classes:
classes += add_classes
except Exception as e:
self.log(u"%s processing failed in %s: %s"
% (macro, source, e))
return content, classes | [
"def",
"process_macros",
"(",
"self",
",",
"content",
",",
"source",
"=",
"None",
")",
":",
"macro_options",
"=",
"{",
"'relative'",
":",
"self",
".",
"relative",
",",
"'linenos'",
":",
"self",
".",
"linenos",
"}",
"classes",
"=",
"[",
"]",
"for",
"mac... | Processed all macros. | [
"Processed",
"all",
"macros",
"."
] | 59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832 | https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/generator.py#L469-L484 |
226,612 | adamzap/landslide | landslide/generator.py | Generator.register_macro | def register_macro(self, *macros):
""" Registers macro classes passed a method arguments.
"""
for m in macros:
if inspect.isclass(m) and issubclass(m, macro_module.Macro):
self.macros.append(m)
else:
raise TypeError("Coundn't register macro; a macro must inherit"
" from macro.Macro") | python | def register_macro(self, *macros):
for m in macros:
if inspect.isclass(m) and issubclass(m, macro_module.Macro):
self.macros.append(m)
else:
raise TypeError("Coundn't register macro; a macro must inherit"
" from macro.Macro") | [
"def",
"register_macro",
"(",
"self",
",",
"*",
"macros",
")",
":",
"for",
"m",
"in",
"macros",
":",
"if",
"inspect",
".",
"isclass",
"(",
"m",
")",
"and",
"issubclass",
"(",
"m",
",",
"macro_module",
".",
"Macro",
")",
":",
"self",
".",
"macros",
... | Registers macro classes passed a method arguments. | [
"Registers",
"macro",
"classes",
"passed",
"a",
"method",
"arguments",
"."
] | 59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832 | https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/generator.py#L486-L494 |
226,613 | adamzap/landslide | landslide/generator.py | Generator.render | def render(self):
""" Returns generated html code.
"""
with codecs.open(self.template_file, encoding=self.encoding) as template_src:
template = jinja2.Template(template_src.read())
slides = self.fetch_contents(self.source)
context = self.get_template_vars(slides)
html = template.render(context)
if self.embed:
images = re.findall(r'url\(["\']?(.*?\.(?:jpe?g|gif|png|svg)[\'"]?)\)',
html, re.DOTALL | re.UNICODE)
for img_url in images:
img_url = img_url.replace('"', '').replace("'", '')
if self.theme_dir:
source = os.path.join(self.theme_dir, 'css')
else:
source = os.path.join(THEMES_DIR, self.theme, 'css')
encoded_url = utils.encode_image_from_url(img_url, source)
if encoded_url:
html = html.replace(img_url, encoded_url, 1)
self.log("Embedded theme image %s from theme directory %s" % (img_url, source))
else:
# Missing file in theme directory. Try user_css folders
found = False
for css_entry in context['user_css']:
directory = os.path.dirname(css_entry['path_url'])
if not directory:
directory = "."
encoded_url = utils.encode_image_from_url(img_url, directory)
if encoded_url:
found = True
html = html.replace(img_url, encoded_url, 1)
self.log("Embedded theme image %s from directory %s" % (img_url, directory))
if not found:
# Missing image file, etc...
self.log(u"Failed to embed theme image %s" % img_url)
return html | python | def render(self):
with codecs.open(self.template_file, encoding=self.encoding) as template_src:
template = jinja2.Template(template_src.read())
slides = self.fetch_contents(self.source)
context = self.get_template_vars(slides)
html = template.render(context)
if self.embed:
images = re.findall(r'url\(["\']?(.*?\.(?:jpe?g|gif|png|svg)[\'"]?)\)',
html, re.DOTALL | re.UNICODE)
for img_url in images:
img_url = img_url.replace('"', '').replace("'", '')
if self.theme_dir:
source = os.path.join(self.theme_dir, 'css')
else:
source = os.path.join(THEMES_DIR, self.theme, 'css')
encoded_url = utils.encode_image_from_url(img_url, source)
if encoded_url:
html = html.replace(img_url, encoded_url, 1)
self.log("Embedded theme image %s from theme directory %s" % (img_url, source))
else:
# Missing file in theme directory. Try user_css folders
found = False
for css_entry in context['user_css']:
directory = os.path.dirname(css_entry['path_url'])
if not directory:
directory = "."
encoded_url = utils.encode_image_from_url(img_url, directory)
if encoded_url:
found = True
html = html.replace(img_url, encoded_url, 1)
self.log("Embedded theme image %s from directory %s" % (img_url, directory))
if not found:
# Missing image file, etc...
self.log(u"Failed to embed theme image %s" % img_url)
return html | [
"def",
"render",
"(",
"self",
")",
":",
"with",
"codecs",
".",
"open",
"(",
"self",
".",
"template_file",
",",
"encoding",
"=",
"self",
".",
"encoding",
")",
"as",
"template_src",
":",
"template",
"=",
"jinja2",
".",
"Template",
"(",
"template_src",
".",... | Returns generated html code. | [
"Returns",
"generated",
"html",
"code",
"."
] | 59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832 | https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/generator.py#L496-L540 |
226,614 | adamzap/landslide | landslide/generator.py | Generator.write | def write(self):
""" Writes generated presentation code into the destination file.
"""
html = self.render()
if self.file_type == 'pdf':
self.write_pdf(html)
else:
with codecs.open(self.destination_file, 'w',
encoding='utf_8') as outfile:
outfile.write(html) | python | def write(self):
html = self.render()
if self.file_type == 'pdf':
self.write_pdf(html)
else:
with codecs.open(self.destination_file, 'w',
encoding='utf_8') as outfile:
outfile.write(html) | [
"def",
"write",
"(",
"self",
")",
":",
"html",
"=",
"self",
".",
"render",
"(",
")",
"if",
"self",
".",
"file_type",
"==",
"'pdf'",
":",
"self",
".",
"write_pdf",
"(",
"html",
")",
"else",
":",
"with",
"codecs",
".",
"open",
"(",
"self",
".",
"de... | Writes generated presentation code into the destination file. | [
"Writes",
"generated",
"presentation",
"code",
"into",
"the",
"destination",
"file",
"."
] | 59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832 | https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/generator.py#L542-L552 |
226,615 | adamzap/landslide | landslide/generator.py | Generator.write_pdf | def write_pdf(self, html):
""" Tries to write a PDF export from the command line using Prince if
available.
"""
try:
f = tempfile.NamedTemporaryFile(delete=False, suffix='.html')
f.write(html.encode('utf_8', 'xmlcharrefreplace'))
f.close()
except Exception:
raise IOError(u"Unable to create temporary file, aborting")
dummy_fh = open(os.path.devnull, 'w')
try:
command = ["prince", f.name, "-o", self.destination_file]
Popen(command, stderr=dummy_fh).communicate()
except Exception:
raise EnvironmentError(u"Unable to generate PDF file using "
"prince. Is it installed and available?")
finally:
dummy_fh.close() | python | def write_pdf(self, html):
try:
f = tempfile.NamedTemporaryFile(delete=False, suffix='.html')
f.write(html.encode('utf_8', 'xmlcharrefreplace'))
f.close()
except Exception:
raise IOError(u"Unable to create temporary file, aborting")
dummy_fh = open(os.path.devnull, 'w')
try:
command = ["prince", f.name, "-o", self.destination_file]
Popen(command, stderr=dummy_fh).communicate()
except Exception:
raise EnvironmentError(u"Unable to generate PDF file using "
"prince. Is it installed and available?")
finally:
dummy_fh.close() | [
"def",
"write_pdf",
"(",
"self",
",",
"html",
")",
":",
"try",
":",
"f",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"delete",
"=",
"False",
",",
"suffix",
"=",
"'.html'",
")",
"f",
".",
"write",
"(",
"html",
".",
"encode",
"(",
"'utf_8'",
",",
... | Tries to write a PDF export from the command line using Prince if
available. | [
"Tries",
"to",
"write",
"a",
"PDF",
"export",
"from",
"the",
"command",
"line",
"using",
"Prince",
"if",
"available",
"."
] | 59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832 | https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/generator.py#L554-L575 |
226,616 | adamzap/landslide | landslide/main.py | log | def log(message, type):
"""Log notices to stdout and errors to stderr"""
(sys.stdout if type == 'notice' else sys.stderr).write(message + "\n") | python | def log(message, type):
(sys.stdout if type == 'notice' else sys.stderr).write(message + "\n") | [
"def",
"log",
"(",
"message",
",",
"type",
")",
":",
"(",
"sys",
".",
"stdout",
"if",
"type",
"==",
"'notice'",
"else",
"sys",
".",
"stderr",
")",
".",
"write",
"(",
"message",
"+",
"\"\\n\"",
")"
] | Log notices to stdout and errors to stderr | [
"Log",
"notices",
"to",
"stdout",
"and",
"errors",
"to",
"stderr"
] | 59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832 | https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/main.py#L150-L153 |
226,617 | adamzap/landslide | landslide/main.py | run | def run(input_file, options):
"""Runs the Generator using parsed options."""
options.logger = log
generator.Generator(input_file, **options.__dict__).execute() | python | def run(input_file, options):
options.logger = log
generator.Generator(input_file, **options.__dict__).execute() | [
"def",
"run",
"(",
"input_file",
",",
"options",
")",
":",
"options",
".",
"logger",
"=",
"log",
"generator",
".",
"Generator",
"(",
"input_file",
",",
"*",
"*",
"options",
".",
"__dict__",
")",
".",
"execute",
"(",
")"
] | Runs the Generator using parsed options. | [
"Runs",
"the",
"Generator",
"using",
"parsed",
"options",
"."
] | 59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832 | https://github.com/adamzap/landslide/blob/59b0403d7a7cca4b8ff6ba7fb76efb9748b3f832/landslide/main.py#L156-L160 |
226,618 | Diaoul/subliminal | subliminal/providers/napiprojekt.py | get_subhash | def get_subhash(hash):
"""Get a second hash based on napiprojekt's hash.
:param str hash: napiprojekt's hash.
:return: the subhash.
:rtype: str
"""
idx = [0xe, 0x3, 0x6, 0x8, 0x2]
mul = [2, 2, 5, 4, 3]
add = [0, 0xd, 0x10, 0xb, 0x5]
b = []
for i in range(len(idx)):
a = add[i]
m = mul[i]
i = idx[i]
t = a + int(hash[i], 16)
v = int(hash[t:t + 2], 16)
b.append(('%x' % (v * m))[-1])
return ''.join(b) | python | def get_subhash(hash):
idx = [0xe, 0x3, 0x6, 0x8, 0x2]
mul = [2, 2, 5, 4, 3]
add = [0, 0xd, 0x10, 0xb, 0x5]
b = []
for i in range(len(idx)):
a = add[i]
m = mul[i]
i = idx[i]
t = a + int(hash[i], 16)
v = int(hash[t:t + 2], 16)
b.append(('%x' % (v * m))[-1])
return ''.join(b) | [
"def",
"get_subhash",
"(",
"hash",
")",
":",
"idx",
"=",
"[",
"0xe",
",",
"0x3",
",",
"0x6",
",",
"0x8",
",",
"0x2",
"]",
"mul",
"=",
"[",
"2",
",",
"2",
",",
"5",
",",
"4",
",",
"3",
"]",
"add",
"=",
"[",
"0",
",",
"0xd",
",",
"0x10",
... | Get a second hash based on napiprojekt's hash.
:param str hash: napiprojekt's hash.
:return: the subhash.
:rtype: str | [
"Get",
"a",
"second",
"hash",
"based",
"on",
"napiprojekt",
"s",
"hash",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/providers/napiprojekt.py#L14-L35 |
226,619 | Diaoul/subliminal | subliminal/subtitle.py | get_subtitle_path | def get_subtitle_path(video_path, language=None, extension='.srt'):
"""Get the subtitle path using the `video_path` and `language`.
:param str video_path: path to the video.
:param language: language of the subtitle to put in the path.
:type language: :class:`~babelfish.language.Language`
:param str extension: extension of the subtitle.
:return: path of the subtitle.
:rtype: str
"""
subtitle_root = os.path.splitext(video_path)[0]
if language:
subtitle_root += '.' + str(language)
return subtitle_root + extension | python | def get_subtitle_path(video_path, language=None, extension='.srt'):
subtitle_root = os.path.splitext(video_path)[0]
if language:
subtitle_root += '.' + str(language)
return subtitle_root + extension | [
"def",
"get_subtitle_path",
"(",
"video_path",
",",
"language",
"=",
"None",
",",
"extension",
"=",
"'.srt'",
")",
":",
"subtitle_root",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"video_path",
")",
"[",
"0",
"]",
"if",
"language",
":",
"subtitle_root",... | Get the subtitle path using the `video_path` and `language`.
:param str video_path: path to the video.
:param language: language of the subtitle to put in the path.
:type language: :class:`~babelfish.language.Language`
:param str extension: extension of the subtitle.
:return: path of the subtitle.
:rtype: str | [
"Get",
"the",
"subtitle",
"path",
"using",
"the",
"video_path",
"and",
"language",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/subtitle.py#L166-L182 |
226,620 | Diaoul/subliminal | subliminal/subtitle.py | guess_matches | def guess_matches(video, guess, partial=False):
"""Get matches between a `video` and a `guess`.
If a guess is `partial`, the absence information won't be counted as a match.
:param video: the video.
:type video: :class:`~subliminal.video.Video`
:param guess: the guess.
:type guess: dict
:param bool partial: whether or not the guess is partial.
:return: matches between the `video` and the `guess`.
:rtype: set
"""
matches = set()
if isinstance(video, Episode):
# series
if video.series and 'title' in guess and sanitize(guess['title']) == sanitize(video.series):
matches.add('series')
# title
if video.title and 'episode_title' in guess and sanitize(guess['episode_title']) == sanitize(video.title):
matches.add('title')
# season
if video.season and 'season' in guess and guess['season'] == video.season:
matches.add('season')
# episode
if video.episode and 'episode' in guess and guess['episode'] == video.episode:
matches.add('episode')
# year
if video.year and 'year' in guess and guess['year'] == video.year:
matches.add('year')
# count "no year" as an information
if not partial and video.original_series and 'year' not in guess:
matches.add('year')
elif isinstance(video, Movie):
# year
if video.year and 'year' in guess and guess['year'] == video.year:
matches.add('year')
# title
if video.title and 'title' in guess and sanitize(guess['title']) == sanitize(video.title):
matches.add('title')
# release_group
if (video.release_group and 'release_group' in guess and
sanitize_release_group(guess['release_group']) in
get_equivalent_release_groups(sanitize_release_group(video.release_group))):
matches.add('release_group')
# resolution
if video.resolution and 'screen_size' in guess and guess['screen_size'] == video.resolution:
matches.add('resolution')
# format
if video.format and 'format' in guess and guess['format'].lower() == video.format.lower():
matches.add('format')
# video_codec
if video.video_codec and 'video_codec' in guess and guess['video_codec'] == video.video_codec:
matches.add('video_codec')
# audio_codec
if video.audio_codec and 'audio_codec' in guess and guess['audio_codec'] == video.audio_codec:
matches.add('audio_codec')
return matches | python | def guess_matches(video, guess, partial=False):
matches = set()
if isinstance(video, Episode):
# series
if video.series and 'title' in guess and sanitize(guess['title']) == sanitize(video.series):
matches.add('series')
# title
if video.title and 'episode_title' in guess and sanitize(guess['episode_title']) == sanitize(video.title):
matches.add('title')
# season
if video.season and 'season' in guess and guess['season'] == video.season:
matches.add('season')
# episode
if video.episode and 'episode' in guess and guess['episode'] == video.episode:
matches.add('episode')
# year
if video.year and 'year' in guess and guess['year'] == video.year:
matches.add('year')
# count "no year" as an information
if not partial and video.original_series and 'year' not in guess:
matches.add('year')
elif isinstance(video, Movie):
# year
if video.year and 'year' in guess and guess['year'] == video.year:
matches.add('year')
# title
if video.title and 'title' in guess and sanitize(guess['title']) == sanitize(video.title):
matches.add('title')
# release_group
if (video.release_group and 'release_group' in guess and
sanitize_release_group(guess['release_group']) in
get_equivalent_release_groups(sanitize_release_group(video.release_group))):
matches.add('release_group')
# resolution
if video.resolution and 'screen_size' in guess and guess['screen_size'] == video.resolution:
matches.add('resolution')
# format
if video.format and 'format' in guess and guess['format'].lower() == video.format.lower():
matches.add('format')
# video_codec
if video.video_codec and 'video_codec' in guess and guess['video_codec'] == video.video_codec:
matches.add('video_codec')
# audio_codec
if video.audio_codec and 'audio_codec' in guess and guess['audio_codec'] == video.audio_codec:
matches.add('audio_codec')
return matches | [
"def",
"guess_matches",
"(",
"video",
",",
"guess",
",",
"partial",
"=",
"False",
")",
":",
"matches",
"=",
"set",
"(",
")",
"if",
"isinstance",
"(",
"video",
",",
"Episode",
")",
":",
"# series",
"if",
"video",
".",
"series",
"and",
"'title'",
"in",
... | Get matches between a `video` and a `guess`.
If a guess is `partial`, the absence information won't be counted as a match.
:param video: the video.
:type video: :class:`~subliminal.video.Video`
:param guess: the guess.
:type guess: dict
:param bool partial: whether or not the guess is partial.
:return: matches between the `video` and the `guess`.
:rtype: set | [
"Get",
"matches",
"between",
"a",
"video",
"and",
"a",
"guess",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/subtitle.py#L185-L244 |
226,621 | Diaoul/subliminal | subliminal/subtitle.py | Subtitle.text | def text(self):
"""Content as string
If :attr:`encoding` is None, the encoding is guessed with :meth:`guess_encoding`
"""
if not self.content:
return
if self.encoding:
return self.content.decode(self.encoding, errors='replace')
return self.content.decode(self.guess_encoding(), errors='replace') | python | def text(self):
if not self.content:
return
if self.encoding:
return self.content.decode(self.encoding, errors='replace')
return self.content.decode(self.guess_encoding(), errors='replace') | [
"def",
"text",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"content",
":",
"return",
"if",
"self",
".",
"encoding",
":",
"return",
"self",
".",
"content",
".",
"decode",
"(",
"self",
".",
"encoding",
",",
"errors",
"=",
"'replace'",
")",
"return... | Content as string
If :attr:`encoding` is None, the encoding is guessed with :meth:`guess_encoding` | [
"Content",
"as",
"string"
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/subtitle.py#L64-L76 |
226,622 | Diaoul/subliminal | subliminal/subtitle.py | Subtitle.guess_encoding | def guess_encoding(self):
"""Guess encoding using the language, falling back on chardet.
:return: the guessed encoding.
:rtype: str
"""
logger.info('Guessing encoding for language %s', self.language)
# always try utf-8 first
encodings = ['utf-8']
# add language-specific encodings
if self.language.alpha3 == 'zho':
encodings.extend(['gb18030', 'big5'])
elif self.language.alpha3 == 'jpn':
encodings.append('shift-jis')
elif self.language.alpha3 == 'ara':
encodings.append('windows-1256')
elif self.language.alpha3 == 'heb':
encodings.append('windows-1255')
elif self.language.alpha3 == 'tur':
encodings.extend(['iso-8859-9', 'windows-1254'])
elif self.language.alpha3 == 'pol':
# Eastern European Group 1
encodings.extend(['windows-1250'])
elif self.language.alpha3 == 'bul':
# Eastern European Group 2
encodings.extend(['windows-1251'])
else:
# Western European (windows-1252)
encodings.append('latin-1')
# try to decode
logger.debug('Trying encodings %r', encodings)
for encoding in encodings:
try:
self.content.decode(encoding)
except UnicodeDecodeError:
pass
else:
logger.info('Guessed encoding %s', encoding)
return encoding
logger.warning('Could not guess encoding from language')
# fallback on chardet
encoding = chardet.detect(self.content)['encoding']
logger.info('Chardet found encoding %s', encoding)
return encoding | python | def guess_encoding(self):
logger.info('Guessing encoding for language %s', self.language)
# always try utf-8 first
encodings = ['utf-8']
# add language-specific encodings
if self.language.alpha3 == 'zho':
encodings.extend(['gb18030', 'big5'])
elif self.language.alpha3 == 'jpn':
encodings.append('shift-jis')
elif self.language.alpha3 == 'ara':
encodings.append('windows-1256')
elif self.language.alpha3 == 'heb':
encodings.append('windows-1255')
elif self.language.alpha3 == 'tur':
encodings.extend(['iso-8859-9', 'windows-1254'])
elif self.language.alpha3 == 'pol':
# Eastern European Group 1
encodings.extend(['windows-1250'])
elif self.language.alpha3 == 'bul':
# Eastern European Group 2
encodings.extend(['windows-1251'])
else:
# Western European (windows-1252)
encodings.append('latin-1')
# try to decode
logger.debug('Trying encodings %r', encodings)
for encoding in encodings:
try:
self.content.decode(encoding)
except UnicodeDecodeError:
pass
else:
logger.info('Guessed encoding %s', encoding)
return encoding
logger.warning('Could not guess encoding from language')
# fallback on chardet
encoding = chardet.detect(self.content)['encoding']
logger.info('Chardet found encoding %s', encoding)
return encoding | [
"def",
"guess_encoding",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"'Guessing encoding for language %s'",
",",
"self",
".",
"language",
")",
"# always try utf-8 first",
"encodings",
"=",
"[",
"'utf-8'",
"]",
"# add language-specific encodings",
"if",
"self",
... | Guess encoding using the language, falling back on chardet.
:return: the guessed encoding.
:rtype: str | [
"Guess",
"encoding",
"using",
"the",
"language",
"falling",
"back",
"on",
"chardet",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/subtitle.py#L96-L146 |
226,623 | Diaoul/subliminal | subliminal/providers/subscenter.py | SubsCenterProvider._search_url_titles | def _search_url_titles(self, title):
"""Search the URL titles by kind for the given `title`.
:param str title: title to search for.
:return: the URL titles by kind.
:rtype: collections.defaultdict
"""
# make the search
logger.info('Searching title name for %r', title)
r = self.session.get(self.server_url + 'subtitle/search/', params={'q': title}, timeout=10)
r.raise_for_status()
# check for redirections
if r.history and all([h.status_code == 302 for h in r.history]):
logger.debug('Redirected to the subtitles page')
links = [r.url]
else:
# get the suggestions (if needed)
soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
links = [link.attrs['href'] for link in soup.select('#processes div.generalWindowTop a')]
logger.debug('Found %d suggestions', len(links))
url_titles = defaultdict(list)
for link in links:
parts = link.split('/')
url_titles[parts[-3]].append(parts[-2])
return url_titles | python | def _search_url_titles(self, title):
# make the search
logger.info('Searching title name for %r', title)
r = self.session.get(self.server_url + 'subtitle/search/', params={'q': title}, timeout=10)
r.raise_for_status()
# check for redirections
if r.history and all([h.status_code == 302 for h in r.history]):
logger.debug('Redirected to the subtitles page')
links = [r.url]
else:
# get the suggestions (if needed)
soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
links = [link.attrs['href'] for link in soup.select('#processes div.generalWindowTop a')]
logger.debug('Found %d suggestions', len(links))
url_titles = defaultdict(list)
for link in links:
parts = link.split('/')
url_titles[parts[-3]].append(parts[-2])
return url_titles | [
"def",
"_search_url_titles",
"(",
"self",
",",
"title",
")",
":",
"# make the search",
"logger",
".",
"info",
"(",
"'Searching title name for %r'",
",",
"title",
")",
"r",
"=",
"self",
".",
"session",
".",
"get",
"(",
"self",
".",
"server_url",
"+",
"'subtit... | Search the URL titles by kind for the given `title`.
:param str title: title to search for.
:return: the URL titles by kind.
:rtype: collections.defaultdict | [
"Search",
"the",
"URL",
"titles",
"by",
"kind",
"for",
"the",
"given",
"title",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/providers/subscenter.py#L123-L151 |
226,624 | Diaoul/subliminal | subliminal/video.py | Video.age | def age(self):
"""Age of the video"""
if self.exists:
return datetime.utcnow() - datetime.utcfromtimestamp(os.path.getmtime(self.name))
return timedelta() | python | def age(self):
if self.exists:
return datetime.utcnow() - datetime.utcfromtimestamp(os.path.getmtime(self.name))
return timedelta() | [
"def",
"age",
"(",
"self",
")",
":",
"if",
"self",
".",
"exists",
":",
"return",
"datetime",
".",
"utcnow",
"(",
")",
"-",
"datetime",
".",
"utcfromtimestamp",
"(",
"os",
".",
"path",
".",
"getmtime",
"(",
"self",
".",
"name",
")",
")",
"return",
"... | Age of the video | [
"Age",
"of",
"the",
"video"
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/video.py#L76-L81 |
226,625 | Diaoul/subliminal | subliminal/extensions.py | RegistrableExtensionManager.register | def register(self, entry_point):
"""Register an extension
:param str entry_point: extension to register (entry point syntax).
:raise: ValueError if already registered.
"""
if entry_point in self.registered_extensions:
raise ValueError('Extension already registered')
ep = EntryPoint.parse(entry_point)
if ep.name in self.names():
raise ValueError('An extension with the same name already exist')
ext = self._load_one_plugin(ep, False, (), {}, False)
self.extensions.append(ext)
if self._extensions_by_name is not None:
self._extensions_by_name[ext.name] = ext
self.registered_extensions.insert(0, entry_point) | python | def register(self, entry_point):
if entry_point in self.registered_extensions:
raise ValueError('Extension already registered')
ep = EntryPoint.parse(entry_point)
if ep.name in self.names():
raise ValueError('An extension with the same name already exist')
ext = self._load_one_plugin(ep, False, (), {}, False)
self.extensions.append(ext)
if self._extensions_by_name is not None:
self._extensions_by_name[ext.name] = ext
self.registered_extensions.insert(0, entry_point) | [
"def",
"register",
"(",
"self",
",",
"entry_point",
")",
":",
"if",
"entry_point",
"in",
"self",
".",
"registered_extensions",
":",
"raise",
"ValueError",
"(",
"'Extension already registered'",
")",
"ep",
"=",
"EntryPoint",
".",
"parse",
"(",
"entry_point",
")",... | Register an extension
:param str entry_point: extension to register (entry point syntax).
:raise: ValueError if already registered. | [
"Register",
"an",
"extension"
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/extensions.py#L50-L68 |
226,626 | Diaoul/subliminal | subliminal/extensions.py | RegistrableExtensionManager.unregister | def unregister(self, entry_point):
"""Unregister a provider
:param str entry_point: provider to unregister (entry point syntax).
"""
if entry_point not in self.registered_extensions:
raise ValueError('Extension not registered')
ep = EntryPoint.parse(entry_point)
self.registered_extensions.remove(entry_point)
if self._extensions_by_name is not None:
del self._extensions_by_name[ep.name]
for i, ext in enumerate(self.extensions):
if ext.name == ep.name:
del self.extensions[i]
break | python | def unregister(self, entry_point):
if entry_point not in self.registered_extensions:
raise ValueError('Extension not registered')
ep = EntryPoint.parse(entry_point)
self.registered_extensions.remove(entry_point)
if self._extensions_by_name is not None:
del self._extensions_by_name[ep.name]
for i, ext in enumerate(self.extensions):
if ext.name == ep.name:
del self.extensions[i]
break | [
"def",
"unregister",
"(",
"self",
",",
"entry_point",
")",
":",
"if",
"entry_point",
"not",
"in",
"self",
".",
"registered_extensions",
":",
"raise",
"ValueError",
"(",
"'Extension not registered'",
")",
"ep",
"=",
"EntryPoint",
".",
"parse",
"(",
"entry_point",... | Unregister a provider
:param str entry_point: provider to unregister (entry point syntax). | [
"Unregister",
"a",
"provider"
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/extensions.py#L70-L86 |
226,627 | Diaoul/subliminal | subliminal/utils.py | hash_opensubtitles | def hash_opensubtitles(video_path):
"""Compute a hash using OpenSubtitles' algorithm.
:param str video_path: path of the video.
:return: the hash.
:rtype: str
"""
bytesize = struct.calcsize(b'<q')
with open(video_path, 'rb') as f:
filesize = os.path.getsize(video_path)
filehash = filesize
if filesize < 65536 * 2:
return
for _ in range(65536 // bytesize):
filebuffer = f.read(bytesize)
(l_value,) = struct.unpack(b'<q', filebuffer)
filehash += l_value
filehash &= 0xFFFFFFFFFFFFFFFF # to remain as 64bit number
f.seek(max(0, filesize - 65536), 0)
for _ in range(65536 // bytesize):
filebuffer = f.read(bytesize)
(l_value,) = struct.unpack(b'<q', filebuffer)
filehash += l_value
filehash &= 0xFFFFFFFFFFFFFFFF
returnedhash = '%016x' % filehash
return returnedhash | python | def hash_opensubtitles(video_path):
bytesize = struct.calcsize(b'<q')
with open(video_path, 'rb') as f:
filesize = os.path.getsize(video_path)
filehash = filesize
if filesize < 65536 * 2:
return
for _ in range(65536 // bytesize):
filebuffer = f.read(bytesize)
(l_value,) = struct.unpack(b'<q', filebuffer)
filehash += l_value
filehash &= 0xFFFFFFFFFFFFFFFF # to remain as 64bit number
f.seek(max(0, filesize - 65536), 0)
for _ in range(65536 // bytesize):
filebuffer = f.read(bytesize)
(l_value,) = struct.unpack(b'<q', filebuffer)
filehash += l_value
filehash &= 0xFFFFFFFFFFFFFFFF
returnedhash = '%016x' % filehash
return returnedhash | [
"def",
"hash_opensubtitles",
"(",
"video_path",
")",
":",
"bytesize",
"=",
"struct",
".",
"calcsize",
"(",
"b'<q'",
")",
"with",
"open",
"(",
"video_path",
",",
"'rb'",
")",
"as",
"f",
":",
"filesize",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"vide... | Compute a hash using OpenSubtitles' algorithm.
:param str video_path: path of the video.
:return: the hash.
:rtype: str | [
"Compute",
"a",
"hash",
"using",
"OpenSubtitles",
"algorithm",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/utils.py#L9-L36 |
226,628 | Diaoul/subliminal | subliminal/utils.py | hash_thesubdb | def hash_thesubdb(video_path):
"""Compute a hash using TheSubDB's algorithm.
:param str video_path: path of the video.
:return: the hash.
:rtype: str
"""
readsize = 64 * 1024
if os.path.getsize(video_path) < readsize:
return
with open(video_path, 'rb') as f:
data = f.read(readsize)
f.seek(-readsize, os.SEEK_END)
data += f.read(readsize)
return hashlib.md5(data).hexdigest() | python | def hash_thesubdb(video_path):
readsize = 64 * 1024
if os.path.getsize(video_path) < readsize:
return
with open(video_path, 'rb') as f:
data = f.read(readsize)
f.seek(-readsize, os.SEEK_END)
data += f.read(readsize)
return hashlib.md5(data).hexdigest() | [
"def",
"hash_thesubdb",
"(",
"video_path",
")",
":",
"readsize",
"=",
"64",
"*",
"1024",
"if",
"os",
".",
"path",
".",
"getsize",
"(",
"video_path",
")",
"<",
"readsize",
":",
"return",
"with",
"open",
"(",
"video_path",
",",
"'rb'",
")",
"as",
"f",
... | Compute a hash using TheSubDB's algorithm.
:param str video_path: path of the video.
:return: the hash.
:rtype: str | [
"Compute",
"a",
"hash",
"using",
"TheSubDB",
"s",
"algorithm",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/utils.py#L39-L55 |
226,629 | Diaoul/subliminal | subliminal/utils.py | hash_napiprojekt | def hash_napiprojekt(video_path):
"""Compute a hash using NapiProjekt's algorithm.
:param str video_path: path of the video.
:return: the hash.
:rtype: str
"""
readsize = 1024 * 1024 * 10
with open(video_path, 'rb') as f:
data = f.read(readsize)
return hashlib.md5(data).hexdigest() | python | def hash_napiprojekt(video_path):
readsize = 1024 * 1024 * 10
with open(video_path, 'rb') as f:
data = f.read(readsize)
return hashlib.md5(data).hexdigest() | [
"def",
"hash_napiprojekt",
"(",
"video_path",
")",
":",
"readsize",
"=",
"1024",
"*",
"1024",
"*",
"10",
"with",
"open",
"(",
"video_path",
",",
"'rb'",
")",
"as",
"f",
":",
"data",
"=",
"f",
".",
"read",
"(",
"readsize",
")",
"return",
"hashlib",
".... | Compute a hash using NapiProjekt's algorithm.
:param str video_path: path of the video.
:return: the hash.
:rtype: str | [
"Compute",
"a",
"hash",
"using",
"NapiProjekt",
"s",
"algorithm",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/utils.py#L58-L69 |
226,630 | Diaoul/subliminal | subliminal/utils.py | hash_shooter | def hash_shooter(video_path):
"""Compute a hash using Shooter's algorithm
:param string video_path: path of the video
:return: the hash
:rtype: string
"""
filesize = os.path.getsize(video_path)
readsize = 4096
if os.path.getsize(video_path) < readsize * 2:
return None
offsets = (readsize, filesize // 3 * 2, filesize // 3, filesize - readsize * 2)
filehash = []
with open(video_path, 'rb') as f:
for offset in offsets:
f.seek(offset)
filehash.append(hashlib.md5(f.read(readsize)).hexdigest())
return ';'.join(filehash) | python | def hash_shooter(video_path):
filesize = os.path.getsize(video_path)
readsize = 4096
if os.path.getsize(video_path) < readsize * 2:
return None
offsets = (readsize, filesize // 3 * 2, filesize // 3, filesize - readsize * 2)
filehash = []
with open(video_path, 'rb') as f:
for offset in offsets:
f.seek(offset)
filehash.append(hashlib.md5(f.read(readsize)).hexdigest())
return ';'.join(filehash) | [
"def",
"hash_shooter",
"(",
"video_path",
")",
":",
"filesize",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"video_path",
")",
"readsize",
"=",
"4096",
"if",
"os",
".",
"path",
".",
"getsize",
"(",
"video_path",
")",
"<",
"readsize",
"*",
"2",
":",
... | Compute a hash using Shooter's algorithm
:param string video_path: path of the video
:return: the hash
:rtype: string | [
"Compute",
"a",
"hash",
"using",
"Shooter",
"s",
"algorithm"
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/utils.py#L72-L90 |
def sanitize(string, ignore_characters=None):
    """Sanitize a string to strip special characters.

    :param str string: the string to sanitize.
    :param set ignore_characters: characters to ignore.
    :return: the sanitized string.
    :rtype: str
    """
    # nothing to do on non-strings
    if string is None:
        return

    ignored = ignore_characters or set()

    # characters replaced by a single space
    spaced = {'-', ':', '(', ')', '.'} - ignored
    if spaced:
        string = re.sub(r'[%s]' % re.escape(''.join(spaced)), ' ', string)

    # characters dropped entirely
    dropped = {'\''} - ignored
    if dropped:
        string = re.sub(r'[%s]' % re.escape(''.join(dropped)), '', string)

    # collapse whitespace runs, then strip and lower case
    return re.sub(r'\s+', ' ', string).strip().lower()
# only deal with strings
if string is None:
return
ignore_characters = ignore_characters or set()
# replace some characters with one space
characters = {'-', ':', '(', ')', '.'} - ignore_characters
if characters:
string = re.sub(r'[%s]' % re.escape(''.join(characters)), ' ', string)
# remove some characters
characters = {'\''} - ignore_characters
if characters:
string = re.sub(r'[%s]' % re.escape(''.join(characters)), '', string)
# replace multiple spaces with one
string = re.sub(r'\s+', ' ', string)
# strip and lower case
return string.strip().lower() | [
"def",
"sanitize",
"(",
"string",
",",
"ignore_characters",
"=",
"None",
")",
":",
"# only deal with strings",
"if",
"string",
"is",
"None",
":",
"return",
"ignore_characters",
"=",
"ignore_characters",
"or",
"set",
"(",
")",
"# replace some characters with one space"... | Sanitize a string to strip special characters.
:param str string: the string to sanitize.
:param set ignore_characters: characters to ignore.
:return: the sanitized string.
:rtype: str | [
"Sanitize",
"a",
"string",
"to",
"strip",
"special",
"characters",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/utils.py#L93-L122 |
def sanitize_release_group(string):
    """Sanitize a `release_group` string to remove content in square brackets.

    :param str string: the release group to sanitize.
    :return: the sanitized release group.
    :rtype: str
    """
    # nothing to do on non-strings
    if string is None:
        return

    # drop bracketed tags (e.g. codec markers), then strip and upper case
    return re.sub(r'\[\w+\]', '', string).strip().upper()
# only deal with strings
if string is None:
return
# remove content in square brackets
string = re.sub(r'\[\w+\]', '', string)
# strip and upper case
return string.strip().upper() | [
"def",
"sanitize_release_group",
"(",
"string",
")",
":",
"# only deal with strings",
"if",
"string",
"is",
"None",
":",
"return",
"# remove content in square brackets",
"string",
"=",
"re",
".",
"sub",
"(",
"r'\\[\\w+\\]'",
",",
"''",
",",
"string",
")",
"# strip... | Sanitize a `release_group` string to remove content in square brackets.
:param str string: the release group to sanitize.
:return: the sanitized release group.
:rtype: str | [
"Sanitize",
"a",
"release_group",
"string",
"to",
"remove",
"content",
"in",
"square",
"brackets",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/utils.py#L125-L141 |
def subliminal(ctx, addic7ed, legendastv, opensubtitles, subscenter, cache_dir, debug):
    """Subtitles, faster than your thoughts."""
    # ensure the cache directory exists
    try:
        os.makedirs(cache_dir)
    except OSError:
        if not os.path.isdir(cache_dir):
            raise

    # configure the dbm-backed cache with a 30-day expiration
    region.configure('dogpile.cache.dbm', expiration_time=timedelta(days=30),
                     arguments={'filename': os.path.join(cache_dir, cache_file), 'lock_factory': MutexLock})

    # verbose logging to stderr on demand
    if debug:
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
        logging.getLogger('subliminal').addHandler(handler)
        logging.getLogger('subliminal').setLevel(logging.DEBUG)

    # collect credentials for each provider that got a username/password pair
    ctx.obj = {'provider_configs': {}}
    for name, credentials in (('addic7ed', addic7ed), ('legendastv', legendastv),
                              ('opensubtitles', opensubtitles), ('subscenter', subscenter)):
        if credentials:
            ctx.obj['provider_configs'][name] = {'username': credentials[0], 'password': credentials[1]}
# create cache directory
try:
os.makedirs(cache_dir)
except OSError:
if not os.path.isdir(cache_dir):
raise
# configure cache
region.configure('dogpile.cache.dbm', expiration_time=timedelta(days=30),
arguments={'filename': os.path.join(cache_dir, cache_file), 'lock_factory': MutexLock})
# configure logging
if debug:
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
logging.getLogger('subliminal').addHandler(handler)
logging.getLogger('subliminal').setLevel(logging.DEBUG)
# provider configs
ctx.obj = {'provider_configs': {}}
if addic7ed:
ctx.obj['provider_configs']['addic7ed'] = {'username': addic7ed[0], 'password': addic7ed[1]}
if legendastv:
ctx.obj['provider_configs']['legendastv'] = {'username': legendastv[0], 'password': legendastv[1]}
if opensubtitles:
ctx.obj['provider_configs']['opensubtitles'] = {'username': opensubtitles[0], 'password': opensubtitles[1]}
if subscenter:
ctx.obj['provider_configs']['subscenter'] = {'username': subscenter[0], 'password': subscenter[1]} | [
"def",
"subliminal",
"(",
"ctx",
",",
"addic7ed",
",",
"legendastv",
",",
"opensubtitles",
",",
"subscenter",
",",
"cache_dir",
",",
"debug",
")",
":",
"# create cache directory",
"try",
":",
"os",
".",
"makedirs",
"(",
"cache_dir",
")",
"except",
"OSError",
... | Subtitles, faster than your thoughts. | [
"Subtitles",
"faster",
"than",
"your",
"thoughts",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/cli.py#L228-L257 |
226,634 | Diaoul/subliminal | subliminal/cli.py | cache | def cache(ctx, clear_subliminal):
"""Cache management."""
if clear_subliminal:
for file in glob.glob(os.path.join(ctx.parent.params['cache_dir'], cache_file) + '*'):
os.remove(file)
click.echo('Subliminal\'s cache cleared.')
else:
click.echo('Nothing done.') | python | def cache(ctx, clear_subliminal):
if clear_subliminal:
for file in glob.glob(os.path.join(ctx.parent.params['cache_dir'], cache_file) + '*'):
os.remove(file)
click.echo('Subliminal\'s cache cleared.')
else:
click.echo('Nothing done.') | [
"def",
"cache",
"(",
"ctx",
",",
"clear_subliminal",
")",
":",
"if",
"clear_subliminal",
":",
"for",
"file",
"in",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"ctx",
".",
"parent",
".",
"params",
"[",
"'cache_dir'",
"]",
",",
"cach... | Cache management. | [
"Cache",
"management",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/cli.py#L264-L271 |
def search_titles(self, title):
    """Search for titles matching the `title`.

    :param str title: the title to search for.
    :return: found titles.
    :rtype: dict
    """
    # query the suggestion endpoint
    logger.info('Searching title %r', title)
    r = self.session.get(self.server_url + 'legenda/sugestao/{}'.format(title), timeout=10)
    r.raise_for_status()

    titles = {}
    for result in json.loads(r.text):
        source = result['_source']
        title_id = int(source['id_filme'])
        entry = {'type': type_map[source['tipo']], 'title': source['dsc_nome']}

        # release year, when present and numeric
        if source['dsc_data_lancamento'] and source['dsc_data_lancamento'].isdigit():
            entry['year'] = int(source['dsc_data_lancamento'])

        # normalize the imdb id to the tt-prefixed, zero-padded form
        if source['id_imdb'] != '0':
            if source['id_imdb'].startswith('tt'):
                entry['imdb_id'] = source['id_imdb']
            else:
                entry['imdb_id'] = 'tt' + source['id_imdb'].zfill(7)

        # episodes carry a season, either as a field or embedded in the name
        if entry['type'] == 'episode':
            if source['temporada'] and source['temporada'].isdigit():
                entry['season'] = int(source['temporada'])
            else:
                match = season_re.search(source['dsc_nome_br'])
                if match:
                    entry['season'] = int(match.group('season'))
                else:
                    logger.warning('No season detected for title %d', title_id)

        titles[title_id] = entry

    logger.debug('Found %d titles', len(titles))
    return titles
# make the query
logger.info('Searching title %r', title)
r = self.session.get(self.server_url + 'legenda/sugestao/{}'.format(title), timeout=10)
r.raise_for_status()
results = json.loads(r.text)
# loop over results
titles = {}
for result in results:
source = result['_source']
# extract id
title_id = int(source['id_filme'])
# extract type and title
title = {'type': type_map[source['tipo']], 'title': source['dsc_nome']}
# extract year
if source['dsc_data_lancamento'] and source['dsc_data_lancamento'].isdigit():
title['year'] = int(source['dsc_data_lancamento'])
# extract imdb_id
if source['id_imdb'] != '0':
if not source['id_imdb'].startswith('tt'):
title['imdb_id'] = 'tt' + source['id_imdb'].zfill(7)
else:
title['imdb_id'] = source['id_imdb']
# extract season
if title['type'] == 'episode':
if source['temporada'] and source['temporada'].isdigit():
title['season'] = int(source['temporada'])
else:
match = season_re.search(source['dsc_nome_br'])
if match:
title['season'] = int(match.group('season'))
else:
logger.warning('No season detected for title %d', title_id)
# add title
titles[title_id] = title
logger.debug('Found %d titles', len(titles))
return titles | [
"def",
"search_titles",
"(",
"self",
",",
"title",
")",
":",
"# make the query",
"logger",
".",
"info",
"(",
"'Searching title %r'",
",",
"title",
")",
"r",
"=",
"self",
".",
"session",
".",
"get",
"(",
"self",
".",
"server_url",
"+",
"'legenda/sugestao/{}'"... | Search for titles matching the `title`.
:param str title: the title to search for.
:return: found titles.
:rtype: dict | [
"Search",
"for",
"titles",
"matching",
"the",
"title",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/providers/legendastv.py#L203-L255 |
def get_archives(self, title_id, language_code):
    """Get the archive list from a given `title_id` and `language_code`.

    :param int title_id: title id.
    :param int language_code: language code.
    :return: the archives.
    :rtype: list of :class:`LegendasTVArchive`
    """
    logger.info('Getting archives for title %d and language %d', title_id, language_code)
    archives = []
    page = 1
    while True:
        # fetch one page of results
        url = self.server_url + 'util/carrega_legendas_busca_filme/{title}/{language}/-/{page}'.format(
            title=title_id, language=language_code, page=page)
        r = self.session.get(url)
        r.raise_for_status()

        soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
        for archive_soup in soup.select('div.list_element > article > div'):
            link = archive_soup.a
            archive = LegendasTVArchive(link['href'].split('/')[2], link.text,
                                        'pack' in archive_soup['class'], 'destaque' in archive_soup['class'],
                                        self.server_url + link['href'][1:])

            # the data paragraph holds downloads, rating and timestamp
            data_text = archive_soup.find('p', class_='data').text
            archive.downloads = int(downloads_re.search(data_text).group('downloads'))

            rating_match = rating_re.search(data_text)
            if rating_match:
                archive.rating = int(rating_match.group('rating'))

            # timestamps are local to America/Sao_Paulo; reject future values
            time_data = {k: int(v) for k, v in timestamp_re.search(data_text).groupdict().items()}
            archive.timestamp = pytz.timezone('America/Sao_Paulo').localize(datetime(**time_data))
            if archive.timestamp > datetime.utcnow().replace(tzinfo=pytz.utc):
                raise ProviderError('Archive timestamp is in the future')

            archives.append(archive)

        # no "load more" link means this was the last page
        if soup.find('a', attrs={'class': 'load_more'}, string='carregar mais') is None:
            break
        page += 1

    logger.debug('Found %d archives', len(archives))
    return archives
logger.info('Getting archives for title %d and language %d', title_id, language_code)
archives = []
page = 1
while True:
# get the archive page
url = self.server_url + 'util/carrega_legendas_busca_filme/{title}/{language}/-/{page}'.format(
title=title_id, language=language_code, page=page)
r = self.session.get(url)
r.raise_for_status()
# parse the results
soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
for archive_soup in soup.select('div.list_element > article > div'):
# create archive
archive = LegendasTVArchive(archive_soup.a['href'].split('/')[2], archive_soup.a.text,
'pack' in archive_soup['class'], 'destaque' in archive_soup['class'],
self.server_url + archive_soup.a['href'][1:])
# extract text containing downloads, rating and timestamp
data_text = archive_soup.find('p', class_='data').text
# match downloads
archive.downloads = int(downloads_re.search(data_text).group('downloads'))
# match rating
match = rating_re.search(data_text)
if match:
archive.rating = int(match.group('rating'))
# match timestamp and validate it
time_data = {k: int(v) for k, v in timestamp_re.search(data_text).groupdict().items()}
archive.timestamp = pytz.timezone('America/Sao_Paulo').localize(datetime(**time_data))
if archive.timestamp > datetime.utcnow().replace(tzinfo=pytz.utc):
raise ProviderError('Archive timestamp is in the future')
# add archive
archives.append(archive)
# stop on last page
if soup.find('a', attrs={'class': 'load_more'}, string='carregar mais') is None:
break
# increment page count
page += 1
logger.debug('Found %d archives', len(archives))
return archives | [
"def",
"get_archives",
"(",
"self",
",",
"title_id",
",",
"language_code",
")",
":",
"logger",
".",
"info",
"(",
"'Getting archives for title %d and language %d'",
",",
"title_id",
",",
"language_code",
")",
"archives",
"=",
"[",
"]",
"page",
"=",
"1",
"while",
... | Get the archive list from a given `title_id` and `language_code`.
:param int title_id: title id.
:param int language_code: language code.
:return: the archives.
:rtype: list of :class:`LegendasTVArchive` | [
"Get",
"the",
"archive",
"list",
"from",
"a",
"given",
"title_id",
"and",
"language_code",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/providers/legendastv.py#L258-L314 |
def check_video(video, languages=None, age=None, undefined=False):
    """Perform some checks on the `video`.

    All the checks are optional. Return `False` if any of this check fails:

        * `languages` already exist in `video`'s :attr:`~subliminal.video.Video.subtitle_languages`.
        * `video` is older than `age`.
        * `video` has an `undefined` language in :attr:`~subliminal.video.Video.subtitle_languages`.

    :param video: video to check.
    :type video: :class:`~subliminal.video.Video`
    :param languages: desired languages.
    :type languages: set of :class:`~babelfish.language.Language`
    :param datetime.timedelta age: maximum age of the video.
    :param bool undefined: fail on existing undefined language.
    :return: `True` if the video passes the checks, `False` otherwise.
    :rtype: bool
    """
    # each entry: (failed?, log message, optional log argument)
    failures = (
        (languages and not (languages - video.subtitle_languages), 'All languages %r exist', languages),
        (age and video.age > age, 'Video is older than %r', age),
        (undefined and Language('und') in video.subtitle_languages, 'Undefined language found', None),
    )
    for failed, message, argument in failures:
        if failed:
            if argument is not None:
                logger.debug(message, argument)
            else:
                logger.debug(message)
            return False
    return True
# language test
if languages and not (languages - video.subtitle_languages):
logger.debug('All languages %r exist', languages)
return False
# age test
if age and video.age > age:
logger.debug('Video is older than %r', age)
return False
# undefined test
if undefined and Language('und') in video.subtitle_languages:
logger.debug('Undefined language found')
return False
return True | [
"def",
"check_video",
"(",
"video",
",",
"languages",
"=",
"None",
",",
"age",
"=",
"None",
",",
"undefined",
"=",
"False",
")",
":",
"# language test",
"if",
"languages",
"and",
"not",
"(",
"languages",
"-",
"video",
".",
"subtitle_languages",
")",
":",
... | Perform some checks on the `video`.
All the checks are optional. Return `False` if any of this check fails:
* `languages` already exist in `video`'s :attr:`~subliminal.video.Video.subtitle_languages`.
* `video` is older than `age`.
* `video` has an `undefined` language in :attr:`~subliminal.video.Video.subtitle_languages`.
:param video: video to check.
:type video: :class:`~subliminal.video.Video`
:param languages: desired languages.
:type languages: set of :class:`~babelfish.language.Language`
:param datetime.timedelta age: maximum age of the video.
:param bool undefined: fail on existing undefined language.
:return: `True` if the video passes the checks, `False` otherwise.
:rtype: bool | [
"Perform",
"some",
"checks",
"on",
"the",
"video",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/core.py#L284-L318 |
def search_external_subtitles(path, directory=None):
    """Search for external subtitles from a video `path` and their associated language.

    Unless `directory` is provided, search will be made in the same directory as the video file.

    :param str path: path to the video.
    :param str directory: directory to search for subtitles.
    :return: found subtitles with their languages.
    :rtype: dict
    """
    dirpath, filename = os.path.split(path)
    dirpath = dirpath or '.'
    fileroot, fileext = os.path.splitext(filename)

    subtitles = {}
    for candidate in os.listdir(directory or dirpath):
        # only subtitle files sharing the video's stem are considered
        if not (candidate.startswith(fileroot) and candidate.endswith(SUBTITLE_EXTENSIONS)):
            continue

        # the part between the stem and the subtitle extension may hold a language code
        subtitle_ext = os.path.splitext(candidate)[1]
        code = candidate[len(fileroot):-len(subtitle_ext)].replace(fileext, '').replace('_', '-')[1:]
        language = Language('und')
        if code:
            try:
                language = Language.fromietf(code)
            except (ValueError, LanguageReverseError):
                logger.error('Cannot parse language code %r', code)
        subtitles[candidate] = language

    logger.debug('Found subtitles %r', subtitles)
    return subtitles
# split path
dirpath, filename = os.path.split(path)
dirpath = dirpath or '.'
fileroot, fileext = os.path.splitext(filename)
# search for subtitles
subtitles = {}
for p in os.listdir(directory or dirpath):
# keep only valid subtitle filenames
if not p.startswith(fileroot) or not p.endswith(SUBTITLE_EXTENSIONS):
continue
# extract the potential language code
language = Language('und')
language_code = p[len(fileroot):-len(os.path.splitext(p)[1])].replace(fileext, '').replace('_', '-')[1:]
if language_code:
try:
language = Language.fromietf(language_code)
except (ValueError, LanguageReverseError):
logger.error('Cannot parse language code %r', language_code)
subtitles[p] = language
logger.debug('Found subtitles %r', subtitles)
return subtitles | [
"def",
"search_external_subtitles",
"(",
"path",
",",
"directory",
"=",
"None",
")",
":",
"# split path",
"dirpath",
",",
"filename",
"=",
"os",
".",
"path",
".",
"split",
"(",
"path",
")",
"dirpath",
"=",
"dirpath",
"or",
"'.'",
"fileroot",
",",
"fileext"... | Search for external subtitles from a video `path` and their associated language.
Unless `directory` is provided, search will be made in the same directory as the video file.
:param str path: path to the video.
:param str directory: directory to search for subtitles.
:return: found subtitles with their languages.
:rtype: dict | [
"Search",
"for",
"external",
"subtitles",
"from",
"a",
"video",
"path",
"and",
"their",
"associated",
"language",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/core.py#L321-L357 |
def scan_video(path):
    """Scan a video from a `path`.

    :param str path: existing path to the video.
    :return: the scanned video.
    :rtype: :class:`~subliminal.video.Video`
    """
    if not os.path.exists(path):
        raise ValueError('Path does not exist')
    if not path.endswith(VIDEO_EXTENSIONS):
        raise ValueError('%r is not a valid video extension' % os.path.splitext(path)[1])

    dirpath, filename = os.path.split(path)
    logger.info('Scanning video %r in %r', filename, dirpath)

    # build the video object from the guessed metadata
    video = Video.fromguess(path, guessit(path))

    # hashes are only meaningful on files of at least 10MB
    video.size = os.path.getsize(path)
    if video.size > 10485760:
        logger.debug('Size is %d', video.size)
        for name, hasher in (('opensubtitles', hash_opensubtitles), ('shooter', hash_shooter),
                             ('thesubdb', hash_thesubdb), ('napiprojekt', hash_napiprojekt)):
            video.hashes[name] = hasher(path)
        logger.debug('Computed hashes %r', video.hashes)
    else:
        logger.warning('Size is lower than 10MB: hashes not computed')

    return video
# check for non-existing path
if not os.path.exists(path):
raise ValueError('Path does not exist')
# check video extension
if not path.endswith(VIDEO_EXTENSIONS):
raise ValueError('%r is not a valid video extension' % os.path.splitext(path)[1])
dirpath, filename = os.path.split(path)
logger.info('Scanning video %r in %r', filename, dirpath)
# guess
video = Video.fromguess(path, guessit(path))
# size and hashes
video.size = os.path.getsize(path)
if video.size > 10485760:
logger.debug('Size is %d', video.size)
video.hashes['opensubtitles'] = hash_opensubtitles(path)
video.hashes['shooter'] = hash_shooter(path)
video.hashes['thesubdb'] = hash_thesubdb(path)
video.hashes['napiprojekt'] = hash_napiprojekt(path)
logger.debug('Computed hashes %r', video.hashes)
else:
logger.warning('Size is lower than 10MB: hashes not computed')
return video | [
"def",
"scan_video",
"(",
"path",
")",
":",
"# check for non-existing path",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"raise",
"ValueError",
"(",
"'Path does not exist'",
")",
"# check video extension",
"if",
"not",
"path",
".",
"e... | Scan a video from a `path`.
:param str path: existing path to the video.
:return: the scanned video.
:rtype: :class:`~subliminal.video.Video` | [
"Scan",
"a",
"video",
"from",
"a",
"path",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/core.py#L360-L394 |
def scan_archive(path):
    """Scan an archive from a `path`.

    :param str path: existing path to the archive.
    :return: the scanned video.
    :rtype: :class:`~subliminal.video.Video`
    """
    if not os.path.exists(path):
        raise ValueError('Path does not exist')
    if not path.endswith(ARCHIVE_EXTENSIONS):
        raise ValueError('%r is not a valid archive extension' % os.path.splitext(path)[1])

    dirpath, filename = os.path.split(path)
    logger.info('Scanning archive %r in %r', filename, dirpath)

    # only rar archives are supported
    if not filename.endswith('.rar'):
        raise ValueError('Unsupported extension %r' % os.path.splitext(path)[1])

    rar = RarFile(path)

    # exactly one video must live inside the archive
    rar_filenames = [name for name in rar.namelist() if name.endswith(VIDEO_EXTENSIONS)]
    if not rar_filenames:
        raise ValueError('No video in archive')
    if len(rar_filenames) > 1:
        raise ValueError('More than one video in archive')

    rar_filename = rar_filenames[0]
    rar_filepath = os.path.join(dirpath, rar_filename)
    video = Video.fromguess(rar_filepath, guessit(rar_filepath))
    # the size comes from the archive entry, not the archive file itself
    video.size = rar.getinfo(rar_filename).file_size
    return video
# check for non-existing path
if not os.path.exists(path):
raise ValueError('Path does not exist')
# check video extension
if not path.endswith(ARCHIVE_EXTENSIONS):
raise ValueError('%r is not a valid archive extension' % os.path.splitext(path)[1])
dirpath, filename = os.path.split(path)
logger.info('Scanning archive %r in %r', filename, dirpath)
# rar extension
if filename.endswith('.rar'):
rar = RarFile(path)
# filter on video extensions
rar_filenames = [f for f in rar.namelist() if f.endswith(VIDEO_EXTENSIONS)]
# no video found
if not rar_filenames:
raise ValueError('No video in archive')
# more than one video found
if len(rar_filenames) > 1:
raise ValueError('More than one video in archive')
# guess
rar_filename = rar_filenames[0]
rar_filepath = os.path.join(dirpath, rar_filename)
video = Video.fromguess(rar_filepath, guessit(rar_filepath))
# size
video.size = rar.getinfo(rar_filename).file_size
else:
raise ValueError('Unsupported extension %r' % os.path.splitext(path)[1])
return video | [
"def",
"scan_archive",
"(",
"path",
")",
":",
"# check for non-existing path",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"raise",
"ValueError",
"(",
"'Path does not exist'",
")",
"# check video extension",
"if",
"not",
"path",
".",
... | Scan an archive from a `path`.
:param str path: existing path to the archive.
:return: the scanned video.
:rtype: :class:`~subliminal.video.Video` | [
"Scan",
"an",
"archive",
"from",
"a",
"path",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/core.py#L397-L441 |
def scan_videos(path, age=None, archives=True):
    """Scan `path` for videos and their subtitles.

    See :func:`refine` to find additional information for the video.

    :param str path: existing directory path to scan.
    :param datetime.timedelta age: maximum age of the video or archive.
    :param bool archives: scan videos in archives.
    :return: the scanned videos.
    :rtype: list of :class:`~subliminal.video.Video`
    """
    if not os.path.exists(path):
        raise ValueError('Path does not exist')
    if not os.path.isdir(path):
        raise ValueError('Path is not a directory')

    videos = []
    for dirpath, dirnames, filenames in os.walk(path):
        logger.debug('Walking directory %r', dirpath)

        # prune hidden directories in place so os.walk skips them
        for dirname in list(dirnames):
            if dirname.startswith('.'):
                logger.debug('Skipping hidden dirname %r in %r', dirname, dirpath)
                dirnames.remove(dirname)

        for filename in filenames:
            is_video = filename.endswith(VIDEO_EXTENSIONS)
            is_archive = archives and filename.endswith(ARCHIVE_EXTENSIONS)

            # keep only videos and, when enabled, archives
            if not (is_video or is_archive):
                continue
            if filename.startswith('.'):
                logger.debug('Skipping hidden filename %r in %r', filename, dirpath)
                continue

            filepath = os.path.join(dirpath, filename)
            if os.path.islink(filepath):
                logger.debug('Skipping link %r in %r', filename, dirpath)
                continue

            # honor the maximum age, based on the modification time
            if age and datetime.utcnow() - datetime.utcfromtimestamp(os.path.getmtime(filepath)) > age:
                logger.debug('Skipping old file %r in %r', filename, dirpath)
                continue

            if is_video:
                try:
                    video = scan_video(filepath)
                except ValueError:  # pragma: no cover
                    logger.exception('Error scanning video')
                    continue
            elif is_archive:
                try:
                    video = scan_archive(filepath)
                except (NotRarFile, RarCannotExec, ValueError):  # pragma: no cover
                    logger.exception('Error scanning archive')
                    continue
            else:  # pragma: no cover
                raise ValueError('Unsupported file %r' % filename)

            videos.append(video)

    return videos
# check for non-existing path
if not os.path.exists(path):
raise ValueError('Path does not exist')
# check for non-directory path
if not os.path.isdir(path):
raise ValueError('Path is not a directory')
# walk the path
videos = []
for dirpath, dirnames, filenames in os.walk(path):
logger.debug('Walking directory %r', dirpath)
# remove badly encoded and hidden dirnames
for dirname in list(dirnames):
if dirname.startswith('.'):
logger.debug('Skipping hidden dirname %r in %r', dirname, dirpath)
dirnames.remove(dirname)
# scan for videos
for filename in filenames:
# filter on videos and archives
if not (filename.endswith(VIDEO_EXTENSIONS) or archives and filename.endswith(ARCHIVE_EXTENSIONS)):
continue
# skip hidden files
if filename.startswith('.'):
logger.debug('Skipping hidden filename %r in %r', filename, dirpath)
continue
# reconstruct the file path
filepath = os.path.join(dirpath, filename)
# skip links
if os.path.islink(filepath):
logger.debug('Skipping link %r in %r', filename, dirpath)
continue
# skip old files
if age and datetime.utcnow() - datetime.utcfromtimestamp(os.path.getmtime(filepath)) > age:
logger.debug('Skipping old file %r in %r', filename, dirpath)
continue
# scan
if filename.endswith(VIDEO_EXTENSIONS): # video
try:
video = scan_video(filepath)
except ValueError: # pragma: no cover
logger.exception('Error scanning video')
continue
elif archives and filename.endswith(ARCHIVE_EXTENSIONS): # archive
try:
video = scan_archive(filepath)
except (NotRarFile, RarCannotExec, ValueError): # pragma: no cover
logger.exception('Error scanning archive')
continue
else: # pragma: no cover
raise ValueError('Unsupported file %r' % filename)
videos.append(video)
return videos | [
"def",
"scan_videos",
"(",
"path",
",",
"age",
"=",
"None",
",",
"archives",
"=",
"True",
")",
":",
"# check for non-existing path",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"raise",
"ValueError",
"(",
"'Path does not exist'",
... | Scan `path` for videos and their subtitles.
See :func:`refine` to find additional information for the video.
:param str path: existing directory path to scan.
:param datetime.timedelta age: maximum age of the video or archive.
:param bool archives: scan videos in archives.
:return: the scanned videos.
:rtype: list of :class:`~subliminal.video.Video` | [
"Scan",
"path",
"for",
"videos",
"and",
"their",
"subtitles",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/core.py#L444-L517 |
def download_best_subtitles(videos, languages, min_score=0, hearing_impaired=False, only_one=False, compute_score=None,
                            pool_class=ProviderPool, **kwargs):
    """List and download the best matching subtitles.

    The `videos` must pass the `languages` and `undefined` (`only_one`) checks of :func:`check_video`.

    :param videos: videos to download subtitles for.
    :type videos: set of :class:`~subliminal.video.Video`
    :param languages: languages to download.
    :type languages: set of :class:`~babelfish.language.Language`
    :param int min_score: minimum score for a subtitle to be downloaded.
    :param bool hearing_impaired: hearing impaired preference.
    :param bool only_one: download only one subtitle, not one per language.
    :param compute_score: function that takes `subtitle` and `video` as positional arguments,
        `hearing_impaired` as keyword argument and returns the score.
    :param pool_class: class to use as provider pool.
    :type pool_class: :class:`ProviderPool`, :class:`AsyncProviderPool` or similar
    :param \*\*kwargs: additional parameters for the provided `pool_class` constructor.
    :return: downloaded subtitles per video.
    :rtype: dict of :class:`~subliminal.video.Video` to list of :class:`~subliminal.subtitle.Subtitle`

    """
    downloaded_subtitles = defaultdict(list)

    # keep only videos that still need subtitles
    checked_videos = []
    for video in videos:
        if check_video(video, languages=languages, undefined=only_one):
            checked_videos.append(video)
        else:
            logger.info('Skipping video %r', video)

    # nothing to do when every video was filtered out
    if not checked_videos:
        return downloaded_subtitles

    with pool_class(**kwargs) as pool:
        for video in checked_videos:
            logger.info('Downloading best subtitles for %r', video)
            missing_languages = languages - video.subtitle_languages
            subtitles = pool.download_best_subtitles(pool.list_subtitles(video, missing_languages),
                                                     video, languages, min_score=min_score,
                                                     hearing_impaired=hearing_impaired, only_one=only_one,
                                                     compute_score=compute_score)
            logger.info('Downloaded %d subtitle(s)', len(subtitles))
            downloaded_subtitles[video].extend(subtitles)

    return downloaded_subtitles
pool_class=ProviderPool, **kwargs):
downloaded_subtitles = defaultdict(list)
# check videos
checked_videos = []
for video in videos:
if not check_video(video, languages=languages, undefined=only_one):
logger.info('Skipping video %r', video)
continue
checked_videos.append(video)
# return immediately if no video passed the checks
if not checked_videos:
return downloaded_subtitles
# download best subtitles
with pool_class(**kwargs) as pool:
for video in checked_videos:
logger.info('Downloading best subtitles for %r', video)
subtitles = pool.download_best_subtitles(pool.list_subtitles(video, languages - video.subtitle_languages),
video, languages, min_score=min_score,
hearing_impaired=hearing_impaired, only_one=only_one,
compute_score=compute_score)
logger.info('Downloaded %d subtitle(s)', len(subtitles))
downloaded_subtitles[video].extend(subtitles)
return downloaded_subtitles | [
"def",
"download_best_subtitles",
"(",
"videos",
",",
"languages",
",",
"min_score",
"=",
"0",
",",
"hearing_impaired",
"=",
"False",
",",
"only_one",
"=",
"False",
",",
"compute_score",
"=",
"None",
",",
"pool_class",
"=",
"ProviderPool",
",",
"*",
"*",
"kw... | List and download the best matching subtitles.
The `videos` must pass the `languages` and `undefined` (`only_one`) checks of :func:`check_video`.
:param videos: videos to download subtitles for.
:type videos: set of :class:`~subliminal.video.Video`
:param languages: languages to download.
:type languages: set of :class:`~babelfish.language.Language`
:param int min_score: minimum score for a subtitle to be downloaded.
:param bool hearing_impaired: hearing impaired preference.
:param bool only_one: download only one subtitle, not one per language.
:param compute_score: function that takes `subtitle` and `video` as positional arguments,
`hearing_impaired` as keyword argument and returns the score.
:param pool_class: class to use as provider pool.
:type pool_class: :class:`ProviderPool`, :class:`AsyncProviderPool` or similar
:param \*\*kwargs: additional parameters for the provided `pool_class` constructor.
:return: downloaded subtitles per video.
:rtype: dict of :class:`~subliminal.video.Video` to list of :class:`~subliminal.subtitle.Subtitle` | [
"List",
"and",
"download",
"the",
"best",
"matching",
"subtitles",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/core.py#L604-L651 |
226,643 | Diaoul/subliminal | subliminal/core.py | save_subtitles | def save_subtitles(video, subtitles, single=False, directory=None, encoding=None):
"""Save subtitles on filesystem.
Subtitles are saved in the order of the list. If a subtitle with a language has already been saved, other subtitles
with the same language are silently ignored.
The extension used is `.lang.srt` by default or `.srt` is `single` is `True`, with `lang` being the IETF code for
the :attr:`~subliminal.subtitle.Subtitle.language` of the subtitle.
:param video: video of the subtitles.
:type video: :class:`~subliminal.video.Video`
:param subtitles: subtitles to save.
:type subtitles: list of :class:`~subliminal.subtitle.Subtitle`
:param bool single: save a single subtitle, default is to save one subtitle per language.
:param str directory: path to directory where to save the subtitles, default is next to the video.
:param str encoding: encoding in which to save the subtitles, default is to keep original encoding.
:return: the saved subtitles
:rtype: list of :class:`~subliminal.subtitle.Subtitle`
"""
saved_subtitles = []
for subtitle in subtitles:
# check content
if subtitle.content is None:
logger.error('Skipping subtitle %r: no content', subtitle)
continue
# check language
if subtitle.language in set(s.language for s in saved_subtitles):
logger.debug('Skipping subtitle %r: language already saved', subtitle)
continue
# create subtitle path
subtitle_path = get_subtitle_path(video.name, None if single else subtitle.language)
if directory is not None:
subtitle_path = os.path.join(directory, os.path.split(subtitle_path)[1])
# save content as is or in the specified encoding
logger.info('Saving %r to %r', subtitle, subtitle_path)
if encoding is None:
with io.open(subtitle_path, 'wb') as f:
f.write(subtitle.content)
else:
with io.open(subtitle_path, 'w', encoding=encoding) as f:
f.write(subtitle.text)
saved_subtitles.append(subtitle)
# check single
if single:
break
return saved_subtitles | python | def save_subtitles(video, subtitles, single=False, directory=None, encoding=None):
saved_subtitles = []
for subtitle in subtitles:
# check content
if subtitle.content is None:
logger.error('Skipping subtitle %r: no content', subtitle)
continue
# check language
if subtitle.language in set(s.language for s in saved_subtitles):
logger.debug('Skipping subtitle %r: language already saved', subtitle)
continue
# create subtitle path
subtitle_path = get_subtitle_path(video.name, None if single else subtitle.language)
if directory is not None:
subtitle_path = os.path.join(directory, os.path.split(subtitle_path)[1])
# save content as is or in the specified encoding
logger.info('Saving %r to %r', subtitle, subtitle_path)
if encoding is None:
with io.open(subtitle_path, 'wb') as f:
f.write(subtitle.content)
else:
with io.open(subtitle_path, 'w', encoding=encoding) as f:
f.write(subtitle.text)
saved_subtitles.append(subtitle)
# check single
if single:
break
return saved_subtitles | [
"def",
"save_subtitles",
"(",
"video",
",",
"subtitles",
",",
"single",
"=",
"False",
",",
"directory",
"=",
"None",
",",
"encoding",
"=",
"None",
")",
":",
"saved_subtitles",
"=",
"[",
"]",
"for",
"subtitle",
"in",
"subtitles",
":",
"# check content",
"if... | Save subtitles on filesystem.
Subtitles are saved in the order of the list. If a subtitle with a language has already been saved, other subtitles
with the same language are silently ignored.
The extension used is `.lang.srt` by default or `.srt` is `single` is `True`, with `lang` being the IETF code for
the :attr:`~subliminal.subtitle.Subtitle.language` of the subtitle.
:param video: video of the subtitles.
:type video: :class:`~subliminal.video.Video`
:param subtitles: subtitles to save.
:type subtitles: list of :class:`~subliminal.subtitle.Subtitle`
:param bool single: save a single subtitle, default is to save one subtitle per language.
:param str directory: path to directory where to save the subtitles, default is next to the video.
:param str encoding: encoding in which to save the subtitles, default is to keep original encoding.
:return: the saved subtitles
:rtype: list of :class:`~subliminal.subtitle.Subtitle` | [
"Save",
"subtitles",
"on",
"filesystem",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/core.py#L654-L705 |
226,644 | Diaoul/subliminal | subliminal/core.py | ProviderPool.list_subtitles_provider | def list_subtitles_provider(self, provider, video, languages):
"""List subtitles with a single provider.
The video and languages are checked against the provider.
:param str provider: name of the provider.
:param video: video to list subtitles for.
:type video: :class:`~subliminal.video.Video`
:param languages: languages to search for.
:type languages: set of :class:`~babelfish.language.Language`
:return: found subtitles.
:rtype: list of :class:`~subliminal.subtitle.Subtitle` or None
"""
# check video validity
if not provider_manager[provider].plugin.check(video):
logger.info('Skipping provider %r: not a valid video', provider)
return []
# check supported languages
provider_languages = provider_manager[provider].plugin.languages & languages
if not provider_languages:
logger.info('Skipping provider %r: no language to search for', provider)
return []
# list subtitles
logger.info('Listing subtitles with provider %r and languages %r', provider, provider_languages)
try:
return self[provider].list_subtitles(video, provider_languages)
except (requests.Timeout, socket.timeout):
logger.error('Provider %r timed out', provider)
except:
logger.exception('Unexpected error in provider %r', provider) | python | def list_subtitles_provider(self, provider, video, languages):
# check video validity
if not provider_manager[provider].plugin.check(video):
logger.info('Skipping provider %r: not a valid video', provider)
return []
# check supported languages
provider_languages = provider_manager[provider].plugin.languages & languages
if not provider_languages:
logger.info('Skipping provider %r: no language to search for', provider)
return []
# list subtitles
logger.info('Listing subtitles with provider %r and languages %r', provider, provider_languages)
try:
return self[provider].list_subtitles(video, provider_languages)
except (requests.Timeout, socket.timeout):
logger.error('Provider %r timed out', provider)
except:
logger.exception('Unexpected error in provider %r', provider) | [
"def",
"list_subtitles_provider",
"(",
"self",
",",
"provider",
",",
"video",
",",
"languages",
")",
":",
"# check video validity",
"if",
"not",
"provider_manager",
"[",
"provider",
"]",
".",
"plugin",
".",
"check",
"(",
"video",
")",
":",
"logger",
".",
"in... | List subtitles with a single provider.
The video and languages are checked against the provider.
:param str provider: name of the provider.
:param video: video to list subtitles for.
:type video: :class:`~subliminal.video.Video`
:param languages: languages to search for.
:type languages: set of :class:`~babelfish.language.Language`
:return: found subtitles.
:rtype: list of :class:`~subliminal.subtitle.Subtitle` or None | [
"List",
"subtitles",
"with",
"a",
"single",
"provider",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/core.py#L90-L122 |
226,645 | Diaoul/subliminal | subliminal/core.py | ProviderPool.download_best_subtitles | def download_best_subtitles(self, subtitles, video, languages, min_score=0, hearing_impaired=False, only_one=False,
compute_score=None):
"""Download the best matching subtitles.
:param subtitles: the subtitles to use.
:type subtitles: list of :class:`~subliminal.subtitle.Subtitle`
:param video: video to download subtitles for.
:type video: :class:`~subliminal.video.Video`
:param languages: languages to download.
:type languages: set of :class:`~babelfish.language.Language`
:param int min_score: minimum score for a subtitle to be downloaded.
:param bool hearing_impaired: hearing impaired preference.
:param bool only_one: download only one subtitle, not one per language.
:param compute_score: function that takes `subtitle` and `video` as positional arguments,
`hearing_impaired` as keyword argument and returns the score.
:return: downloaded subtitles.
:rtype: list of :class:`~subliminal.subtitle.Subtitle`
"""
compute_score = compute_score or default_compute_score
# sort subtitles by score
scored_subtitles = sorted([(s, compute_score(s, video, hearing_impaired=hearing_impaired))
for s in subtitles], key=operator.itemgetter(1), reverse=True)
# download best subtitles, falling back on the next on error
downloaded_subtitles = []
for subtitle, score in scored_subtitles:
# check score
if score < min_score:
logger.info('Score %d is below min_score (%d)', score, min_score)
break
# check downloaded languages
if subtitle.language in set(s.language for s in downloaded_subtitles):
logger.debug('Skipping subtitle: %r already downloaded', subtitle.language)
continue
# download
if self.download_subtitle(subtitle):
downloaded_subtitles.append(subtitle)
# stop when all languages are downloaded
if set(s.language for s in downloaded_subtitles) == languages:
logger.debug('All languages downloaded')
break
# stop if only one subtitle is requested
if only_one:
logger.debug('Only one subtitle downloaded')
break
return downloaded_subtitles | python | def download_best_subtitles(self, subtitles, video, languages, min_score=0, hearing_impaired=False, only_one=False,
compute_score=None):
compute_score = compute_score or default_compute_score
# sort subtitles by score
scored_subtitles = sorted([(s, compute_score(s, video, hearing_impaired=hearing_impaired))
for s in subtitles], key=operator.itemgetter(1), reverse=True)
# download best subtitles, falling back on the next on error
downloaded_subtitles = []
for subtitle, score in scored_subtitles:
# check score
if score < min_score:
logger.info('Score %d is below min_score (%d)', score, min_score)
break
# check downloaded languages
if subtitle.language in set(s.language for s in downloaded_subtitles):
logger.debug('Skipping subtitle: %r already downloaded', subtitle.language)
continue
# download
if self.download_subtitle(subtitle):
downloaded_subtitles.append(subtitle)
# stop when all languages are downloaded
if set(s.language for s in downloaded_subtitles) == languages:
logger.debug('All languages downloaded')
break
# stop if only one subtitle is requested
if only_one:
logger.debug('Only one subtitle downloaded')
break
return downloaded_subtitles | [
"def",
"download_best_subtitles",
"(",
"self",
",",
"subtitles",
",",
"video",
",",
"languages",
",",
"min_score",
"=",
"0",
",",
"hearing_impaired",
"=",
"False",
",",
"only_one",
"=",
"False",
",",
"compute_score",
"=",
"None",
")",
":",
"compute_score",
"... | Download the best matching subtitles.
:param subtitles: the subtitles to use.
:type subtitles: list of :class:`~subliminal.subtitle.Subtitle`
:param video: video to download subtitles for.
:type video: :class:`~subliminal.video.Video`
:param languages: languages to download.
:type languages: set of :class:`~babelfish.language.Language`
:param int min_score: minimum score for a subtitle to be downloaded.
:param bool hearing_impaired: hearing impaired preference.
:param bool only_one: download only one subtitle, not one per language.
:param compute_score: function that takes `subtitle` and `video` as positional arguments,
`hearing_impaired` as keyword argument and returns the score.
:return: downloaded subtitles.
:rtype: list of :class:`~subliminal.subtitle.Subtitle` | [
"Download",
"the",
"best",
"matching",
"subtitles",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/core.py#L188-L240 |
226,646 | Diaoul/subliminal | subliminal/score.py | get_scores | def get_scores(video):
"""Get the scores dict for the given `video`.
This will return either :data:`episode_scores` or :data:`movie_scores` based on the type of the `video`.
:param video: the video to compute the score against.
:type video: :class:`~subliminal.video.Video`
:return: the scores dict.
:rtype: dict
"""
if isinstance(video, Episode):
return episode_scores
elif isinstance(video, Movie):
return movie_scores
raise ValueError('video must be an instance of Episode or Movie') | python | def get_scores(video):
if isinstance(video, Episode):
return episode_scores
elif isinstance(video, Movie):
return movie_scores
raise ValueError('video must be an instance of Episode or Movie') | [
"def",
"get_scores",
"(",
"video",
")",
":",
"if",
"isinstance",
"(",
"video",
",",
"Episode",
")",
":",
"return",
"episode_scores",
"elif",
"isinstance",
"(",
"video",
",",
"Movie",
")",
":",
"return",
"movie_scores",
"raise",
"ValueError",
"(",
"'video mus... | Get the scores dict for the given `video`.
This will return either :data:`episode_scores` or :data:`movie_scores` based on the type of the `video`.
:param video: the video to compute the score against.
:type video: :class:`~subliminal.video.Video`
:return: the scores dict.
:rtype: dict | [
"Get",
"the",
"scores",
"dict",
"for",
"the",
"given",
"video",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/score.py#L65-L81 |
226,647 | Diaoul/subliminal | subliminal/score.py | compute_score | def compute_score(subtitle, video, hearing_impaired=None):
"""Compute the score of the `subtitle` against the `video` with `hearing_impaired` preference.
:func:`compute_score` uses the :meth:`Subtitle.get_matches <subliminal.subtitle.Subtitle.get_matches>` method and
applies the scores (either from :data:`episode_scores` or :data:`movie_scores`) after some processing.
:param subtitle: the subtitle to compute the score of.
:type subtitle: :class:`~subliminal.subtitle.Subtitle`
:param video: the video to compute the score against.
:type video: :class:`~subliminal.video.Video`
:param bool hearing_impaired: hearing impaired preference.
:return: score of the subtitle.
:rtype: int
"""
logger.info('Computing score of %r for video %r with %r', subtitle, video, dict(hearing_impaired=hearing_impaired))
# get the scores dict
scores = get_scores(video)
logger.debug('Using scores %r', scores)
# get the matches
matches = subtitle.get_matches(video)
logger.debug('Found matches %r', matches)
# on hash match, discard everything else
if 'hash' in matches:
logger.debug('Keeping only hash match')
matches &= {'hash'}
# handle equivalent matches
if isinstance(video, Episode):
if 'title' in matches:
logger.debug('Adding title match equivalent')
matches.add('episode')
if 'series_imdb_id' in matches:
logger.debug('Adding series_imdb_id match equivalent')
matches |= {'series', 'year'}
if 'imdb_id' in matches:
logger.debug('Adding imdb_id match equivalents')
matches |= {'series', 'year', 'season', 'episode'}
if 'tvdb_id' in matches:
logger.debug('Adding tvdb_id match equivalents')
matches |= {'series', 'year', 'season', 'episode'}
if 'series_tvdb_id' in matches:
logger.debug('Adding series_tvdb_id match equivalents')
matches |= {'series', 'year'}
elif isinstance(video, Movie):
if 'imdb_id' in matches:
logger.debug('Adding imdb_id match equivalents')
matches |= {'title', 'year'}
# handle hearing impaired
if hearing_impaired is not None and subtitle.hearing_impaired == hearing_impaired:
logger.debug('Matched hearing_impaired')
matches.add('hearing_impaired')
# compute the score
score = sum((scores.get(match, 0) for match in matches))
logger.info('Computed score %r with final matches %r', score, matches)
# ensure score is within valid bounds
assert 0 <= score <= scores['hash'] + scores['hearing_impaired']
return score | python | def compute_score(subtitle, video, hearing_impaired=None):
logger.info('Computing score of %r for video %r with %r', subtitle, video, dict(hearing_impaired=hearing_impaired))
# get the scores dict
scores = get_scores(video)
logger.debug('Using scores %r', scores)
# get the matches
matches = subtitle.get_matches(video)
logger.debug('Found matches %r', matches)
# on hash match, discard everything else
if 'hash' in matches:
logger.debug('Keeping only hash match')
matches &= {'hash'}
# handle equivalent matches
if isinstance(video, Episode):
if 'title' in matches:
logger.debug('Adding title match equivalent')
matches.add('episode')
if 'series_imdb_id' in matches:
logger.debug('Adding series_imdb_id match equivalent')
matches |= {'series', 'year'}
if 'imdb_id' in matches:
logger.debug('Adding imdb_id match equivalents')
matches |= {'series', 'year', 'season', 'episode'}
if 'tvdb_id' in matches:
logger.debug('Adding tvdb_id match equivalents')
matches |= {'series', 'year', 'season', 'episode'}
if 'series_tvdb_id' in matches:
logger.debug('Adding series_tvdb_id match equivalents')
matches |= {'series', 'year'}
elif isinstance(video, Movie):
if 'imdb_id' in matches:
logger.debug('Adding imdb_id match equivalents')
matches |= {'title', 'year'}
# handle hearing impaired
if hearing_impaired is not None and subtitle.hearing_impaired == hearing_impaired:
logger.debug('Matched hearing_impaired')
matches.add('hearing_impaired')
# compute the score
score = sum((scores.get(match, 0) for match in matches))
logger.info('Computed score %r with final matches %r', score, matches)
# ensure score is within valid bounds
assert 0 <= score <= scores['hash'] + scores['hearing_impaired']
return score | [
"def",
"compute_score",
"(",
"subtitle",
",",
"video",
",",
"hearing_impaired",
"=",
"None",
")",
":",
"logger",
".",
"info",
"(",
"'Computing score of %r for video %r with %r'",
",",
"subtitle",
",",
"video",
",",
"dict",
"(",
"hearing_impaired",
"=",
"hearing_im... | Compute the score of the `subtitle` against the `video` with `hearing_impaired` preference.
:func:`compute_score` uses the :meth:`Subtitle.get_matches <subliminal.subtitle.Subtitle.get_matches>` method and
applies the scores (either from :data:`episode_scores` or :data:`movie_scores`) after some processing.
:param subtitle: the subtitle to compute the score of.
:type subtitle: :class:`~subliminal.subtitle.Subtitle`
:param video: the video to compute the score against.
:type video: :class:`~subliminal.video.Video`
:param bool hearing_impaired: hearing impaired preference.
:return: score of the subtitle.
:rtype: int | [
"Compute",
"the",
"score",
"of",
"the",
"subtitle",
"against",
"the",
"video",
"with",
"hearing_impaired",
"preference",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/score.py#L84-L148 |
226,648 | Diaoul/subliminal | subliminal/providers/__init__.py | Provider.check | def check(cls, video):
"""Check if the `video` can be processed.
The `video` is considered invalid if not an instance of :attr:`video_types` or if the :attr:`required_hash` is
not present in :attr:`~subliminal.video.Video.hashes` attribute of the `video`.
:param video: the video to check.
:type video: :class:`~subliminal.video.Video`
:return: `True` if the `video` is valid, `False` otherwise.
:rtype: bool
"""
if not isinstance(video, cls.video_types):
return False
if cls.required_hash is not None and cls.required_hash not in video.hashes:
return False
return True | python | def check(cls, video):
if not isinstance(video, cls.video_types):
return False
if cls.required_hash is not None and cls.required_hash not in video.hashes:
return False
return True | [
"def",
"check",
"(",
"cls",
",",
"video",
")",
":",
"if",
"not",
"isinstance",
"(",
"video",
",",
"cls",
".",
"video_types",
")",
":",
"return",
"False",
"if",
"cls",
".",
"required_hash",
"is",
"not",
"None",
"and",
"cls",
".",
"required_hash",
"not",... | Check if the `video` can be processed.
The `video` is considered invalid if not an instance of :attr:`video_types` or if the :attr:`required_hash` is
not present in :attr:`~subliminal.video.Video.hashes` attribute of the `video`.
:param video: the video to check.
:type video: :class:`~subliminal.video.Video`
:return: `True` if the `video` is valid, `False` otherwise.
:rtype: bool | [
"Check",
"if",
"the",
"video",
"can",
"be",
"processed",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/providers/__init__.py#L102-L119 |
226,649 | Diaoul/subliminal | subliminal/providers/addic7ed.py | Addic7edProvider._get_show_ids | def _get_show_ids(self):
"""Get the ``dict`` of show ids per series by querying the `shows.php` page.
:return: show id per series, lower case and without quotes.
:rtype: dict
"""
# get the show page
logger.info('Getting show ids')
r = self.session.get(self.server_url + 'shows.php', timeout=10)
r.raise_for_status()
soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
# populate the show ids
show_ids = {}
for show in soup.select('td.version > h3 > a[href^="/show/"]'):
show_ids[sanitize(show.text)] = int(show['href'][6:])
logger.debug('Found %d show ids', len(show_ids))
return show_ids | python | def _get_show_ids(self):
# get the show page
logger.info('Getting show ids')
r = self.session.get(self.server_url + 'shows.php', timeout=10)
r.raise_for_status()
soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
# populate the show ids
show_ids = {}
for show in soup.select('td.version > h3 > a[href^="/show/"]'):
show_ids[sanitize(show.text)] = int(show['href'][6:])
logger.debug('Found %d show ids', len(show_ids))
return show_ids | [
"def",
"_get_show_ids",
"(",
"self",
")",
":",
"# get the show page",
"logger",
".",
"info",
"(",
"'Getting show ids'",
")",
"r",
"=",
"self",
".",
"session",
".",
"get",
"(",
"self",
".",
"server_url",
"+",
"'shows.php'",
",",
"timeout",
"=",
"10",
")",
... | Get the ``dict`` of show ids per series by querying the `shows.php` page.
:return: show id per series, lower case and without quotes.
:rtype: dict | [
"Get",
"the",
"dict",
"of",
"show",
"ids",
"per",
"series",
"by",
"querying",
"the",
"shows",
".",
"php",
"page",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/providers/addic7ed.py#L126-L145 |
226,650 | Diaoul/subliminal | subliminal/providers/addic7ed.py | Addic7edProvider.get_show_id | def get_show_id(self, series, year=None, country_code=None):
"""Get the best matching show id for `series`, `year` and `country_code`.
First search in the result of :meth:`_get_show_ids` and fallback on a search with :meth:`_search_show_id`.
:param str series: series of the episode.
:param year: year of the series, if any.
:type year: int
:param country_code: country code of the series, if any.
:type country_code: str
:return: the show id, if found.
:rtype: int
"""
series_sanitized = sanitize(series).lower()
show_ids = self._get_show_ids()
show_id = None
# attempt with country
if not show_id and country_code:
logger.debug('Getting show id with country')
show_id = show_ids.get('%s %s' % (series_sanitized, country_code.lower()))
# attempt with year
if not show_id and year:
logger.debug('Getting show id with year')
show_id = show_ids.get('%s %d' % (series_sanitized, year))
# attempt clean
if not show_id:
logger.debug('Getting show id')
show_id = show_ids.get(series_sanitized)
# search as last resort
if not show_id:
logger.warning('Series not found in show ids')
show_id = self._search_show_id(series)
return show_id | python | def get_show_id(self, series, year=None, country_code=None):
series_sanitized = sanitize(series).lower()
show_ids = self._get_show_ids()
show_id = None
# attempt with country
if not show_id and country_code:
logger.debug('Getting show id with country')
show_id = show_ids.get('%s %s' % (series_sanitized, country_code.lower()))
# attempt with year
if not show_id and year:
logger.debug('Getting show id with year')
show_id = show_ids.get('%s %d' % (series_sanitized, year))
# attempt clean
if not show_id:
logger.debug('Getting show id')
show_id = show_ids.get(series_sanitized)
# search as last resort
if not show_id:
logger.warning('Series not found in show ids')
show_id = self._search_show_id(series)
return show_id | [
"def",
"get_show_id",
"(",
"self",
",",
"series",
",",
"year",
"=",
"None",
",",
"country_code",
"=",
"None",
")",
":",
"series_sanitized",
"=",
"sanitize",
"(",
"series",
")",
".",
"lower",
"(",
")",
"show_ids",
"=",
"self",
".",
"_get_show_ids",
"(",
... | Get the best matching show id for `series`, `year` and `country_code`.
First search in the result of :meth:`_get_show_ids` and fallback on a search with :meth:`_search_show_id`.
:param str series: series of the episode.
:param year: year of the series, if any.
:type year: int
:param country_code: country code of the series, if any.
:type country_code: str
:return: the show id, if found.
:rtype: int | [
"Get",
"the",
"best",
"matching",
"show",
"id",
"for",
"series",
"year",
"and",
"country_code",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/providers/addic7ed.py#L186-L224 |
226,651 | Diaoul/subliminal | subliminal/providers/tvsubtitles.py | TVsubtitlesProvider.get_episode_ids | def get_episode_ids(self, show_id, season):
"""Get episode ids from the show id and the season.
:param int show_id: show id.
:param int season: season of the episode.
:return: episode ids per episode number.
:rtype: dict
"""
# get the page of the season of the show
logger.info('Getting the page of show id %d, season %d', show_id, season)
r = self.session.get(self.server_url + 'tvshow-%d-%d.html' % (show_id, season), timeout=10)
soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
# loop over episode rows
episode_ids = {}
for row in soup.select('table#table5 tr'):
# skip rows that do not have a link to the episode page
if not row('a', href=episode_id_re):
continue
# extract data from the cells
cells = row('td')
episode = int(cells[0].text.split('x')[1])
episode_id = int(cells[1].a['href'][8:-5])
episode_ids[episode] = episode_id
if episode_ids:
logger.debug('Found episode ids %r', episode_ids)
else:
logger.warning('No episode ids found')
return episode_ids | python | def get_episode_ids(self, show_id, season):
# get the page of the season of the show
logger.info('Getting the page of show id %d, season %d', show_id, season)
r = self.session.get(self.server_url + 'tvshow-%d-%d.html' % (show_id, season), timeout=10)
soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
# loop over episode rows
episode_ids = {}
for row in soup.select('table#table5 tr'):
# skip rows that do not have a link to the episode page
if not row('a', href=episode_id_re):
continue
# extract data from the cells
cells = row('td')
episode = int(cells[0].text.split('x')[1])
episode_id = int(cells[1].a['href'][8:-5])
episode_ids[episode] = episode_id
if episode_ids:
logger.debug('Found episode ids %r', episode_ids)
else:
logger.warning('No episode ids found')
return episode_ids | [
"def",
"get_episode_ids",
"(",
"self",
",",
"show_id",
",",
"season",
")",
":",
"# get the page of the season of the show",
"logger",
".",
"info",
"(",
"'Getting the page of show id %d, season %d'",
",",
"show_id",
",",
"season",
")",
"r",
"=",
"self",
".",
"session... | Get episode ids from the show id and the season.
:param int show_id: show id.
:param int season: season of the episode.
:return: episode ids per episode number.
:rtype: dict | [
"Get",
"episode",
"ids",
"from",
"the",
"show",
"id",
"and",
"the",
"season",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/providers/tvsubtitles.py#L127-L159 |
226,652 | Diaoul/subliminal | subliminal/refiners/metadata.py | refine | def refine(video, embedded_subtitles=True, **kwargs):
"""Refine a video by searching its metadata.
Several :class:`~subliminal.video.Video` attributes can be found:
* :attr:`~subliminal.video.Video.resolution`
* :attr:`~subliminal.video.Video.video_codec`
* :attr:`~subliminal.video.Video.audio_codec`
* :attr:`~subliminal.video.Video.subtitle_languages`
:param bool embedded_subtitles: search for embedded subtitles.
"""
# skip non existing videos
if not video.exists:
return
# check extensions
extension = os.path.splitext(video.name)[1]
if extension == '.mkv':
with open(video.name, 'rb') as f:
mkv = MKV(f)
# main video track
if mkv.video_tracks:
video_track = mkv.video_tracks[0]
# resolution
if video_track.height in (480, 720, 1080):
if video_track.interlaced:
video.resolution = '%di' % video_track.height
else:
video.resolution = '%dp' % video_track.height
logger.debug('Found resolution %s', video.resolution)
# video codec
if video_track.codec_id == 'V_MPEG4/ISO/AVC':
video.video_codec = 'h264'
logger.debug('Found video_codec %s', video.video_codec)
elif video_track.codec_id == 'V_MPEG4/ISO/SP':
video.video_codec = 'DivX'
logger.debug('Found video_codec %s', video.video_codec)
elif video_track.codec_id == 'V_MPEG4/ISO/ASP':
video.video_codec = 'XviD'
logger.debug('Found video_codec %s', video.video_codec)
else:
logger.warning('MKV has no video track')
# main audio track
if mkv.audio_tracks:
audio_track = mkv.audio_tracks[0]
# audio codec
if audio_track.codec_id == 'A_AC3':
video.audio_codec = 'AC3'
logger.debug('Found audio_codec %s', video.audio_codec)
elif audio_track.codec_id == 'A_DTS':
video.audio_codec = 'DTS'
logger.debug('Found audio_codec %s', video.audio_codec)
elif audio_track.codec_id == 'A_AAC':
video.audio_codec = 'AAC'
logger.debug('Found audio_codec %s', video.audio_codec)
else:
logger.warning('MKV has no audio track')
# subtitle tracks
if mkv.subtitle_tracks:
if embedded_subtitles:
embedded_subtitle_languages = set()
for st in mkv.subtitle_tracks:
if st.language:
try:
embedded_subtitle_languages.add(Language.fromalpha3b(st.language))
except BabelfishError:
logger.error('Embedded subtitle track language %r is not a valid language', st.language)
embedded_subtitle_languages.add(Language('und'))
elif st.name:
try:
embedded_subtitle_languages.add(Language.fromname(st.name))
except BabelfishError:
logger.debug('Embedded subtitle track name %r is not a valid language', st.name)
embedded_subtitle_languages.add(Language('und'))
else:
embedded_subtitle_languages.add(Language('und'))
logger.debug('Found embedded subtitle %r', embedded_subtitle_languages)
video.subtitle_languages |= embedded_subtitle_languages
else:
logger.debug('MKV has no subtitle track')
else:
logger.debug('Unsupported video extension %s', extension) | python | def refine(video, embedded_subtitles=True, **kwargs):
# skip non existing videos
if not video.exists:
return
# check extensions
extension = os.path.splitext(video.name)[1]
if extension == '.mkv':
with open(video.name, 'rb') as f:
mkv = MKV(f)
# main video track
if mkv.video_tracks:
video_track = mkv.video_tracks[0]
# resolution
if video_track.height in (480, 720, 1080):
if video_track.interlaced:
video.resolution = '%di' % video_track.height
else:
video.resolution = '%dp' % video_track.height
logger.debug('Found resolution %s', video.resolution)
# video codec
if video_track.codec_id == 'V_MPEG4/ISO/AVC':
video.video_codec = 'h264'
logger.debug('Found video_codec %s', video.video_codec)
elif video_track.codec_id == 'V_MPEG4/ISO/SP':
video.video_codec = 'DivX'
logger.debug('Found video_codec %s', video.video_codec)
elif video_track.codec_id == 'V_MPEG4/ISO/ASP':
video.video_codec = 'XviD'
logger.debug('Found video_codec %s', video.video_codec)
else:
logger.warning('MKV has no video track')
# main audio track
if mkv.audio_tracks:
audio_track = mkv.audio_tracks[0]
# audio codec
if audio_track.codec_id == 'A_AC3':
video.audio_codec = 'AC3'
logger.debug('Found audio_codec %s', video.audio_codec)
elif audio_track.codec_id == 'A_DTS':
video.audio_codec = 'DTS'
logger.debug('Found audio_codec %s', video.audio_codec)
elif audio_track.codec_id == 'A_AAC':
video.audio_codec = 'AAC'
logger.debug('Found audio_codec %s', video.audio_codec)
else:
logger.warning('MKV has no audio track')
# subtitle tracks
if mkv.subtitle_tracks:
if embedded_subtitles:
embedded_subtitle_languages = set()
for st in mkv.subtitle_tracks:
if st.language:
try:
embedded_subtitle_languages.add(Language.fromalpha3b(st.language))
except BabelfishError:
logger.error('Embedded subtitle track language %r is not a valid language', st.language)
embedded_subtitle_languages.add(Language('und'))
elif st.name:
try:
embedded_subtitle_languages.add(Language.fromname(st.name))
except BabelfishError:
logger.debug('Embedded subtitle track name %r is not a valid language', st.name)
embedded_subtitle_languages.add(Language('und'))
else:
embedded_subtitle_languages.add(Language('und'))
logger.debug('Found embedded subtitle %r', embedded_subtitle_languages)
video.subtitle_languages |= embedded_subtitle_languages
else:
logger.debug('MKV has no subtitle track')
else:
logger.debug('Unsupported video extension %s', extension) | [
"def",
"refine",
"(",
"video",
",",
"embedded_subtitles",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"# skip non existing videos",
"if",
"not",
"video",
".",
"exists",
":",
"return",
"# check extensions",
"extension",
"=",
"os",
".",
"path",
".",
"split... | Refine a video by searching its metadata.
Several :class:`~subliminal.video.Video` attributes can be found:
* :attr:`~subliminal.video.Video.resolution`
* :attr:`~subliminal.video.Video.video_codec`
* :attr:`~subliminal.video.Video.audio_codec`
* :attr:`~subliminal.video.Video.subtitle_languages`
:param bool embedded_subtitles: search for embedded subtitles. | [
"Refine",
"a",
"video",
"by",
"searching",
"its",
"metadata",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/refiners/metadata.py#L11-L99 |
226,653 | Diaoul/subliminal | subliminal/refiners/tvdb.py | get_series_episode | def get_series_episode(series_id, season, episode):
"""Get an episode of a series.
:param int series_id: id of the series.
:param int season: season number of the episode.
:param int episode: episode number of the episode.
:return: the episode data.
:rtype: dict
"""
result = tvdb_client.query_series_episodes(series_id, aired_season=season, aired_episode=episode)
if result:
return tvdb_client.get_episode(result['data'][0]['id']) | python | def get_series_episode(series_id, season, episode):
result = tvdb_client.query_series_episodes(series_id, aired_season=season, aired_episode=episode)
if result:
return tvdb_client.get_episode(result['data'][0]['id']) | [
"def",
"get_series_episode",
"(",
"series_id",
",",
"season",
",",
"episode",
")",
":",
"result",
"=",
"tvdb_client",
".",
"query_series_episodes",
"(",
"series_id",
",",
"aired_season",
"=",
"season",
",",
"aired_episode",
"=",
"episode",
")",
"if",
"result",
... | Get an episode of a series.
:param int series_id: id of the series.
:param int season: season number of the episode.
:param int episode: episode number of the episode.
:return: the episode data.
:rtype: dict | [
"Get",
"an",
"episode",
"of",
"a",
"series",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/refiners/tvdb.py#L222-L234 |
226,654 | Diaoul/subliminal | subliminal/refiners/tvdb.py | TVDBClient.get_series_episodes | def get_series_episodes(self, id, page=1):
"""Get series episodes"""
# perform the request
params = {'page': page}
r = self.session.get(self.base_url + '/series/{}/episodes'.format(id), params=params)
if r.status_code == 404:
return None
r.raise_for_status()
return r.json() | python | def get_series_episodes(self, id, page=1):
# perform the request
params = {'page': page}
r = self.session.get(self.base_url + '/series/{}/episodes'.format(id), params=params)
if r.status_code == 404:
return None
r.raise_for_status()
return r.json() | [
"def",
"get_series_episodes",
"(",
"self",
",",
"id",
",",
"page",
"=",
"1",
")",
":",
"# perform the request",
"params",
"=",
"{",
"'page'",
":",
"page",
"}",
"r",
"=",
"self",
".",
"session",
".",
"get",
"(",
"self",
".",
"base_url",
"+",
"'/series/{... | Get series episodes | [
"Get",
"series",
"episodes"
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/refiners/tvdb.py#L156-L165 |
226,655 | Diaoul/subliminal | subliminal/refiners/tvdb.py | TVDBClient.query_series_episodes | def query_series_episodes(self, id, absolute_number=None, aired_season=None, aired_episode=None, dvd_season=None,
dvd_episode=None, imdb_id=None, page=1):
"""Query series episodes"""
# perform the request
params = {'absoluteNumber': absolute_number, 'airedSeason': aired_season, 'airedEpisode': aired_episode,
'dvdSeason': dvd_season, 'dvdEpisode': dvd_episode, 'imdbId': imdb_id, 'page': page}
r = self.session.get(self.base_url + '/series/{}/episodes/query'.format(id), params=params)
if r.status_code == 404:
return None
r.raise_for_status()
return r.json() | python | def query_series_episodes(self, id, absolute_number=None, aired_season=None, aired_episode=None, dvd_season=None,
dvd_episode=None, imdb_id=None, page=1):
# perform the request
params = {'absoluteNumber': absolute_number, 'airedSeason': aired_season, 'airedEpisode': aired_episode,
'dvdSeason': dvd_season, 'dvdEpisode': dvd_episode, 'imdbId': imdb_id, 'page': page}
r = self.session.get(self.base_url + '/series/{}/episodes/query'.format(id), params=params)
if r.status_code == 404:
return None
r.raise_for_status()
return r.json() | [
"def",
"query_series_episodes",
"(",
"self",
",",
"id",
",",
"absolute_number",
"=",
"None",
",",
"aired_season",
"=",
"None",
",",
"aired_episode",
"=",
"None",
",",
"dvd_season",
"=",
"None",
",",
"dvd_episode",
"=",
"None",
",",
"imdb_id",
"=",
"None",
... | Query series episodes | [
"Query",
"series",
"episodes"
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/refiners/tvdb.py#L168-L179 |
226,656 | Diaoul/subliminal | subliminal/providers/opensubtitles.py | checked | def checked(response):
"""Check a response status before returning it.
:param response: a response from a XMLRPC call to OpenSubtitles.
:return: the response.
:raise: :class:`OpenSubtitlesError`
"""
status_code = int(response['status'][:3])
if status_code == 401:
raise Unauthorized
if status_code == 406:
raise NoSession
if status_code == 407:
raise DownloadLimitReached
if status_code == 413:
raise InvalidImdbid
if status_code == 414:
raise UnknownUserAgent
if status_code == 415:
raise DisabledUserAgent
if status_code == 503:
raise ServiceUnavailable
if status_code != 200:
raise OpenSubtitlesError(response['status'])
return response | python | def checked(response):
status_code = int(response['status'][:3])
if status_code == 401:
raise Unauthorized
if status_code == 406:
raise NoSession
if status_code == 407:
raise DownloadLimitReached
if status_code == 413:
raise InvalidImdbid
if status_code == 414:
raise UnknownUserAgent
if status_code == 415:
raise DisabledUserAgent
if status_code == 503:
raise ServiceUnavailable
if status_code != 200:
raise OpenSubtitlesError(response['status'])
return response | [
"def",
"checked",
"(",
"response",
")",
":",
"status_code",
"=",
"int",
"(",
"response",
"[",
"'status'",
"]",
"[",
":",
"3",
"]",
")",
"if",
"status_code",
"==",
"401",
":",
"raise",
"Unauthorized",
"if",
"status_code",
"==",
"406",
":",
"raise",
"NoS... | Check a response status before returning it.
:param response: a response from a XMLRPC call to OpenSubtitles.
:return: the response.
:raise: :class:`OpenSubtitlesError` | [
"Check",
"a",
"response",
"status",
"before",
"returning",
"it",
"."
] | a952dfb2032eb0fd6eb1eb89f04080923c11c4cf | https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/providers/opensubtitles.py#L268-L294 |
226,657 | yougov/mongo-connector | mongo_connector/util.py | retry_until_ok | def retry_until_ok(func, *args, **kwargs):
"""Retry code block until it succeeds.
If it does not succeed in 120 attempts, the function re-raises any
error the function raised on its last attempt.
"""
max_tries = 120
for i in range(max_tries):
try:
return func(*args, **kwargs)
except RuntimeError:
# Do not mask RuntimeError.
raise
except errors.OperationFailure as exc:
if exc.code == 13 or ( # MongoDB >= 2.6 sets the error code,
exc.details
and "unauthorized" == exc.details.get("errmsg") # MongoDB 2.4 does not.
):
# Do not mask authorization failures.
raise
if i == max_tries - 1:
LOG.exception(
"Call to %s failed too many times in " "retry_until_ok", func
)
raise
except Exception:
if i == max_tries - 1:
LOG.exception(
"Call to %s failed too many times in " "retry_until_ok", func
)
raise
time.sleep(1) | python | def retry_until_ok(func, *args, **kwargs):
max_tries = 120
for i in range(max_tries):
try:
return func(*args, **kwargs)
except RuntimeError:
# Do not mask RuntimeError.
raise
except errors.OperationFailure as exc:
if exc.code == 13 or ( # MongoDB >= 2.6 sets the error code,
exc.details
and "unauthorized" == exc.details.get("errmsg") # MongoDB 2.4 does not.
):
# Do not mask authorization failures.
raise
if i == max_tries - 1:
LOG.exception(
"Call to %s failed too many times in " "retry_until_ok", func
)
raise
except Exception:
if i == max_tries - 1:
LOG.exception(
"Call to %s failed too many times in " "retry_until_ok", func
)
raise
time.sleep(1) | [
"def",
"retry_until_ok",
"(",
"func",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"max_tries",
"=",
"120",
"for",
"i",
"in",
"range",
"(",
"max_tries",
")",
":",
"try",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")... | Retry code block until it succeeds.
If it does not succeed in 120 attempts, the function re-raises any
error the function raised on its last attempt. | [
"Retry",
"code",
"block",
"until",
"it",
"succeeds",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/util.py#L69-L101 |
226,658 | yougov/mongo-connector | mongo_connector/doc_managers/doc_manager_base.py | DocManagerBase.apply_update | def apply_update(self, doc, update_spec):
"""Apply an update operation to a document."""
# Helper to cast a key for a list or dict, or raise ValueError
def _convert_or_raise(container, key):
if isinstance(container, dict):
return key
elif isinstance(container, list):
return int(key)
else:
raise ValueError
# Helper to retrieve (and/or create)
# a dot-separated path within a document.
def _retrieve_path(container, path, create=False):
looking_at = container
for part in path:
if isinstance(looking_at, dict):
if create and part not in looking_at:
looking_at[part] = {}
looking_at = looking_at[part]
elif isinstance(looking_at, list):
index = int(part)
# Do we need to create additional space in the array?
if create and len(looking_at) <= index:
# Fill buckets with None up to the index we need.
looking_at.extend([None] * (index - len(looking_at)))
# Bucket we need gets the empty dictionary.
looking_at.append({})
looking_at = looking_at[index]
else:
raise ValueError
return looking_at
def _set_field(doc, to_set, value):
if "." in to_set:
path = to_set.split(".")
where = _retrieve_path(doc, path[:-1], create=True)
index = _convert_or_raise(where, path[-1])
wl = len(where)
if isinstance(where, list) and index >= wl:
where.extend([None] * (index + 1 - wl))
where[index] = value
else:
doc[to_set] = value
def _unset_field(doc, to_unset):
try:
if "." in to_unset:
path = to_unset.split(".")
where = _retrieve_path(doc, path[:-1])
index_or_key = _convert_or_raise(where, path[-1])
if isinstance(where, list):
# Unset an array element sets it to null.
where[index_or_key] = None
else:
# Unset field removes it entirely.
del where[index_or_key]
else:
del doc[to_unset]
except (KeyError, IndexError, ValueError):
source_version = get_mininum_mongodb_version()
if source_version is None or source_version.at_least(2, 6):
raise
# Ignore unset errors since MongoDB 2.4 records invalid
# $unsets in the oplog.
LOG.warning(
"Could not unset field %r from document %r. "
"This may be normal when replicating from "
"MongoDB 2.4 or the destination could be out of "
"sync." % (to_unset, doc)
)
# wholesale document replacement
if "$set" not in update_spec and "$unset" not in update_spec:
# update spec contains the new document in its entirety
return update_spec
else:
try:
# $set
for to_set in update_spec.get("$set", []):
value = update_spec["$set"][to_set]
_set_field(doc, to_set, value)
# $unset
for to_unset in update_spec.get("$unset", []):
_unset_field(doc, to_unset)
except (KeyError, ValueError, AttributeError, IndexError):
exc_t, exc_v, exc_tb = sys.exc_info()
msg = "Cannot apply update %r to %r" % (update_spec, doc)
raise UpdateDoesNotApply(msg).with_traceback(exc_tb)
return doc | python | def apply_update(self, doc, update_spec):
# Helper to cast a key for a list or dict, or raise ValueError
def _convert_or_raise(container, key):
if isinstance(container, dict):
return key
elif isinstance(container, list):
return int(key)
else:
raise ValueError
# Helper to retrieve (and/or create)
# a dot-separated path within a document.
def _retrieve_path(container, path, create=False):
looking_at = container
for part in path:
if isinstance(looking_at, dict):
if create and part not in looking_at:
looking_at[part] = {}
looking_at = looking_at[part]
elif isinstance(looking_at, list):
index = int(part)
# Do we need to create additional space in the array?
if create and len(looking_at) <= index:
# Fill buckets with None up to the index we need.
looking_at.extend([None] * (index - len(looking_at)))
# Bucket we need gets the empty dictionary.
looking_at.append({})
looking_at = looking_at[index]
else:
raise ValueError
return looking_at
def _set_field(doc, to_set, value):
if "." in to_set:
path = to_set.split(".")
where = _retrieve_path(doc, path[:-1], create=True)
index = _convert_or_raise(where, path[-1])
wl = len(where)
if isinstance(where, list) and index >= wl:
where.extend([None] * (index + 1 - wl))
where[index] = value
else:
doc[to_set] = value
def _unset_field(doc, to_unset):
try:
if "." in to_unset:
path = to_unset.split(".")
where = _retrieve_path(doc, path[:-1])
index_or_key = _convert_or_raise(where, path[-1])
if isinstance(where, list):
# Unset an array element sets it to null.
where[index_or_key] = None
else:
# Unset field removes it entirely.
del where[index_or_key]
else:
del doc[to_unset]
except (KeyError, IndexError, ValueError):
source_version = get_mininum_mongodb_version()
if source_version is None or source_version.at_least(2, 6):
raise
# Ignore unset errors since MongoDB 2.4 records invalid
# $unsets in the oplog.
LOG.warning(
"Could not unset field %r from document %r. "
"This may be normal when replicating from "
"MongoDB 2.4 or the destination could be out of "
"sync." % (to_unset, doc)
)
# wholesale document replacement
if "$set" not in update_spec and "$unset" not in update_spec:
# update spec contains the new document in its entirety
return update_spec
else:
try:
# $set
for to_set in update_spec.get("$set", []):
value = update_spec["$set"][to_set]
_set_field(doc, to_set, value)
# $unset
for to_unset in update_spec.get("$unset", []):
_unset_field(doc, to_unset)
except (KeyError, ValueError, AttributeError, IndexError):
exc_t, exc_v, exc_tb = sys.exc_info()
msg = "Cannot apply update %r to %r" % (update_spec, doc)
raise UpdateDoesNotApply(msg).with_traceback(exc_tb)
return doc | [
"def",
"apply_update",
"(",
"self",
",",
"doc",
",",
"update_spec",
")",
":",
"# Helper to cast a key for a list or dict, or raise ValueError",
"def",
"_convert_or_raise",
"(",
"container",
",",
"key",
")",
":",
"if",
"isinstance",
"(",
"container",
",",
"dict",
")"... | Apply an update operation to a document. | [
"Apply",
"an",
"update",
"operation",
"to",
"a",
"document",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/doc_managers/doc_manager_base.py#L28-L120 |
226,659 | yougov/mongo-connector | mongo_connector/doc_managers/doc_manager_base.py | DocManagerBase.bulk_upsert | def bulk_upsert(self, docs, namespace, timestamp):
"""Upsert each document in a set of documents.
This method may be overridden to upsert many documents at once.
"""
for doc in docs:
self.upsert(doc, namespace, timestamp) | python | def bulk_upsert(self, docs, namespace, timestamp):
for doc in docs:
self.upsert(doc, namespace, timestamp) | [
"def",
"bulk_upsert",
"(",
"self",
",",
"docs",
",",
"namespace",
",",
"timestamp",
")",
":",
"for",
"doc",
"in",
"docs",
":",
"self",
".",
"upsert",
"(",
"doc",
",",
"namespace",
",",
"timestamp",
")"
] | Upsert each document in a set of documents.
This method may be overridden to upsert many documents at once. | [
"Upsert",
"each",
"document",
"in",
"a",
"set",
"of",
"documents",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/doc_managers/doc_manager_base.py#L122-L128 |
226,660 | yougov/mongo-connector | mongo_connector/connector.py | log_startup_info | def log_startup_info():
"""Log info about the current environment."""
LOG.always("Starting mongo-connector version: %s", __version__)
if "dev" in __version__:
LOG.warning(
"This is a development version (%s) of mongo-connector", __version__
)
LOG.always("Python version: %s", sys.version)
LOG.always("Platform: %s", platform.platform())
if hasattr(pymongo, "__version__"):
pymongo_version = pymongo.__version__
else:
pymongo_version = pymongo.version
LOG.always("pymongo version: %s", pymongo_version)
if not pymongo.has_c():
LOG.warning(
"pymongo version %s was installed without the C extensions. "
'"InvalidBSON: Date value out of range" errors may occur if '
"there are documents with BSON Datetimes that represent times "
"outside of Python's datetime limit.",
pymongo.__version__,
) | python | def log_startup_info():
LOG.always("Starting mongo-connector version: %s", __version__)
if "dev" in __version__:
LOG.warning(
"This is a development version (%s) of mongo-connector", __version__
)
LOG.always("Python version: %s", sys.version)
LOG.always("Platform: %s", platform.platform())
if hasattr(pymongo, "__version__"):
pymongo_version = pymongo.__version__
else:
pymongo_version = pymongo.version
LOG.always("pymongo version: %s", pymongo_version)
if not pymongo.has_c():
LOG.warning(
"pymongo version %s was installed without the C extensions. "
'"InvalidBSON: Date value out of range" errors may occur if '
"there are documents with BSON Datetimes that represent times "
"outside of Python's datetime limit.",
pymongo.__version__,
) | [
"def",
"log_startup_info",
"(",
")",
":",
"LOG",
".",
"always",
"(",
"\"Starting mongo-connector version: %s\"",
",",
"__version__",
")",
"if",
"\"dev\"",
"in",
"__version__",
":",
"LOG",
".",
"warning",
"(",
"\"This is a development version (%s) of mongo-connector\"",
... | Log info about the current environment. | [
"Log",
"info",
"about",
"the",
"current",
"environment",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/connector.py#L1373-L1394 |
226,661 | yougov/mongo-connector | mongo_connector/connector.py | Connector.from_config | def from_config(cls, config):
"""Create a new Connector instance from a Config object."""
auth_key = None
password_file = config["authentication.passwordFile"]
if password_file is not None:
try:
auth_key = open(config["authentication.passwordFile"]).read()
auth_key = re.sub(r"\s", "", auth_key)
except IOError:
LOG.error("Could not load password file!")
sys.exit(1)
password = config["authentication.password"]
if password is not None:
auth_key = password
connector = Connector(
mongo_address=config["mainAddress"],
doc_managers=config["docManagers"],
oplog_checkpoint=os.path.abspath(config["oplogFile"]),
collection_dump=config["onlyDump"] or not config["noDump"],
only_dump=config["onlyDump"],
batch_size=config["batchSize"],
continue_on_error=config["continueOnError"],
auth_username=config["authentication.adminUsername"],
auth_key=auth_key,
fields=config["fields"],
exclude_fields=config["exclude_fields"],
ns_set=config["namespaces.include"],
ex_ns_set=config["namespaces.exclude"],
dest_mapping=config["namespaces.mapping"],
namespace_options=config["namespaces.namespace_options"],
gridfs_set=config["namespaces.gridfs"],
ssl_certfile=config["ssl.sslCertfile"],
ssl_keyfile=config["ssl.sslKeyfile"],
ssl_ca_certs=config["ssl.sslCACerts"],
ssl_cert_reqs=config["ssl.sslCertificatePolicy"],
tz_aware=config["timezoneAware"],
)
return connector | python | def from_config(cls, config):
auth_key = None
password_file = config["authentication.passwordFile"]
if password_file is not None:
try:
auth_key = open(config["authentication.passwordFile"]).read()
auth_key = re.sub(r"\s", "", auth_key)
except IOError:
LOG.error("Could not load password file!")
sys.exit(1)
password = config["authentication.password"]
if password is not None:
auth_key = password
connector = Connector(
mongo_address=config["mainAddress"],
doc_managers=config["docManagers"],
oplog_checkpoint=os.path.abspath(config["oplogFile"]),
collection_dump=config["onlyDump"] or not config["noDump"],
only_dump=config["onlyDump"],
batch_size=config["batchSize"],
continue_on_error=config["continueOnError"],
auth_username=config["authentication.adminUsername"],
auth_key=auth_key,
fields=config["fields"],
exclude_fields=config["exclude_fields"],
ns_set=config["namespaces.include"],
ex_ns_set=config["namespaces.exclude"],
dest_mapping=config["namespaces.mapping"],
namespace_options=config["namespaces.namespace_options"],
gridfs_set=config["namespaces.gridfs"],
ssl_certfile=config["ssl.sslCertfile"],
ssl_keyfile=config["ssl.sslKeyfile"],
ssl_ca_certs=config["ssl.sslCACerts"],
ssl_cert_reqs=config["ssl.sslCertificatePolicy"],
tz_aware=config["timezoneAware"],
)
return connector | [
"def",
"from_config",
"(",
"cls",
",",
"config",
")",
":",
"auth_key",
"=",
"None",
"password_file",
"=",
"config",
"[",
"\"authentication.passwordFile\"",
"]",
"if",
"password_file",
"is",
"not",
"None",
":",
"try",
":",
"auth_key",
"=",
"open",
"(",
"confi... | Create a new Connector instance from a Config object. | [
"Create",
"a",
"new",
"Connector",
"instance",
"from",
"a",
"Config",
"object",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/connector.py#L190-L227 |
226,662 | yougov/mongo-connector | mongo_connector/connector.py | Connector.join | def join(self):
""" Joins thread, stops it from running
"""
self.can_run = False
super(Connector, self).join()
for dm in self.doc_managers:
dm.stop() | python | def join(self):
self.can_run = False
super(Connector, self).join()
for dm in self.doc_managers:
dm.stop() | [
"def",
"join",
"(",
"self",
")",
":",
"self",
".",
"can_run",
"=",
"False",
"super",
"(",
"Connector",
",",
"self",
")",
".",
"join",
"(",
")",
"for",
"dm",
"in",
"self",
".",
"doc_managers",
":",
"dm",
".",
"stop",
"(",
")"
] | Joins thread, stops it from running | [
"Joins",
"thread",
"stops",
"it",
"from",
"running"
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/connector.py#L229-L235 |
226,663 | yougov/mongo-connector | mongo_connector/connector.py | Connector.write_oplog_progress | def write_oplog_progress(self):
""" Writes oplog progress to file provided by user
"""
if self.oplog_checkpoint is None:
return None
with self.oplog_progress as oplog_prog:
oplog_dict = oplog_prog.get_dict()
items = [[name, util.bson_ts_to_long(oplog_dict[name])] for name in oplog_dict]
if not items:
return
# write to temp file
backup_file = self.oplog_checkpoint + ".backup"
os.rename(self.oplog_checkpoint, backup_file)
# for each of the threads write to file
with open(self.oplog_checkpoint, "w") as dest:
if len(items) == 1:
# Write 1-dimensional array, as in previous versions.
json_str = json.dumps(items[0])
else:
# Write a 2d array to support sharded clusters.
json_str = json.dumps(items)
try:
dest.write(json_str)
except IOError:
# Basically wipe the file, copy from backup
dest.truncate()
with open(backup_file, "r") as backup:
shutil.copyfile(backup, dest)
os.remove(backup_file) | python | def write_oplog_progress(self):
if self.oplog_checkpoint is None:
return None
with self.oplog_progress as oplog_prog:
oplog_dict = oplog_prog.get_dict()
items = [[name, util.bson_ts_to_long(oplog_dict[name])] for name in oplog_dict]
if not items:
return
# write to temp file
backup_file = self.oplog_checkpoint + ".backup"
os.rename(self.oplog_checkpoint, backup_file)
# for each of the threads write to file
with open(self.oplog_checkpoint, "w") as dest:
if len(items) == 1:
# Write 1-dimensional array, as in previous versions.
json_str = json.dumps(items[0])
else:
# Write a 2d array to support sharded clusters.
json_str = json.dumps(items)
try:
dest.write(json_str)
except IOError:
# Basically wipe the file, copy from backup
dest.truncate()
with open(backup_file, "r") as backup:
shutil.copyfile(backup, dest)
os.remove(backup_file) | [
"def",
"write_oplog_progress",
"(",
"self",
")",
":",
"if",
"self",
".",
"oplog_checkpoint",
"is",
"None",
":",
"return",
"None",
"with",
"self",
".",
"oplog_progress",
"as",
"oplog_prog",
":",
"oplog_dict",
"=",
"oplog_prog",
".",
"get_dict",
"(",
")",
"ite... | Writes oplog progress to file provided by user | [
"Writes",
"oplog",
"progress",
"to",
"file",
"provided",
"by",
"user"
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/connector.py#L237-L270 |
226,664 | yougov/mongo-connector | mongo_connector/connector.py | Connector.read_oplog_progress | def read_oplog_progress(self):
"""Reads oplog progress from file provided by user.
This method is only called once before any threads are spanwed.
"""
if self.oplog_checkpoint is None:
return None
# Check for empty file
try:
if os.stat(self.oplog_checkpoint).st_size == 0:
LOG.info("MongoConnector: Empty oplog progress file.")
return None
except OSError:
return None
with open(self.oplog_checkpoint, "r") as progress_file:
try:
data = json.load(progress_file)
except ValueError:
LOG.exception(
'Cannot read oplog progress file "%s". '
"It may be corrupt after Mongo Connector was shut down"
"uncleanly. You can try to recover from a backup file "
'(may be called "%s.backup") or create a new progress file '
"starting at the current moment in time by running "
"mongo-connector --no-dump <other options>. "
"You may also be trying to read an oplog progress file "
"created with the old format for sharded clusters. "
"See https://github.com/10gen-labs/mongo-connector/wiki"
"/Oplog-Progress-File for complete documentation."
% (self.oplog_checkpoint, self.oplog_checkpoint)
)
return
# data format:
# [name, timestamp] = replica set
# [[name, timestamp], [name, timestamp], ...] = sharded cluster
if not isinstance(data[0], list):
data = [data]
with self.oplog_progress:
self.oplog_progress.dict = dict(
(name, util.long_to_bson_ts(timestamp)) for name, timestamp in data
) | python | def read_oplog_progress(self):
if self.oplog_checkpoint is None:
return None
# Check for empty file
try:
if os.stat(self.oplog_checkpoint).st_size == 0:
LOG.info("MongoConnector: Empty oplog progress file.")
return None
except OSError:
return None
with open(self.oplog_checkpoint, "r") as progress_file:
try:
data = json.load(progress_file)
except ValueError:
LOG.exception(
'Cannot read oplog progress file "%s". '
"It may be corrupt after Mongo Connector was shut down"
"uncleanly. You can try to recover from a backup file "
'(may be called "%s.backup") or create a new progress file '
"starting at the current moment in time by running "
"mongo-connector --no-dump <other options>. "
"You may also be trying to read an oplog progress file "
"created with the old format for sharded clusters. "
"See https://github.com/10gen-labs/mongo-connector/wiki"
"/Oplog-Progress-File for complete documentation."
% (self.oplog_checkpoint, self.oplog_checkpoint)
)
return
# data format:
# [name, timestamp] = replica set
# [[name, timestamp], [name, timestamp], ...] = sharded cluster
if not isinstance(data[0], list):
data = [data]
with self.oplog_progress:
self.oplog_progress.dict = dict(
(name, util.long_to_bson_ts(timestamp)) for name, timestamp in data
) | [
"def",
"read_oplog_progress",
"(",
"self",
")",
":",
"if",
"self",
".",
"oplog_checkpoint",
"is",
"None",
":",
"return",
"None",
"# Check for empty file",
"try",
":",
"if",
"os",
".",
"stat",
"(",
"self",
".",
"oplog_checkpoint",
")",
".",
"st_size",
"==",
... | Reads oplog progress from file provided by user.
This method is only called once before any threads are spanwed. | [
"Reads",
"oplog",
"progress",
"from",
"file",
"provided",
"by",
"user",
".",
"This",
"method",
"is",
"only",
"called",
"once",
"before",
"any",
"threads",
"are",
"spanwed",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/connector.py#L272-L314 |
226,665 | yougov/mongo-connector | mongo_connector/connector.py | Connector.copy_uri_options | def copy_uri_options(hosts, mongodb_uri):
"""Returns a MongoDB URI to hosts with the options from mongodb_uri.
"""
if "?" in mongodb_uri:
options = mongodb_uri.split("?", 1)[1]
else:
options = None
uri = "mongodb://" + hosts
if options:
uri += "/?" + options
return uri | python | def copy_uri_options(hosts, mongodb_uri):
if "?" in mongodb_uri:
options = mongodb_uri.split("?", 1)[1]
else:
options = None
uri = "mongodb://" + hosts
if options:
uri += "/?" + options
return uri | [
"def",
"copy_uri_options",
"(",
"hosts",
",",
"mongodb_uri",
")",
":",
"if",
"\"?\"",
"in",
"mongodb_uri",
":",
"options",
"=",
"mongodb_uri",
".",
"split",
"(",
"\"?\"",
",",
"1",
")",
"[",
"1",
"]",
"else",
":",
"options",
"=",
"None",
"uri",
"=",
... | Returns a MongoDB URI to hosts with the options from mongodb_uri. | [
"Returns",
"a",
"MongoDB",
"URI",
"to",
"hosts",
"with",
"the",
"options",
"from",
"mongodb_uri",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/connector.py#L317-L327 |
226,666 | yougov/mongo-connector | mongo_connector/connector.py | Connector.oplog_thread_join | def oplog_thread_join(self):
"""Stops all the OplogThreads
"""
LOG.info("MongoConnector: Stopping all OplogThreads")
for thread in self.shard_set.values():
thread.join() | python | def oplog_thread_join(self):
LOG.info("MongoConnector: Stopping all OplogThreads")
for thread in self.shard_set.values():
thread.join() | [
"def",
"oplog_thread_join",
"(",
"self",
")",
":",
"LOG",
".",
"info",
"(",
"\"MongoConnector: Stopping all OplogThreads\"",
")",
"for",
"thread",
"in",
"self",
".",
"shard_set",
".",
"values",
"(",
")",
":",
"thread",
".",
"join",
"(",
")"
] | Stops all the OplogThreads | [
"Stops",
"all",
"the",
"OplogThreads"
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/connector.py#L475-L480 |
226,667 | yougov/mongo-connector | mongo_connector/namespace_config.py | _character_matches | def _character_matches(name1, name2):
"""Yield the number of characters that match the beginning of each string.
"""
if name1[0] == "*":
for i in range(len(name2) + 1):
yield 1, i
if name2[0] == "*":
for i in range(len(name1) + 1):
yield i, 1
if name1[0] == name2[0]:
yield 1, 1 | python | def _character_matches(name1, name2):
if name1[0] == "*":
for i in range(len(name2) + 1):
yield 1, i
if name2[0] == "*":
for i in range(len(name1) + 1):
yield i, 1
if name1[0] == name2[0]:
yield 1, 1 | [
"def",
"_character_matches",
"(",
"name1",
",",
"name2",
")",
":",
"if",
"name1",
"[",
"0",
"]",
"==",
"\"*\"",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"name2",
")",
"+",
"1",
")",
":",
"yield",
"1",
",",
"i",
"if",
"name2",
"[",
"0",
... | Yield the number of characters that match the beginning of each string. | [
"Yield",
"the",
"number",
"of",
"characters",
"that",
"match",
"the",
"beginning",
"of",
"each",
"string",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/namespace_config.py#L331-L341 |
226,668 | yougov/mongo-connector | mongo_connector/namespace_config.py | wildcards_overlap | def wildcards_overlap(name1, name2):
"""Return true if two wildcard patterns can match the same string."""
if not name1 and not name2:
return True
if not name1 or not name2:
return False
for matched1, matched2 in _character_matches(name1, name2):
if wildcards_overlap(name1[matched1:], name2[matched2:]):
return True
return False | python | def wildcards_overlap(name1, name2):
if not name1 and not name2:
return True
if not name1 or not name2:
return False
for matched1, matched2 in _character_matches(name1, name2):
if wildcards_overlap(name1[matched1:], name2[matched2:]):
return True
return False | [
"def",
"wildcards_overlap",
"(",
"name1",
",",
"name2",
")",
":",
"if",
"not",
"name1",
"and",
"not",
"name2",
":",
"return",
"True",
"if",
"not",
"name1",
"or",
"not",
"name2",
":",
"return",
"False",
"for",
"matched1",
",",
"matched2",
"in",
"_characte... | Return true if two wildcard patterns can match the same string. | [
"Return",
"true",
"if",
"two",
"wildcard",
"patterns",
"can",
"match",
"the",
"same",
"string",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/namespace_config.py#L344-L353 |
226,669 | yougov/mongo-connector | mongo_connector/namespace_config.py | _validate_namespaces | def _validate_namespaces(namespaces):
"""Validate wildcards and renaming in namespaces.
Target namespaces should have the same number of wildcards as the source.
No target namespaces overlap exactly with each other. Logs a warning
when wildcard namespaces have a chance of overlapping.
"""
for source, namespace in namespaces.items():
target = namespace.dest_name
_validate_namespace(source)
_validate_namespace(target)
if source.count("*") > 1 or target.count("*") > 1:
raise errors.InvalidConfiguration(
"The namespace mapping from '%s' to '%s' cannot contain more "
"than one '*' character." % (source, target)
)
if source.count("*") != target.count("*"):
raise errors.InvalidConfiguration(
"The namespace mapping from '%s' to '%s' must contain the "
"same number of '*' characters." % (source, target)
)
if "*" not in source:
continue
# Make sure that wildcards are not moved from database name to
# collection name or vice versa, eg "db*.foo" => "db.foo_*"
if (
wildcard_in_db(source)
and not wildcard_in_db(target)
or (not wildcard_in_db(source) and wildcard_in_db(target))
):
raise errors.InvalidConfiguration(
"The namespace mapping from '%s' to '%s' is invalid. A '*' "
"that appears in the source database name must also appear"
"in the target database name. A '*' that appears in the "
"source collection name must also appear in the target "
"collection name" % (source, target)
)
for source1, source2 in combinations(namespaces.keys(), 2):
if wildcards_overlap(source1, source2):
LOG.warning(
'Namespaces "%s" and "%s" may match the ' "same source namespace.",
source1,
source2,
)
target1 = namespaces[source1].dest_name
target2 = namespaces[source2].dest_name
if target1 == target2:
raise errors.InvalidConfiguration(
"Multiple namespaces cannot be combined into one target "
"namespace. Trying to map '%s' to '%s' but '%s' already "
"corresponds to '%s' in the target system."
% (source2, target2, source1, target1)
)
if wildcards_overlap(target1, target2):
LOG.warning(
"Multiple namespaces cannot be combined into one target "
"namespace. Mapping from '%s' to '%s' might overlap "
"with mapping from '%s' to '%s'." % (source2, target2, source1, target1)
) | python | def _validate_namespaces(namespaces):
for source, namespace in namespaces.items():
target = namespace.dest_name
_validate_namespace(source)
_validate_namespace(target)
if source.count("*") > 1 or target.count("*") > 1:
raise errors.InvalidConfiguration(
"The namespace mapping from '%s' to '%s' cannot contain more "
"than one '*' character." % (source, target)
)
if source.count("*") != target.count("*"):
raise errors.InvalidConfiguration(
"The namespace mapping from '%s' to '%s' must contain the "
"same number of '*' characters." % (source, target)
)
if "*" not in source:
continue
# Make sure that wildcards are not moved from database name to
# collection name or vice versa, eg "db*.foo" => "db.foo_*"
if (
wildcard_in_db(source)
and not wildcard_in_db(target)
or (not wildcard_in_db(source) and wildcard_in_db(target))
):
raise errors.InvalidConfiguration(
"The namespace mapping from '%s' to '%s' is invalid. A '*' "
"that appears in the source database name must also appear"
"in the target database name. A '*' that appears in the "
"source collection name must also appear in the target "
"collection name" % (source, target)
)
for source1, source2 in combinations(namespaces.keys(), 2):
if wildcards_overlap(source1, source2):
LOG.warning(
'Namespaces "%s" and "%s" may match the ' "same source namespace.",
source1,
source2,
)
target1 = namespaces[source1].dest_name
target2 = namespaces[source2].dest_name
if target1 == target2:
raise errors.InvalidConfiguration(
"Multiple namespaces cannot be combined into one target "
"namespace. Trying to map '%s' to '%s' but '%s' already "
"corresponds to '%s' in the target system."
% (source2, target2, source1, target1)
)
if wildcards_overlap(target1, target2):
LOG.warning(
"Multiple namespaces cannot be combined into one target "
"namespace. Mapping from '%s' to '%s' might overlap "
"with mapping from '%s' to '%s'." % (source2, target2, source1, target1)
) | [
"def",
"_validate_namespaces",
"(",
"namespaces",
")",
":",
"for",
"source",
",",
"namespace",
"in",
"namespaces",
".",
"items",
"(",
")",
":",
"target",
"=",
"namespace",
".",
"dest_name",
"_validate_namespace",
"(",
"source",
")",
"_validate_namespace",
"(",
... | Validate wildcards and renaming in namespaces.
Target namespaces should have the same number of wildcards as the source.
No target namespaces overlap exactly with each other. Logs a warning
when wildcard namespaces have a chance of overlapping. | [
"Validate",
"wildcards",
"and",
"renaming",
"in",
"namespaces",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/namespace_config.py#L362-L421 |
226,670 | yougov/mongo-connector | mongo_connector/namespace_config.py | _merge_namespace_options | def _merge_namespace_options(
namespace_set=None,
ex_namespace_set=None,
gridfs_set=None,
dest_mapping=None,
namespace_options=None,
include_fields=None,
exclude_fields=None,
):
"""Merges namespaces options together.
The first is the set of excluded namespaces and the second is a mapping
from source namespace to Namespace instances.
"""
namespace_set = set(namespace_set or [])
ex_namespace_set = set(ex_namespace_set or [])
gridfs_set = set(gridfs_set or [])
dest_mapping = dest_mapping or {}
namespace_options = namespace_options or {}
include_fields = set(include_fields or [])
exclude_fields = set(exclude_fields or [])
namespaces = {}
for source_name, options_or_str in namespace_options.items():
if isinstance(options_or_str, dict):
namespace_set.add(source_name)
if options_or_str.get("gridfs"):
gridfs_set.add(source_name)
namespaces[source_name] = Namespace(
dest_name=options_or_str.get("rename"),
include_fields=options_or_str.get("includeFields"),
exclude_fields=options_or_str.get("excludeFields"),
gridfs=options_or_str.get("gridfs", False),
)
elif isinstance(options_or_str, str):
namespace_set.add(source_name)
namespaces[source_name] = Namespace(dest_name=options_or_str)
elif options_or_str:
namespace_set.add(source_name)
else:
ex_namespace_set.add(source_name)
# Add namespaces that are renamed but not in namespace_options
for source_name, target_name in dest_mapping.items():
namespaces[source_name] = namespaces.get(source_name, Namespace()).with_options(
dest_name=target_name
)
# Add namespaces that are included but not in namespace_options
for included_name in namespace_set:
if included_name not in namespaces:
namespaces[included_name] = Namespace()
# Add namespaces that are excluded but not in namespace_options
for gridfs_name in gridfs_set:
namespaces[gridfs_name] = namespaces.get(gridfs_name, Namespace()).with_options(
gridfs=True
)
# Add source, destination name, and globally included and excluded fields
for included_name in namespaces:
namespace = namespaces[included_name]
namespace = namespace.with_options(
source_name=included_name,
include_fields=validate_include_fields(
include_fields, namespace.include_fields
),
exclude_fields=validate_exclude_fields(
exclude_fields, namespace.exclude_fields
),
)
# The default destination name is the same as the source.
if not namespace.dest_name:
namespace = namespace.with_options(dest_name=included_name)
namespaces[included_name] = namespace
return ex_namespace_set, namespaces | python | def _merge_namespace_options(
namespace_set=None,
ex_namespace_set=None,
gridfs_set=None,
dest_mapping=None,
namespace_options=None,
include_fields=None,
exclude_fields=None,
):
namespace_set = set(namespace_set or [])
ex_namespace_set = set(ex_namespace_set or [])
gridfs_set = set(gridfs_set or [])
dest_mapping = dest_mapping or {}
namespace_options = namespace_options or {}
include_fields = set(include_fields or [])
exclude_fields = set(exclude_fields or [])
namespaces = {}
for source_name, options_or_str in namespace_options.items():
if isinstance(options_or_str, dict):
namespace_set.add(source_name)
if options_or_str.get("gridfs"):
gridfs_set.add(source_name)
namespaces[source_name] = Namespace(
dest_name=options_or_str.get("rename"),
include_fields=options_or_str.get("includeFields"),
exclude_fields=options_or_str.get("excludeFields"),
gridfs=options_or_str.get("gridfs", False),
)
elif isinstance(options_or_str, str):
namespace_set.add(source_name)
namespaces[source_name] = Namespace(dest_name=options_or_str)
elif options_or_str:
namespace_set.add(source_name)
else:
ex_namespace_set.add(source_name)
# Add namespaces that are renamed but not in namespace_options
for source_name, target_name in dest_mapping.items():
namespaces[source_name] = namespaces.get(source_name, Namespace()).with_options(
dest_name=target_name
)
# Add namespaces that are included but not in namespace_options
for included_name in namespace_set:
if included_name not in namespaces:
namespaces[included_name] = Namespace()
# Add namespaces that are excluded but not in namespace_options
for gridfs_name in gridfs_set:
namespaces[gridfs_name] = namespaces.get(gridfs_name, Namespace()).with_options(
gridfs=True
)
# Add source, destination name, and globally included and excluded fields
for included_name in namespaces:
namespace = namespaces[included_name]
namespace = namespace.with_options(
source_name=included_name,
include_fields=validate_include_fields(
include_fields, namespace.include_fields
),
exclude_fields=validate_exclude_fields(
exclude_fields, namespace.exclude_fields
),
)
# The default destination name is the same as the source.
if not namespace.dest_name:
namespace = namespace.with_options(dest_name=included_name)
namespaces[included_name] = namespace
return ex_namespace_set, namespaces | [
"def",
"_merge_namespace_options",
"(",
"namespace_set",
"=",
"None",
",",
"ex_namespace_set",
"=",
"None",
",",
"gridfs_set",
"=",
"None",
",",
"dest_mapping",
"=",
"None",
",",
"namespace_options",
"=",
"None",
",",
"include_fields",
"=",
"None",
",",
"exclude... | Merges namespaces options together.
The first is the set of excluded namespaces and the second is a mapping
from source namespace to Namespace instances. | [
"Merges",
"namespaces",
"options",
"together",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/namespace_config.py#L424-L500 |
226,671 | yougov/mongo-connector | mongo_connector/namespace_config.py | match_replace_regex | def match_replace_regex(regex, src_namespace, dest_namespace):
"""Return the new mapped namespace if the src_namespace matches the
regex."""
match = regex.match(src_namespace)
if match:
return dest_namespace.replace("*", match.group(1))
return None | python | def match_replace_regex(regex, src_namespace, dest_namespace):
match = regex.match(src_namespace)
if match:
return dest_namespace.replace("*", match.group(1))
return None | [
"def",
"match_replace_regex",
"(",
"regex",
",",
"src_namespace",
",",
"dest_namespace",
")",
":",
"match",
"=",
"regex",
".",
"match",
"(",
"src_namespace",
")",
"if",
"match",
":",
"return",
"dest_namespace",
".",
"replace",
"(",
"\"*\"",
",",
"match",
"."... | Return the new mapped namespace if the src_namespace matches the
regex. | [
"Return",
"the",
"new",
"mapped",
"namespace",
"if",
"the",
"src_namespace",
"matches",
"the",
"regex",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/namespace_config.py#L546-L552 |
226,672 | yougov/mongo-connector | mongo_connector/namespace_config.py | namespace_to_regex | def namespace_to_regex(namespace):
"""Create a RegexObject from a wildcard namespace."""
db_name, coll_name = namespace.split(".", 1)
# A database name cannot contain a '.' character
db_regex = re.escape(db_name).replace(r"\*", "([^.]*)")
# But a collection name can.
coll_regex = re.escape(coll_name).replace(r"\*", "(.*)")
return re.compile(r"\A" + db_regex + r"\." + coll_regex + r"\Z") | python | def namespace_to_regex(namespace):
db_name, coll_name = namespace.split(".", 1)
# A database name cannot contain a '.' character
db_regex = re.escape(db_name).replace(r"\*", "([^.]*)")
# But a collection name can.
coll_regex = re.escape(coll_name).replace(r"\*", "(.*)")
return re.compile(r"\A" + db_regex + r"\." + coll_regex + r"\Z") | [
"def",
"namespace_to_regex",
"(",
"namespace",
")",
":",
"db_name",
",",
"coll_name",
"=",
"namespace",
".",
"split",
"(",
"\".\"",
",",
"1",
")",
"# A database name cannot contain a '.' character",
"db_regex",
"=",
"re",
".",
"escape",
"(",
"db_name",
")",
".",... | Create a RegexObject from a wildcard namespace. | [
"Create",
"a",
"RegexObject",
"from",
"a",
"wildcard",
"namespace",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/namespace_config.py#L560-L567 |
226,673 | yougov/mongo-connector | mongo_connector/namespace_config.py | NamespaceConfig._register_namespace_and_command | def _register_namespace_and_command(self, namespace):
"""Add a Namespace and the corresponding command namespace."""
self._add_namespace(namespace)
# Add the namespace for commands on this database
cmd_name = namespace.source_name.split(".", 1)[0] + ".$cmd"
dest_cmd_name = namespace.dest_name.split(".", 1)[0] + ".$cmd"
self._add_namespace(Namespace(dest_name=dest_cmd_name, source_name=cmd_name)) | python | def _register_namespace_and_command(self, namespace):
self._add_namespace(namespace)
# Add the namespace for commands on this database
cmd_name = namespace.source_name.split(".", 1)[0] + ".$cmd"
dest_cmd_name = namespace.dest_name.split(".", 1)[0] + ".$cmd"
self._add_namespace(Namespace(dest_name=dest_cmd_name, source_name=cmd_name)) | [
"def",
"_register_namespace_and_command",
"(",
"self",
",",
"namespace",
")",
":",
"self",
".",
"_add_namespace",
"(",
"namespace",
")",
"# Add the namespace for commands on this database",
"cmd_name",
"=",
"namespace",
".",
"source_name",
".",
"split",
"(",
"\".\"",
... | Add a Namespace and the corresponding command namespace. | [
"Add",
"a",
"Namespace",
"and",
"the",
"corresponding",
"command",
"namespace",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/namespace_config.py#L169-L175 |
226,674 | yougov/mongo-connector | mongo_connector/namespace_config.py | NamespaceConfig._add_namespace | def _add_namespace(self, namespace):
"""Add an included and possibly renamed Namespace."""
src_name = namespace.source_name
if "*" in src_name:
self._regex_map.append((namespace_to_regex(src_name), namespace))
else:
self._add_plain_namespace(namespace) | python | def _add_namespace(self, namespace):
src_name = namespace.source_name
if "*" in src_name:
self._regex_map.append((namespace_to_regex(src_name), namespace))
else:
self._add_plain_namespace(namespace) | [
"def",
"_add_namespace",
"(",
"self",
",",
"namespace",
")",
":",
"src_name",
"=",
"namespace",
".",
"source_name",
"if",
"\"*\"",
"in",
"src_name",
":",
"self",
".",
"_regex_map",
".",
"append",
"(",
"(",
"namespace_to_regex",
"(",
"src_name",
")",
",",
"... | Add an included and possibly renamed Namespace. | [
"Add",
"an",
"included",
"and",
"possibly",
"renamed",
"Namespace",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/namespace_config.py#L177-L183 |
226,675 | yougov/mongo-connector | mongo_connector/namespace_config.py | NamespaceConfig._add_plain_namespace | def _add_plain_namespace(self, namespace):
"""Add an included and possibly renamed non-wildcard Namespace."""
src_name = namespace.source_name
target_name = namespace.dest_name
src_names = self._reverse_plain.setdefault(target_name, set())
src_names.add(src_name)
if len(src_names) > 1:
# Another source namespace is already mapped to this target
existing_src = (src_names - set([src_name])).pop()
raise errors.InvalidConfiguration(
"Multiple namespaces cannot be combined into one target "
"namespace. Trying to map '%s' to '%s' but there already "
"exists a mapping from '%s' to '%s'"
% (src_name, target_name, existing_src, target_name)
)
self._plain[src_name] = namespace
src_db, _ = src_name.split(".", 1)
target_db, _ = target_name.split(".", 1)
self._plain_db.setdefault(src_db, set()).add(target_db) | python | def _add_plain_namespace(self, namespace):
src_name = namespace.source_name
target_name = namespace.dest_name
src_names = self._reverse_plain.setdefault(target_name, set())
src_names.add(src_name)
if len(src_names) > 1:
# Another source namespace is already mapped to this target
existing_src = (src_names - set([src_name])).pop()
raise errors.InvalidConfiguration(
"Multiple namespaces cannot be combined into one target "
"namespace. Trying to map '%s' to '%s' but there already "
"exists a mapping from '%s' to '%s'"
% (src_name, target_name, existing_src, target_name)
)
self._plain[src_name] = namespace
src_db, _ = src_name.split(".", 1)
target_db, _ = target_name.split(".", 1)
self._plain_db.setdefault(src_db, set()).add(target_db) | [
"def",
"_add_plain_namespace",
"(",
"self",
",",
"namespace",
")",
":",
"src_name",
"=",
"namespace",
".",
"source_name",
"target_name",
"=",
"namespace",
".",
"dest_name",
"src_names",
"=",
"self",
".",
"_reverse_plain",
".",
"setdefault",
"(",
"target_name",
"... | Add an included and possibly renamed non-wildcard Namespace. | [
"Add",
"an",
"included",
"and",
"possibly",
"renamed",
"non",
"-",
"wildcard",
"Namespace",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/namespace_config.py#L185-L204 |
226,676 | yougov/mongo-connector | mongo_connector/namespace_config.py | NamespaceConfig.lookup | def lookup(self, plain_src_ns):
"""Given a plain source namespace, return the corresponding Namespace
object, or None if it is not included.
"""
# Ignore the namespace if it is excluded.
if plain_src_ns in self._ex_namespace_set:
return None
# Include all namespaces if there are no included namespaces.
if not self._regex_map and not self._plain:
return Namespace(
dest_name=plain_src_ns,
source_name=plain_src_ns,
include_fields=self._include_fields,
exclude_fields=self._exclude_fields,
)
# First, search for the namespace in the plain namespaces.
try:
return self._plain[plain_src_ns]
except KeyError:
# Search for the namespace in the wildcard namespaces.
for regex, namespace in self._regex_map:
new_name = match_replace_regex(regex, plain_src_ns, namespace.dest_name)
if not new_name:
continue
# Save the new target Namespace in the plain namespaces so
# future lookups are fast.
new_namespace = namespace.with_options(
dest_name=new_name, source_name=plain_src_ns
)
self._add_plain_namespace(new_namespace)
return new_namespace
# Save the not included namespace to the excluded namespaces so
# that future lookups of the same namespace are fast.
self._ex_namespace_set.add(plain_src_ns)
return None | python | def lookup(self, plain_src_ns):
# Ignore the namespace if it is excluded.
if plain_src_ns in self._ex_namespace_set:
return None
# Include all namespaces if there are no included namespaces.
if not self._regex_map and not self._plain:
return Namespace(
dest_name=plain_src_ns,
source_name=plain_src_ns,
include_fields=self._include_fields,
exclude_fields=self._exclude_fields,
)
# First, search for the namespace in the plain namespaces.
try:
return self._plain[plain_src_ns]
except KeyError:
# Search for the namespace in the wildcard namespaces.
for regex, namespace in self._regex_map:
new_name = match_replace_regex(regex, plain_src_ns, namespace.dest_name)
if not new_name:
continue
# Save the new target Namespace in the plain namespaces so
# future lookups are fast.
new_namespace = namespace.with_options(
dest_name=new_name, source_name=plain_src_ns
)
self._add_plain_namespace(new_namespace)
return new_namespace
# Save the not included namespace to the excluded namespaces so
# that future lookups of the same namespace are fast.
self._ex_namespace_set.add(plain_src_ns)
return None | [
"def",
"lookup",
"(",
"self",
",",
"plain_src_ns",
")",
":",
"# Ignore the namespace if it is excluded.",
"if",
"plain_src_ns",
"in",
"self",
".",
"_ex_namespace_set",
":",
"return",
"None",
"# Include all namespaces if there are no included namespaces.",
"if",
"not",
"self... | Given a plain source namespace, return the corresponding Namespace
object, or None if it is not included. | [
"Given",
"a",
"plain",
"source",
"namespace",
"return",
"the",
"corresponding",
"Namespace",
"object",
"or",
"None",
"if",
"it",
"is",
"not",
"included",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/namespace_config.py#L206-L241 |
226,677 | yougov/mongo-connector | mongo_connector/namespace_config.py | NamespaceConfig.map_namespace | def map_namespace(self, plain_src_ns):
"""Given a plain source namespace, return the corresponding plain
target namespace, or None if it is not included.
"""
namespace = self.lookup(plain_src_ns)
if namespace:
return namespace.dest_name
return None | python | def map_namespace(self, plain_src_ns):
namespace = self.lookup(plain_src_ns)
if namespace:
return namespace.dest_name
return None | [
"def",
"map_namespace",
"(",
"self",
",",
"plain_src_ns",
")",
":",
"namespace",
"=",
"self",
".",
"lookup",
"(",
"plain_src_ns",
")",
"if",
"namespace",
":",
"return",
"namespace",
".",
"dest_name",
"return",
"None"
] | Given a plain source namespace, return the corresponding plain
target namespace, or None if it is not included. | [
"Given",
"a",
"plain",
"source",
"namespace",
"return",
"the",
"corresponding",
"plain",
"target",
"namespace",
"or",
"None",
"if",
"it",
"is",
"not",
"included",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/namespace_config.py#L243-L250 |
226,678 | yougov/mongo-connector | mongo_connector/namespace_config.py | NamespaceConfig.gridfs_namespace | def gridfs_namespace(self, plain_src_ns):
"""Given a plain source namespace, return the corresponding plain
target namespace if this namespace is a gridfs collection.
"""
namespace = self.lookup(plain_src_ns)
if namespace and namespace.gridfs:
return namespace.dest_name
return None | python | def gridfs_namespace(self, plain_src_ns):
namespace = self.lookup(plain_src_ns)
if namespace and namespace.gridfs:
return namespace.dest_name
return None | [
"def",
"gridfs_namespace",
"(",
"self",
",",
"plain_src_ns",
")",
":",
"namespace",
"=",
"self",
".",
"lookup",
"(",
"plain_src_ns",
")",
"if",
"namespace",
"and",
"namespace",
".",
"gridfs",
":",
"return",
"namespace",
".",
"dest_name",
"return",
"None"
] | Given a plain source namespace, return the corresponding plain
target namespace if this namespace is a gridfs collection. | [
"Given",
"a",
"plain",
"source",
"namespace",
"return",
"the",
"corresponding",
"plain",
"target",
"namespace",
"if",
"this",
"namespace",
"is",
"a",
"gridfs",
"collection",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/namespace_config.py#L252-L259 |
226,679 | yougov/mongo-connector | mongo_connector/namespace_config.py | NamespaceConfig.unmap_namespace | def unmap_namespace(self, plain_target_ns):
"""Given a plain target namespace, return the corresponding source
namespace.
"""
# Return the same namespace if there are no included namespaces.
if not self._regex_map and not self._plain:
return plain_target_ns
src_name_set = self._reverse_plain.get(plain_target_ns)
if src_name_set:
# Return the first (and only) item in the set
for src_name in src_name_set:
return src_name
# The target namespace could also exist in the wildcard namespaces
for _, namespace in self._regex_map:
original_name = match_replace_regex(
namespace_to_regex(namespace.dest_name),
plain_target_ns,
namespace.source_name,
)
if original_name:
return original_name
return None | python | def unmap_namespace(self, plain_target_ns):
# Return the same namespace if there are no included namespaces.
if not self._regex_map and not self._plain:
return plain_target_ns
src_name_set = self._reverse_plain.get(plain_target_ns)
if src_name_set:
# Return the first (and only) item in the set
for src_name in src_name_set:
return src_name
# The target namespace could also exist in the wildcard namespaces
for _, namespace in self._regex_map:
original_name = match_replace_regex(
namespace_to_regex(namespace.dest_name),
plain_target_ns,
namespace.source_name,
)
if original_name:
return original_name
return None | [
"def",
"unmap_namespace",
"(",
"self",
",",
"plain_target_ns",
")",
":",
"# Return the same namespace if there are no included namespaces.",
"if",
"not",
"self",
".",
"_regex_map",
"and",
"not",
"self",
".",
"_plain",
":",
"return",
"plain_target_ns",
"src_name_set",
"=... | Given a plain target namespace, return the corresponding source
namespace. | [
"Given",
"a",
"plain",
"target",
"namespace",
"return",
"the",
"corresponding",
"source",
"namespace",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/namespace_config.py#L261-L283 |
226,680 | yougov/mongo-connector | mongo_connector/namespace_config.py | NamespaceConfig.map_db | def map_db(self, plain_src_db):
"""Given a plain source database, return the list of target databases.
Individual collections in a database can be mapped to different
target databases, so map_db can return multiple databases. This
function must return all target database names so we make the
following restrictions on wildcards:
1) A wildcard appearing in the source database name must also appear
in the target database name, eg "db*.col" => "new_db_*.new_col".
2) A wildcard appearing in the source collection name must also appear
in the target collection name, eg "db.col*" => "new_db.new_col*".
This is used by the CommandHelper for the dropDatabase command.
"""
if not self._regex_map and not self._plain:
return [plain_src_db]
# Lookup this namespace to seed the plain_db dictionary
self.lookup(plain_src_db + ".$cmd")
return list(self._plain_db.get(plain_src_db, set())) | python | def map_db(self, plain_src_db):
if not self._regex_map and not self._plain:
return [plain_src_db]
# Lookup this namespace to seed the plain_db dictionary
self.lookup(plain_src_db + ".$cmd")
return list(self._plain_db.get(plain_src_db, set())) | [
"def",
"map_db",
"(",
"self",
",",
"plain_src_db",
")",
":",
"if",
"not",
"self",
".",
"_regex_map",
"and",
"not",
"self",
".",
"_plain",
":",
"return",
"[",
"plain_src_db",
"]",
"# Lookup this namespace to seed the plain_db dictionary",
"self",
".",
"lookup",
"... | Given a plain source database, return the list of target databases.
Individual collections in a database can be mapped to different
target databases, so map_db can return multiple databases. This
function must return all target database names so we make the
following restrictions on wildcards:
1) A wildcard appearing in the source database name must also appear
in the target database name, eg "db*.col" => "new_db_*.new_col".
2) A wildcard appearing in the source collection name must also appear
in the target collection name, eg "db.col*" => "new_db.new_col*".
This is used by the CommandHelper for the dropDatabase command. | [
"Given",
"a",
"plain",
"source",
"database",
"return",
"the",
"list",
"of",
"target",
"databases",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/namespace_config.py#L285-L303 |
226,681 | yougov/mongo-connector | mongo_connector/namespace_config.py | NamespaceConfig.projection | def projection(self, plain_src_name):
"""Return the projection for the given source namespace."""
mapped = self.lookup(plain_src_name)
if not mapped:
return None
fields = mapped.include_fields or mapped.exclude_fields
if fields:
include = 1 if mapped.include_fields else 0
return dict((field, include) for field in fields)
return None | python | def projection(self, plain_src_name):
mapped = self.lookup(plain_src_name)
if not mapped:
return None
fields = mapped.include_fields or mapped.exclude_fields
if fields:
include = 1 if mapped.include_fields else 0
return dict((field, include) for field in fields)
return None | [
"def",
"projection",
"(",
"self",
",",
"plain_src_name",
")",
":",
"mapped",
"=",
"self",
".",
"lookup",
"(",
"plain_src_name",
")",
"if",
"not",
"mapped",
":",
"return",
"None",
"fields",
"=",
"mapped",
".",
"include_fields",
"or",
"mapped",
".",
"exclude... | Return the projection for the given source namespace. | [
"Return",
"the",
"projection",
"for",
"the",
"given",
"source",
"namespace",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/namespace_config.py#L305-L314 |
226,682 | yougov/mongo-connector | mongo_connector/namespace_config.py | NamespaceConfig.get_included_databases | def get_included_databases(self):
"""Return the databases we want to include, or empty list for all.
"""
databases = set()
databases.update(self._plain_db.keys())
for _, namespace in self._regex_map:
database_name, _ = namespace.source_name.split(".", 1)
if "*" in database_name:
return []
databases.add(database_name)
return list(databases) | python | def get_included_databases(self):
databases = set()
databases.update(self._plain_db.keys())
for _, namespace in self._regex_map:
database_name, _ = namespace.source_name.split(".", 1)
if "*" in database_name:
return []
databases.add(database_name)
return list(databases) | [
"def",
"get_included_databases",
"(",
"self",
")",
":",
"databases",
"=",
"set",
"(",
")",
"databases",
".",
"update",
"(",
"self",
".",
"_plain_db",
".",
"keys",
"(",
")",
")",
"for",
"_",
",",
"namespace",
"in",
"self",
".",
"_regex_map",
":",
"datab... | Return the databases we want to include, or empty list for all. | [
"Return",
"the",
"databases",
"we",
"want",
"to",
"include",
"or",
"empty",
"list",
"for",
"all",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/namespace_config.py#L316-L328 |
226,683 | yougov/mongo-connector | mongo_connector/doc_managers/mongo_doc_manager.py | DocManager._meta_collections | def _meta_collections(self):
"""Provides the meta collections currently being used
"""
if self.use_single_meta_collection:
yield self.meta_collection_name
else:
for name in self.meta_database.collection_names(
include_system_collections=False
):
yield name | python | def _meta_collections(self):
if self.use_single_meta_collection:
yield self.meta_collection_name
else:
for name in self.meta_database.collection_names(
include_system_collections=False
):
yield name | [
"def",
"_meta_collections",
"(",
"self",
")",
":",
"if",
"self",
".",
"use_single_meta_collection",
":",
"yield",
"self",
".",
"meta_collection_name",
"else",
":",
"for",
"name",
"in",
"self",
".",
"meta_database",
".",
"collection_names",
"(",
"include_system_col... | Provides the meta collections currently being used | [
"Provides",
"the",
"meta",
"collections",
"currently",
"being",
"used"
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/doc_managers/mongo_doc_manager.py#L115-L124 |
226,684 | yougov/mongo-connector | mongo_connector/doc_managers/mongo_doc_manager.py | DocManager.upsert | def upsert(self, doc, namespace, timestamp):
"""Update or insert a document into Mongo
"""
database, coll = self._db_and_collection(namespace)
meta_collection_name = self._get_meta_collection(namespace)
self.meta_database[meta_collection_name].replace_one(
{self.id_field: doc["_id"], "ns": namespace},
{self.id_field: doc["_id"], "_ts": timestamp, "ns": namespace},
upsert=True,
)
self.mongo[database][coll].replace_one({"_id": doc["_id"]}, doc, upsert=True) | python | def upsert(self, doc, namespace, timestamp):
database, coll = self._db_and_collection(namespace)
meta_collection_name = self._get_meta_collection(namespace)
self.meta_database[meta_collection_name].replace_one(
{self.id_field: doc["_id"], "ns": namespace},
{self.id_field: doc["_id"], "_ts": timestamp, "ns": namespace},
upsert=True,
)
self.mongo[database][coll].replace_one({"_id": doc["_id"]}, doc, upsert=True) | [
"def",
"upsert",
"(",
"self",
",",
"doc",
",",
"namespace",
",",
"timestamp",
")",
":",
"database",
",",
"coll",
"=",
"self",
".",
"_db_and_collection",
"(",
"namespace",
")",
"meta_collection_name",
"=",
"self",
".",
"_get_meta_collection",
"(",
"namespace",
... | Update or insert a document into Mongo | [
"Update",
"or",
"insert",
"a",
"document",
"into",
"Mongo"
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/doc_managers/mongo_doc_manager.py#L190-L203 |
226,685 | yougov/mongo-connector | mongo_connector/doc_managers/mongo_doc_manager.py | DocManager.remove | def remove(self, document_id, namespace, timestamp):
"""Removes document from Mongo
The input is a python dictionary that represents a mongo document.
The documents has ns and _ts fields.
"""
database, coll = self._db_and_collection(namespace)
meta_collection = self._get_meta_collection(namespace)
doc2 = self.meta_database[meta_collection].find_one_and_delete(
{self.id_field: document_id}
)
if doc2 and doc2.get("gridfs_id"):
GridFS(self.mongo[database], coll).delete(doc2["gridfs_id"])
else:
self.mongo[database][coll].delete_one({"_id": document_id}) | python | def remove(self, document_id, namespace, timestamp):
database, coll = self._db_and_collection(namespace)
meta_collection = self._get_meta_collection(namespace)
doc2 = self.meta_database[meta_collection].find_one_and_delete(
{self.id_field: document_id}
)
if doc2 and doc2.get("gridfs_id"):
GridFS(self.mongo[database], coll).delete(doc2["gridfs_id"])
else:
self.mongo[database][coll].delete_one({"_id": document_id}) | [
"def",
"remove",
"(",
"self",
",",
"document_id",
",",
"namespace",
",",
"timestamp",
")",
":",
"database",
",",
"coll",
"=",
"self",
".",
"_db_and_collection",
"(",
"namespace",
")",
"meta_collection",
"=",
"self",
".",
"_get_meta_collection",
"(",
"namespace... | Removes document from Mongo
The input is a python dictionary that represents a mongo document.
The documents has ns and _ts fields. | [
"Removes",
"document",
"from",
"Mongo"
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/doc_managers/mongo_doc_manager.py#L248-L264 |
226,686 | yougov/mongo-connector | mongo_connector/doc_managers/mongo_doc_manager.py | DocManager.search | def search(self, start_ts, end_ts):
"""Called to query Mongo for documents in a time range.
"""
for meta_collection_name in self._meta_collections():
meta_coll = self.meta_database[meta_collection_name]
for ts_ns_doc in meta_coll.find(
{"_ts": {"$lte": end_ts, "$gte": start_ts}}
):
yield ts_ns_doc | python | def search(self, start_ts, end_ts):
for meta_collection_name in self._meta_collections():
meta_coll = self.meta_database[meta_collection_name]
for ts_ns_doc in meta_coll.find(
{"_ts": {"$lte": end_ts, "$gte": start_ts}}
):
yield ts_ns_doc | [
"def",
"search",
"(",
"self",
",",
"start_ts",
",",
"end_ts",
")",
":",
"for",
"meta_collection_name",
"in",
"self",
".",
"_meta_collections",
"(",
")",
":",
"meta_coll",
"=",
"self",
".",
"meta_database",
"[",
"meta_collection_name",
"]",
"for",
"ts_ns_doc",
... | Called to query Mongo for documents in a time range. | [
"Called",
"to",
"query",
"Mongo",
"for",
"documents",
"in",
"a",
"time",
"range",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/doc_managers/mongo_doc_manager.py#L281-L289 |
226,687 | yougov/mongo-connector | mongo_connector/doc_managers/mongo_doc_manager.py | DocManager.get_last_doc | def get_last_doc(self):
"""Returns the last document stored in Mongo.
"""
def docs_by_ts():
for meta_collection_name in self._meta_collections():
meta_coll = self.meta_database[meta_collection_name]
for ts_ns_doc in meta_coll.find(limit=-1).sort("_ts", -1):
yield ts_ns_doc
return max(docs_by_ts(), key=lambda x: x["_ts"]) | python | def get_last_doc(self):
def docs_by_ts():
for meta_collection_name in self._meta_collections():
meta_coll = self.meta_database[meta_collection_name]
for ts_ns_doc in meta_coll.find(limit=-1).sort("_ts", -1):
yield ts_ns_doc
return max(docs_by_ts(), key=lambda x: x["_ts"]) | [
"def",
"get_last_doc",
"(",
"self",
")",
":",
"def",
"docs_by_ts",
"(",
")",
":",
"for",
"meta_collection_name",
"in",
"self",
".",
"_meta_collections",
"(",
")",
":",
"meta_coll",
"=",
"self",
".",
"meta_database",
"[",
"meta_collection_name",
"]",
"for",
"... | Returns the last document stored in Mongo. | [
"Returns",
"the",
"last",
"document",
"stored",
"in",
"Mongo",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/doc_managers/mongo_doc_manager.py#L297-L307 |
226,688 | yougov/mongo-connector | mongo_connector/doc_managers/doc_manager_simulator.py | DocManager.upsert | def upsert(self, doc, namespace, timestamp):
"""Adds a document to the doc dict.
"""
# Allow exceptions to be triggered (for testing purposes)
if doc.get("_upsert_exception"):
raise Exception("upsert exception")
doc_id = doc["_id"]
self.doc_dict[doc_id] = Entry(doc=doc, ns=namespace, ts=timestamp) | python | def upsert(self, doc, namespace, timestamp):
# Allow exceptions to be triggered (for testing purposes)
if doc.get("_upsert_exception"):
raise Exception("upsert exception")
doc_id = doc["_id"]
self.doc_dict[doc_id] = Entry(doc=doc, ns=namespace, ts=timestamp) | [
"def",
"upsert",
"(",
"self",
",",
"doc",
",",
"namespace",
",",
"timestamp",
")",
":",
"# Allow exceptions to be triggered (for testing purposes)",
"if",
"doc",
".",
"get",
"(",
"\"_upsert_exception\"",
")",
":",
"raise",
"Exception",
"(",
"\"upsert exception\"",
"... | Adds a document to the doc dict. | [
"Adds",
"a",
"document",
"to",
"the",
"doc",
"dict",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/doc_managers/doc_manager_simulator.py#L128-L137 |
226,689 | yougov/mongo-connector | mongo_connector/doc_managers/doc_manager_simulator.py | DocManager.insert_file | def insert_file(self, f, namespace, timestamp):
"""Inserts a file to the doc dict.
"""
doc = f.get_metadata()
doc["content"] = f.read()
self.doc_dict[f._id] = Entry(doc=doc, ns=namespace, ts=timestamp) | python | def insert_file(self, f, namespace, timestamp):
doc = f.get_metadata()
doc["content"] = f.read()
self.doc_dict[f._id] = Entry(doc=doc, ns=namespace, ts=timestamp) | [
"def",
"insert_file",
"(",
"self",
",",
"f",
",",
"namespace",
",",
"timestamp",
")",
":",
"doc",
"=",
"f",
".",
"get_metadata",
"(",
")",
"doc",
"[",
"\"content\"",
"]",
"=",
"f",
".",
"read",
"(",
")",
"self",
".",
"doc_dict",
"[",
"f",
".",
"_... | Inserts a file to the doc dict. | [
"Inserts",
"a",
"file",
"to",
"the",
"doc",
"dict",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/doc_managers/doc_manager_simulator.py#L139-L144 |
226,690 | yougov/mongo-connector | mongo_connector/doc_managers/doc_manager_simulator.py | DocManager.remove | def remove(self, document_id, namespace, timestamp):
"""Removes the document from the doc dict.
"""
try:
entry = self.doc_dict[document_id]
entry.doc = None
entry.update(namespace, timestamp)
except KeyError:
raise OperationFailed("Document does not exist: %s" % document_id) | python | def remove(self, document_id, namespace, timestamp):
try:
entry = self.doc_dict[document_id]
entry.doc = None
entry.update(namespace, timestamp)
except KeyError:
raise OperationFailed("Document does not exist: %s" % document_id) | [
"def",
"remove",
"(",
"self",
",",
"document_id",
",",
"namespace",
",",
"timestamp",
")",
":",
"try",
":",
"entry",
"=",
"self",
".",
"doc_dict",
"[",
"document_id",
"]",
"entry",
".",
"doc",
"=",
"None",
"entry",
".",
"update",
"(",
"namespace",
",",... | Removes the document from the doc dict. | [
"Removes",
"the",
"document",
"from",
"the",
"doc",
"dict",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/doc_managers/doc_manager_simulator.py#L146-L154 |
226,691 | yougov/mongo-connector | mongo_connector/doc_managers/doc_manager_simulator.py | DocManager.search | def search(self, start_ts, end_ts):
"""Searches through all documents and finds all documents that were
modified or deleted within the range.
Since we have very few documents in the doc dict when this is called,
linear search is fine. This method is only used by rollbacks to query
all the documents in the target engine within a certain timestamp
window. The input will be two longs (converted from Bson timestamp)
which specify the time range. The start_ts refers to the timestamp
of the last oplog entry after a rollback. The end_ts is the timestamp
of the last document committed to the backend.
"""
for _id in self.doc_dict:
entry = self.doc_dict[_id]
if entry.ts <= end_ts or entry.ts >= start_ts:
yield entry.meta_dict | python | def search(self, start_ts, end_ts):
for _id in self.doc_dict:
entry = self.doc_dict[_id]
if entry.ts <= end_ts or entry.ts >= start_ts:
yield entry.meta_dict | [
"def",
"search",
"(",
"self",
",",
"start_ts",
",",
"end_ts",
")",
":",
"for",
"_id",
"in",
"self",
".",
"doc_dict",
":",
"entry",
"=",
"self",
".",
"doc_dict",
"[",
"_id",
"]",
"if",
"entry",
".",
"ts",
"<=",
"end_ts",
"or",
"entry",
".",
"ts",
... | Searches through all documents and finds all documents that were
modified or deleted within the range.
Since we have very few documents in the doc dict when this is called,
linear search is fine. This method is only used by rollbacks to query
all the documents in the target engine within a certain timestamp
window. The input will be two longs (converted from Bson timestamp)
which specify the time range. The start_ts refers to the timestamp
of the last oplog entry after a rollback. The end_ts is the timestamp
of the last document committed to the backend. | [
"Searches",
"through",
"all",
"documents",
"and",
"finds",
"all",
"documents",
"that",
"were",
"modified",
"or",
"deleted",
"within",
"the",
"range",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/doc_managers/doc_manager_simulator.py#L156-L171 |
226,692 | yougov/mongo-connector | mongo_connector/doc_managers/doc_manager_simulator.py | DocManager.get_last_doc | def get_last_doc(self):
"""Searches through the doc dict to find the document that was
modified or deleted most recently."""
return max(self.doc_dict.values(), key=lambda x: x.ts).meta_dict | python | def get_last_doc(self):
return max(self.doc_dict.values(), key=lambda x: x.ts).meta_dict | [
"def",
"get_last_doc",
"(",
"self",
")",
":",
"return",
"max",
"(",
"self",
".",
"doc_dict",
".",
"values",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"ts",
")",
".",
"meta_dict"
] | Searches through the doc dict to find the document that was
modified or deleted most recently. | [
"Searches",
"through",
"the",
"doc",
"dict",
"to",
"find",
"the",
"document",
"that",
"was",
"modified",
"or",
"deleted",
"most",
"recently",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/doc_managers/doc_manager_simulator.py#L178-L181 |
226,693 | yougov/mongo-connector | mongo_connector/doc_managers/doc_manager_simulator.py | DocManager._search | def _search(self):
"""Returns all documents in the doc dict.
This function is not a part of the DocManager API, and is only used
to simulate searching all documents from a backend.
"""
results = []
for _id in self.doc_dict:
entry = self.doc_dict[_id]
if entry.doc is not None:
results.append(entry.merged_dict)
return results | python | def _search(self):
results = []
for _id in self.doc_dict:
entry = self.doc_dict[_id]
if entry.doc is not None:
results.append(entry.merged_dict)
return results | [
"def",
"_search",
"(",
"self",
")",
":",
"results",
"=",
"[",
"]",
"for",
"_id",
"in",
"self",
".",
"doc_dict",
":",
"entry",
"=",
"self",
".",
"doc_dict",
"[",
"_id",
"]",
"if",
"entry",
".",
"doc",
"is",
"not",
"None",
":",
"results",
".",
"app... | Returns all documents in the doc dict.
This function is not a part of the DocManager API, and is only used
to simulate searching all documents from a backend. | [
"Returns",
"all",
"documents",
"in",
"the",
"doc",
"dict",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/doc_managers/doc_manager_simulator.py#L186-L197 |
226,694 | yougov/mongo-connector | mongo_connector/oplog_manager.py | OplogThread._should_skip_entry | def _should_skip_entry(self, entry):
"""Determine if this oplog entry should be skipped.
This has the possible side effect of modifying the entry's namespace
and filtering fields from updates and inserts.
"""
# Don't replicate entries resulting from chunk moves
if entry.get("fromMigrate"):
return True, False
# Ignore no-ops
if entry["op"] == "n":
return True, False
ns = entry["ns"]
if "." not in ns:
return True, False
coll = ns.split(".", 1)[1]
# Ignore system collections
if coll.startswith("system."):
return True, False
# Ignore GridFS chunks
if coll.endswith(".chunks"):
return True, False
is_gridfs_file = False
if coll.endswith(".files"):
ns = ns[: -len(".files")]
if self.namespace_config.gridfs_namespace(ns):
is_gridfs_file = True
else:
return True, False
# Commands should not be ignored, filtered, or renamed. Renaming is
# handled by the DocManagers via the CommandHelper class.
if coll == "$cmd":
return False, False
# Rename or filter out namespaces that are ignored keeping
# included gridfs namespaces.
namespace = self.namespace_config.lookup(ns)
if namespace is None:
LOG.debug(
"OplogThread: Skipping oplog entry: "
"'%s' is not in the namespace configuration." % (ns,)
)
return True, False
# Update the namespace.
entry["ns"] = namespace.dest_name
# Take fields out of the oplog entry that shouldn't be replicated.
# This may nullify the document if there's nothing to do.
if not self.filter_oplog_entry(
entry,
include_fields=namespace.include_fields,
exclude_fields=namespace.exclude_fields,
):
return True, False
return False, is_gridfs_file | python | def _should_skip_entry(self, entry):
# Don't replicate entries resulting from chunk moves
if entry.get("fromMigrate"):
return True, False
# Ignore no-ops
if entry["op"] == "n":
return True, False
ns = entry["ns"]
if "." not in ns:
return True, False
coll = ns.split(".", 1)[1]
# Ignore system collections
if coll.startswith("system."):
return True, False
# Ignore GridFS chunks
if coll.endswith(".chunks"):
return True, False
is_gridfs_file = False
if coll.endswith(".files"):
ns = ns[: -len(".files")]
if self.namespace_config.gridfs_namespace(ns):
is_gridfs_file = True
else:
return True, False
# Commands should not be ignored, filtered, or renamed. Renaming is
# handled by the DocManagers via the CommandHelper class.
if coll == "$cmd":
return False, False
# Rename or filter out namespaces that are ignored keeping
# included gridfs namespaces.
namespace = self.namespace_config.lookup(ns)
if namespace is None:
LOG.debug(
"OplogThread: Skipping oplog entry: "
"'%s' is not in the namespace configuration." % (ns,)
)
return True, False
# Update the namespace.
entry["ns"] = namespace.dest_name
# Take fields out of the oplog entry that shouldn't be replicated.
# This may nullify the document if there's nothing to do.
if not self.filter_oplog_entry(
entry,
include_fields=namespace.include_fields,
exclude_fields=namespace.exclude_fields,
):
return True, False
return False, is_gridfs_file | [
"def",
"_should_skip_entry",
"(",
"self",
",",
"entry",
")",
":",
"# Don't replicate entries resulting from chunk moves",
"if",
"entry",
".",
"get",
"(",
"\"fromMigrate\"",
")",
":",
"return",
"True",
",",
"False",
"# Ignore no-ops",
"if",
"entry",
"[",
"\"op\"",
... | Determine if this oplog entry should be skipped.
This has the possible side effect of modifying the entry's namespace
and filtering fields from updates and inserts. | [
"Determine",
"if",
"this",
"oplog",
"entry",
"should",
"be",
"skipped",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/oplog_manager.py#L146-L207 |
226,695 | yougov/mongo-connector | mongo_connector/oplog_manager.py | OplogThread.join | def join(self):
"""Stop this thread from managing the oplog.
"""
LOG.debug("OplogThread: exiting due to join call.")
self.running = False
threading.Thread.join(self) | python | def join(self):
LOG.debug("OplogThread: exiting due to join call.")
self.running = False
threading.Thread.join(self) | [
"def",
"join",
"(",
"self",
")",
":",
"LOG",
".",
"debug",
"(",
"\"OplogThread: exiting due to join call.\"",
")",
"self",
".",
"running",
"=",
"False",
"threading",
".",
"Thread",
".",
"join",
"(",
"self",
")"
] | Stop this thread from managing the oplog. | [
"Stop",
"this",
"thread",
"from",
"managing",
"the",
"oplog",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/oplog_manager.py#L368-L373 |
226,696 | yougov/mongo-connector | mongo_connector/oplog_manager.py | OplogThread._find_field | def _find_field(cls, field, doc):
"""Find the field in the document which matches the given field.
The field may be in dot notation, eg "a.b.c". Returns a list with
a single tuple (path, field_value) or the empty list if the field
is not present.
"""
path = field.split(".")
try:
for key in path:
doc = doc[key]
return [(path, doc)]
except (KeyError, TypeError):
return [] | python | def _find_field(cls, field, doc):
path = field.split(".")
try:
for key in path:
doc = doc[key]
return [(path, doc)]
except (KeyError, TypeError):
return [] | [
"def",
"_find_field",
"(",
"cls",
",",
"field",
",",
"doc",
")",
":",
"path",
"=",
"field",
".",
"split",
"(",
"\".\"",
")",
"try",
":",
"for",
"key",
"in",
"path",
":",
"doc",
"=",
"doc",
"[",
"key",
"]",
"return",
"[",
"(",
"path",
",",
"doc"... | Find the field in the document which matches the given field.
The field may be in dot notation, eg "a.b.c". Returns a list with
a single tuple (path, field_value) or the empty list if the field
is not present. | [
"Find",
"the",
"field",
"in",
"the",
"document",
"which",
"matches",
"the",
"given",
"field",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/oplog_manager.py#L376-L389 |
226,697 | yougov/mongo-connector | mongo_connector/oplog_manager.py | OplogThread._find_update_fields | def _find_update_fields(cls, field, doc):
"""Find the fields in the update document which match the given field.
Both the field and the top level keys in the doc may be in dot
notation, eg "a.b.c". Returns a list of tuples (path, field_value) or
the empty list if the field is not present.
"""
def find_partial_matches():
for key in doc:
if len(key) > len(field):
# Handle case where field is a prefix of key, eg field is
# 'a' and key is 'a.b'.
if key.startswith(field) and key[len(field)] == ".":
yield [key], doc[key]
# Continue searching, there may be multiple matches.
# For example, field 'a' should match 'a.b' and 'a.c'.
elif len(key) < len(field):
# Handle case where key is a prefix of field, eg field is
# 'a.b' and key is 'a'.
if field.startswith(key) and field[len(key)] == ".":
# Search for the remaining part of the field
matched = cls._find_field(field[len(key) + 1 :], doc[key])
if matched:
# Add the top level key to the path.
match = matched[0]
match[0].insert(0, key)
yield match
# Stop searching, it's not possible for any other
# keys in the update doc to match this field.
return
try:
return [([field], doc[field])]
except KeyError:
# Field does not exactly match any key in the update doc.
return list(find_partial_matches()) | python | def _find_update_fields(cls, field, doc):
def find_partial_matches():
for key in doc:
if len(key) > len(field):
# Handle case where field is a prefix of key, eg field is
# 'a' and key is 'a.b'.
if key.startswith(field) and key[len(field)] == ".":
yield [key], doc[key]
# Continue searching, there may be multiple matches.
# For example, field 'a' should match 'a.b' and 'a.c'.
elif len(key) < len(field):
# Handle case where key is a prefix of field, eg field is
# 'a.b' and key is 'a'.
if field.startswith(key) and field[len(key)] == ".":
# Search for the remaining part of the field
matched = cls._find_field(field[len(key) + 1 :], doc[key])
if matched:
# Add the top level key to the path.
match = matched[0]
match[0].insert(0, key)
yield match
# Stop searching, it's not possible for any other
# keys in the update doc to match this field.
return
try:
return [([field], doc[field])]
except KeyError:
# Field does not exactly match any key in the update doc.
return list(find_partial_matches()) | [
"def",
"_find_update_fields",
"(",
"cls",
",",
"field",
",",
"doc",
")",
":",
"def",
"find_partial_matches",
"(",
")",
":",
"for",
"key",
"in",
"doc",
":",
"if",
"len",
"(",
"key",
")",
">",
"len",
"(",
"field",
")",
":",
"# Handle case where field is a ... | Find the fields in the update document which match the given field.
Both the field and the top level keys in the doc may be in dot
notation, eg "a.b.c". Returns a list of tuples (path, field_value) or
the empty list if the field is not present. | [
"Find",
"the",
"fields",
"in",
"the",
"update",
"document",
"which",
"match",
"the",
"given",
"field",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/oplog_manager.py#L392-L428 |
226,698 | yougov/mongo-connector | mongo_connector/oplog_manager.py | OplogThread.filter_oplog_entry | def filter_oplog_entry(self, entry, include_fields=None, exclude_fields=None):
"""Remove fields from an oplog entry that should not be replicated.
NOTE: this does not support array indexing, for example 'a.b.2'"""
if not include_fields and not exclude_fields:
return entry
elif include_fields:
filter_fields = self._copy_included_fields
else:
filter_fields = self._pop_excluded_fields
fields = include_fields or exclude_fields
entry_o = entry["o"]
# Version 3.6 of mongodb includes a $v,
# see https://jira.mongodb.org/browse/SERVER-32240
if "$v" in entry_o:
entry_o.pop("$v")
# 'i' indicates an insert. 'o' field is the doc to be inserted.
if entry["op"] == "i":
entry["o"] = filter_fields(entry_o, fields)
# 'u' indicates an update. The 'o' field describes an update spec
# if '$set' or '$unset' are present.
elif entry["op"] == "u" and ("$set" in entry_o or "$unset" in entry_o):
if "$set" in entry_o:
entry["o"]["$set"] = filter_fields(entry_o["$set"], fields, update=True)
if "$unset" in entry_o:
entry["o"]["$unset"] = filter_fields(
entry_o["$unset"], fields, update=True
)
# not allowed to have empty $set/$unset, so remove if empty
if "$set" in entry_o and not entry_o["$set"]:
entry_o.pop("$set")
if "$unset" in entry_o and not entry_o["$unset"]:
entry_o.pop("$unset")
if not entry_o:
return None
# 'u' indicates an update. The 'o' field is the replacement document
# if no '$set' or '$unset' are present.
elif entry["op"] == "u":
entry["o"] = filter_fields(entry_o, fields)
return entry | python | def filter_oplog_entry(self, entry, include_fields=None, exclude_fields=None):
"""Remove fields from an oplog entry that should not be replicated.
NOTE: this does not support array indexing, for example 'a.b.2'"""
if not include_fields and not exclude_fields:
return entry
elif include_fields:
filter_fields = self._copy_included_fields
else:
filter_fields = self._pop_excluded_fields
fields = include_fields or exclude_fields
entry_o = entry["o"]
# Version 3.6 of mongodb includes a $v,
# see https://jira.mongodb.org/browse/SERVER-32240
if "$v" in entry_o:
entry_o.pop("$v")
# 'i' indicates an insert. 'o' field is the doc to be inserted.
if entry["op"] == "i":
entry["o"] = filter_fields(entry_o, fields)
# 'u' indicates an update. The 'o' field describes an update spec
# if '$set' or '$unset' are present.
elif entry["op"] == "u" and ("$set" in entry_o or "$unset" in entry_o):
if "$set" in entry_o:
entry["o"]["$set"] = filter_fields(entry_o["$set"], fields, update=True)
if "$unset" in entry_o:
entry["o"]["$unset"] = filter_fields(
entry_o["$unset"], fields, update=True
)
# not allowed to have empty $set/$unset, so remove if empty
if "$set" in entry_o and not entry_o["$set"]:
entry_o.pop("$set")
if "$unset" in entry_o and not entry_o["$unset"]:
entry_o.pop("$unset")
if not entry_o:
return None
# 'u' indicates an update. The 'o' field is the replacement document
# if no '$set' or '$unset' are present.
elif entry["op"] == "u":
entry["o"] = filter_fields(entry_o, fields)
return entry | [
"def",
"filter_oplog_entry",
"(",
"self",
",",
"entry",
",",
"include_fields",
"=",
"None",
",",
"exclude_fields",
"=",
"None",
")",
":",
"if",
"not",
"include_fields",
"and",
"not",
"exclude_fields",
":",
"return",
"entry",
"elif",
"include_fields",
":",
"fil... | Remove fields from an oplog entry that should not be replicated.
NOTE: this does not support array indexing, for example 'a.b.2 | [
"Remove",
"fields",
"from",
"an",
"oplog",
"entry",
"that",
"should",
"not",
"be",
"replicated",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/oplog_manager.py#L456-L498 |
226,699 | yougov/mongo-connector | mongo_connector/oplog_manager.py | OplogThread.get_oplog_cursor | def get_oplog_cursor(self, timestamp=None):
"""Get a cursor to the oplog after the given timestamp, excluding
no-op entries.
If no timestamp is specified, returns a cursor to the entire oplog.
"""
query = {"op": {"$ne": "n"}}
if timestamp is None:
cursor = self.oplog.find(query, cursor_type=CursorType.TAILABLE_AWAIT)
else:
query["ts"] = {"$gte": timestamp}
cursor = self.oplog.find(
query, cursor_type=CursorType.TAILABLE_AWAIT, oplog_replay=True
)
return cursor | python | def get_oplog_cursor(self, timestamp=None):
query = {"op": {"$ne": "n"}}
if timestamp is None:
cursor = self.oplog.find(query, cursor_type=CursorType.TAILABLE_AWAIT)
else:
query["ts"] = {"$gte": timestamp}
cursor = self.oplog.find(
query, cursor_type=CursorType.TAILABLE_AWAIT, oplog_replay=True
)
return cursor | [
"def",
"get_oplog_cursor",
"(",
"self",
",",
"timestamp",
"=",
"None",
")",
":",
"query",
"=",
"{",
"\"op\"",
":",
"{",
"\"$ne\"",
":",
"\"n\"",
"}",
"}",
"if",
"timestamp",
"is",
"None",
":",
"cursor",
"=",
"self",
".",
"oplog",
".",
"find",
"(",
... | Get a cursor to the oplog after the given timestamp, excluding
no-op entries.
If no timestamp is specified, returns a cursor to the entire oplog. | [
"Get",
"a",
"cursor",
"to",
"the",
"oplog",
"after",
"the",
"given",
"timestamp",
"excluding",
"no",
"-",
"op",
"entries",
"."
] | 557cafd4b54c848cd54ef28a258391a154650cb4 | https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/oplog_manager.py#L500-L514 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.