repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_family_control_chars
def com_google_fonts_check_family_control_chars(ttFonts):
  """Does font file include unacceptable control character glyphs?"""
  # Unacceptable control character glyph names: the entire C0 control
  # character Unicode block except:
  # - .null (U+0000)
  # - CR (U+000D)
  # Generated instead of hard-coding all 30 names; uppercase %04X matches
  # the conventional "uniXXXX" glyph naming.
  unacceptable_cc_set = {"uni{:04X}".format(codepoint)
                         for codepoint in range(0x0001, 0x0020)
                         if codepoint != 0x000D}

  # Maps path of each failing font file -> list of unacceptable glyph names.
  failed_font_dict = {}

  for ttFont in ttFonts:
    glyph_name_set = set(ttFont["glyf"].glyphs.keys())
    fontname = ttFont.reader.file.name
    # Set intersection replaces the explicit membership loop; sorting the
    # fixed-width uppercase-hex names preserves ascending-codepoint order.
    unacceptable_glyphs_in_set = sorted(unacceptable_cc_set & glyph_name_set)
    if unacceptable_glyphs_in_set:
      failed_font_dict[fontname] = unacceptable_glyphs_in_set

  if len(failed_font_dict) > 0:
    unacceptable_cc_report_string = "The following unacceptable control characters were identified:\n"
    for fnt in failed_font_dict.keys():
      unacceptable_cc_report_string += " {}: {}\n".format(
          fnt, ", ".join(failed_font_dict[fnt]))
    yield FAIL, ("{}".format(unacceptable_cc_report_string))
  else:
    yield PASS, ("Unacceptable control characters were not identified.")
python
def com_google_fonts_check_family_control_chars(ttFonts): """Does font file include unacceptable control character glyphs?""" # list of unacceptable control character glyph names # definition includes the entire control character Unicode block except: # - .null (U+0000) # - CR (U+000D) unacceptable_cc_list = [ "uni0001", "uni0002", "uni0003", "uni0004", "uni0005", "uni0006", "uni0007", "uni0008", "uni0009", "uni000A", "uni000B", "uni000C", "uni000E", "uni000F", "uni0010", "uni0011", "uni0012", "uni0013", "uni0014", "uni0015", "uni0016", "uni0017", "uni0018", "uni0019", "uni001A", "uni001B", "uni001C", "uni001D", "uni001E", "uni001F" ] # a dict with key:value of font path that failed check : list of unacceptable glyph names failed_font_dict = {} for ttFont in ttFonts: font_failed = False unacceptable_glyphs_in_set = [] # a list of unacceptable glyph names identified glyph_name_set = set(ttFont["glyf"].glyphs.keys()) fontname = ttFont.reader.file.name for unacceptable_glyph_name in unacceptable_cc_list: if unacceptable_glyph_name in glyph_name_set: font_failed = True unacceptable_glyphs_in_set.append(unacceptable_glyph_name) if font_failed: failed_font_dict[fontname] = unacceptable_glyphs_in_set if len(failed_font_dict) > 0: unacceptable_cc_report_string = "The following unacceptable control characters were identified:\n" for fnt in failed_font_dict.keys(): unacceptable_cc_report_string += " {}: {}\n".format( fnt, ", ".join(failed_font_dict[fnt]) ) yield FAIL, ("{}".format(unacceptable_cc_report_string)) else: yield PASS, ("Unacceptable control characters were not identified.")
[ "def", "com_google_fonts_check_family_control_chars", "(", "ttFonts", ")", ":", "unacceptable_cc_list", "=", "[", "\"uni0001\"", ",", "\"uni0002\"", ",", "\"uni0003\"", ",", "\"uni0004\"", ",", "\"uni0005\"", ",", "\"uni0006\"", ",", "\"uni0007\"", ",", "\"uni0008\"", ...
Does font file include unacceptable control character glyphs?
[ "Does", "font", "file", "include", "unacceptable", "control", "character", "glyphs?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3818-L3882
train
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
gfonts_repo_structure
def gfonts_repo_structure(fonts):
  """Whether the family at the given font path follows the files and
  directory structure typical of a font project hosted on the
  Google Fonts repo on GitHub."""
  from fontbakery.utils import get_absolute_path

  # FIXME: Improve this with more details about the expected structure.
  # The directory two levels above the font file names the license bucket
  # used by the Google Fonts repo layout.
  license_dirs = ["ufl", "ofl", "apache"]
  path_parts = get_absolute_path(fonts[0]).split(os.path.sep)
  return path_parts[-3] in license_dirs
python
def gfonts_repo_structure(fonts): """ The family at the given font path follows the files and directory structure typical of a font project hosted on the Google Fonts repo on GitHub ? """ from fontbakery.utils import get_absolute_path # FIXME: Improve this with more details # about the expected structure. abspath = get_absolute_path(fonts[0]) return abspath.split(os.path.sep)[-3] in ["ufl", "ofl", "apache"]
[ "def", "gfonts_repo_structure", "(", "fonts", ")", ":", "from", "fontbakery", ".", "utils", "import", "get_absolute_path", "abspath", "=", "get_absolute_path", "(", "fonts", "[", "0", "]", ")", "return", "abspath", ".", "split", "(", "os", ".", "path", ".", ...
The family at the given font path follows the files and directory structure typical of a font project hosted on the Google Fonts repo on GitHub ?
[ "The", "family", "at", "the", "given", "font", "path", "follows", "the", "files", "and", "directory", "structure", "typical", "of", "a", "font", "project", "hosted", "on", "the", "Google", "Fonts", "repo", "on", "GitHub", "?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3886-L3896
train
googlefonts/fontbakery
Lib/fontbakery/profiles/googlefonts.py
com_google_fonts_check_repo_dirname_match_nameid_1
def com_google_fonts_check_repo_dirname_match_nameid_1(fonts, gfonts_repo_structure):
  """Directory name in GFonts repo structure must match NameID 1 of the regular."""
  from fontTools.ttLib import TTFont
  from fontbakery.utils import (get_name_entry_strings,
                                get_absolute_path,
                                get_regular)
  regular = get_regular(fonts)
  if not regular:
    yield FAIL, "The font seems to lack a regular."
    # Bug fix: without this return, execution continued with regular=None
    # and TTFont(None) raised a TypeError instead of reporting the FAIL.
    return

  entry = get_name_entry_strings(TTFont(regular), NameID.FONT_FAMILY_NAME)[0]
  # Repo directory names are the family name lowercased with spaces and
  # hyphens removed.
  expected = entry.lower()
  expected = "".join(expected.split(' '))
  expected = "".join(expected.split('-'))

  # Renamed from `license` to avoid shadowing the builtin.
  license_dir, familypath, filename = get_absolute_path(regular).split(os.path.sep)[-3:]
  if familypath == expected:
    yield PASS, "OK"
  else:
    yield FAIL, (f"Family name on the name table ('{entry}') does not match"
                 f" directory name in the repo structure ('{familypath}')."
                 f" Expected '{expected}'.")
python
def com_google_fonts_check_repo_dirname_match_nameid_1(fonts, gfonts_repo_structure): """Directory name in GFonts repo structure must match NameID 1 of the regular.""" from fontTools.ttLib import TTFont from fontbakery.utils import (get_name_entry_strings, get_absolute_path, get_regular) regular = get_regular(fonts) if not regular: yield FAIL, "The font seems to lack a regular." entry = get_name_entry_strings(TTFont(regular), NameID.FONT_FAMILY_NAME)[0] expected = entry.lower() expected = "".join(expected.split(' ')) expected = "".join(expected.split('-')) license, familypath, filename = get_absolute_path(regular).split(os.path.sep)[-3:] if familypath == expected: yield PASS, "OK" else: yield FAIL, (f"Family name on the name table ('{entry}') does not match" f" directory name in the repo structure ('{familypath}')." f" Expected '{expected}'.")
[ "def", "com_google_fonts_check_repo_dirname_match_nameid_1", "(", "fonts", ",", "gfonts_repo_structure", ")", ":", "from", "fontTools", ".", "ttLib", "import", "TTFont", "from", "fontbakery", ".", "utils", "import", "(", "get_name_entry_strings", ",", "get_absolute_path",...
Directory name in GFonts repo structure must match NameID 1 of the regular.
[ "Directory", "name", "in", "GFonts", "repo", "structure", "must", "match", "NameID", "1", "of", "the", "regular", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/googlefonts.py#L3906-L3929
train
googlefonts/fontbakery
Lib/fontbakery/profiles/os2.py
com_google_fonts_check_family_panose_proportion
def com_google_fonts_check_family_panose_proportion(ttFonts):
  """Fonts have consistent PANOSE proportion?"""
  failed = False
  proportion = None
  for ttFont in ttFonts:
    # The first font's value becomes the reference all others must match.
    if proportion is None:
      proportion = ttFont['OS/2'].panose.bProportion
    if proportion != ttFont['OS/2'].panose.bProportion:
      failed = True

  if failed:
    # Fixed typo in the user-facing message: "accross" -> "across".
    yield FAIL, ("PANOSE proportion is not"
                 " the same across this family."
                 " In order to fix this,"
                 " please make sure that the panose.bProportion value"
                 " is the same in the OS/2 table of all of this family"
                 " font files.")
  else:
    yield PASS, "Fonts have consistent PANOSE proportion."
python
def com_google_fonts_check_family_panose_proportion(ttFonts): """Fonts have consistent PANOSE proportion?""" failed = False proportion = None for ttFont in ttFonts: if proportion is None: proportion = ttFont['OS/2'].panose.bProportion if proportion != ttFont['OS/2'].panose.bProportion: failed = True if failed: yield FAIL, ("PANOSE proportion is not" " the same accross this family." " In order to fix this," " please make sure that the panose.bProportion value" " is the same in the OS/2 table of all of this family" " font files.") else: yield PASS, "Fonts have consistent PANOSE proportion."
[ "def", "com_google_fonts_check_family_panose_proportion", "(", "ttFonts", ")", ":", "failed", "=", "False", "proportion", "=", "None", "for", "ttFont", "in", "ttFonts", ":", "if", "proportion", "is", "None", ":", "proportion", "=", "ttFont", "[", "'OS/2'", "]", ...
Fonts have consistent PANOSE proportion?
[ "Fonts", "have", "consistent", "PANOSE", "proportion?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/os2.py#L14-L32
train
googlefonts/fontbakery
Lib/fontbakery/profiles/os2.py
com_google_fonts_check_family_panose_familytype
def com_google_fonts_check_family_panose_familytype(ttFonts):
  """Fonts have consistent PANOSE family type?"""
  failed = False
  familytype = None
  for ttfont in ttFonts:
    # The first font's value becomes the reference all others must match.
    if familytype is None:
      familytype = ttfont['OS/2'].panose.bFamilyType
    if familytype != ttfont['OS/2'].panose.bFamilyType:
      failed = True

  if failed:
    # Fixed typo in the user-facing message: "accross" -> "across".
    yield FAIL, ("PANOSE family type is not"
                 " the same across this family."
                 " In order to fix this,"
                 " please make sure that the panose.bFamilyType value"
                 " is the same in the OS/2 table of all of this family"
                 " font files.")
  else:
    yield PASS, "Fonts have consistent PANOSE family type."
python
def com_google_fonts_check_family_panose_familytype(ttFonts): """Fonts have consistent PANOSE family type?""" failed = False familytype = None for ttfont in ttFonts: if familytype is None: familytype = ttfont['OS/2'].panose.bFamilyType if familytype != ttfont['OS/2'].panose.bFamilyType: failed = True if failed: yield FAIL, ("PANOSE family type is not" " the same accross this family." " In order to fix this," " please make sure that the panose.bFamilyType value" " is the same in the OS/2 table of all of this family" " font files.") else: yield PASS, "Fonts have consistent PANOSE family type."
[ "def", "com_google_fonts_check_family_panose_familytype", "(", "ttFonts", ")", ":", "failed", "=", "False", "familytype", "=", "None", "for", "ttfont", "in", "ttFonts", ":", "if", "familytype", "is", "None", ":", "familytype", "=", "ttfont", "[", "'OS/2'", "]", ...
Fonts have consistent PANOSE family type?
[ "Fonts", "have", "consistent", "PANOSE", "family", "type?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/os2.py#L38-L56
train
googlefonts/fontbakery
Lib/fontbakery/profiles/os2.py
com_google_fonts_check_code_pages
def com_google_fonts_check_code_pages(ttFont):
  """Check code page character ranges"""
  os2 = ttFont['OS/2']
  # No code page is declared when the fields are absent (OS/2 version 0)
  # or when both bitfields are zero.
  if not hasattr(os2, "ulCodePageRange1") or \
     not hasattr(os2, "ulCodePageRange2") or \
     (os2.ulCodePageRange1 == 0 and
      os2.ulCodePageRange2 == 0):
    # Fixed misspelled field names in the message
    # ("ulCodePageRage1 and CodePageRage2").
    yield FAIL, ("No code pages defined in the OS/2 table"
                 " ulCodePageRange1 and ulCodePageRange2 fields.")
  else:
    yield PASS, "At least one code page is defined."
python
def com_google_fonts_check_code_pages(ttFont): """Check code page character ranges""" if not hasattr(ttFont['OS/2'], "ulCodePageRange1") or \ not hasattr(ttFont['OS/2'], "ulCodePageRange2") or \ (ttFont['OS/2'].ulCodePageRange1 == 0 and \ ttFont['OS/2'].ulCodePageRange2 == 0): yield FAIL, ("No code pages defined in the OS/2 table" " ulCodePageRage1 and CodePageRage2 fields.") else: yield PASS, "At least one code page is defined."
[ "def", "com_google_fonts_check_code_pages", "(", "ttFont", ")", ":", "if", "not", "hasattr", "(", "ttFont", "[", "'OS/2'", "]", ",", "\"ulCodePageRange1\"", ")", "or", "not", "hasattr", "(", "ttFont", "[", "'OS/2'", "]", ",", "\"ulCodePageRange2\"", ")", "or",...
Check code page character ranges
[ "Check", "code", "page", "character", "ranges" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/os2.py#L245-L255
train
googlefonts/fontbakery
Lib/fontbakery/profiles/glyf.py
com_google_fonts_check_glyf_unused_data
def com_google_fonts_check_glyf_unused_data(ttFont):
  """Is there any unused data at the end of the glyf table?"""
  try:
    expected_glyphs = len(ttFont.getGlyphOrder())
    actual_glyphs = len(ttFont['glyf'].glyphs)
    diff = actual_glyphs - expected_glyphs

    if diff == 0:
      yield PASS, "There is no unused data at the end of the glyf table."
    elif diff < 0:
      # Fewer decompiled glyphs than the glyph order lists: trailing bytes
      # of the glyf table were never reached.
      yield FAIL, Message("unreachable-data",
                          ("Glyf table has unreachable data at the end of "
                           " the table. Expected glyf table length {}"
                           " (from loca table), got length"
                           " {} (difference: {})").format(
                               expected_glyphs, actual_glyphs, diff))
    else:
      # A positive difference is expected to surface as a TTLibError while
      # decompiling, so this branch should be unreachable.
      raise Exception("Bug: fontTools did not raise an expected exception.")
  except fontTools.ttLib.TTLibError as error:
    if "not enough 'glyf' table data" in format(error):
      yield FAIL, Message("missing-data",
                          ("Loca table references data beyond"
                           " the end of the glyf table."
                           " Expected glyf table length {}"
                           " (from loca table).").format(expected_glyphs))
    else:
      raise Exception("Bug: Unexpected fontTools exception.")
python
def com_google_fonts_check_glyf_unused_data(ttFont): """Is there any unused data at the end of the glyf table?""" try: expected_glyphs = len(ttFont.getGlyphOrder()) actual_glyphs = len(ttFont['glyf'].glyphs) diff = actual_glyphs - expected_glyphs if diff < 0: yield FAIL, Message("unreachable-data", ("Glyf table has unreachable data at the end of " " the table. Expected glyf table length {}" " (from loca table), got length" " {} (difference: {})").format( expected_glyphs, actual_glyphs, diff)) elif not diff: # negative diff -> exception below yield PASS, "There is no unused data at the end of the glyf table." else: raise Exception("Bug: fontTools did not raise an expected exception.") except fontTools.ttLib.TTLibError as error: if "not enough 'glyf' table data" in format(error): yield FAIL, Message("missing-data", ("Loca table references data beyond" " the end of the glyf table." " Expected glyf table length {}" " (from loca table).").format(expected_glyphs)) else: raise Exception("Bug: Unexpected fontTools exception.")
[ "def", "com_google_fonts_check_glyf_unused_data", "(", "ttFont", ")", ":", "try", ":", "expected_glyphs", "=", "len", "(", "ttFont", ".", "getGlyphOrder", "(", ")", ")", "actual_glyphs", "=", "len", "(", "ttFont", "[", "'glyf'", "]", ".", "glyphs", ")", "dif...
Is there any unused data at the end of the glyf table?
[ "Is", "there", "any", "unused", "data", "at", "the", "end", "of", "the", "glyf", "table?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/glyf.py#L13-L39
train
googlefonts/fontbakery
Lib/fontbakery/profiles/glyf.py
com_google_fonts_check_points_out_of_bounds
def com_google_fonts_check_points_out_of_bounds(ttFont):
  """Check for points out of bounds."""
  glyf_table = ttFont['glyf']
  out_of_bounds = []
  for glyph_name in glyf_table.keys():
    glyph = glyf_table[glyph_name]
    # getCoordinates returns (coordinates, endPts, flags); only the
    # coordinates are inspected here.
    for x, y in glyph.getCoordinates(glyf_table)[0]:
      outside_bbox = (x < glyph.xMin or x > glyph.xMax or
                      y < glyph.yMin or y > glyph.yMax)
      if outside_bbox or abs(x) > 32766 or abs(y) > 32766:
        out_of_bounds.append((glyph_name, x, y))

  # A non-empty list is equivalent to the original `failed` flag.
  if out_of_bounds:
    yield WARN, ("The following glyphs have coordinates which are"
                 " out of bounds:\n{}\nThis happens a lot when points"
                 " are not extremes, which is usually bad. However,"
                 " fixing this alert by adding points on extremes may"
                 " do more harm than good, especially with italics,"
                 " calligraphic-script, handwriting, rounded and"
                 " other fonts. So it is common to"
                 " ignore this message".format(out_of_bounds))
  else:
    yield PASS, "All glyph paths have coordinates within bounds!"
python
def com_google_fonts_check_points_out_of_bounds(ttFont): """Check for points out of bounds.""" failed = False out_of_bounds = [] for glyphName in ttFont['glyf'].keys(): glyph = ttFont['glyf'][glyphName] coords = glyph.getCoordinates(ttFont['glyf'])[0] for x, y in coords: if x < glyph.xMin or x > glyph.xMax or \ y < glyph.yMin or y > glyph.yMax or \ abs(x) > 32766 or abs(y) > 32766: failed = True out_of_bounds.append((glyphName, x, y)) if failed: yield WARN, ("The following glyphs have coordinates which are" " out of bounds:\n{}\nThis happens a lot when points" " are not extremes, which is usually bad. However," " fixing this alert by adding points on extremes may" " do more harm than good, especially with italics," " calligraphic-script, handwriting, rounded and" " other fonts. So it is common to" " ignore this message".format(out_of_bounds)) else: yield PASS, "All glyph paths have coordinates within bounds!"
[ "def", "com_google_fonts_check_points_out_of_bounds", "(", "ttFont", ")", ":", "failed", "=", "False", "out_of_bounds", "=", "[", "]", "for", "glyphName", "in", "ttFont", "[", "'glyf'", "]", ".", "keys", "(", ")", ":", "glyph", "=", "ttFont", "[", "'glyf'", ...
Check for points out of bounds.
[ "Check", "for", "points", "out", "of", "bounds", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/glyf.py#L51-L75
train
googlefonts/fontbakery
Lib/fontbakery/profiles/ufo_sources.py
com_daltonmaag_check_ufolint
def com_daltonmaag_check_ufolint(font):
  """Run ufolint on UFO source directory."""
  import subprocess

  try:
    subprocess.check_output(["ufolint", font], stderr=subprocess.STDOUT)
  except subprocess.CalledProcessError as error:
    # ufolint exits non-zero on lint failures; its captured output is the
    # report we surface to the user.
    yield FAIL, ("ufolint failed the UFO source. Output follows :"
                 "\n\n{}\n").format(error.output.decode())
  except OSError:
    # The ufolint executable could not be launched at all.
    yield ERROR, "ufolint is not available!"
  else:
    yield PASS, "ufolint passed the UFO source."
python
def com_daltonmaag_check_ufolint(font): """Run ufolint on UFO source directory.""" import subprocess ufolint_cmd = ["ufolint", font] try: subprocess.check_output(ufolint_cmd, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: yield FAIL, ("ufolint failed the UFO source. Output follows :" "\n\n{}\n").format(e.output.decode()) except OSError: yield ERROR, "ufolint is not available!" else: yield PASS, "ufolint passed the UFO source."
[ "def", "com_daltonmaag_check_ufolint", "(", "font", ")", ":", "import", "subprocess", "ufolint_cmd", "=", "[", "\"ufolint\"", ",", "font", "]", "try", ":", "subprocess", ".", "check_output", "(", "ufolint_cmd", ",", "stderr", "=", "subprocess", ".", "STDOUT", ...
Run ufolint on UFO source directory.
[ "Run", "ufolint", "on", "UFO", "source", "directory", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/ufo_sources.py#L91-L104
train
googlefonts/fontbakery
Lib/fontbakery/profiles/ufo_sources.py
com_daltonmaag_check_required_fields
def com_daltonmaag_check_required_fields(ufo_font):
  """Check that required fields are present in the UFO fontinfo.

  ufo2ft requires these info fields to compile a font binary:
  unitsPerEm, ascender, descender, xHeight, capHeight and familyName.
  """
  # Renamed from `recommended_fields`: this check is about *required*
  # fields (the misleading name was copied from the sibling
  # com_daltonmaag_check_recommended_fields check).
  missing_required_fields = []
  for field in [
      "unitsPerEm", "ascender", "descender", "xHeight", "capHeight",
      "familyName"
  ]:
    # fontinfo stores values on "_"-prefixed private attributes.
    if ufo_font.info.__dict__.get("_" + field) is None:
      missing_required_fields.append(field)

  if missing_required_fields:
    yield FAIL, f"Required field(s) missing: {missing_required_fields}"
  else:
    yield PASS, "Required fields present."
python
def com_daltonmaag_check_required_fields(ufo_font): """Check that required fields are present in the UFO fontinfo. ufo2ft requires these info fields to compile a font binary: unitsPerEm, ascender, descender, xHeight, capHeight and familyName. """ recommended_fields = [] for field in [ "unitsPerEm", "ascender", "descender", "xHeight", "capHeight", "familyName" ]: if ufo_font.info.__dict__.get("_" + field) is None: recommended_fields.append(field) if recommended_fields: yield FAIL, f"Required field(s) missing: {recommended_fields}" else: yield PASS, "Required fields present."
[ "def", "com_daltonmaag_check_required_fields", "(", "ufo_font", ")", ":", "recommended_fields", "=", "[", "]", "for", "field", "in", "[", "\"unitsPerEm\"", ",", "\"ascender\"", ",", "\"descender\"", ",", "\"xHeight\"", ",", "\"capHeight\"", ",", "\"familyName\"", "]...
Check that required fields are present in the UFO fontinfo. ufo2ft requires these info fields to compile a font binary: unitsPerEm, ascender, descender, xHeight, capHeight and familyName.
[ "Check", "that", "required", "fields", "are", "present", "in", "the", "UFO", "fontinfo", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/ufo_sources.py#L111-L129
train
googlefonts/fontbakery
Lib/fontbakery/profiles/ufo_sources.py
com_daltonmaag_check_recommended_fields
def com_daltonmaag_check_recommended_fields(ufo_font):
  """Check that recommended fields are present in the UFO fontinfo.

  This includes fields that should be in any production font.
  """
  fields_to_check = [
      "postscriptUnderlineThickness", "postscriptUnderlinePosition",
      "versionMajor", "versionMinor", "styleName", "copyright",
      "openTypeOS2Panose"
  ]
  # fontinfo stores values on "_"-prefixed private attributes.
  recommended_fields = [
      field for field in fields_to_check
      if ufo_font.info.__dict__.get("_" + field) is None
  ]

  if recommended_fields:
    yield WARN, f"Recommended field(s) missing: {recommended_fields}"
  else:
    yield PASS, "Recommended fields present."
python
def com_daltonmaag_check_recommended_fields(ufo_font): """Check that recommended fields are present in the UFO fontinfo. This includes fields that should be in any production font. """ recommended_fields = [] for field in [ "postscriptUnderlineThickness", "postscriptUnderlinePosition", "versionMajor", "versionMinor", "styleName", "copyright", "openTypeOS2Panose" ]: if ufo_font.info.__dict__.get("_" + field) is None: recommended_fields.append(field) if recommended_fields: yield WARN, f"Recommended field(s) missing: {recommended_fields}" else: yield PASS, "Recommended fields present."
[ "def", "com_daltonmaag_check_recommended_fields", "(", "ufo_font", ")", ":", "recommended_fields", "=", "[", "]", "for", "field", "in", "[", "\"postscriptUnderlineThickness\"", ",", "\"postscriptUnderlinePosition\"", ",", "\"versionMajor\"", ",", "\"versionMinor\"", ",", ...
Check that recommended fields are present in the UFO fontinfo. This includes fields that should be in any production font.
[ "Check", "that", "recommended", "fields", "are", "present", "in", "the", "UFO", "fontinfo", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/ufo_sources.py#L136-L154
train
googlefonts/fontbakery
Lib/fontbakery/profiles/ufo_sources.py
com_daltonmaag_check_unnecessary_fields
def com_daltonmaag_check_unnecessary_fields(ufo_font):
  """Check that no unnecessary fields are present in the UFO fontinfo.

  ufo2ft will generate these.

  openTypeOS2UnicodeRanges and openTypeOS2CodePageRanges are exempted
  because it is useful to toggle a range when not _all_ the glyphs in that
  region are present.

  year is deprecated since UFO v2.
  """
  fields_to_check = [
      "openTypeNameUniqueID", "openTypeNameVersion", "postscriptUniqueID",
      "year"
  ]
  # fontinfo stores values on "_"-prefixed private attributes.
  unnecessary_fields = [
      field for field in fields_to_check
      if ufo_font.info.__dict__.get("_" + field) is not None
  ]

  if unnecessary_fields:
    yield WARN, f"Unnecessary field(s) present: {unnecessary_fields}"
  else:
    yield PASS, "Unnecessary fields omitted."
python
def com_daltonmaag_check_unnecessary_fields(ufo_font): """Check that no unnecessary fields are present in the UFO fontinfo. ufo2ft will generate these. openTypeOS2UnicodeRanges and openTypeOS2CodePageRanges are exempted because it is useful to toggle a range when not _all_ the glyphs in that region are present. year is deprecated since UFO v2. """ unnecessary_fields = [] for field in [ "openTypeNameUniqueID", "openTypeNameVersion", "postscriptUniqueID", "year" ]: if ufo_font.info.__dict__.get("_" + field) is not None: unnecessary_fields.append(field) if unnecessary_fields: yield WARN, f"Unnecessary field(s) present: {unnecessary_fields}" else: yield PASS, "Unnecessary fields omitted."
[ "def", "com_daltonmaag_check_unnecessary_fields", "(", "ufo_font", ")", ":", "unnecessary_fields", "=", "[", "]", "for", "field", "in", "[", "\"openTypeNameUniqueID\"", ",", "\"openTypeNameVersion\"", ",", "\"postscriptUniqueID\"", ",", "\"year\"", "]", ":", "if", "uf...
Check that no unnecessary fields are present in the UFO fontinfo. ufo2ft will generate these. openTypeOS2UnicodeRanges and openTypeOS2CodePageRanges are exempted because it is useful to toggle a range when not _all_ the glyphs in that region are present. year is deprecated since UFO v2.
[ "Check", "that", "no", "unnecessary", "fields", "are", "present", "in", "the", "UFO", "fontinfo", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/ufo_sources.py#L161-L184
train
googlefonts/fontbakery
Lib/fontbakery/profiles/ufo_sources.py
UFOProfile.setup_argparse
def setup_argparse(self, argument_parser):
  """Set up custom arguments needed for this profile."""
  import glob
  import logging
  import argparse

  def get_fonts(pattern):
    # Expand wildcard patterns such as *.ufo and keep only paths that
    # look like UFO source directories.
    fonts_to_check = []
    for fullpath in glob.glob(pattern):
      fullpath_absolute = os.path.abspath(fullpath)
      is_ufo_dir = (fullpath_absolute.lower().endswith(".ufo")
                    and os.path.isdir(fullpath_absolute))
      if is_ufo_dir:
        fonts_to_check.append(fullpath)
      else:
        logging.warning(("Skipping '{}' as it does not seem "
                         "to be valid UFO source directory.").format(fullpath))
    return fonts_to_check

  class MergeAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
      # Flatten the per-pattern lists produced by get_fonts into one list.
      merged = [item for sublist in values for item in sublist]
      setattr(namespace, self.dest, merged)

  argument_parser.add_argument(
      'fonts',
      # nargs='*' allows optional commands like "-L" to work without other
      # input files:
      nargs='*',
      type=get_fonts,
      action=MergeAction,
      help='font file path(s) to check.'
           ' Wildcards like *.ufo are allowed.')

  return ('fonts',)
python
def setup_argparse(self, argument_parser): """Set up custom arguments needed for this profile.""" import glob import logging import argparse def get_fonts(pattern): fonts_to_check = [] # use glob.glob to accept *.ufo for fullpath in glob.glob(pattern): fullpath_absolute = os.path.abspath(fullpath) if fullpath_absolute.lower().endswith(".ufo") and os.path.isdir( fullpath_absolute): fonts_to_check.append(fullpath) else: logging.warning( ("Skipping '{}' as it does not seem " "to be valid UFO source directory.").format(fullpath)) return fonts_to_check class MergeAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): target = [item for l in values for item in l] setattr(namespace, self.dest, target) argument_parser.add_argument( 'fonts', # To allow optional commands like "-L" to work without other input # files: nargs='*', type=get_fonts, action=MergeAction, help='font file path(s) to check.' ' Wildcards like *.ufo are allowed.') return ('fonts',)
[ "def", "setup_argparse", "(", "self", ",", "argument_parser", ")", ":", "import", "glob", "import", "logging", "import", "argparse", "def", "get_fonts", "(", "pattern", ")", ":", "fonts_to_check", "=", "[", "]", "for", "fullpath", "in", "glob", ".", "glob", ...
Set up custom arguments needed for this profile.
[ "Set", "up", "custom", "arguments", "needed", "for", "this", "profile", "." ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/ufo_sources.py#L11-L49
train
googlefonts/fontbakery
Lib/fontbakery/profiles/hmtx.py
com_google_fonts_check_whitespace_widths
def com_google_fonts_check_whitespace_widths(ttFont):
  """Whitespace and non-breaking space have the same width?"""
  from fontbakery.utils import get_glyph_name

  space_name = get_glyph_name(ttFont, 0x0020)
  nbsp_name = get_glyph_name(ttFont, 0x00A0)
  # hmtx entries are (advanceWidth, leftSideBearing); index 0 is the width.
  hmtx = ttFont['hmtx']
  space_width = hmtx[space_name][0]
  nbsp_width = hmtx[nbsp_name][0]

  widths_match = space_width == nbsp_width
  if widths_match and space_width > 0:
    yield PASS, "Whitespace and non-breaking space have the same width."
  else:
    yield FAIL, ("Whitespace and non-breaking space have differing width:"
                 " Whitespace ({}) is {} font units wide, non-breaking space"
                 " ({}) is {} font units wide. Both should be positive and the"
                 " same.").format(space_name, space_width,
                                  nbsp_name, nbsp_width)
python
def com_google_fonts_check_whitespace_widths(ttFont): """Whitespace and non-breaking space have the same width?""" from fontbakery.utils import get_glyph_name space_name = get_glyph_name(ttFont, 0x0020) nbsp_name = get_glyph_name(ttFont, 0x00A0) space_width = ttFont['hmtx'][space_name][0] nbsp_width = ttFont['hmtx'][nbsp_name][0] if space_width > 0 and space_width == nbsp_width: yield PASS, "Whitespace and non-breaking space have the same width." else: yield FAIL, ("Whitespace and non-breaking space have differing width:" " Whitespace ({}) is {} font units wide, non-breaking space" " ({}) is {} font units wide. Both should be positive and the" " same.").format(space_name, space_width, nbsp_name, nbsp_width)
[ "def", "com_google_fonts_check_whitespace_widths", "(", "ttFont", ")", ":", "from", "fontbakery", ".", "utils", "import", "get_glyph_name", "space_name", "=", "get_glyph_name", "(", "ttFont", ",", "0x0020", ")", "nbsp_name", "=", "get_glyph_name", "(", "ttFont", ","...
Whitespace and non-breaking space have the same width?
[ "Whitespace", "and", "non", "-", "breaking", "space", "have", "the", "same", "width?" ]
b355aea2e619a4477769e060d24c32448aa65399
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/profiles/hmtx.py#L13-L30
train
Kuniwak/vint
vint/linting/policy_set.py
PolicySet.update_by_config
def update_by_config(self, config_dict): """ Update policies set by the config dictionary. Expect the policy_enabling_map structure to be (represented by YAML): - PolicyFoo: enabled: True - PolicyBar: enabled: False additional_field: 'is_ok' """ policy_enabling_map = self._get_enabling_map(config_dict) self.enabled_policies = [] for policy_name, is_policy_enabled in policy_enabling_map.items(): if not self._is_policy_exists(policy_name): self._warn_unexistent_policy(policy_name) continue if is_policy_enabled: enabled_policy = self._get_policy(policy_name) self.enabled_policies.append(enabled_policy)
python
def update_by_config(self, config_dict): """ Update policies set by the config dictionary. Expect the policy_enabling_map structure to be (represented by YAML): - PolicyFoo: enabled: True - PolicyBar: enabled: False additional_field: 'is_ok' """ policy_enabling_map = self._get_enabling_map(config_dict) self.enabled_policies = [] for policy_name, is_policy_enabled in policy_enabling_map.items(): if not self._is_policy_exists(policy_name): self._warn_unexistent_policy(policy_name) continue if is_policy_enabled: enabled_policy = self._get_policy(policy_name) self.enabled_policies.append(enabled_policy)
[ "def", "update_by_config", "(", "self", ",", "config_dict", ")", ":", "policy_enabling_map", "=", "self", ".", "_get_enabling_map", "(", "config_dict", ")", "self", ".", "enabled_policies", "=", "[", "]", "for", "policy_name", ",", "is_policy_enabled", "in", "po...
Update policies set by the config dictionary. Expect the policy_enabling_map structure to be (represented by YAML): - PolicyFoo: enabled: True - PolicyBar: enabled: False additional_field: 'is_ok'
[ "Update", "policies", "set", "by", "the", "config", "dictionary", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/linting/policy_set.py#L48-L68
train
Kuniwak/vint
vint/linting/cli.py
_build_cmdargs
def _build_cmdargs(argv): """ Build command line arguments dict to use; - displaying usages - vint.linting.env.build_environment This method take an argv parameter to make function pure. """ parser = _build_arg_parser() namespace = parser.parse_args(argv[1:]) cmdargs = vars(namespace) return cmdargs
python
def _build_cmdargs(argv): """ Build command line arguments dict to use; - displaying usages - vint.linting.env.build_environment This method take an argv parameter to make function pure. """ parser = _build_arg_parser() namespace = parser.parse_args(argv[1:]) cmdargs = vars(namespace) return cmdargs
[ "def", "_build_cmdargs", "(", "argv", ")", ":", "parser", "=", "_build_arg_parser", "(", ")", "namespace", "=", "parser", ".", "parse_args", "(", "argv", "[", "1", ":", "]", ")", "cmdargs", "=", "vars", "(", "namespace", ")", "return", "cmdargs" ]
Build command line arguments dict to use; - displaying usages - vint.linting.env.build_environment This method take an argv parameter to make function pure.
[ "Build", "command", "line", "arguments", "dict", "to", "use", ";", "-", "displaying", "usages", "-", "vint", ".", "linting", ".", "env", ".", "build_environment" ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/linting/cli.py#L79-L90
train
Kuniwak/vint
vint/ast/parsing.py
Parser.parse
def parse(self, lint_target): # type: (AbstractLintTarget) -> Dict[str, Any] """ Parse vim script file and return the AST. """ decoder = Decoder(default_decoding_strategy) decoded = decoder.decode(lint_target.read()) decoded_and_lf_normalized = decoded.replace('\r\n', '\n') return self.parse_string(decoded_and_lf_normalized)
python
def parse(self, lint_target): # type: (AbstractLintTarget) -> Dict[str, Any] """ Parse vim script file and return the AST. """ decoder = Decoder(default_decoding_strategy) decoded = decoder.decode(lint_target.read()) decoded_and_lf_normalized = decoded.replace('\r\n', '\n') return self.parse_string(decoded_and_lf_normalized)
[ "def", "parse", "(", "self", ",", "lint_target", ")", ":", "decoder", "=", "Decoder", "(", "default_decoding_strategy", ")", "decoded", "=", "decoder", ".", "decode", "(", "lint_target", ".", "read", "(", ")", ")", "decoded_and_lf_normalized", "=", "decoded", ...
Parse vim script file and return the AST.
[ "Parse", "vim", "script", "file", "and", "return", "the", "AST", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/parsing.py#L19-L25
train
Kuniwak/vint
vint/ast/parsing.py
Parser.parse_string
def parse_string(self, string): # type: (str) -> Dict[str, Any] """ Parse vim script string and return the AST. """ lines = string.split('\n') reader = vimlparser.StringReader(lines) parser = vimlparser.VimLParser(self._enable_neovim) ast = parser.parse(reader) # TOPLEVEL does not have a pos, but we need pos for all nodes ast['pos'] = {'col': 1, 'i': 0, 'lnum': 1} for plugin in self.plugins: plugin.process(ast) return ast
python
def parse_string(self, string): # type: (str) -> Dict[str, Any] """ Parse vim script string and return the AST. """ lines = string.split('\n') reader = vimlparser.StringReader(lines) parser = vimlparser.VimLParser(self._enable_neovim) ast = parser.parse(reader) # TOPLEVEL does not have a pos, but we need pos for all nodes ast['pos'] = {'col': 1, 'i': 0, 'lnum': 1} for plugin in self.plugins: plugin.process(ast) return ast
[ "def", "parse_string", "(", "self", ",", "string", ")", ":", "lines", "=", "string", ".", "split", "(", "'\\n'", ")", "reader", "=", "vimlparser", ".", "StringReader", "(", "lines", ")", "parser", "=", "vimlparser", ".", "VimLParser", "(", "self", ".", ...
Parse vim script string and return the AST.
[ "Parse", "vim", "script", "string", "and", "return", "the", "AST", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/parsing.py#L28-L42
train
Kuniwak/vint
vint/ast/parsing.py
Parser.parse_string_expr
def parse_string_expr(self, string_expr_node): """ Parse a string node content. """ string_expr_node_value = string_expr_node['value'] string_expr_str = string_expr_node_value[1:-1] # Care escaped string literals if string_expr_node_value[0] == "'": string_expr_str = string_expr_str.replace("''", "'") else: string_expr_str = string_expr_str.replace('\\"', '"') # NOTE: This is a hack to parse expr1. See :help expr1 raw_ast = self.parse_string('echo ' + string_expr_str) # We need the left node of ECHO node parsed_string_expr_nodes = raw_ast['body'][0]['list'] start_pos = string_expr_node['pos'] def adjust_position(node): pos = node['pos'] # Care 1-based index and the length of "echo ". pos['col'] += start_pos['col'] - 1 - 5 # Care the length of "echo ". pos['i'] += start_pos['i'] - 5 # Care 1-based index pos['lnum'] += start_pos['lnum'] - 1 for parsed_string_expr_node in parsed_string_expr_nodes: traverse(parsed_string_expr_node, on_enter=adjust_position) return parsed_string_expr_nodes
python
def parse_string_expr(self, string_expr_node): """ Parse a string node content. """ string_expr_node_value = string_expr_node['value'] string_expr_str = string_expr_node_value[1:-1] # Care escaped string literals if string_expr_node_value[0] == "'": string_expr_str = string_expr_str.replace("''", "'") else: string_expr_str = string_expr_str.replace('\\"', '"') # NOTE: This is a hack to parse expr1. See :help expr1 raw_ast = self.parse_string('echo ' + string_expr_str) # We need the left node of ECHO node parsed_string_expr_nodes = raw_ast['body'][0]['list'] start_pos = string_expr_node['pos'] def adjust_position(node): pos = node['pos'] # Care 1-based index and the length of "echo ". pos['col'] += start_pos['col'] - 1 - 5 # Care the length of "echo ". pos['i'] += start_pos['i'] - 5 # Care 1-based index pos['lnum'] += start_pos['lnum'] - 1 for parsed_string_expr_node in parsed_string_expr_nodes: traverse(parsed_string_expr_node, on_enter=adjust_position) return parsed_string_expr_nodes
[ "def", "parse_string_expr", "(", "self", ",", "string_expr_node", ")", ":", "string_expr_node_value", "=", "string_expr_node", "[", "'value'", "]", "string_expr_str", "=", "string_expr_node_value", "[", "1", ":", "-", "1", "]", "if", "string_expr_node_value", "[", ...
Parse a string node content.
[ "Parse", "a", "string", "node", "content", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/parsing.py#L87-L121
train
Kuniwak/vint
vint/ast/plugin/scope_plugin/scope_detector.py
is_builtin_variable
def is_builtin_variable(id_node): # type: (Dict[str, Any]) -> bool """ Whether the specified node is a builtin identifier. """ # Builtin variables are always IDENTIFIER. if NodeType(id_node['type']) is not NodeType.IDENTIFIER: return False id_value = id_node['value'] if id_value.startswith('v:'): # It is an explicit builtin variable such as: "v:count", "v:char" # TODO: Add unknown builtin flag return True if is_builtin_function(id_node): return True if id_value in ['key', 'val']: # These builtin variable names are available on only map() or filter(). return is_on_lambda_string_context(id_node) # It is an implicit builtin variable such as: "count", "char" return id_value in BuiltinVariablesCanHaveImplicitScope
python
def is_builtin_variable(id_node): # type: (Dict[str, Any]) -> bool """ Whether the specified node is a builtin identifier. """ # Builtin variables are always IDENTIFIER. if NodeType(id_node['type']) is not NodeType.IDENTIFIER: return False id_value = id_node['value'] if id_value.startswith('v:'): # It is an explicit builtin variable such as: "v:count", "v:char" # TODO: Add unknown builtin flag return True if is_builtin_function(id_node): return True if id_value in ['key', 'val']: # These builtin variable names are available on only map() or filter(). return is_on_lambda_string_context(id_node) # It is an implicit builtin variable such as: "count", "char" return id_value in BuiltinVariablesCanHaveImplicitScope
[ "def", "is_builtin_variable", "(", "id_node", ")", ":", "if", "NodeType", "(", "id_node", "[", "'type'", "]", ")", "is", "not", "NodeType", ".", "IDENTIFIER", ":", "return", "False", "id_value", "=", "id_node", "[", "'value'", "]", "if", "id_value", ".", ...
Whether the specified node is a builtin identifier.
[ "Whether", "the", "specified", "node", "is", "a", "builtin", "identifier", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/plugin/scope_plugin/scope_detector.py#L69-L90
train
Kuniwak/vint
vint/ast/plugin/scope_plugin/scope_detector.py
is_builtin_function
def is_builtin_function(id_node): # type: (Dict[str, Any]) -> bool """ Whether the specified node is a builtin function name identifier. The given identifier should be a child node of NodeType.CALL. """ # Builtin functions are always IDENTIFIER. if NodeType(id_node['type']) is not NodeType.IDENTIFIER: return False id_value = id_node['value'] if not is_function_identifier(id_node): return False # There are difference between a function identifier and variable # identifier: # # let localtime = 0 # echo localtime " => 0 # echo localtime() " => 1420011455 return id_value in BuiltinFunctions
python
def is_builtin_function(id_node): # type: (Dict[str, Any]) -> bool """ Whether the specified node is a builtin function name identifier. The given identifier should be a child node of NodeType.CALL. """ # Builtin functions are always IDENTIFIER. if NodeType(id_node['type']) is not NodeType.IDENTIFIER: return False id_value = id_node['value'] if not is_function_identifier(id_node): return False # There are difference between a function identifier and variable # identifier: # # let localtime = 0 # echo localtime " => 0 # echo localtime() " => 1420011455 return id_value in BuiltinFunctions
[ "def", "is_builtin_function", "(", "id_node", ")", ":", "if", "NodeType", "(", "id_node", "[", "'type'", "]", ")", "is", "not", "NodeType", ".", "IDENTIFIER", ":", "return", "False", "id_value", "=", "id_node", "[", "'value'", "]", "if", "not", "is_functio...
Whether the specified node is a builtin function name identifier. The given identifier should be a child node of NodeType.CALL.
[ "Whether", "the", "specified", "node", "is", "a", "builtin", "function", "name", "identifier", ".", "The", "given", "identifier", "should", "be", "a", "child", "node", "of", "NodeType", ".", "CALL", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/plugin/scope_plugin/scope_detector.py#L93-L112
train
Kuniwak/vint
vint/ast/plugin/scope_plugin/identifier_classifier.py
IdentifierClassifier.attach_identifier_attributes
def attach_identifier_attributes(self, ast): # type: (Dict[str, Any]) -> Dict[str, Any] """ Attach 5 flags to the AST. - is dynamic: True if the identifier name can be determined by static analysis. - is member: True if the identifier is a member of a subscription/dot/slice node. - is declaring: True if the identifier is used to declare. - is autoload: True if the identifier is declared with autoload. - is function: True if the identifier is a function. Vim distinguish between function identifiers and variable identifiers. - is declarative parameter: True if the identifier is a declarative parameter. For example, the identifier "param" in Func(param) is a declarative parameter. - is on string expression context: True if the variable is on the string expression context. The string expression context is the string content on the 2nd argument of the map or filter function. - is lambda argument: True if the identifier is a lambda argument. """ redir_assignment_parser = RedirAssignmentParser() ast_with_parsed_redir = redir_assignment_parser.process(ast) map_and_filter_parser = CallNodeParser() ast_with_parse_map_and_filter_and_redir = \ map_and_filter_parser.process(ast_with_parsed_redir) traverse( ast_with_parse_map_and_filter_and_redir, on_enter=lambda node: self._enter_handler( node, is_on_lambda_str=None, is_on_lambda_body=None, ) ) return ast
python
def attach_identifier_attributes(self, ast): # type: (Dict[str, Any]) -> Dict[str, Any] """ Attach 5 flags to the AST. - is dynamic: True if the identifier name can be determined by static analysis. - is member: True if the identifier is a member of a subscription/dot/slice node. - is declaring: True if the identifier is used to declare. - is autoload: True if the identifier is declared with autoload. - is function: True if the identifier is a function. Vim distinguish between function identifiers and variable identifiers. - is declarative parameter: True if the identifier is a declarative parameter. For example, the identifier "param" in Func(param) is a declarative parameter. - is on string expression context: True if the variable is on the string expression context. The string expression context is the string content on the 2nd argument of the map or filter function. - is lambda argument: True if the identifier is a lambda argument. """ redir_assignment_parser = RedirAssignmentParser() ast_with_parsed_redir = redir_assignment_parser.process(ast) map_and_filter_parser = CallNodeParser() ast_with_parse_map_and_filter_and_redir = \ map_and_filter_parser.process(ast_with_parsed_redir) traverse( ast_with_parse_map_and_filter_and_redir, on_enter=lambda node: self._enter_handler( node, is_on_lambda_str=None, is_on_lambda_body=None, ) ) return ast
[ "def", "attach_identifier_attributes", "(", "self", ",", "ast", ")", ":", "redir_assignment_parser", "=", "RedirAssignmentParser", "(", ")", "ast_with_parsed_redir", "=", "redir_assignment_parser", ".", "process", "(", "ast", ")", "map_and_filter_parser", "=", "CallNode...
Attach 5 flags to the AST. - is dynamic: True if the identifier name can be determined by static analysis. - is member: True if the identifier is a member of a subscription/dot/slice node. - is declaring: True if the identifier is used to declare. - is autoload: True if the identifier is declared with autoload. - is function: True if the identifier is a function. Vim distinguish between function identifiers and variable identifiers. - is declarative parameter: True if the identifier is a declarative parameter. For example, the identifier "param" in Func(param) is a declarative parameter. - is on string expression context: True if the variable is on the string expression context. The string expression context is the string content on the 2nd argument of the map or filter function. - is lambda argument: True if the identifier is a lambda argument.
[ "Attach", "5", "flags", "to", "the", "AST", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/plugin/scope_plugin/identifier_classifier.py#L118-L150
train
Kuniwak/vint
vint/linting/policy/abstract_policy.py
AbstractPolicy.create_violation_report
def create_violation_report(self, node, lint_context): """ Returns a violation report for the node. """ return { 'name': self.name, 'level': self.level, 'description': self.description, 'reference': self.reference, 'position': { 'line': node['pos']['lnum'], 'column': node['pos']['col'], 'path': lint_context['lint_target'].path, }, }
python
def create_violation_report(self, node, lint_context): """ Returns a violation report for the node. """ return { 'name': self.name, 'level': self.level, 'description': self.description, 'reference': self.reference, 'position': { 'line': node['pos']['lnum'], 'column': node['pos']['col'], 'path': lint_context['lint_target'].path, }, }
[ "def", "create_violation_report", "(", "self", ",", "node", ",", "lint_context", ")", ":", "return", "{", "'name'", ":", "self", ".", "name", ",", "'level'", ":", "self", ".", "level", ",", "'description'", ":", "self", ".", "description", ",", "'reference...
Returns a violation report for the node.
[ "Returns", "a", "violation", "report", "for", "the", "node", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/linting/policy/abstract_policy.py#L22-L34
train
Kuniwak/vint
vint/linting/policy/abstract_policy.py
AbstractPolicy.get_policy_config
def get_policy_config(self, lint_context): """ Returns a config of the concrete policy. For example, a config of ProhibitSomethingEvil is located on config.policies.ProhibitSomethingEvil. """ policy_config = lint_context['config']\ .get('policies', {})\ .get(self.__class__.__name__, {}) return policy_config
python
def get_policy_config(self, lint_context): """ Returns a config of the concrete policy. For example, a config of ProhibitSomethingEvil is located on config.policies.ProhibitSomethingEvil. """ policy_config = lint_context['config']\ .get('policies', {})\ .get(self.__class__.__name__, {}) return policy_config
[ "def", "get_policy_config", "(", "self", ",", "lint_context", ")", ":", "policy_config", "=", "lint_context", "[", "'config'", "]", ".", "get", "(", "'policies'", ",", "{", "}", ")", ".", "get", "(", "self", ".", "__class__", ".", "__name__", ",", "{", ...
Returns a config of the concrete policy. For example, a config of ProhibitSomethingEvil is located on config.policies.ProhibitSomethingEvil.
[ "Returns", "a", "config", "of", "the", "concrete", "policy", ".", "For", "example", "a", "config", "of", "ProhibitSomethingEvil", "is", "located", "on", "config", ".", "policies", ".", "ProhibitSomethingEvil", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/linting/policy/abstract_policy.py#L37-L46
train
Kuniwak/vint
vint/linting/policy/abstract_policy.py
AbstractPolicy.get_violation_if_found
def get_violation_if_found(self, node, lint_context): """ Returns a violation if the node is invalid. """ if self.is_valid(node, lint_context): return None return self.create_violation_report(node, lint_context)
python
def get_violation_if_found(self, node, lint_context): """ Returns a violation if the node is invalid. """ if self.is_valid(node, lint_context): return None return self.create_violation_report(node, lint_context)
[ "def", "get_violation_if_found", "(", "self", ",", "node", ",", "lint_context", ")", ":", "if", "self", ".", "is_valid", "(", "node", ",", "lint_context", ")", ":", "return", "None", "return", "self", ".", "create_violation_report", "(", "node", ",", "lint_c...
Returns a violation if the node is invalid.
[ "Returns", "a", "violation", "if", "the", "node", "is", "invalid", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/linting/policy/abstract_policy.py#L49-L54
train
Kuniwak/vint
vint/bootstrap.py
import_all_policies
def import_all_policies(): """ Import all policies that were registered by vint.linting.policy_registry. Dynamic policy importing is comprised of the 3 steps 1. Try to import all policy modules (then we can't know what policies exist) 2. In policy module, register itself by using vint.linting.policy_registry 3. After all policies registered by itself, we can get policy classes """ pkg_name = _get_policy_package_name_for_test() pkg_path_list = pkg_name.split('.') pkg_path = str(Path(_get_vint_root(), *pkg_path_list).resolve()) for _, module_name, is_pkg in pkgutil.iter_modules([pkg_path]): if not is_pkg: module_fqn = pkg_name + '.' + module_name logging.debug('Loading the policy module: `{fqn}`'.format(fqn=module_fqn)) importlib.import_module(module_fqn)
python
def import_all_policies(): """ Import all policies that were registered by vint.linting.policy_registry. Dynamic policy importing is comprised of the 3 steps 1. Try to import all policy modules (then we can't know what policies exist) 2. In policy module, register itself by using vint.linting.policy_registry 3. After all policies registered by itself, we can get policy classes """ pkg_name = _get_policy_package_name_for_test() pkg_path_list = pkg_name.split('.') pkg_path = str(Path(_get_vint_root(), *pkg_path_list).resolve()) for _, module_name, is_pkg in pkgutil.iter_modules([pkg_path]): if not is_pkg: module_fqn = pkg_name + '.' + module_name logging.debug('Loading the policy module: `{fqn}`'.format(fqn=module_fqn)) importlib.import_module(module_fqn)
[ "def", "import_all_policies", "(", ")", ":", "pkg_name", "=", "_get_policy_package_name_for_test", "(", ")", "pkg_path_list", "=", "pkg_name", ".", "split", "(", "'.'", ")", "pkg_path", "=", "str", "(", "Path", "(", "_get_vint_root", "(", ")", ",", "*", "pkg...
Import all policies that were registered by vint.linting.policy_registry. Dynamic policy importing is comprised of the 3 steps 1. Try to import all policy modules (then we can't know what policies exist) 2. In policy module, register itself by using vint.linting.policy_registry 3. After all policies registered by itself, we can get policy classes
[ "Import", "all", "policies", "that", "were", "registered", "by", "vint", ".", "linting", ".", "policy_registry", "." ]
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/bootstrap.py#L24-L41
train
Kuniwak/vint
vint/ast/plugin/scope_plugin/scope_linker.py
ScopeLinker.process
def process(self, ast): # type: (Dict[str, Any]) -> None """ Build a scope tree and links between scopes and identifiers by the specified ast. You can access the built scope tree and the built links by .scope_tree and .link_registry. """ id_classifier = IdentifierClassifier() attached_ast = id_classifier.attach_identifier_attributes(ast) # We are already in script local scope. self._scope_tree_builder.enter_new_scope(ScopeVisibility.SCRIPT_LOCAL) traverse(attached_ast, on_enter=self._enter_handler, on_leave=self._leave_handler) self.scope_tree = self._scope_tree_builder.get_global_scope() self.link_registry = self._scope_tree_builder.link_registry
python
def process(self, ast): # type: (Dict[str, Any]) -> None """ Build a scope tree and links between scopes and identifiers by the specified ast. You can access the built scope tree and the built links by .scope_tree and .link_registry. """ id_classifier = IdentifierClassifier() attached_ast = id_classifier.attach_identifier_attributes(ast) # We are already in script local scope. self._scope_tree_builder.enter_new_scope(ScopeVisibility.SCRIPT_LOCAL) traverse(attached_ast, on_enter=self._enter_handler, on_leave=self._leave_handler) self.scope_tree = self._scope_tree_builder.get_global_scope() self.link_registry = self._scope_tree_builder.link_registry
[ "def", "process", "(", "self", ",", "ast", ")", ":", "id_classifier", "=", "IdentifierClassifier", "(", ")", "attached_ast", "=", "id_classifier", ".", "attach_identifier_attributes", "(", "ast", ")", "self", ".", "_scope_tree_builder", ".", "enter_new_scope", "("...
Build a scope tree and links between scopes and identifiers by the specified ast. You can access the built scope tree and the built links by .scope_tree and .link_registry.
[ "Build", "a", "scope", "tree", "and", "links", "between", "scopes", "and", "identifiers", "by", "the", "specified", "ast", ".", "You", "can", "access", "the", "built", "scope", "tree", "and", "the", "built", "links", "by", ".", "scope_tree", "and", ".", ...
db29337d859d88239c282c2e9d84c858f23a4a09
https://github.com/Kuniwak/vint/blob/db29337d859d88239c282c2e9d84c858f23a4a09/vint/ast/plugin/scope_plugin/scope_linker.py#L326-L342
train
mozilla/mozdownload
mozdownload/cli.py
cli
def cli(argv=None): """CLI entry point for mozdownload.""" kwargs = parse_arguments(argv or sys.argv[1:]) log_level = kwargs.pop('log_level') logging.basicConfig(format='%(levelname)s | %(message)s', level=log_level) logger = logging.getLogger(__name__) # Configure logging levels for sub modules. Set to ERROR by default. sub_log_level = logging.ERROR if log_level == logging.getLevelName(logging.DEBUG): sub_log_level = logging.DEBUG logging.getLogger('redo').setLevel(sub_log_level) logging.getLogger('requests').setLevel(sub_log_level) logging.getLogger('thclient').setLevel(sub_log_level) try: scraper_type = kwargs.pop('scraper_type') # If a URL has been specified use the direct scraper if kwargs.get('url'): scraper_type = 'direct' build = factory.FactoryScraper(scraper_type, **kwargs) if kwargs.get('print_url'): logger.info(build.url) else: build.download() except KeyboardInterrupt: logger.error('Download interrupted by the user')
python
def cli(argv=None): """CLI entry point for mozdownload.""" kwargs = parse_arguments(argv or sys.argv[1:]) log_level = kwargs.pop('log_level') logging.basicConfig(format='%(levelname)s | %(message)s', level=log_level) logger = logging.getLogger(__name__) # Configure logging levels for sub modules. Set to ERROR by default. sub_log_level = logging.ERROR if log_level == logging.getLevelName(logging.DEBUG): sub_log_level = logging.DEBUG logging.getLogger('redo').setLevel(sub_log_level) logging.getLogger('requests').setLevel(sub_log_level) logging.getLogger('thclient').setLevel(sub_log_level) try: scraper_type = kwargs.pop('scraper_type') # If a URL has been specified use the direct scraper if kwargs.get('url'): scraper_type = 'direct' build = factory.FactoryScraper(scraper_type, **kwargs) if kwargs.get('print_url'): logger.info(build.url) else: build.download() except KeyboardInterrupt: logger.error('Download interrupted by the user')
[ "def", "cli", "(", "argv", "=", "None", ")", ":", "kwargs", "=", "parse_arguments", "(", "argv", "or", "sys", ".", "argv", "[", "1", ":", "]", ")", "log_level", "=", "kwargs", ".", "pop", "(", "'log_level'", ")", "logging", ".", "basicConfig", "(", ...
CLI entry point for mozdownload.
[ "CLI", "entry", "point", "for", "mozdownload", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/cli.py#L146-L175
train
mozilla/mozdownload
mozdownload/treeherder.py
Treeherder.query_builds_by_revision
def query_builds_by_revision(self, revision, job_type_name='Build', debug_build=False): """Retrieve build folders for a given revision with the help of Treeherder. :param revision: Revision of the build to download. :param job_type_name: Name of the job to look for. For builds it should be 'Build', 'Nightly', and 'L10n Nightly'. Defaults to `Build`. :param debug_build: Download a debug build. """ builds = set() try: self.logger.info('Querying {url} for list of builds for revision: {revision}'.format( url=self.client.server_url, revision=revision)) # Retrieve the option hash to filter for type of build (opt, and debug for now) option_hash = None for key, values in self.client.get_option_collection_hash().iteritems(): for value in values: if value['name'] == ('debug' if debug_build else 'opt'): option_hash = key break if option_hash: break resultsets = self.client.get_pushes(self.branch, revision=revision) # Set filters to speed-up querying jobs kwargs = { 'option_collection_hash': option_hash, 'job_type_name': job_type_name, 'exclusion_profile': False, } kwargs.update(self.get_treeherder_platform(self.platform)) for resultset in resultsets: kwargs.update({'result_set_id': resultset['id']}) jobs = self.client.get_jobs(self.branch, **kwargs) for job in jobs: log_urls = self.client.get_job_log_url(self.branch, job_id=job['id']) for log_url in log_urls: if self.application in log_url['url']: self.logger.debug('Found build folder: {}'.format(log_url['url'])) builds.update([log_url['url']]) except Exception: self.logger.exception('Failure occurred when querying Treeherder for builds') return list(builds)
python
def query_builds_by_revision(self, revision, job_type_name='Build', debug_build=False): """Retrieve build folders for a given revision with the help of Treeherder. :param revision: Revision of the build to download. :param job_type_name: Name of the job to look for. For builds it should be 'Build', 'Nightly', and 'L10n Nightly'. Defaults to `Build`. :param debug_build: Download a debug build. """ builds = set() try: self.logger.info('Querying {url} for list of builds for revision: {revision}'.format( url=self.client.server_url, revision=revision)) # Retrieve the option hash to filter for type of build (opt, and debug for now) option_hash = None for key, values in self.client.get_option_collection_hash().iteritems(): for value in values: if value['name'] == ('debug' if debug_build else 'opt'): option_hash = key break if option_hash: break resultsets = self.client.get_pushes(self.branch, revision=revision) # Set filters to speed-up querying jobs kwargs = { 'option_collection_hash': option_hash, 'job_type_name': job_type_name, 'exclusion_profile': False, } kwargs.update(self.get_treeherder_platform(self.platform)) for resultset in resultsets: kwargs.update({'result_set_id': resultset['id']}) jobs = self.client.get_jobs(self.branch, **kwargs) for job in jobs: log_urls = self.client.get_job_log_url(self.branch, job_id=job['id']) for log_url in log_urls: if self.application in log_url['url']: self.logger.debug('Found build folder: {}'.format(log_url['url'])) builds.update([log_url['url']]) except Exception: self.logger.exception('Failure occurred when querying Treeherder for builds') return list(builds)
[ "def", "query_builds_by_revision", "(", "self", ",", "revision", ",", "job_type_name", "=", "'Build'", ",", "debug_build", "=", "False", ")", ":", "builds", "=", "set", "(", ")", "try", ":", "self", ".", "logger", ".", "info", "(", "'Querying {url} for list ...
Retrieve build folders for a given revision with the help of Treeherder. :param revision: Revision of the build to download. :param job_type_name: Name of the job to look for. For builds it should be 'Build', 'Nightly', and 'L10n Nightly'. Defaults to `Build`. :param debug_build: Download a debug build.
[ "Retrieve", "build", "folders", "for", "a", "given", "revision", "with", "the", "help", "of", "Treeherder", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/treeherder.py#L60-L107
train
mozilla/mozdownload
mozdownload/utils.py
urljoin
def urljoin(*fragments): """Concatenate multi part strings into urls.""" # Strip possible already existent final slashes of fragments except for the last one parts = [fragment.rstrip('/') for fragment in fragments[:len(fragments) - 1]] parts.append(fragments[-1]) return '/'.join(parts)
python
def urljoin(*fragments): """Concatenate multi part strings into urls.""" # Strip possible already existent final slashes of fragments except for the last one parts = [fragment.rstrip('/') for fragment in fragments[:len(fragments) - 1]] parts.append(fragments[-1]) return '/'.join(parts)
[ "def", "urljoin", "(", "*", "fragments", ")", ":", "parts", "=", "[", "fragment", ".", "rstrip", "(", "'/'", ")", "for", "fragment", "in", "fragments", "[", ":", "len", "(", "fragments", ")", "-", "1", "]", "]", "parts", ".", "append", "(", "fragme...
Concatenate multi part strings into urls.
[ "Concatenate", "multi", "part", "strings", "into", "urls", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/utils.py#L12-L18
train
mozilla/mozdownload
mozdownload/utils.py
create_md5
def create_md5(path): """Create the md5 hash of a file using the hashlib library.""" m = hashlib.md5() # rb necessary to run correctly in windows. with open(path, "rb") as f: while True: data = f.read(8192) if not data: break m.update(data) return m.hexdigest()
python
def create_md5(path): """Create the md5 hash of a file using the hashlib library.""" m = hashlib.md5() # rb necessary to run correctly in windows. with open(path, "rb") as f: while True: data = f.read(8192) if not data: break m.update(data) return m.hexdigest()
[ "def", "create_md5", "(", "path", ")", ":", "m", "=", "hashlib", ".", "md5", "(", ")", "with", "open", "(", "path", ",", "\"rb\"", ")", "as", "f", ":", "while", "True", ":", "data", "=", "f", ".", "read", "(", "8192", ")", "if", "not", "data", ...
Create the md5 hash of a file using the hashlib library.
[ "Create", "the", "md5", "hash", "of", "a", "file", "using", "the", "hashlib", "library", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/utils.py#L21-L32
train
mozilla/mozdownload
mozdownload/parser.py
DirectoryParser.filter
def filter(self, filter): """Filter entries by calling function or applying regex.""" if hasattr(filter, '__call__'): return [entry for entry in self.entries if filter(entry)] else: pattern = re.compile(filter, re.IGNORECASE) return [entry for entry in self.entries if pattern.match(entry)]
python
def filter(self, filter): """Filter entries by calling function or applying regex.""" if hasattr(filter, '__call__'): return [entry for entry in self.entries if filter(entry)] else: pattern = re.compile(filter, re.IGNORECASE) return [entry for entry in self.entries if pattern.match(entry)]
[ "def", "filter", "(", "self", ",", "filter", ")", ":", "if", "hasattr", "(", "filter", ",", "'__call__'", ")", ":", "return", "[", "entry", "for", "entry", "in", "self", ".", "entries", "if", "filter", "(", "entry", ")", "]", "else", ":", "pattern", ...
Filter entries by calling function or applying regex.
[ "Filter", "entries", "by", "calling", "function", "or", "applying", "regex", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/parser.py#L53-L59
train
mozilla/mozdownload
mozdownload/parser.py
DirectoryParser.handle_starttag
def handle_starttag(self, tag, attrs): """Callback for when a tag gets opened.""" if not tag == 'a': return for attr in attrs: if attr[0] == 'href': # Links look like: /pub/firefox/nightly/2015/ # We have to trim the fragment down to the last item. Also to ensure we # always get it, we remove a possible final slash first url = urllib.unquote(attr[1]) self.active_url = url.rstrip('/').split('/')[-1] return
python
def handle_starttag(self, tag, attrs): """Callback for when a tag gets opened.""" if not tag == 'a': return for attr in attrs: if attr[0] == 'href': # Links look like: /pub/firefox/nightly/2015/ # We have to trim the fragment down to the last item. Also to ensure we # always get it, we remove a possible final slash first url = urllib.unquote(attr[1]) self.active_url = url.rstrip('/').split('/')[-1] return
[ "def", "handle_starttag", "(", "self", ",", "tag", ",", "attrs", ")", ":", "if", "not", "tag", "==", "'a'", ":", "return", "for", "attr", "in", "attrs", ":", "if", "attr", "[", "0", "]", "==", "'href'", ":", "url", "=", "urllib", ".", "unquote", ...
Callback for when a tag gets opened.
[ "Callback", "for", "when", "a", "tag", "gets", "opened", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/parser.py#L61-L74
train
mozilla/mozdownload
mozdownload/parser.py
DirectoryParser.handle_data
def handle_data(self, data): """Callback when the data of a tag has been collected.""" # Only process the data when we are in an active a tag and have an URL. if not self.active_url: return # The visible text can have a final slash so strip it off if data.strip('/') == self.active_url: self.entries.append(self.active_url)
python
def handle_data(self, data): """Callback when the data of a tag has been collected.""" # Only process the data when we are in an active a tag and have an URL. if not self.active_url: return # The visible text can have a final slash so strip it off if data.strip('/') == self.active_url: self.entries.append(self.active_url)
[ "def", "handle_data", "(", "self", ",", "data", ")", ":", "if", "not", "self", ".", "active_url", ":", "return", "if", "data", ".", "strip", "(", "'/'", ")", "==", "self", ".", "active_url", ":", "self", ".", "entries", ".", "append", "(", "self", ...
Callback when the data of a tag has been collected.
[ "Callback", "when", "the", "data", "of", "a", "tag", "has", "been", "collected", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/parser.py#L81-L89
train
mozilla/mozdownload
mozdownload/timezones.py
PacificTimezone.dst
def dst(self, dt): """Calculate delta for daylight saving.""" # Daylight saving starts on the second Sunday of March at 2AM standard dst_start_date = self.first_sunday(dt.year, 3) + timedelta(days=7) \ + timedelta(hours=2) # Daylight saving ends on the first Sunday of November at 2AM standard dst_end_date = self.first_sunday(dt.year, 11) + timedelta(hours=2) if dst_start_date <= dt.replace(tzinfo=None) < dst_end_date: return timedelta(hours=1) else: return timedelta(0)
python
def dst(self, dt): """Calculate delta for daylight saving.""" # Daylight saving starts on the second Sunday of March at 2AM standard dst_start_date = self.first_sunday(dt.year, 3) + timedelta(days=7) \ + timedelta(hours=2) # Daylight saving ends on the first Sunday of November at 2AM standard dst_end_date = self.first_sunday(dt.year, 11) + timedelta(hours=2) if dst_start_date <= dt.replace(tzinfo=None) < dst_end_date: return timedelta(hours=1) else: return timedelta(0)
[ "def", "dst", "(", "self", ",", "dt", ")", ":", "dst_start_date", "=", "self", ".", "first_sunday", "(", "dt", ".", "year", ",", "3", ")", "+", "timedelta", "(", "days", "=", "7", ")", "+", "timedelta", "(", "hours", "=", "2", ")", "dst_end_date", ...
Calculate delta for daylight saving.
[ "Calculate", "delta", "for", "daylight", "saving", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/timezones.py#L23-L34
train
mozilla/mozdownload
mozdownload/timezones.py
PacificTimezone.first_sunday
def first_sunday(self, year, month): """Get the first sunday of a month.""" date = datetime(year, month, 1, 0) days_until_sunday = 6 - date.weekday() return date + timedelta(days=days_until_sunday)
python
def first_sunday(self, year, month): """Get the first sunday of a month.""" date = datetime(year, month, 1, 0) days_until_sunday = 6 - date.weekday() return date + timedelta(days=days_until_sunday)
[ "def", "first_sunday", "(", "self", ",", "year", ",", "month", ")", ":", "date", "=", "datetime", "(", "year", ",", "month", ",", "1", ",", "0", ")", "days_until_sunday", "=", "6", "-", "date", ".", "weekday", "(", ")", "return", "date", "+", "time...
Get the first sunday of a month.
[ "Get", "the", "first", "sunday", "of", "a", "month", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/timezones.py#L36-L41
train
mozilla/mozdownload
mozdownload/scraper.py
Scraper.binary
def binary(self): """Return the name of the build.""" def _get_binary(): # Retrieve all entries from the remote virtual folder parser = self._create_directory_parser(self.path) if not parser.entries: raise errors.NotFoundError('No entries found', self.path) # Download the first matched directory entry pattern = re.compile(self.binary_regex, re.IGNORECASE) for entry in parser.entries: try: self._binary = pattern.match(entry).group() break except Exception: # No match, continue with next entry continue else: raise errors.NotFoundError("Binary not found in folder", self.path) self._retry_check_404(_get_binary) return self._binary
python
def binary(self): """Return the name of the build.""" def _get_binary(): # Retrieve all entries from the remote virtual folder parser = self._create_directory_parser(self.path) if not parser.entries: raise errors.NotFoundError('No entries found', self.path) # Download the first matched directory entry pattern = re.compile(self.binary_regex, re.IGNORECASE) for entry in parser.entries: try: self._binary = pattern.match(entry).group() break except Exception: # No match, continue with next entry continue else: raise errors.NotFoundError("Binary not found in folder", self.path) self._retry_check_404(_get_binary) return self._binary
[ "def", "binary", "(", "self", ")", ":", "def", "_get_binary", "(", ")", ":", "parser", "=", "self", ".", "_create_directory_parser", "(", "self", ".", "path", ")", "if", "not", "parser", ".", "entries", ":", "raise", "errors", ".", "NotFoundError", "(", ...
Return the name of the build.
[ "Return", "the", "name", "of", "the", "build", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L165-L188
train
mozilla/mozdownload
mozdownload/scraper.py
Scraper.url
def url(self): """Return the URL of the build.""" return urllib.quote(urljoin(self.path, self.binary), safe='%/:=&?~#+!$,;\'@()*[]|')
python
def url(self): """Return the URL of the build.""" return urllib.quote(urljoin(self.path, self.binary), safe='%/:=&?~#+!$,;\'@()*[]|')
[ "def", "url", "(", "self", ")", ":", "return", "urllib", ".", "quote", "(", "urljoin", "(", "self", ".", "path", ",", "self", ".", "binary", ")", ",", "safe", "=", "'%/:=&?~#+!$,;\\'@()*[]|'", ")" ]
Return the URL of the build.
[ "Return", "the", "URL", "of", "the", "build", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L196-L199
train
mozilla/mozdownload
mozdownload/scraper.py
Scraper.filename
def filename(self): """Return the local filename of the build.""" if self._filename is None: if os.path.splitext(self.destination)[1]: # If the filename has been given make use of it target_file = self.destination else: # Otherwise create it from the build details target_file = os.path.join(self.destination, self.build_filename(self.binary)) self._filename = os.path.abspath(target_file) return self._filename
python
def filename(self): """Return the local filename of the build.""" if self._filename is None: if os.path.splitext(self.destination)[1]: # If the filename has been given make use of it target_file = self.destination else: # Otherwise create it from the build details target_file = os.path.join(self.destination, self.build_filename(self.binary)) self._filename = os.path.abspath(target_file) return self._filename
[ "def", "filename", "(", "self", ")", ":", "if", "self", ".", "_filename", "is", "None", ":", "if", "os", ".", "path", ".", "splitext", "(", "self", ".", "destination", ")", "[", "1", "]", ":", "target_file", "=", "self", ".", "destination", "else", ...
Return the local filename of the build.
[ "Return", "the", "local", "filename", "of", "the", "build", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L217-L230
train
mozilla/mozdownload
mozdownload/scraper.py
Scraper.download
def download(self): """Download the specified file.""" def total_seconds(td): # Keep backward compatibility with Python 2.6 which doesn't have # this method if hasattr(td, 'total_seconds'): return td.total_seconds() else: return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10 ** 6 # Don't re-download the file if os.path.isfile(os.path.abspath(self.filename)): self.logger.info("File has already been downloaded: %s" % (self.filename)) return self.filename directory = os.path.dirname(self.filename) if not os.path.isdir(directory): os.makedirs(directory) self.logger.info('Downloading from: %s' % self.url) self.logger.info('Saving as: %s' % self.filename) tmp_file = self.filename + ".part" def _download(): try: start_time = datetime.now() # Enable streaming mode so we can download content in chunks r = self.session.get(self.url, stream=True) r.raise_for_status() content_length = r.headers.get('Content-length') # ValueError: Value out of range if only total_size given if content_length: total_size = int(content_length.strip()) max_value = ((total_size / CHUNK_SIZE) + 1) * CHUNK_SIZE bytes_downloaded = 0 log_level = self.logger.getEffectiveLevel() if log_level <= logging.INFO and content_length: widgets = [pb.Percentage(), ' ', pb.Bar(), ' ', pb.ETA(), ' ', pb.FileTransferSpeed()] pbar = pb.ProgressBar(widgets=widgets, maxval=max_value).start() with open(tmp_file, 'wb') as f: for chunk in r.iter_content(CHUNK_SIZE): f.write(chunk) bytes_downloaded += CHUNK_SIZE if log_level <= logging.INFO and content_length: pbar.update(bytes_downloaded) t1 = total_seconds(datetime.now() - start_time) if self.timeout_download and \ t1 >= self.timeout_download: raise errors.TimeoutError if log_level <= logging.INFO and content_length: pbar.finish() except Exception: if os.path.isfile(tmp_file): os.remove(tmp_file) raise self._retry(_download, retry_exceptions=(requests.exceptions.RequestException, errors.TimeoutError)) os.rename(tmp_file, self.filename) return 
self.filename
python
def download(self): """Download the specified file.""" def total_seconds(td): # Keep backward compatibility with Python 2.6 which doesn't have # this method if hasattr(td, 'total_seconds'): return td.total_seconds() else: return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10 ** 6 # Don't re-download the file if os.path.isfile(os.path.abspath(self.filename)): self.logger.info("File has already been downloaded: %s" % (self.filename)) return self.filename directory = os.path.dirname(self.filename) if not os.path.isdir(directory): os.makedirs(directory) self.logger.info('Downloading from: %s' % self.url) self.logger.info('Saving as: %s' % self.filename) tmp_file = self.filename + ".part" def _download(): try: start_time = datetime.now() # Enable streaming mode so we can download content in chunks r = self.session.get(self.url, stream=True) r.raise_for_status() content_length = r.headers.get('Content-length') # ValueError: Value out of range if only total_size given if content_length: total_size = int(content_length.strip()) max_value = ((total_size / CHUNK_SIZE) + 1) * CHUNK_SIZE bytes_downloaded = 0 log_level = self.logger.getEffectiveLevel() if log_level <= logging.INFO and content_length: widgets = [pb.Percentage(), ' ', pb.Bar(), ' ', pb.ETA(), ' ', pb.FileTransferSpeed()] pbar = pb.ProgressBar(widgets=widgets, maxval=max_value).start() with open(tmp_file, 'wb') as f: for chunk in r.iter_content(CHUNK_SIZE): f.write(chunk) bytes_downloaded += CHUNK_SIZE if log_level <= logging.INFO and content_length: pbar.update(bytes_downloaded) t1 = total_seconds(datetime.now() - start_time) if self.timeout_download and \ t1 >= self.timeout_download: raise errors.TimeoutError if log_level <= logging.INFO and content_length: pbar.finish() except Exception: if os.path.isfile(tmp_file): os.remove(tmp_file) raise self._retry(_download, retry_exceptions=(requests.exceptions.RequestException, errors.TimeoutError)) os.rename(tmp_file, self.filename) return 
self.filename
[ "def", "download", "(", "self", ")", ":", "def", "total_seconds", "(", "td", ")", ":", "if", "hasattr", "(", "td", ",", "'total_seconds'", ")", ":", "return", "td", ".", "total_seconds", "(", ")", "else", ":", "return", "(", "td", ".", "microseconds", ...
Download the specified file.
[ "Download", "the", "specified", "file", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L249-L324
train
mozilla/mozdownload
mozdownload/scraper.py
Scraper.show_matching_builds
def show_matching_builds(self, builds): """Output the matching builds.""" self.logger.info('Found %s build%s: %s' % ( len(builds), len(builds) > 1 and 's' or '', len(builds) > 10 and ' ... '.join([', '.join(builds[:5]), ', '.join(builds[-5:])]) or ', '.join(builds)))
python
def show_matching_builds(self, builds): """Output the matching builds.""" self.logger.info('Found %s build%s: %s' % ( len(builds), len(builds) > 1 and 's' or '', len(builds) > 10 and ' ... '.join([', '.join(builds[:5]), ', '.join(builds[-5:])]) or ', '.join(builds)))
[ "def", "show_matching_builds", "(", "self", ",", "builds", ")", ":", "self", ".", "logger", ".", "info", "(", "'Found %s build%s: %s'", "%", "(", "len", "(", "builds", ")", ",", "len", "(", "builds", ")", ">", "1", "and", "'s'", "or", "''", ",", "len...
Output the matching builds.
[ "Output", "the", "matching", "builds", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L326-L333
train
mozilla/mozdownload
mozdownload/scraper.py
DailyScraper.is_build_dir
def is_build_dir(self, folder_name): """Return whether or not the given dir contains a build.""" # Cannot move up to base scraper due to parser.entries call in # get_build_info_for_date (see below) url = '%s/' % urljoin(self.base_url, self.monthly_build_list_regex, folder_name) if self.application in APPLICATIONS_MULTI_LOCALE \ and self.locale != 'multi': url = '%s/' % urljoin(url, self.locale) parser = self._create_directory_parser(url) pattern = re.compile(self.binary_regex, re.IGNORECASE) for entry in parser.entries: try: pattern.match(entry).group() return True except Exception: # No match, continue with next entry continue return False
python
def is_build_dir(self, folder_name): """Return whether or not the given dir contains a build.""" # Cannot move up to base scraper due to parser.entries call in # get_build_info_for_date (see below) url = '%s/' % urljoin(self.base_url, self.monthly_build_list_regex, folder_name) if self.application in APPLICATIONS_MULTI_LOCALE \ and self.locale != 'multi': url = '%s/' % urljoin(url, self.locale) parser = self._create_directory_parser(url) pattern = re.compile(self.binary_regex, re.IGNORECASE) for entry in parser.entries: try: pattern.match(entry).group() return True except Exception: # No match, continue with next entry continue return False
[ "def", "is_build_dir", "(", "self", ",", "folder_name", ")", ":", "url", "=", "'%s/'", "%", "urljoin", "(", "self", ".", "base_url", ",", "self", ".", "monthly_build_list_regex", ",", "folder_name", ")", "if", "self", ".", "application", "in", "APPLICATIONS_...
Return whether or not the given dir contains a build.
[ "Return", "whether", "or", "not", "the", "given", "dir", "contains", "a", "build", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L435-L455
train
mozilla/mozdownload
mozdownload/scraper.py
DailyScraper.get_build_info_for_date
def get_build_info_for_date(self, date, build_index=None): """Return the build information for a given date.""" url = urljoin(self.base_url, self.monthly_build_list_regex) has_time = date and date.time() self.logger.info('Retrieving list of builds from %s' % url) parser = self._create_directory_parser(url) regex = r'%(DATE)s-(\d+-)+%(BRANCH)s%(L10N)s%(PLATFORM)s$' % { 'DATE': date.strftime('%Y-%m-%d'), 'BRANCH': self.branch, # ensure to select the correct subfolder for localized builds 'L10N': '(-l10n)?' if self.locale_build else '', 'PLATFORM': '' if self.application not in ( 'fennec') else '-' + self.platform } parser.entries = parser.filter(regex) parser.entries = parser.filter(self.is_build_dir) if has_time: # If a time is included in the date, use it to determine the # build's index regex = r'.*%s.*' % date.strftime('%H-%M-%S') parser.entries = parser.filter(regex) if not parser.entries: date_format = '%Y-%m-%d-%H-%M-%S' if has_time else '%Y-%m-%d' message = 'Folder for builds on %s has not been found' % \ self.date.strftime(date_format) raise errors.NotFoundError(message, url) # If no index has been given, set it to the last build of the day. self.show_matching_builds(parser.entries) # If no index has been given, set it to the last build of the day. if build_index is None: # Find the most recent non-empty entry. build_index = len(parser.entries) for build in reversed(parser.entries): build_index -= 1 if not build_index or self.is_build_dir(build): break self.logger.info('Selected build: %s' % parser.entries[build_index]) return (parser.entries, build_index)
python
def get_build_info_for_date(self, date, build_index=None): """Return the build information for a given date.""" url = urljoin(self.base_url, self.monthly_build_list_regex) has_time = date and date.time() self.logger.info('Retrieving list of builds from %s' % url) parser = self._create_directory_parser(url) regex = r'%(DATE)s-(\d+-)+%(BRANCH)s%(L10N)s%(PLATFORM)s$' % { 'DATE': date.strftime('%Y-%m-%d'), 'BRANCH': self.branch, # ensure to select the correct subfolder for localized builds 'L10N': '(-l10n)?' if self.locale_build else '', 'PLATFORM': '' if self.application not in ( 'fennec') else '-' + self.platform } parser.entries = parser.filter(regex) parser.entries = parser.filter(self.is_build_dir) if has_time: # If a time is included in the date, use it to determine the # build's index regex = r'.*%s.*' % date.strftime('%H-%M-%S') parser.entries = parser.filter(regex) if not parser.entries: date_format = '%Y-%m-%d-%H-%M-%S' if has_time else '%Y-%m-%d' message = 'Folder for builds on %s has not been found' % \ self.date.strftime(date_format) raise errors.NotFoundError(message, url) # If no index has been given, set it to the last build of the day. self.show_matching_builds(parser.entries) # If no index has been given, set it to the last build of the day. if build_index is None: # Find the most recent non-empty entry. build_index = len(parser.entries) for build in reversed(parser.entries): build_index -= 1 if not build_index or self.is_build_dir(build): break self.logger.info('Selected build: %s' % parser.entries[build_index]) return (parser.entries, build_index)
[ "def", "get_build_info_for_date", "(", "self", ",", "date", ",", "build_index", "=", "None", ")", ":", "url", "=", "urljoin", "(", "self", ".", "base_url", ",", "self", ".", "monthly_build_list_regex", ")", "has_time", "=", "date", "and", "date", ".", "tim...
Return the build information for a given date.
[ "Return", "the", "build", "information", "for", "a", "given", "date", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L457-L500
train
mozilla/mozdownload
mozdownload/scraper.py
DailyScraper.monthly_build_list_regex
def monthly_build_list_regex(self): """Return the regex for the folder containing builds of a month.""" # Regex for possible builds for the given date return r'nightly/%(YEAR)s/%(MONTH)s/' % { 'YEAR': self.date.year, 'MONTH': str(self.date.month).zfill(2)}
python
def monthly_build_list_regex(self): """Return the regex for the folder containing builds of a month.""" # Regex for possible builds for the given date return r'nightly/%(YEAR)s/%(MONTH)s/' % { 'YEAR': self.date.year, 'MONTH': str(self.date.month).zfill(2)}
[ "def", "monthly_build_list_regex", "(", "self", ")", ":", "return", "r'nightly/%(YEAR)s/%(MONTH)s/'", "%", "{", "'YEAR'", ":", "self", ".", "date", ".", "year", ",", "'MONTH'", ":", "str", "(", "self", ".", "date", ".", "month", ")", ".", "zfill", "(", "...
Return the regex for the folder containing builds of a month.
[ "Return", "the", "regex", "for", "the", "folder", "containing", "builds", "of", "a", "month", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L544-L549
train
mozilla/mozdownload
mozdownload/scraper.py
DirectScraper.filename
def filename(self): """File name of the downloaded file.""" if os.path.splitext(self.destination)[1]: # If the filename has been given make use of it target_file = self.destination else: # Otherwise determine it from the url. parsed_url = urlparse(self.url) source_filename = (parsed_url.path.rpartition('/')[-1] or parsed_url.hostname) target_file = os.path.join(self.destination, source_filename) return os.path.abspath(target_file)
python
def filename(self): """File name of the downloaded file.""" if os.path.splitext(self.destination)[1]: # If the filename has been given make use of it target_file = self.destination else: # Otherwise determine it from the url. parsed_url = urlparse(self.url) source_filename = (parsed_url.path.rpartition('/')[-1] or parsed_url.hostname) target_file = os.path.join(self.destination, source_filename) return os.path.abspath(target_file)
[ "def", "filename", "(", "self", ")", ":", "if", "os", ".", "path", ".", "splitext", "(", "self", ".", "destination", ")", "[", "1", "]", ":", "target_file", "=", "self", ".", "destination", "else", ":", "parsed_url", "=", "urlparse", "(", "self", "."...
File name of the downloaded file.
[ "File", "name", "of", "the", "downloaded", "file", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L577-L589
train
mozilla/mozdownload
mozdownload/scraper.py
ReleaseScraper.query_versions
def query_versions(self, version=None): """Check specified version and resolve special values.""" if version not in RELEASE_AND_CANDIDATE_LATEST_VERSIONS: return [version] url = urljoin(self.base_url, 'releases/') parser = self._create_directory_parser(url) if version: versions = parser.filter(RELEASE_AND_CANDIDATE_LATEST_VERSIONS[version]) from distutils.version import LooseVersion versions.sort(key=LooseVersion) return [versions[-1]] else: return parser.entries
python
def query_versions(self, version=None): """Check specified version and resolve special values.""" if version not in RELEASE_AND_CANDIDATE_LATEST_VERSIONS: return [version] url = urljoin(self.base_url, 'releases/') parser = self._create_directory_parser(url) if version: versions = parser.filter(RELEASE_AND_CANDIDATE_LATEST_VERSIONS[version]) from distutils.version import LooseVersion versions.sort(key=LooseVersion) return [versions[-1]] else: return parser.entries
[ "def", "query_versions", "(", "self", ",", "version", "=", "None", ")", ":", "if", "version", "not", "in", "RELEASE_AND_CANDIDATE_LATEST_VERSIONS", ":", "return", "[", "version", "]", "url", "=", "urljoin", "(", "self", ".", "base_url", ",", "'releases/'", "...
Check specified version and resolve special values.
[ "Check", "specified", "version", "and", "resolve", "special", "values", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L657-L670
train
mozilla/mozdownload
mozdownload/scraper.py
TinderboxScraper.build_list_regex
def build_list_regex(self): """Return the regex for the folder which contains the list of builds.""" regex = 'tinderbox-builds/%(BRANCH)s-%(PLATFORM)s%(L10N)s%(DEBUG)s/' return regex % { 'BRANCH': self.branch, 'PLATFORM': '' if self.locale_build else self.platform_regex, 'L10N': 'l10n' if self.locale_build else '', 'DEBUG': '-debug' if self.debug_build else ''}
python
def build_list_regex(self): """Return the regex for the folder which contains the list of builds.""" regex = 'tinderbox-builds/%(BRANCH)s-%(PLATFORM)s%(L10N)s%(DEBUG)s/' return regex % { 'BRANCH': self.branch, 'PLATFORM': '' if self.locale_build else self.platform_regex, 'L10N': 'l10n' if self.locale_build else '', 'DEBUG': '-debug' if self.debug_build else ''}
[ "def", "build_list_regex", "(", "self", ")", ":", "regex", "=", "'tinderbox-builds/%(BRANCH)s-%(PLATFORM)s%(L10N)s%(DEBUG)s/'", "return", "regex", "%", "{", "'BRANCH'", ":", "self", ".", "branch", ",", "'PLATFORM'", ":", "''", "if", "self", ".", "locale_build", "e...
Return the regex for the folder which contains the list of builds.
[ "Return", "the", "regex", "for", "the", "folder", "which", "contains", "the", "list", "of", "builds", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L845-L853
train
mozilla/mozdownload
mozdownload/scraper.py
TinderboxScraper.date_matches
def date_matches(self, timestamp): """Determine whether the timestamp date is equal to the argument date.""" if self.date is None: return False timestamp = datetime.fromtimestamp(float(timestamp), self.timezone) if self.date.date() == timestamp.date(): return True return False
python
def date_matches(self, timestamp): """Determine whether the timestamp date is equal to the argument date.""" if self.date is None: return False timestamp = datetime.fromtimestamp(float(timestamp), self.timezone) if self.date.date() == timestamp.date(): return True return False
[ "def", "date_matches", "(", "self", ",", "timestamp", ")", ":", "if", "self", ".", "date", "is", "None", ":", "return", "False", "timestamp", "=", "datetime", ".", "fromtimestamp", "(", "float", "(", "timestamp", ")", ",", "self", ".", "timezone", ")", ...
Determine whether the timestamp date is equal to the argument date.
[ "Determine", "whether", "the", "timestamp", "date", "is", "equal", "to", "the", "argument", "date", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L855-L864
train
mozilla/mozdownload
mozdownload/scraper.py
TinderboxScraper.get_build_info_for_index
def get_build_info_for_index(self, build_index=None): """Get additional information for the build at the given index.""" url = urljoin(self.base_url, self.build_list_regex) self.logger.info('Retrieving list of builds from %s' % url) parser = self._create_directory_parser(url) parser.entries = parser.filter(r'^\d+$') if self.timestamp: # If a timestamp is given, retrieve the folder with the timestamp # as name parser.entries = self.timestamp in parser.entries and \ [self.timestamp] elif self.date: # If date is given, retrieve the subset of builds on that date parser.entries = filter(self.date_matches, parser.entries) if not parser.entries: message = 'No builds have been found' raise errors.NotFoundError(message, url) self.show_matching_builds(parser.entries) # If no index has been given, set it to the last build of the day. if build_index is None: # Find the most recent non-empty entry. build_index = len(parser.entries) for build in reversed(parser.entries): build_index -= 1 if not build_index or self.is_build_dir(build): break self.logger.info('Selected build: %s' % parser.entries[build_index]) return (parser.entries, build_index)
python
def get_build_info_for_index(self, build_index=None): """Get additional information for the build at the given index.""" url = urljoin(self.base_url, self.build_list_regex) self.logger.info('Retrieving list of builds from %s' % url) parser = self._create_directory_parser(url) parser.entries = parser.filter(r'^\d+$') if self.timestamp: # If a timestamp is given, retrieve the folder with the timestamp # as name parser.entries = self.timestamp in parser.entries and \ [self.timestamp] elif self.date: # If date is given, retrieve the subset of builds on that date parser.entries = filter(self.date_matches, parser.entries) if not parser.entries: message = 'No builds have been found' raise errors.NotFoundError(message, url) self.show_matching_builds(parser.entries) # If no index has been given, set it to the last build of the day. if build_index is None: # Find the most recent non-empty entry. build_index = len(parser.entries) for build in reversed(parser.entries): build_index -= 1 if not build_index or self.is_build_dir(build): break self.logger.info('Selected build: %s' % parser.entries[build_index]) return (parser.entries, build_index)
[ "def", "get_build_info_for_index", "(", "self", ",", "build_index", "=", "None", ")", ":", "url", "=", "urljoin", "(", "self", ".", "base_url", ",", "self", ".", "build_list_regex", ")", "self", ".", "logger", ".", "info", "(", "'Retrieving list of builds from...
Get additional information for the build at the given index.
[ "Get", "additional", "information", "for", "the", "build", "at", "the", "given", "index", "." ]
97796a028455bb5200434562d23b66d5a5eb537b
https://github.com/mozilla/mozdownload/blob/97796a028455bb5200434562d23b66d5a5eb537b/mozdownload/scraper.py#L900-L935
train
romanz/trezor-agent
libagent/device/ui.py
create_default_options_getter
def create_default_options_getter(): """Return current TTY and DISPLAY settings for GnuPG pinentry.""" options = [] try: ttyname = subprocess.check_output(args=['tty']).strip() options.append(b'ttyname=' + ttyname) except subprocess.CalledProcessError as e: log.warning('no TTY found: %s', e) display = os.environ.get('DISPLAY') if display is not None: options.append('display={}'.format(display).encode('ascii')) else: log.warning('DISPLAY not defined') log.info('using %s for pinentry options', options) return lambda: options
python
def create_default_options_getter(): """Return current TTY and DISPLAY settings for GnuPG pinentry.""" options = [] try: ttyname = subprocess.check_output(args=['tty']).strip() options.append(b'ttyname=' + ttyname) except subprocess.CalledProcessError as e: log.warning('no TTY found: %s', e) display = os.environ.get('DISPLAY') if display is not None: options.append('display={}'.format(display).encode('ascii')) else: log.warning('DISPLAY not defined') log.info('using %s for pinentry options', options) return lambda: options
[ "def", "create_default_options_getter", "(", ")", ":", "options", "=", "[", "]", "try", ":", "ttyname", "=", "subprocess", ".", "check_output", "(", "args", "=", "[", "'tty'", "]", ")", ".", "strip", "(", ")", "options", ".", "append", "(", "b'ttyname='"...
Return current TTY and DISPLAY settings for GnuPG pinentry.
[ "Return", "current", "TTY", "and", "DISPLAY", "settings", "for", "GnuPG", "pinentry", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/ui.py#L64-L80
train
romanz/trezor-agent
libagent/device/ui.py
write
def write(p, line): """Send and flush a single line to the subprocess' stdin.""" log.debug('%s <- %r', p.args, line) p.stdin.write(line) p.stdin.flush()
python
def write(p, line): """Send and flush a single line to the subprocess' stdin.""" log.debug('%s <- %r', p.args, line) p.stdin.write(line) p.stdin.flush()
[ "def", "write", "(", "p", ",", "line", ")", ":", "log", ".", "debug", "(", "'%s <- %r'", ",", "p", ".", "args", ",", "line", ")", "p", ".", "stdin", ".", "write", "(", "line", ")", "p", ".", "stdin", ".", "flush", "(", ")" ]
Send and flush a single line to the subprocess' stdin.
[ "Send", "and", "flush", "a", "single", "line", "to", "the", "subprocess", "stdin", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/ui.py#L83-L87
train
romanz/trezor-agent
libagent/device/ui.py
expect
def expect(p, prefixes, confidential=False): """Read a line and return it without required prefix.""" resp = p.stdout.readline() log.debug('%s -> %r', p.args, resp if not confidential else '********') for prefix in prefixes: if resp.startswith(prefix): return resp[len(prefix):] raise UnexpectedError(resp)
python
def expect(p, prefixes, confidential=False): """Read a line and return it without required prefix.""" resp = p.stdout.readline() log.debug('%s -> %r', p.args, resp if not confidential else '********') for prefix in prefixes: if resp.startswith(prefix): return resp[len(prefix):] raise UnexpectedError(resp)
[ "def", "expect", "(", "p", ",", "prefixes", ",", "confidential", "=", "False", ")", ":", "resp", "=", "p", ".", "stdout", ".", "readline", "(", ")", "log", ".", "debug", "(", "'%s -> %r'", ",", "p", ".", "args", ",", "resp", "if", "not", "confident...
Read a line and return it without required prefix.
[ "Read", "a", "line", "and", "return", "it", "without", "required", "prefix", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/ui.py#L94-L101
train
romanz/trezor-agent
libagent/device/ui.py
interact
def interact(title, description, prompt, binary, options): """Use GPG pinentry program to interact with the user.""" args = [binary] p = subprocess.Popen(args=args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=os.environ) p.args = args # TODO: remove after Python 2 deprecation. expect(p, [b'OK']) title = util.assuan_serialize(title.encode('ascii')) write(p, b'SETTITLE ' + title + b'\n') expect(p, [b'OK']) if description: description = util.assuan_serialize(description.encode('ascii')) write(p, b'SETDESC ' + description + b'\n') expect(p, [b'OK']) if prompt: prompt = util.assuan_serialize(prompt.encode('ascii')) write(p, b'SETPROMPT ' + prompt + b'\n') expect(p, [b'OK']) log.debug('setting %d options', len(options)) for opt in options: write(p, b'OPTION ' + opt + b'\n') expect(p, [b'OK', b'ERR']) write(p, b'GETPIN\n') pin = expect(p, [b'OK', b'D '], confidential=True) p.communicate() # close stdin and wait for the process to exit exit_code = p.wait() if exit_code: raise subprocess.CalledProcessError(exit_code, binary) return pin.decode('ascii').strip()
python
def interact(title, description, prompt, binary, options): """Use GPG pinentry program to interact with the user.""" args = [binary] p = subprocess.Popen(args=args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=os.environ) p.args = args # TODO: remove after Python 2 deprecation. expect(p, [b'OK']) title = util.assuan_serialize(title.encode('ascii')) write(p, b'SETTITLE ' + title + b'\n') expect(p, [b'OK']) if description: description = util.assuan_serialize(description.encode('ascii')) write(p, b'SETDESC ' + description + b'\n') expect(p, [b'OK']) if prompt: prompt = util.assuan_serialize(prompt.encode('ascii')) write(p, b'SETPROMPT ' + prompt + b'\n') expect(p, [b'OK']) log.debug('setting %d options', len(options)) for opt in options: write(p, b'OPTION ' + opt + b'\n') expect(p, [b'OK', b'ERR']) write(p, b'GETPIN\n') pin = expect(p, [b'OK', b'D '], confidential=True) p.communicate() # close stdin and wait for the process to exit exit_code = p.wait() if exit_code: raise subprocess.CalledProcessError(exit_code, binary) return pin.decode('ascii').strip()
[ "def", "interact", "(", "title", ",", "description", ",", "prompt", ",", "binary", ",", "options", ")", ":", "args", "=", "[", "binary", "]", "p", "=", "subprocess", ".", "Popen", "(", "args", "=", "args", ",", "stdin", "=", "subprocess", ".", "PIPE"...
Use GPG pinentry program to interact with the user.
[ "Use", "GPG", "pinentry", "program", "to", "interact", "with", "the", "user", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/ui.py#L104-L141
train
romanz/trezor-agent
libagent/device/ui.py
UI.get_passphrase
def get_passphrase(self, prompt='Passphrase:'): """Ask the user for passphrase.""" passphrase = None if self.cached_passphrase_ack: passphrase = self.cached_passphrase_ack.get() if passphrase is None: passphrase = interact( title='{} passphrase'.format(self.device_name), prompt=prompt, description=None, binary=self.passphrase_entry_binary, options=self.options_getter()) if self.cached_passphrase_ack: self.cached_passphrase_ack.set(passphrase) return passphrase
python
def get_passphrase(self, prompt='Passphrase:'): """Ask the user for passphrase.""" passphrase = None if self.cached_passphrase_ack: passphrase = self.cached_passphrase_ack.get() if passphrase is None: passphrase = interact( title='{} passphrase'.format(self.device_name), prompt=prompt, description=None, binary=self.passphrase_entry_binary, options=self.options_getter()) if self.cached_passphrase_ack: self.cached_passphrase_ack.set(passphrase) return passphrase
[ "def", "get_passphrase", "(", "self", ",", "prompt", "=", "'Passphrase:'", ")", ":", "passphrase", "=", "None", "if", "self", ".", "cached_passphrase_ack", ":", "passphrase", "=", "self", ".", "cached_passphrase_ack", ".", "get", "(", ")", "if", "passphrase", ...
Ask the user for passphrase.
[ "Ask", "the", "user", "for", "passphrase", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/ui.py#L43-L57
train
romanz/trezor-agent
libagent/ssh/client.py
Client.export_public_keys
def export_public_keys(self, identities): """Export SSH public keys from the device.""" public_keys = [] with self.device: for i in identities: pubkey = self.device.pubkey(identity=i) vk = formats.decompress_pubkey(pubkey=pubkey, curve_name=i.curve_name) public_key = formats.export_public_key(vk=vk, label=i.to_string()) public_keys.append(public_key) return public_keys
python
def export_public_keys(self, identities): """Export SSH public keys from the device.""" public_keys = [] with self.device: for i in identities: pubkey = self.device.pubkey(identity=i) vk = formats.decompress_pubkey(pubkey=pubkey, curve_name=i.curve_name) public_key = formats.export_public_key(vk=vk, label=i.to_string()) public_keys.append(public_key) return public_keys
[ "def", "export_public_keys", "(", "self", ",", "identities", ")", ":", "public_keys", "=", "[", "]", "with", "self", ".", "device", ":", "for", "i", "in", "identities", ":", "pubkey", "=", "self", ".", "device", ".", "pubkey", "(", "identity", "=", "i"...
Export SSH public keys from the device.
[ "Export", "SSH", "public", "keys", "from", "the", "device", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/ssh/client.py#L21-L32
train
romanz/trezor-agent
libagent/ssh/client.py
Client.sign_ssh_challenge
def sign_ssh_challenge(self, blob, identity): """Sign given blob using a private key on the device.""" msg = _parse_ssh_blob(blob) log.debug('%s: user %r via %r (%r)', msg['conn'], msg['user'], msg['auth'], msg['key_type']) log.debug('nonce: %r', msg['nonce']) fp = msg['public_key']['fingerprint'] log.debug('fingerprint: %s', fp) log.debug('hidden challenge size: %d bytes', len(blob)) log.info('please confirm user "%s" login to "%s" using %s...', msg['user'].decode('ascii'), identity.to_string(), self.device) with self.device: return self.device.sign(blob=blob, identity=identity)
python
def sign_ssh_challenge(self, blob, identity): """Sign given blob using a private key on the device.""" msg = _parse_ssh_blob(blob) log.debug('%s: user %r via %r (%r)', msg['conn'], msg['user'], msg['auth'], msg['key_type']) log.debug('nonce: %r', msg['nonce']) fp = msg['public_key']['fingerprint'] log.debug('fingerprint: %s', fp) log.debug('hidden challenge size: %d bytes', len(blob)) log.info('please confirm user "%s" login to "%s" using %s...', msg['user'].decode('ascii'), identity.to_string(), self.device) with self.device: return self.device.sign(blob=blob, identity=identity)
[ "def", "sign_ssh_challenge", "(", "self", ",", "blob", ",", "identity", ")", ":", "msg", "=", "_parse_ssh_blob", "(", "blob", ")", "log", ".", "debug", "(", "'%s: user %r via %r (%r)'", ",", "msg", "[", "'conn'", "]", ",", "msg", "[", "'user'", "]", ",",...
Sign given blob using a private key on the device.
[ "Sign", "given", "blob", "using", "a", "private", "key", "on", "the", "device", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/ssh/client.py#L34-L49
train
romanz/trezor-agent
libagent/formats.py
fingerprint
def fingerprint(blob): """ Compute SSH fingerprint for specified blob. See https://en.wikipedia.org/wiki/Public_key_fingerprint for details. """ digest = hashlib.md5(blob).digest() return ':'.join('{:02x}'.format(c) for c in bytearray(digest))
python
def fingerprint(blob): """ Compute SSH fingerprint for specified blob. See https://en.wikipedia.org/wiki/Public_key_fingerprint for details. """ digest = hashlib.md5(blob).digest() return ':'.join('{:02x}'.format(c) for c in bytearray(digest))
[ "def", "fingerprint", "(", "blob", ")", ":", "digest", "=", "hashlib", ".", "md5", "(", "blob", ")", ".", "digest", "(", ")", "return", "':'", ".", "join", "(", "'{:02x}'", ".", "format", "(", "c", ")", "for", "c", "in", "bytearray", "(", "digest",...
Compute SSH fingerprint for specified blob. See https://en.wikipedia.org/wiki/Public_key_fingerprint for details.
[ "Compute", "SSH", "fingerprint", "for", "specified", "blob", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/formats.py#L34-L41
train
romanz/trezor-agent
libagent/formats.py
parse_pubkey
def parse_pubkey(blob): """ Parse SSH public key from given blob. Construct a verifier for ECDSA signatures. The verifier returns the signatures in the required SSH format. Currently, NIST256P1 and ED25519 elliptic curves are supported. """ fp = fingerprint(blob) s = io.BytesIO(blob) key_type = util.read_frame(s) log.debug('key type: %s', key_type) assert key_type in SUPPORTED_KEY_TYPES, key_type result = {'blob': blob, 'type': key_type, 'fingerprint': fp} if key_type == SSH_NIST256_KEY_TYPE: curve_name = util.read_frame(s) log.debug('curve name: %s', curve_name) point = util.read_frame(s) assert s.read() == b'' _type, point = point[:1], point[1:] assert _type == SSH_NIST256_DER_OCTET size = len(point) // 2 assert len(point) == 2 * size coords = (util.bytes2num(point[:size]), util.bytes2num(point[size:])) curve = ecdsa.NIST256p point = ecdsa.ellipticcurve.Point(curve.curve, *coords) def ecdsa_verifier(sig, msg): assert len(sig) == 2 * size sig_decode = ecdsa.util.sigdecode_string vk = ecdsa.VerifyingKey.from_public_point(point, curve, hashfunc) vk.verify(signature=sig, data=msg, sigdecode=sig_decode) parts = [sig[:size], sig[size:]] return b''.join([util.frame(b'\x00' + p) for p in parts]) result.update(point=coords, curve=CURVE_NIST256, verifier=ecdsa_verifier) if key_type == SSH_ED25519_KEY_TYPE: pubkey = util.read_frame(s) assert s.read() == b'' def ed25519_verify(sig, msg): assert len(sig) == 64 vk = ed25519.VerifyingKey(pubkey) vk.verify(sig, msg) return sig result.update(curve=CURVE_ED25519, verifier=ed25519_verify) return result
python
def parse_pubkey(blob): """ Parse SSH public key from given blob. Construct a verifier for ECDSA signatures. The verifier returns the signatures in the required SSH format. Currently, NIST256P1 and ED25519 elliptic curves are supported. """ fp = fingerprint(blob) s = io.BytesIO(blob) key_type = util.read_frame(s) log.debug('key type: %s', key_type) assert key_type in SUPPORTED_KEY_TYPES, key_type result = {'blob': blob, 'type': key_type, 'fingerprint': fp} if key_type == SSH_NIST256_KEY_TYPE: curve_name = util.read_frame(s) log.debug('curve name: %s', curve_name) point = util.read_frame(s) assert s.read() == b'' _type, point = point[:1], point[1:] assert _type == SSH_NIST256_DER_OCTET size = len(point) // 2 assert len(point) == 2 * size coords = (util.bytes2num(point[:size]), util.bytes2num(point[size:])) curve = ecdsa.NIST256p point = ecdsa.ellipticcurve.Point(curve.curve, *coords) def ecdsa_verifier(sig, msg): assert len(sig) == 2 * size sig_decode = ecdsa.util.sigdecode_string vk = ecdsa.VerifyingKey.from_public_point(point, curve, hashfunc) vk.verify(signature=sig, data=msg, sigdecode=sig_decode) parts = [sig[:size], sig[size:]] return b''.join([util.frame(b'\x00' + p) for p in parts]) result.update(point=coords, curve=CURVE_NIST256, verifier=ecdsa_verifier) if key_type == SSH_ED25519_KEY_TYPE: pubkey = util.read_frame(s) assert s.read() == b'' def ed25519_verify(sig, msg): assert len(sig) == 64 vk = ed25519.VerifyingKey(pubkey) vk.verify(sig, msg) return sig result.update(curve=CURVE_ED25519, verifier=ed25519_verify) return result
[ "def", "parse_pubkey", "(", "blob", ")", ":", "fp", "=", "fingerprint", "(", "blob", ")", "s", "=", "io", ".", "BytesIO", "(", "blob", ")", "key_type", "=", "util", ".", "read_frame", "(", "s", ")", "log", ".", "debug", "(", "'key type: %s'", ",", ...
Parse SSH public key from given blob. Construct a verifier for ECDSA signatures. The verifier returns the signatures in the required SSH format. Currently, NIST256P1 and ED25519 elliptic curves are supported.
[ "Parse", "SSH", "public", "key", "from", "given", "blob", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/formats.py#L44-L97
train
romanz/trezor-agent
libagent/formats.py
export_public_key
def export_public_key(vk, label): """ Export public key to text format. The resulting string can be written into a .pub file or appended to the ~/.ssh/authorized_keys file. """ key_type, blob = serialize_verifying_key(vk) log.debug('fingerprint: %s', fingerprint(blob)) b64 = base64.b64encode(blob).decode('ascii') return u'{} {} {}\n'.format(key_type.decode('ascii'), b64, label)
python
def export_public_key(vk, label): """ Export public key to text format. The resulting string can be written into a .pub file or appended to the ~/.ssh/authorized_keys file. """ key_type, blob = serialize_verifying_key(vk) log.debug('fingerprint: %s', fingerprint(blob)) b64 = base64.b64encode(blob).decode('ascii') return u'{} {} {}\n'.format(key_type.decode('ascii'), b64, label)
[ "def", "export_public_key", "(", "vk", ",", "label", ")", ":", "key_type", ",", "blob", "=", "serialize_verifying_key", "(", "vk", ")", "log", ".", "debug", "(", "'fingerprint: %s'", ",", "fingerprint", "(", "blob", ")", ")", "b64", "=", "base64", ".", "...
Export public key to text format. The resulting string can be written into a .pub file or appended to the ~/.ssh/authorized_keys file.
[ "Export", "public", "key", "to", "text", "format", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/formats.py#L181-L191
train
romanz/trezor-agent
libagent/formats.py
import_public_key
def import_public_key(line): """Parse public key textual format, as saved at a .pub file.""" log.debug('loading SSH public key: %r', line) file_type, base64blob, name = line.split() blob = base64.b64decode(base64blob) result = parse_pubkey(blob) result['name'] = name.encode('utf-8') assert result['type'] == file_type.encode('ascii') log.debug('loaded %s public key: %s', file_type, result['fingerprint']) return result
python
def import_public_key(line): """Parse public key textual format, as saved at a .pub file.""" log.debug('loading SSH public key: %r', line) file_type, base64blob, name = line.split() blob = base64.b64decode(base64blob) result = parse_pubkey(blob) result['name'] = name.encode('utf-8') assert result['type'] == file_type.encode('ascii') log.debug('loaded %s public key: %s', file_type, result['fingerprint']) return result
[ "def", "import_public_key", "(", "line", ")", ":", "log", ".", "debug", "(", "'loading SSH public key: %r'", ",", "line", ")", "file_type", ",", "base64blob", ",", "name", "=", "line", ".", "split", "(", ")", "blob", "=", "base64", ".", "b64decode", "(", ...
Parse public key textual format, as saved at a .pub file.
[ "Parse", "public", "key", "textual", "format", "as", "saved", "at", "a", ".", "pub", "file", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/formats.py#L194-L203
train
romanz/trezor-agent
libagent/gpg/decode.py
parse_packets
def parse_packets(stream): """ Support iterative parsing of available GPG packets. See https://tools.ietf.org/html/rfc4880#section-4.2 for details. """ reader = util.Reader(stream) while True: try: value = reader.readfmt('B') except EOFError: return log.debug('prefix byte: %s', bin(value)) assert util.bit(value, 7) == 1 tag = util.low_bits(value, 6) if util.bit(value, 6) == 0: length_type = util.low_bits(tag, 2) tag = tag >> 2 fmt = {0: '>B', 1: '>H', 2: '>L'}[length_type] packet_size = reader.readfmt(fmt) else: first = reader.readfmt('B') if first < 192: packet_size = first elif first < 224: packet_size = ((first - 192) << 8) + reader.readfmt('B') + 192 elif first == 255: packet_size = reader.readfmt('>L') else: log.error('Partial Body Lengths unsupported') log.debug('packet length: %d', packet_size) packet_data = reader.read(packet_size) packet_type = PACKET_TYPES.get(tag) p = {'type': 'unknown', 'tag': tag, 'raw': packet_data} if packet_type is not None: try: p = packet_type(util.Reader(io.BytesIO(packet_data))) p['tag'] = tag except ValueError: log.exception('Skipping packet: %s', util.hexlify(packet_data)) log.debug('packet "%s": %s', p['type'], p) yield p
python
def parse_packets(stream): """ Support iterative parsing of available GPG packets. See https://tools.ietf.org/html/rfc4880#section-4.2 for details. """ reader = util.Reader(stream) while True: try: value = reader.readfmt('B') except EOFError: return log.debug('prefix byte: %s', bin(value)) assert util.bit(value, 7) == 1 tag = util.low_bits(value, 6) if util.bit(value, 6) == 0: length_type = util.low_bits(tag, 2) tag = tag >> 2 fmt = {0: '>B', 1: '>H', 2: '>L'}[length_type] packet_size = reader.readfmt(fmt) else: first = reader.readfmt('B') if first < 192: packet_size = first elif first < 224: packet_size = ((first - 192) << 8) + reader.readfmt('B') + 192 elif first == 255: packet_size = reader.readfmt('>L') else: log.error('Partial Body Lengths unsupported') log.debug('packet length: %d', packet_size) packet_data = reader.read(packet_size) packet_type = PACKET_TYPES.get(tag) p = {'type': 'unknown', 'tag': tag, 'raw': packet_data} if packet_type is not None: try: p = packet_type(util.Reader(io.BytesIO(packet_data))) p['tag'] = tag except ValueError: log.exception('Skipping packet: %s', util.hexlify(packet_data)) log.debug('packet "%s": %s', p['type'], p) yield p
[ "def", "parse_packets", "(", "stream", ")", ":", "reader", "=", "util", ".", "Reader", "(", "stream", ")", "while", "True", ":", "try", ":", "value", "=", "reader", ".", "readfmt", "(", "'B'", ")", "except", "EOFError", ":", "return", "log", ".", "de...
Support iterative parsing of available GPG packets. See https://tools.ietf.org/html/rfc4880#section-4.2 for details.
[ "Support", "iterative", "parsing", "of", "available", "GPG", "packets", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/decode.py#L215-L261
train
romanz/trezor-agent
libagent/gpg/decode.py
digest_packets
def digest_packets(packets, hasher): """Compute digest on specified packets, according to '_to_hash' field.""" data_to_hash = io.BytesIO() for p in packets: data_to_hash.write(p['_to_hash']) hasher.update(data_to_hash.getvalue()) return hasher.digest()
python
def digest_packets(packets, hasher): """Compute digest on specified packets, according to '_to_hash' field.""" data_to_hash = io.BytesIO() for p in packets: data_to_hash.write(p['_to_hash']) hasher.update(data_to_hash.getvalue()) return hasher.digest()
[ "def", "digest_packets", "(", "packets", ",", "hasher", ")", ":", "data_to_hash", "=", "io", ".", "BytesIO", "(", ")", "for", "p", "in", "packets", ":", "data_to_hash", ".", "write", "(", "p", "[", "'_to_hash'", "]", ")", "hasher", ".", "update", "(", ...
Compute digest on specified packets, according to '_to_hash' field.
[ "Compute", "digest", "on", "specified", "packets", "according", "to", "_to_hash", "field", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/decode.py#L264-L270
train
romanz/trezor-agent
libagent/gpg/decode.py
load_by_keygrip
def load_by_keygrip(pubkey_bytes, keygrip): """Return public key and first user ID for specified keygrip.""" stream = io.BytesIO(pubkey_bytes) packets = list(parse_packets(stream)) packets_per_pubkey = [] for p in packets: if p['type'] == 'pubkey': # Add a new packet list for each pubkey. packets_per_pubkey.append([]) packets_per_pubkey[-1].append(p) for packets in packets_per_pubkey: user_ids = [p for p in packets if p['type'] == 'user_id'] for p in packets: if p.get('keygrip') == keygrip: return p, user_ids raise KeyError('{} keygrip not found'.format(util.hexlify(keygrip)))
python
def load_by_keygrip(pubkey_bytes, keygrip): """Return public key and first user ID for specified keygrip.""" stream = io.BytesIO(pubkey_bytes) packets = list(parse_packets(stream)) packets_per_pubkey = [] for p in packets: if p['type'] == 'pubkey': # Add a new packet list for each pubkey. packets_per_pubkey.append([]) packets_per_pubkey[-1].append(p) for packets in packets_per_pubkey: user_ids = [p for p in packets if p['type'] == 'user_id'] for p in packets: if p.get('keygrip') == keygrip: return p, user_ids raise KeyError('{} keygrip not found'.format(util.hexlify(keygrip)))
[ "def", "load_by_keygrip", "(", "pubkey_bytes", ",", "keygrip", ")", ":", "stream", "=", "io", ".", "BytesIO", "(", "pubkey_bytes", ")", "packets", "=", "list", "(", "parse_packets", "(", "stream", ")", ")", "packets_per_pubkey", "=", "[", "]", "for", "p", ...
Return public key and first user ID for specified keygrip.
[ "Return", "public", "key", "and", "first", "user", "ID", "for", "specified", "keygrip", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/decode.py#L284-L300
train
romanz/trezor-agent
libagent/gpg/decode.py
load_signature
def load_signature(stream, original_data): """Load signature from stream, and compute GPG digest for verification.""" signature, = list(parse_packets((stream))) hash_alg = HASH_ALGORITHMS[signature['hash_alg']] digest = digest_packets([{'_to_hash': original_data}, signature], hasher=hashlib.new(hash_alg)) assert signature['hash_prefix'] == digest[:2] return signature, digest
python
def load_signature(stream, original_data): """Load signature from stream, and compute GPG digest for verification.""" signature, = list(parse_packets((stream))) hash_alg = HASH_ALGORITHMS[signature['hash_alg']] digest = digest_packets([{'_to_hash': original_data}, signature], hasher=hashlib.new(hash_alg)) assert signature['hash_prefix'] == digest[:2] return signature, digest
[ "def", "load_signature", "(", "stream", ",", "original_data", ")", ":", "signature", ",", "=", "list", "(", "parse_packets", "(", "(", "stream", ")", ")", ")", "hash_alg", "=", "HASH_ALGORITHMS", "[", "signature", "[", "'hash_alg'", "]", "]", "digest", "="...
Load signature from stream, and compute GPG digest for verification.
[ "Load", "signature", "from", "stream", "and", "compute", "GPG", "digest", "for", "verification", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/decode.py#L303-L310
train
romanz/trezor-agent
libagent/gpg/decode.py
remove_armor
def remove_armor(armored_data): """Decode armored data into its binary form.""" stream = io.BytesIO(armored_data) lines = stream.readlines()[3:-1] data = base64.b64decode(b''.join(lines)) payload, checksum = data[:-3], data[-3:] assert util.crc24(payload) == checksum return payload
python
def remove_armor(armored_data): """Decode armored data into its binary form.""" stream = io.BytesIO(armored_data) lines = stream.readlines()[3:-1] data = base64.b64decode(b''.join(lines)) payload, checksum = data[:-3], data[-3:] assert util.crc24(payload) == checksum return payload
[ "def", "remove_armor", "(", "armored_data", ")", ":", "stream", "=", "io", ".", "BytesIO", "(", "armored_data", ")", "lines", "=", "stream", ".", "readlines", "(", ")", "[", "3", ":", "-", "1", "]", "data", "=", "base64", ".", "b64decode", "(", "b''"...
Decode armored data into its binary form.
[ "Decode", "armored", "data", "into", "its", "binary", "form", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/decode.py#L313-L320
train
romanz/trezor-agent
libagent/server.py
remove_file
def remove_file(path, remove=os.remove, exists=os.path.exists): """Remove file, and raise OSError if still exists.""" try: remove(path) except OSError: if exists(path): raise
python
def remove_file(path, remove=os.remove, exists=os.path.exists): """Remove file, and raise OSError if still exists.""" try: remove(path) except OSError: if exists(path): raise
[ "def", "remove_file", "(", "path", ",", "remove", "=", "os", ".", "remove", ",", "exists", "=", "os", ".", "path", ".", "exists", ")", ":", "try", ":", "remove", "(", "path", ")", "except", "OSError", ":", "if", "exists", "(", "path", ")", ":", "...
Remove file, and raise OSError if still exists.
[ "Remove", "file", "and", "raise", "OSError", "if", "still", "exists", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/server.py#L14-L20
train
romanz/trezor-agent
libagent/server.py
unix_domain_socket_server
def unix_domain_socket_server(sock_path): """ Create UNIX-domain socket on specified path. Listen on it, and delete it after the generated context is over. """ log.debug('serving on %s', sock_path) remove_file(sock_path) server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) server.bind(sock_path) server.listen(1) try: yield server finally: remove_file(sock_path)
python
def unix_domain_socket_server(sock_path): """ Create UNIX-domain socket on specified path. Listen on it, and delete it after the generated context is over. """ log.debug('serving on %s', sock_path) remove_file(sock_path) server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) server.bind(sock_path) server.listen(1) try: yield server finally: remove_file(sock_path)
[ "def", "unix_domain_socket_server", "(", "sock_path", ")", ":", "log", ".", "debug", "(", "'serving on %s'", ",", "sock_path", ")", "remove_file", "(", "sock_path", ")", "server", "=", "socket", ".", "socket", "(", "socket", ".", "AF_UNIX", ",", "socket", "....
Create UNIX-domain socket on specified path. Listen on it, and delete it after the generated context is over.
[ "Create", "UNIX", "-", "domain", "socket", "on", "specified", "path", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/server.py#L24-L39
train
romanz/trezor-agent
libagent/server.py
handle_connection
def handle_connection(conn, handler, mutex): """ Handle a single connection using the specified protocol handler in a loop. Since this function may be called concurrently from server_thread, the specified mutex is used to synchronize the device handling. Exit when EOFError is raised. All other exceptions are logged as warnings. """ try: log.debug('welcome agent') with contextlib.closing(conn): while True: msg = util.read_frame(conn) with mutex: reply = handler.handle(msg=msg) util.send(conn, reply) except EOFError: log.debug('goodbye agent') except Exception as e: # pylint: disable=broad-except log.warning('error: %s', e, exc_info=True)
python
def handle_connection(conn, handler, mutex): """ Handle a single connection using the specified protocol handler in a loop. Since this function may be called concurrently from server_thread, the specified mutex is used to synchronize the device handling. Exit when EOFError is raised. All other exceptions are logged as warnings. """ try: log.debug('welcome agent') with contextlib.closing(conn): while True: msg = util.read_frame(conn) with mutex: reply = handler.handle(msg=msg) util.send(conn, reply) except EOFError: log.debug('goodbye agent') except Exception as e: # pylint: disable=broad-except log.warning('error: %s', e, exc_info=True)
[ "def", "handle_connection", "(", "conn", ",", "handler", ",", "mutex", ")", ":", "try", ":", "log", ".", "debug", "(", "'welcome agent'", ")", "with", "contextlib", ".", "closing", "(", "conn", ")", ":", "while", "True", ":", "msg", "=", "util", ".", ...
Handle a single connection using the specified protocol handler in a loop. Since this function may be called concurrently from server_thread, the specified mutex is used to synchronize the device handling. Exit when EOFError is raised. All other exceptions are logged as warnings.
[ "Handle", "a", "single", "connection", "using", "the", "specified", "protocol", "handler", "in", "a", "loop", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/server.py#L79-L100
train
romanz/trezor-agent
libagent/server.py
retry
def retry(func, exception_type, quit_event): """ Run the function, retrying when the specified exception_type occurs. Poll quit_event on each iteration, to be responsive to an external exit request. """ while True: if quit_event.is_set(): raise StopIteration try: return func() except exception_type: pass
python
def retry(func, exception_type, quit_event): """ Run the function, retrying when the specified exception_type occurs. Poll quit_event on each iteration, to be responsive to an external exit request. """ while True: if quit_event.is_set(): raise StopIteration try: return func() except exception_type: pass
[ "def", "retry", "(", "func", ",", "exception_type", ",", "quit_event", ")", ":", "while", "True", ":", "if", "quit_event", ".", "is_set", "(", ")", ":", "raise", "StopIteration", "try", ":", "return", "func", "(", ")", "except", "exception_type", ":", "p...
Run the function, retrying when the specified exception_type occurs. Poll quit_event on each iteration, to be responsive to an external exit request.
[ "Run", "the", "function", "retrying", "when", "the", "specified", "exception_type", "occurs", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/server.py#L103-L116
train
romanz/trezor-agent
libagent/server.py
spawn
def spawn(func, kwargs): """Spawn a thread, and join it after the context is over.""" t = threading.Thread(target=func, kwargs=kwargs) t.start() yield t.join()
python
def spawn(func, kwargs): """Spawn a thread, and join it after the context is over.""" t = threading.Thread(target=func, kwargs=kwargs) t.start() yield t.join()
[ "def", "spawn", "(", "func", ",", "kwargs", ")", ":", "t", "=", "threading", ".", "Thread", "(", "target", "=", "func", ",", "kwargs", "=", "kwargs", ")", "t", ".", "start", "(", ")", "yield", "t", ".", "join", "(", ")" ]
Spawn a thread, and join it after the context is over.
[ "Spawn", "a", "thread", "and", "join", "it", "after", "the", "context", "is", "over", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/server.py#L142-L147
train
romanz/trezor-agent
libagent/server.py
run_process
def run_process(command, environ): """ Run the specified process and wait until it finishes. Use environ dict for environment variables. """ log.info('running %r with %r', command, environ) env = dict(os.environ) env.update(environ) try: p = subprocess.Popen(args=command, env=env) except OSError as e: raise OSError('cannot run %r: %s' % (command, e)) log.debug('subprocess %d is running', p.pid) ret = p.wait() log.debug('subprocess %d exited: %d', p.pid, ret) return ret
python
def run_process(command, environ): """ Run the specified process and wait until it finishes. Use environ dict for environment variables. """ log.info('running %r with %r', command, environ) env = dict(os.environ) env.update(environ) try: p = subprocess.Popen(args=command, env=env) except OSError as e: raise OSError('cannot run %r: %s' % (command, e)) log.debug('subprocess %d is running', p.pid) ret = p.wait() log.debug('subprocess %d exited: %d', p.pid, ret) return ret
[ "def", "run_process", "(", "command", ",", "environ", ")", ":", "log", ".", "info", "(", "'running %r with %r'", ",", "command", ",", "environ", ")", "env", "=", "dict", "(", "os", ".", "environ", ")", "env", ".", "update", "(", "environ", ")", "try", ...
Run the specified process and wait until it finishes. Use environ dict for environment variables.
[ "Run", "the", "specified", "process", "and", "wait", "until", "it", "finishes", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/server.py#L150-L166
train
romanz/trezor-agent
libagent/gpg/keyring.py
check_output
def check_output(args, env=None, sp=subprocess): """Call an external binary and return its stdout.""" log.debug('calling %s with env %s', args, env) output = sp.check_output(args=args, env=env) log.debug('output: %r', output) return output
python
def check_output(args, env=None, sp=subprocess): """Call an external binary and return its stdout.""" log.debug('calling %s with env %s', args, env) output = sp.check_output(args=args, env=env) log.debug('output: %r', output) return output
[ "def", "check_output", "(", "args", ",", "env", "=", "None", ",", "sp", "=", "subprocess", ")", ":", "log", ".", "debug", "(", "'calling %s with env %s'", ",", "args", ",", "env", ")", "output", "=", "sp", ".", "check_output", "(", "args", "=", "args",...
Call an external binary and return its stdout.
[ "Call", "an", "external", "binary", "and", "return", "its", "stdout", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/keyring.py#L17-L22
train
romanz/trezor-agent
libagent/gpg/keyring.py
get_agent_sock_path
def get_agent_sock_path(env=None, sp=subprocess): """Parse gpgconf output to find out GPG agent UNIX socket path.""" args = [util.which('gpgconf'), '--list-dirs'] output = check_output(args=args, env=env, sp=sp) lines = output.strip().split(b'\n') dirs = dict(line.split(b':', 1) for line in lines) log.debug('%s: %s', args, dirs) return dirs[b'agent-socket']
python
def get_agent_sock_path(env=None, sp=subprocess): """Parse gpgconf output to find out GPG agent UNIX socket path.""" args = [util.which('gpgconf'), '--list-dirs'] output = check_output(args=args, env=env, sp=sp) lines = output.strip().split(b'\n') dirs = dict(line.split(b':', 1) for line in lines) log.debug('%s: %s', args, dirs) return dirs[b'agent-socket']
[ "def", "get_agent_sock_path", "(", "env", "=", "None", ",", "sp", "=", "subprocess", ")", ":", "args", "=", "[", "util", ".", "which", "(", "'gpgconf'", ")", ",", "'--list-dirs'", "]", "output", "=", "check_output", "(", "args", "=", "args", ",", "env"...
Parse gpgconf output to find out GPG agent UNIX socket path.
[ "Parse", "gpgconf", "output", "to", "find", "out", "GPG", "agent", "UNIX", "socket", "path", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/keyring.py#L25-L32
train
romanz/trezor-agent
libagent/gpg/keyring.py
connect_to_agent
def connect_to_agent(env=None, sp=subprocess): """Connect to GPG agent's UNIX socket.""" sock_path = get_agent_sock_path(sp=sp, env=env) # Make sure the original gpg-agent is running. check_output(args=['gpg-connect-agent', '/bye'], sp=sp) sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.connect(sock_path) return sock
python
def connect_to_agent(env=None, sp=subprocess): """Connect to GPG agent's UNIX socket.""" sock_path = get_agent_sock_path(sp=sp, env=env) # Make sure the original gpg-agent is running. check_output(args=['gpg-connect-agent', '/bye'], sp=sp) sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) sock.connect(sock_path) return sock
[ "def", "connect_to_agent", "(", "env", "=", "None", ",", "sp", "=", "subprocess", ")", ":", "sock_path", "=", "get_agent_sock_path", "(", "sp", "=", "sp", ",", "env", "=", "env", ")", "check_output", "(", "args", "=", "[", "'gpg-connect-agent'", ",", "'/...
Connect to GPG agent's UNIX socket.
[ "Connect", "to", "GPG", "agent", "s", "UNIX", "socket", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/keyring.py#L35-L42
train
romanz/trezor-agent
libagent/gpg/keyring.py
sendline
def sendline(sock, msg, confidential=False): """Send a binary message, followed by EOL.""" log.debug('<- %r', ('<snip>' if confidential else msg)) sock.sendall(msg + b'\n')
python
def sendline(sock, msg, confidential=False): """Send a binary message, followed by EOL.""" log.debug('<- %r', ('<snip>' if confidential else msg)) sock.sendall(msg + b'\n')
[ "def", "sendline", "(", "sock", ",", "msg", ",", "confidential", "=", "False", ")", ":", "log", ".", "debug", "(", "'<- %r'", ",", "(", "'<snip>'", "if", "confidential", "else", "msg", ")", ")", "sock", ".", "sendall", "(", "msg", "+", "b'\\n'", ")" ...
Send a binary message, followed by EOL.
[ "Send", "a", "binary", "message", "followed", "by", "EOL", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/keyring.py#L51-L54
train
romanz/trezor-agent
libagent/gpg/keyring.py
recvline
def recvline(sock): """Receive a single line from the socket.""" reply = io.BytesIO() while True: c = sock.recv(1) if not c: return None # socket is closed if c == b'\n': break reply.write(c) result = reply.getvalue() log.debug('-> %r', result) return result
python
def recvline(sock): """Receive a single line from the socket.""" reply = io.BytesIO() while True: c = sock.recv(1) if not c: return None # socket is closed if c == b'\n': break reply.write(c) result = reply.getvalue() log.debug('-> %r', result) return result
[ "def", "recvline", "(", "sock", ")", ":", "reply", "=", "io", ".", "BytesIO", "(", ")", "while", "True", ":", "c", "=", "sock", ".", "recv", "(", "1", ")", "if", "not", "c", ":", "return", "None", "if", "c", "==", "b'\\n'", ":", "break", "reply...
Receive a single line from the socket.
[ "Receive", "a", "single", "line", "from", "the", "socket", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/keyring.py#L57-L72
train
romanz/trezor-agent
libagent/gpg/keyring.py
parse_term
def parse_term(s): """Parse single s-expr term from bytes.""" size, s = s.split(b':', 1) size = int(size) return s[:size], s[size:]
python
def parse_term(s): """Parse single s-expr term from bytes.""" size, s = s.split(b':', 1) size = int(size) return s[:size], s[size:]
[ "def", "parse_term", "(", "s", ")", ":", "size", ",", "s", "=", "s", ".", "split", "(", "b':'", ",", "1", ")", "size", "=", "int", "(", "size", ")", "return", "s", "[", ":", "size", "]", ",", "s", "[", "size", ":", "]" ]
Parse single s-expr term from bytes.
[ "Parse", "single", "s", "-", "expr", "term", "from", "bytes", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/keyring.py#L97-L101
train
romanz/trezor-agent
libagent/gpg/keyring.py
parse
def parse(s): """Parse full s-expr from bytes.""" if s.startswith(b'('): s = s[1:] name, s = parse_term(s) values = [name] while not s.startswith(b')'): value, s = parse(s) values.append(value) return values, s[1:] return parse_term(s)
python
def parse(s): """Parse full s-expr from bytes.""" if s.startswith(b'('): s = s[1:] name, s = parse_term(s) values = [name] while not s.startswith(b')'): value, s = parse(s) values.append(value) return values, s[1:] return parse_term(s)
[ "def", "parse", "(", "s", ")", ":", "if", "s", ".", "startswith", "(", "b'('", ")", ":", "s", "=", "s", "[", "1", ":", "]", "name", ",", "s", "=", "parse_term", "(", "s", ")", "values", "=", "[", "name", "]", "while", "not", "s", ".", "star...
Parse full s-expr from bytes.
[ "Parse", "full", "s", "-", "expr", "from", "bytes", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/keyring.py#L104-L115
train
romanz/trezor-agent
libagent/gpg/keyring.py
parse_sig
def parse_sig(sig): """Parse signature integer values from s-expr.""" label, sig = sig assert label == b'sig-val' algo_name = sig[0] parser = {b'rsa': _parse_rsa_sig, b'ecdsa': _parse_ecdsa_sig, b'eddsa': _parse_eddsa_sig, b'dsa': _parse_dsa_sig}[algo_name] return parser(args=sig[1:])
python
def parse_sig(sig): """Parse signature integer values from s-expr.""" label, sig = sig assert label == b'sig-val' algo_name = sig[0] parser = {b'rsa': _parse_rsa_sig, b'ecdsa': _parse_ecdsa_sig, b'eddsa': _parse_eddsa_sig, b'dsa': _parse_dsa_sig}[algo_name] return parser(args=sig[1:])
[ "def", "parse_sig", "(", "sig", ")", ":", "label", ",", "sig", "=", "sig", "assert", "label", "==", "b'sig-val'", "algo_name", "=", "sig", "[", "0", "]", "parser", "=", "{", "b'rsa'", ":", "_parse_rsa_sig", ",", "b'ecdsa'", ":", "_parse_ecdsa_sig", ",", ...
Parse signature integer values from s-expr.
[ "Parse", "signature", "integer", "values", "from", "s", "-", "expr", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/keyring.py#L137-L146
train
romanz/trezor-agent
libagent/gpg/keyring.py
sign_digest
def sign_digest(sock, keygrip, digest, sp=subprocess, environ=None): """Sign a digest using specified key using GPG agent.""" hash_algo = 8 # SHA256 assert len(digest) == 32 assert communicate(sock, 'RESET').startswith(b'OK') ttyname = check_output(args=['tty'], sp=sp).strip() options = ['ttyname={}'.format(ttyname)] # set TTY for passphrase entry display = (environ or os.environ).get('DISPLAY') if display is not None: options.append('display={}'.format(display)) for opt in options: assert communicate(sock, 'OPTION {}'.format(opt)) == b'OK' assert communicate(sock, 'SIGKEY {}'.format(keygrip)) == b'OK' hex_digest = binascii.hexlify(digest).upper().decode('ascii') assert communicate(sock, 'SETHASH {} {}'.format(hash_algo, hex_digest)) == b'OK' assert communicate(sock, 'SETKEYDESC ' 'Sign+a+new+TREZOR-based+subkey') == b'OK' assert communicate(sock, 'PKSIGN') == b'OK' while True: line = recvline(sock).strip() if line.startswith(b'S PROGRESS'): continue else: break line = unescape(line) log.debug('unescaped: %r', line) prefix, sig = line.split(b' ', 1) if prefix != b'D': raise ValueError(prefix) sig, leftover = parse(sig) assert not leftover, leftover return parse_sig(sig)
python
def sign_digest(sock, keygrip, digest, sp=subprocess, environ=None): """Sign a digest using specified key using GPG agent.""" hash_algo = 8 # SHA256 assert len(digest) == 32 assert communicate(sock, 'RESET').startswith(b'OK') ttyname = check_output(args=['tty'], sp=sp).strip() options = ['ttyname={}'.format(ttyname)] # set TTY for passphrase entry display = (environ or os.environ).get('DISPLAY') if display is not None: options.append('display={}'.format(display)) for opt in options: assert communicate(sock, 'OPTION {}'.format(opt)) == b'OK' assert communicate(sock, 'SIGKEY {}'.format(keygrip)) == b'OK' hex_digest = binascii.hexlify(digest).upper().decode('ascii') assert communicate(sock, 'SETHASH {} {}'.format(hash_algo, hex_digest)) == b'OK' assert communicate(sock, 'SETKEYDESC ' 'Sign+a+new+TREZOR-based+subkey') == b'OK' assert communicate(sock, 'PKSIGN') == b'OK' while True: line = recvline(sock).strip() if line.startswith(b'S PROGRESS'): continue else: break line = unescape(line) log.debug('unescaped: %r', line) prefix, sig = line.split(b' ', 1) if prefix != b'D': raise ValueError(prefix) sig, leftover = parse(sig) assert not leftover, leftover return parse_sig(sig)
[ "def", "sign_digest", "(", "sock", ",", "keygrip", ",", "digest", ",", "sp", "=", "subprocess", ",", "environ", "=", "None", ")", ":", "hash_algo", "=", "8", "assert", "len", "(", "digest", ")", "==", "32", "assert", "communicate", "(", "sock", ",", ...
Sign a digest using specified key using GPG agent.
[ "Sign", "a", "digest", "using", "specified", "key", "using", "GPG", "agent", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/keyring.py#L149-L188
train
romanz/trezor-agent
libagent/gpg/keyring.py
get_gnupg_components
def get_gnupg_components(sp=subprocess): """Parse GnuPG components' paths.""" args = [util.which('gpgconf'), '--list-components'] output = check_output(args=args, sp=sp) components = dict(re.findall('(.*):.*:(.*)', output.decode('utf-8'))) log.debug('gpgconf --list-components: %s', components) return components
python
def get_gnupg_components(sp=subprocess): """Parse GnuPG components' paths.""" args = [util.which('gpgconf'), '--list-components'] output = check_output(args=args, sp=sp) components = dict(re.findall('(.*):.*:(.*)', output.decode('utf-8'))) log.debug('gpgconf --list-components: %s', components) return components
[ "def", "get_gnupg_components", "(", "sp", "=", "subprocess", ")", ":", "args", "=", "[", "util", ".", "which", "(", "'gpgconf'", ")", ",", "'--list-components'", "]", "output", "=", "check_output", "(", "args", "=", "args", ",", "sp", "=", "sp", ")", "...
Parse GnuPG components' paths.
[ "Parse", "GnuPG", "components", "paths", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/keyring.py#L191-L197
train
romanz/trezor-agent
libagent/gpg/keyring.py
gpg_command
def gpg_command(args, env=None): """Prepare common GPG command line arguments.""" if env is None: env = os.environ cmd = get_gnupg_binary(neopg_binary=env.get('NEOPG_BINARY')) return [cmd] + args
python
def gpg_command(args, env=None): """Prepare common GPG command line arguments.""" if env is None: env = os.environ cmd = get_gnupg_binary(neopg_binary=env.get('NEOPG_BINARY')) return [cmd] + args
[ "def", "gpg_command", "(", "args", ",", "env", "=", "None", ")", ":", "if", "env", "is", "None", ":", "env", "=", "os", ".", "environ", "cmd", "=", "get_gnupg_binary", "(", "neopg_binary", "=", "env", ".", "get", "(", "'NEOPG_BINARY'", ")", ")", "ret...
Prepare common GPG command line arguments.
[ "Prepare", "common", "GPG", "command", "line", "arguments", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/keyring.py#L208-L213
train
romanz/trezor-agent
libagent/gpg/keyring.py
export_public_key
def export_public_key(user_id, env=None, sp=subprocess): """Export GPG public key for specified `user_id`.""" args = gpg_command(['--export', user_id]) result = check_output(args=args, env=env, sp=sp) if not result: log.error('could not find public key %r in local GPG keyring', user_id) raise KeyError(user_id) return result
python
def export_public_key(user_id, env=None, sp=subprocess): """Export GPG public key for specified `user_id`.""" args = gpg_command(['--export', user_id]) result = check_output(args=args, env=env, sp=sp) if not result: log.error('could not find public key %r in local GPG keyring', user_id) raise KeyError(user_id) return result
[ "def", "export_public_key", "(", "user_id", ",", "env", "=", "None", ",", "sp", "=", "subprocess", ")", ":", "args", "=", "gpg_command", "(", "[", "'--export'", ",", "user_id", "]", ")", "result", "=", "check_output", "(", "args", "=", "args", ",", "en...
Export GPG public key for specified `user_id`.
[ "Export", "GPG", "public", "key", "for", "specified", "user_id", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/keyring.py#L233-L240
train
romanz/trezor-agent
libagent/gpg/keyring.py
export_public_keys
def export_public_keys(env=None, sp=subprocess): """Export all GPG public keys.""" args = gpg_command(['--export']) result = check_output(args=args, env=env, sp=sp) if not result: raise KeyError('No GPG public keys found at env: {!r}'.format(env)) return result
python
def export_public_keys(env=None, sp=subprocess): """Export all GPG public keys.""" args = gpg_command(['--export']) result = check_output(args=args, env=env, sp=sp) if not result: raise KeyError('No GPG public keys found at env: {!r}'.format(env)) return result
[ "def", "export_public_keys", "(", "env", "=", "None", ",", "sp", "=", "subprocess", ")", ":", "args", "=", "gpg_command", "(", "[", "'--export'", "]", ")", "result", "=", "check_output", "(", "args", "=", "args", ",", "env", "=", "env", ",", "sp", "=...
Export all GPG public keys.
[ "Export", "all", "GPG", "public", "keys", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/keyring.py#L243-L249
train
romanz/trezor-agent
libagent/gpg/keyring.py
create_agent_signer
def create_agent_signer(user_id): """Sign digest with existing GPG keys using gpg-agent tool.""" sock = connect_to_agent(env=os.environ) keygrip = get_keygrip(user_id) def sign(digest): """Sign the digest and return an ECDSA/RSA/DSA signature.""" return sign_digest(sock=sock, keygrip=keygrip, digest=digest) return sign
python
def create_agent_signer(user_id): """Sign digest with existing GPG keys using gpg-agent tool.""" sock = connect_to_agent(env=os.environ) keygrip = get_keygrip(user_id) def sign(digest): """Sign the digest and return an ECDSA/RSA/DSA signature.""" return sign_digest(sock=sock, keygrip=keygrip, digest=digest) return sign
[ "def", "create_agent_signer", "(", "user_id", ")", ":", "sock", "=", "connect_to_agent", "(", "env", "=", "os", ".", "environ", ")", "keygrip", "=", "get_keygrip", "(", "user_id", ")", "def", "sign", "(", "digest", ")", ":", "return", "sign_digest", "(", ...
Sign digest with existing GPG keys using gpg-agent tool.
[ "Sign", "digest", "with", "existing", "GPG", "keys", "using", "gpg", "-", "agent", "tool", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/gpg/keyring.py#L252-L261
train
romanz/trezor-agent
libagent/ssh/protocol.py
msg_name
def msg_name(code): """Convert integer message code into a string name.""" ids = {v: k for k, v in COMMANDS.items()} return ids[code]
python
def msg_name(code): """Convert integer message code into a string name.""" ids = {v: k for k, v in COMMANDS.items()} return ids[code]
[ "def", "msg_name", "(", "code", ")", ":", "ids", "=", "{", "v", ":", "k", "for", "k", ",", "v", "in", "COMMANDS", ".", "items", "(", ")", "}", "return", "ids", "[", "code", "]" ]
Convert integer message code into a string name.
[ "Convert", "integer", "message", "code", "into", "a", "string", "name", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/ssh/protocol.py#L51-L54
train
romanz/trezor-agent
libagent/ssh/protocol.py
_legacy_pubs
def _legacy_pubs(buf): """SSH v1 public keys are not supported.""" leftover = buf.read() if leftover: log.warning('skipping leftover: %r', leftover) code = util.pack('B', msg_code('SSH_AGENT_RSA_IDENTITIES_ANSWER')) num = util.pack('L', 0) # no SSH v1 keys return util.frame(code, num)
python
def _legacy_pubs(buf): """SSH v1 public keys are not supported.""" leftover = buf.read() if leftover: log.warning('skipping leftover: %r', leftover) code = util.pack('B', msg_code('SSH_AGENT_RSA_IDENTITIES_ANSWER')) num = util.pack('L', 0) # no SSH v1 keys return util.frame(code, num)
[ "def", "_legacy_pubs", "(", "buf", ")", ":", "leftover", "=", "buf", ".", "read", "(", ")", "if", "leftover", ":", "log", ".", "warning", "(", "'skipping leftover: %r'", ",", "leftover", ")", "code", "=", "util", ".", "pack", "(", "'B'", ",", "msg_code...
SSH v1 public keys are not supported.
[ "SSH", "v1", "public", "keys", "are", "not", "supported", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/ssh/protocol.py#L63-L70
train
romanz/trezor-agent
libagent/ssh/protocol.py
Handler.handle
def handle(self, msg): """Handle SSH message from the SSH client and return the response.""" debug_msg = ': {!r}'.format(msg) if self.debug else '' log.debug('request: %d bytes%s', len(msg), debug_msg) buf = io.BytesIO(msg) code, = util.recv(buf, '>B') if code not in self.methods: log.warning('Unsupported command: %s (%d)', msg_name(code), code) return failure() method = self.methods[code] log.debug('calling %s()', method.__name__) reply = method(buf=buf) debug_reply = ': {!r}'.format(reply) if self.debug else '' log.debug('reply: %d bytes%s', len(reply), debug_reply) return reply
python
def handle(self, msg): """Handle SSH message from the SSH client and return the response.""" debug_msg = ': {!r}'.format(msg) if self.debug else '' log.debug('request: %d bytes%s', len(msg), debug_msg) buf = io.BytesIO(msg) code, = util.recv(buf, '>B') if code not in self.methods: log.warning('Unsupported command: %s (%d)', msg_name(code), code) return failure() method = self.methods[code] log.debug('calling %s()', method.__name__) reply = method(buf=buf) debug_reply = ': {!r}'.format(reply) if self.debug else '' log.debug('reply: %d bytes%s', len(reply), debug_reply) return reply
[ "def", "handle", "(", "self", ",", "msg", ")", ":", "debug_msg", "=", "': {!r}'", ".", "format", "(", "msg", ")", "if", "self", ".", "debug", "else", "''", "log", ".", "debug", "(", "'request: %d bytes%s'", ",", "len", "(", "msg", ")", ",", "debug_ms...
Handle SSH message from the SSH client and return the response.
[ "Handle", "SSH", "message", "from", "the", "SSH", "client", "and", "return", "the", "response", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/ssh/protocol.py#L91-L106
train
romanz/trezor-agent
libagent/ssh/protocol.py
Handler.list_pubs
def list_pubs(self, buf): """SSH v2 public keys are serialized and returned.""" assert not buf.read() keys = self.conn.parse_public_keys() code = util.pack('B', msg_code('SSH2_AGENT_IDENTITIES_ANSWER')) num = util.pack('L', len(keys)) log.debug('available keys: %s', [k['name'] for k in keys]) for i, k in enumerate(keys): log.debug('%2d) %s', i+1, k['fingerprint']) pubs = [util.frame(k['blob']) + util.frame(k['name']) for k in keys] return util.frame(code, num, *pubs)
python
def list_pubs(self, buf): """SSH v2 public keys are serialized and returned.""" assert not buf.read() keys = self.conn.parse_public_keys() code = util.pack('B', msg_code('SSH2_AGENT_IDENTITIES_ANSWER')) num = util.pack('L', len(keys)) log.debug('available keys: %s', [k['name'] for k in keys]) for i, k in enumerate(keys): log.debug('%2d) %s', i+1, k['fingerprint']) pubs = [util.frame(k['blob']) + util.frame(k['name']) for k in keys] return util.frame(code, num, *pubs)
[ "def", "list_pubs", "(", "self", ",", "buf", ")", ":", "assert", "not", "buf", ".", "read", "(", ")", "keys", "=", "self", ".", "conn", ".", "parse_public_keys", "(", ")", "code", "=", "util", ".", "pack", "(", "'B'", ",", "msg_code", "(", "'SSH2_A...
SSH v2 public keys are serialized and returned.
[ "SSH", "v2", "public", "keys", "are", "serialized", "and", "returned", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/ssh/protocol.py#L108-L118
train
romanz/trezor-agent
libagent/ssh/protocol.py
Handler.sign_message
def sign_message(self, buf): """ SSH v2 public key authentication is performed. If the required key is not supported, raise KeyError If the signature is invalid, raise ValueError """ key = formats.parse_pubkey(util.read_frame(buf)) log.debug('looking for %s', key['fingerprint']) blob = util.read_frame(buf) assert util.read_frame(buf) == b'' assert not buf.read() for k in self.conn.parse_public_keys(): if (k['fingerprint']) == (key['fingerprint']): log.debug('using key %r (%s)', k['name'], k['fingerprint']) key = k break else: raise KeyError('key not found') label = key['name'].decode('utf-8') log.debug('signing %d-byte blob with "%s" key', len(blob), label) try: signature = self.conn.sign(blob=blob, identity=key['identity']) except IOError: return failure() log.debug('signature: %r', signature) try: sig_bytes = key['verifier'](sig=signature, msg=blob) log.info('signature status: OK') except formats.ecdsa.BadSignatureError: log.exception('signature status: ERROR') raise ValueError('invalid ECDSA signature') log.debug('signature size: %d bytes', len(sig_bytes)) data = util.frame(util.frame(key['type']), util.frame(sig_bytes)) code = util.pack('B', msg_code('SSH2_AGENT_SIGN_RESPONSE')) return util.frame(code, data)
python
def sign_message(self, buf): """ SSH v2 public key authentication is performed. If the required key is not supported, raise KeyError If the signature is invalid, raise ValueError """ key = formats.parse_pubkey(util.read_frame(buf)) log.debug('looking for %s', key['fingerprint']) blob = util.read_frame(buf) assert util.read_frame(buf) == b'' assert not buf.read() for k in self.conn.parse_public_keys(): if (k['fingerprint']) == (key['fingerprint']): log.debug('using key %r (%s)', k['name'], k['fingerprint']) key = k break else: raise KeyError('key not found') label = key['name'].decode('utf-8') log.debug('signing %d-byte blob with "%s" key', len(blob), label) try: signature = self.conn.sign(blob=blob, identity=key['identity']) except IOError: return failure() log.debug('signature: %r', signature) try: sig_bytes = key['verifier'](sig=signature, msg=blob) log.info('signature status: OK') except formats.ecdsa.BadSignatureError: log.exception('signature status: ERROR') raise ValueError('invalid ECDSA signature') log.debug('signature size: %d bytes', len(sig_bytes)) data = util.frame(util.frame(key['type']), util.frame(sig_bytes)) code = util.pack('B', msg_code('SSH2_AGENT_SIGN_RESPONSE')) return util.frame(code, data)
[ "def", "sign_message", "(", "self", ",", "buf", ")", ":", "key", "=", "formats", ".", "parse_pubkey", "(", "util", ".", "read_frame", "(", "buf", ")", ")", "log", ".", "debug", "(", "'looking for %s'", ",", "key", "[", "'fingerprint'", "]", ")", "blob"...
SSH v2 public key authentication is performed. If the required key is not supported, raise KeyError If the signature is invalid, raise ValueError
[ "SSH", "v2", "public", "key", "authentication", "is", "performed", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/ssh/protocol.py#L120-L160
train
romanz/trezor-agent
libagent/util.py
recv
def recv(conn, size): """ Receive bytes from connection socket or stream. If size is struct.calcsize()-compatible format, use it to unpack the data. Otherwise, return the plain blob as bytes. """ try: fmt = size size = struct.calcsize(fmt) except TypeError: fmt = None try: _read = conn.recv except AttributeError: _read = conn.read res = io.BytesIO() while size > 0: buf = _read(size) if not buf: raise EOFError size = size - len(buf) res.write(buf) res = res.getvalue() if fmt: return struct.unpack(fmt, res) else: return res
python
def recv(conn, size): """ Receive bytes from connection socket or stream. If size is struct.calcsize()-compatible format, use it to unpack the data. Otherwise, return the plain blob as bytes. """ try: fmt = size size = struct.calcsize(fmt) except TypeError: fmt = None try: _read = conn.recv except AttributeError: _read = conn.read res = io.BytesIO() while size > 0: buf = _read(size) if not buf: raise EOFError size = size - len(buf) res.write(buf) res = res.getvalue() if fmt: return struct.unpack(fmt, res) else: return res
[ "def", "recv", "(", "conn", ",", "size", ")", ":", "try", ":", "fmt", "=", "size", "size", "=", "struct", ".", "calcsize", "(", "fmt", ")", "except", "TypeError", ":", "fmt", "=", "None", "try", ":", "_read", "=", "conn", ".", "recv", "except", "...
Receive bytes from connection socket or stream. If size is struct.calcsize()-compatible format, use it to unpack the data. Otherwise, return the plain blob as bytes.
[ "Receive", "bytes", "from", "connection", "socket", "or", "stream", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/util.py#L18-L46
train
romanz/trezor-agent
libagent/util.py
bytes2num
def bytes2num(s): """Convert MSB-first bytes to an unsigned integer.""" res = 0 for i, c in enumerate(reversed(bytearray(s))): res += c << (i * 8) return res
python
def bytes2num(s): """Convert MSB-first bytes to an unsigned integer.""" res = 0 for i, c in enumerate(reversed(bytearray(s))): res += c << (i * 8) return res
[ "def", "bytes2num", "(", "s", ")", ":", "res", "=", "0", "for", "i", ",", "c", "in", "enumerate", "(", "reversed", "(", "bytearray", "(", "s", ")", ")", ")", ":", "res", "+=", "c", "<<", "(", "i", "*", "8", ")", "return", "res" ]
Convert MSB-first bytes to an unsigned integer.
[ "Convert", "MSB", "-", "first", "bytes", "to", "an", "unsigned", "integer", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/util.py#L55-L60
train
romanz/trezor-agent
libagent/util.py
num2bytes
def num2bytes(value, size): """Convert an unsigned integer to MSB-first bytes with specified size.""" res = [] for _ in range(size): res.append(value & 0xFF) value = value >> 8 assert value == 0 return bytes(bytearray(list(reversed(res))))
python
def num2bytes(value, size): """Convert an unsigned integer to MSB-first bytes with specified size.""" res = [] for _ in range(size): res.append(value & 0xFF) value = value >> 8 assert value == 0 return bytes(bytearray(list(reversed(res))))
[ "def", "num2bytes", "(", "value", ",", "size", ")", ":", "res", "=", "[", "]", "for", "_", "in", "range", "(", "size", ")", ":", "res", ".", "append", "(", "value", "&", "0xFF", ")", "value", "=", "value", ">>", "8", "assert", "value", "==", "0...
Convert an unsigned integer to MSB-first bytes with specified size.
[ "Convert", "an", "unsigned", "integer", "to", "MSB", "-", "first", "bytes", "with", "specified", "size", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/util.py#L63-L70
train
romanz/trezor-agent
libagent/util.py
frame
def frame(*msgs): """Serialize MSB-first length-prefixed frame.""" res = io.BytesIO() for msg in msgs: res.write(msg) msg = res.getvalue() return pack('L', len(msg)) + msg
python
def frame(*msgs): """Serialize MSB-first length-prefixed frame.""" res = io.BytesIO() for msg in msgs: res.write(msg) msg = res.getvalue() return pack('L', len(msg)) + msg
[ "def", "frame", "(", "*", "msgs", ")", ":", "res", "=", "io", ".", "BytesIO", "(", ")", "for", "msg", "in", "msgs", ":", "res", ".", "write", "(", "msg", ")", "msg", "=", "res", ".", "getvalue", "(", ")", "return", "pack", "(", "'L'", ",", "l...
Serialize MSB-first length-prefixed frame.
[ "Serialize", "MSB", "-", "first", "length", "-", "prefixed", "frame", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/util.py#L78-L84
train
romanz/trezor-agent
libagent/util.py
split_bits
def split_bits(value, *bits): """ Split integer value into list of ints, according to `bits` list. For example, split_bits(0x1234, 4, 8, 4) == [0x1, 0x23, 0x4] """ result = [] for b in reversed(bits): mask = (1 << b) - 1 result.append(value & mask) value = value >> b assert value == 0 result.reverse() return result
python
def split_bits(value, *bits): """ Split integer value into list of ints, according to `bits` list. For example, split_bits(0x1234, 4, 8, 4) == [0x1, 0x23, 0x4] """ result = [] for b in reversed(bits): mask = (1 << b) - 1 result.append(value & mask) value = value >> b assert value == 0 result.reverse() return result
[ "def", "split_bits", "(", "value", ",", "*", "bits", ")", ":", "result", "=", "[", "]", "for", "b", "in", "reversed", "(", "bits", ")", ":", "mask", "=", "(", "1", "<<", "b", ")", "-", "1", "result", ".", "append", "(", "value", "&", "mask", ...
Split integer value into list of ints, according to `bits` list. For example, split_bits(0x1234, 4, 8, 4) == [0x1, 0x23, 0x4]
[ "Split", "integer", "value", "into", "list", "of", "ints", "according", "to", "bits", "list", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/util.py#L115-L129
train
romanz/trezor-agent
libagent/util.py
readfmt
def readfmt(stream, fmt): """Read and unpack an object from stream, using a struct format string.""" size = struct.calcsize(fmt) blob = stream.read(size) return struct.unpack(fmt, blob)
python
def readfmt(stream, fmt): """Read and unpack an object from stream, using a struct format string.""" size = struct.calcsize(fmt) blob = stream.read(size) return struct.unpack(fmt, blob)
[ "def", "readfmt", "(", "stream", ",", "fmt", ")", ":", "size", "=", "struct", ".", "calcsize", "(", "fmt", ")", "blob", "=", "stream", ".", "read", "(", "size", ")", "return", "struct", ".", "unpack", "(", "fmt", ",", "blob", ")" ]
Read and unpack an object from stream, using a struct format string.
[ "Read", "and", "unpack", "an", "object", "from", "stream", "using", "a", "struct", "format", "string", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/util.py#L132-L136
train
romanz/trezor-agent
libagent/util.py
setup_logging
def setup_logging(verbosity, filename=None): """Configure logging for this tool.""" levels = [logging.WARNING, logging.INFO, logging.DEBUG] level = levels[min(verbosity, len(levels) - 1)] logging.root.setLevel(level) fmt = logging.Formatter('%(asctime)s %(levelname)-12s %(message)-100s ' '[%(filename)s:%(lineno)d]') hdlr = logging.StreamHandler() # stderr hdlr.setFormatter(fmt) logging.root.addHandler(hdlr) if filename: hdlr = logging.FileHandler(filename, 'a') hdlr.setFormatter(fmt) logging.root.addHandler(hdlr)
python
def setup_logging(verbosity, filename=None): """Configure logging for this tool.""" levels = [logging.WARNING, logging.INFO, logging.DEBUG] level = levels[min(verbosity, len(levels) - 1)] logging.root.setLevel(level) fmt = logging.Formatter('%(asctime)s %(levelname)-12s %(message)-100s ' '[%(filename)s:%(lineno)d]') hdlr = logging.StreamHandler() # stderr hdlr.setFormatter(fmt) logging.root.addHandler(hdlr) if filename: hdlr = logging.FileHandler(filename, 'a') hdlr.setFormatter(fmt) logging.root.addHandler(hdlr)
[ "def", "setup_logging", "(", "verbosity", ",", "filename", "=", "None", ")", ":", "levels", "=", "[", "logging", ".", "WARNING", ",", "logging", ".", "INFO", ",", "logging", ".", "DEBUG", "]", "level", "=", "levels", "[", "min", "(", "verbosity", ",", ...
Configure logging for this tool.
[ "Configure", "logging", "for", "this", "tool", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/util.py#L183-L198
train
romanz/trezor-agent
libagent/util.py
which
def which(cmd): """Return full path to specified command, or raise OSError if missing.""" try: # For Python 3 from shutil import which as _which except ImportError: # For Python 2 from backports.shutil_which import which as _which # pylint: disable=relative-import full_path = _which(cmd) if full_path is None: raise OSError('Cannot find {!r} in $PATH'.format(cmd)) log.debug('which %r => %r', cmd, full_path) return full_path
python
def which(cmd): """Return full path to specified command, or raise OSError if missing.""" try: # For Python 3 from shutil import which as _which except ImportError: # For Python 2 from backports.shutil_which import which as _which # pylint: disable=relative-import full_path = _which(cmd) if full_path is None: raise OSError('Cannot find {!r} in $PATH'.format(cmd)) log.debug('which %r => %r', cmd, full_path) return full_path
[ "def", "which", "(", "cmd", ")", ":", "try", ":", "from", "shutil", "import", "which", "as", "_which", "except", "ImportError", ":", "from", "backports", ".", "shutil_which", "import", "which", "as", "_which", "full_path", "=", "_which", "(", "cmd", ")", ...
Return full path to specified command, or raise OSError if missing.
[ "Return", "full", "path", "to", "specified", "command", "or", "raise", "OSError", "if", "missing", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/util.py#L238-L250
train
romanz/trezor-agent
libagent/util.py
Reader.readfmt
def readfmt(self, fmt): """Read a specified object, using a struct format string.""" size = struct.calcsize(fmt) blob = self.read(size) obj, = struct.unpack(fmt, blob) return obj
python
def readfmt(self, fmt): """Read a specified object, using a struct format string.""" size = struct.calcsize(fmt) blob = self.read(size) obj, = struct.unpack(fmt, blob) return obj
[ "def", "readfmt", "(", "self", ",", "fmt", ")", ":", "size", "=", "struct", ".", "calcsize", "(", "fmt", ")", "blob", "=", "self", ".", "read", "(", "size", ")", "obj", ",", "=", "struct", ".", "unpack", "(", "fmt", ",", "blob", ")", "return", ...
Read a specified object, using a struct format string.
[ "Read", "a", "specified", "object", "using", "a", "struct", "format", "string", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/util.py#L157-L162
train
romanz/trezor-agent
libagent/util.py
Reader.read
def read(self, size=None): """Read `size` bytes from stream.""" blob = self.s.read(size) if size is not None and len(blob) < size: raise EOFError if self._captured: self._captured.write(blob) return blob
python
def read(self, size=None): """Read `size` bytes from stream.""" blob = self.s.read(size) if size is not None and len(blob) < size: raise EOFError if self._captured: self._captured.write(blob) return blob
[ "def", "read", "(", "self", ",", "size", "=", "None", ")", ":", "blob", "=", "self", ".", "s", ".", "read", "(", "size", ")", "if", "size", "is", "not", "None", "and", "len", "(", "blob", ")", "<", "size", ":", "raise", "EOFError", "if", "self"...
Read `size` bytes from stream.
[ "Read", "size", "bytes", "from", "stream", "." ]
513b1259c4d7aca5f88cd958edc11828d0712f1b
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/util.py#L164-L171
train