repo
stringlengths
7
55
path
stringlengths
4
223
func_name
stringlengths
1
134
original_string
stringlengths
75
104k
language
stringclasses
1 value
code
stringlengths
75
104k
code_tokens
listlengths
19
28.4k
docstring
stringlengths
1
46.9k
docstring_tokens
listlengths
1
1.97k
sha
stringlengths
40
40
url
stringlengths
87
315
partition
stringclasses
1 value
robhowley/nhlscrapi
nhlscrapi/games/events.py
EventFactory.Create
def Create(event_type): """ Factory method creates objects derived from :py:class`.Event` with class name matching the :py:class`.EventType`. :param event_type: number for type of event :returns: constructed event corresponding to ``event_type`` :rtype: :py:class:`.Event` """ if event_type in EventType.Name: # unknown event type gets base class if EventType.Name[event_type] == Event.__name__: return Event() else: # instantiate Event subclass with same name as EventType name return [t for t in EventFactory.event_list if t.__name__ == EventType.Name[event_type]][0]() else: raise TypeError("EventFactory.Create: Invalid EventType")
python
def Create(event_type): """ Factory method creates objects derived from :py:class`.Event` with class name matching the :py:class`.EventType`. :param event_type: number for type of event :returns: constructed event corresponding to ``event_type`` :rtype: :py:class:`.Event` """ if event_type in EventType.Name: # unknown event type gets base class if EventType.Name[event_type] == Event.__name__: return Event() else: # instantiate Event subclass with same name as EventType name return [t for t in EventFactory.event_list if t.__name__ == EventType.Name[event_type]][0]() else: raise TypeError("EventFactory.Create: Invalid EventType")
[ "def", "Create", "(", "event_type", ")", ":", "if", "event_type", "in", "EventType", ".", "Name", ":", "# unknown event type gets base class", "if", "EventType", ".", "Name", "[", "event_type", "]", "==", "Event", ".", "__name__", ":", "return", "Event", "(", ")", "else", ":", "# instantiate Event subclass with same name as EventType name", "return", "[", "t", "for", "t", "in", "EventFactory", ".", "event_list", "if", "t", ".", "__name__", "==", "EventType", ".", "Name", "[", "event_type", "]", "]", "[", "0", "]", "(", ")", "else", ":", "raise", "TypeError", "(", "\"EventFactory.Create: Invalid EventType\"", ")" ]
Factory method creates objects derived from :py:class`.Event` with class name matching the :py:class`.EventType`. :param event_type: number for type of event :returns: constructed event corresponding to ``event_type`` :rtype: :py:class:`.Event`
[ "Factory", "method", "creates", "objects", "derived", "from", ":", "py", ":", "class", ".", "Event", "with", "class", "name", "matching", "the", ":", "py", ":", "class", ".", "EventType", ".", ":", "param", "event_type", ":", "number", "for", "type", "of", "event", ":", "returns", ":", "constructed", "event", "corresponding", "to", "event_type", ":", "rtype", ":", ":", "py", ":", "class", ":", ".", "Event" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/games/events.py#L167-L183
train
robhowley/nhlscrapi
nhlscrapi/games/toi.py
TOI.home_shift_summ
def home_shift_summ(self): """ :returns: :py:class:`.ShiftSummary` by player for the home team :rtype: dict ``{ player_num: shift_summary_obj }`` """ if not self.__wrapped_home: self.__wrapped_home = self.__wrap(self._home.by_player) return self.__wrapped_home
python
def home_shift_summ(self): """ :returns: :py:class:`.ShiftSummary` by player for the home team :rtype: dict ``{ player_num: shift_summary_obj }`` """ if not self.__wrapped_home: self.__wrapped_home = self.__wrap(self._home.by_player) return self.__wrapped_home
[ "def", "home_shift_summ", "(", "self", ")", ":", "if", "not", "self", ".", "__wrapped_home", ":", "self", ".", "__wrapped_home", "=", "self", ".", "__wrap", "(", "self", ".", "_home", ".", "by_player", ")", "return", "self", ".", "__wrapped_home" ]
:returns: :py:class:`.ShiftSummary` by player for the home team :rtype: dict ``{ player_num: shift_summary_obj }``
[ ":", "returns", ":", ":", "py", ":", "class", ":", ".", "ShiftSummary", "by", "player", "for", "the", "home", "team", ":", "rtype", ":", "dict", "{", "player_num", ":", "shift_summary_obj", "}" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/games/toi.py#L88-L96
train
robhowley/nhlscrapi
nhlscrapi/games/toi.py
TOI.away_shift_summ
def away_shift_summ(self): """ :returns: :py:class:`.ShiftSummary` by player for the away team :rtype: dict ``{ player_num: shift_summary_obj }`` """ if not self.__wrapped_away: self.__wrapped_away = self.__wrap(self._away.by_player) return self.__wrapped_away
python
def away_shift_summ(self): """ :returns: :py:class:`.ShiftSummary` by player for the away team :rtype: dict ``{ player_num: shift_summary_obj }`` """ if not self.__wrapped_away: self.__wrapped_away = self.__wrap(self._away.by_player) return self.__wrapped_away
[ "def", "away_shift_summ", "(", "self", ")", ":", "if", "not", "self", ".", "__wrapped_away", ":", "self", ".", "__wrapped_away", "=", "self", ".", "__wrap", "(", "self", ".", "_away", ".", "by_player", ")", "return", "self", ".", "__wrapped_away" ]
:returns: :py:class:`.ShiftSummary` by player for the away team :rtype: dict ``{ player_num: shift_summary_obj }``
[ ":", "returns", ":", ":", "py", ":", "class", ":", ".", "ShiftSummary", "by", "player", "for", "the", "away", "team", ":", "rtype", ":", "dict", "{", "player_num", ":", "shift_summary_obj", "}" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/games/toi.py#L100-L108
train
robhowley/nhlscrapi
nhlscrapi/scrapr/rosterrep.py
RosterRep.parse
def parse(self): """ Retreive and parse Play by Play data for the given :py:class:`nhlscrapi.games.game.GameKey`` :returns: ``self`` on success, ``None`` otherwise """ try: return super(RosterRep, self).parse() \ .parse_rosters() \ .parse_scratches() \ .parse_coaches() \ .parse_officials() except: return None
python
def parse(self): """ Retreive and parse Play by Play data for the given :py:class:`nhlscrapi.games.game.GameKey`` :returns: ``self`` on success, ``None`` otherwise """ try: return super(RosterRep, self).parse() \ .parse_rosters() \ .parse_scratches() \ .parse_coaches() \ .parse_officials() except: return None
[ "def", "parse", "(", "self", ")", ":", "try", ":", "return", "super", "(", "RosterRep", ",", "self", ")", ".", "parse", "(", ")", ".", "parse_rosters", "(", ")", ".", "parse_scratches", "(", ")", ".", "parse_coaches", "(", ")", ".", "parse_officials", "(", ")", "except", ":", "return", "None" ]
Retreive and parse Play by Play data for the given :py:class:`nhlscrapi.games.game.GameKey`` :returns: ``self`` on success, ``None`` otherwise
[ "Retreive", "and", "parse", "Play", "by", "Play", "data", "for", "the", "given", ":", "py", ":", "class", ":", "nhlscrapi", ".", "games", ".", "game", ".", "GameKey" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/rosterrep.py#L65-L79
train
robhowley/nhlscrapi
nhlscrapi/scrapr/rosterrep.py
RosterRep.parse_rosters
def parse_rosters(self): """ Parse the home and away game rosters :returns: ``self`` on success, ``None`` otherwise """ lx_doc = self.html_doc() if not self.__blocks: self.__pl_blocks(lx_doc) for t in ['home', 'away']: self.rosters[t] = self.__clean_pl_block(self.__blocks[t]) return self if self.rosters else None
python
def parse_rosters(self): """ Parse the home and away game rosters :returns: ``self`` on success, ``None`` otherwise """ lx_doc = self.html_doc() if not self.__blocks: self.__pl_blocks(lx_doc) for t in ['home', 'away']: self.rosters[t] = self.__clean_pl_block(self.__blocks[t]) return self if self.rosters else None
[ "def", "parse_rosters", "(", "self", ")", ":", "lx_doc", "=", "self", ".", "html_doc", "(", ")", "if", "not", "self", ".", "__blocks", ":", "self", ".", "__pl_blocks", "(", "lx_doc", ")", "for", "t", "in", "[", "'home'", ",", "'away'", "]", ":", "self", ".", "rosters", "[", "t", "]", "=", "self", ".", "__clean_pl_block", "(", "self", ".", "__blocks", "[", "t", "]", ")", "return", "self", "if", "self", ".", "rosters", "else", "None" ]
Parse the home and away game rosters :returns: ``self`` on success, ``None`` otherwise
[ "Parse", "the", "home", "and", "away", "game", "rosters" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/rosterrep.py#L82-L96
train
robhowley/nhlscrapi
nhlscrapi/scrapr/rosterrep.py
RosterRep.parse_scratches
def parse_scratches(self): """ Parse the home and away healthy scratches :returns: ``self`` on success, ``None`` otherwise """ lx_doc = self.html_doc() if not self.__blocks: self.__pl_blocks(lx_doc) for t in ['aw_scr', 'h_scr']: ix = 'away' if t == 'aw_scr' else 'home' self.scratches[ix] = self.__clean_pl_block(self.__blocks[t]) return self if self.scratches else None
python
def parse_scratches(self): """ Parse the home and away healthy scratches :returns: ``self`` on success, ``None`` otherwise """ lx_doc = self.html_doc() if not self.__blocks: self.__pl_blocks(lx_doc) for t in ['aw_scr', 'h_scr']: ix = 'away' if t == 'aw_scr' else 'home' self.scratches[ix] = self.__clean_pl_block(self.__blocks[t]) return self if self.scratches else None
[ "def", "parse_scratches", "(", "self", ")", ":", "lx_doc", "=", "self", ".", "html_doc", "(", ")", "if", "not", "self", ".", "__blocks", ":", "self", ".", "__pl_blocks", "(", "lx_doc", ")", "for", "t", "in", "[", "'aw_scr'", ",", "'h_scr'", "]", ":", "ix", "=", "'away'", "if", "t", "==", "'aw_scr'", "else", "'home'", "self", ".", "scratches", "[", "ix", "]", "=", "self", ".", "__clean_pl_block", "(", "self", ".", "__blocks", "[", "t", "]", ")", "return", "self", "if", "self", ".", "scratches", "else", "None" ]
Parse the home and away healthy scratches :returns: ``self`` on success, ``None`` otherwise
[ "Parse", "the", "home", "and", "away", "healthy", "scratches" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/rosterrep.py#L99-L113
train
robhowley/nhlscrapi
nhlscrapi/scrapr/rosterrep.py
RosterRep.parse_coaches
def parse_coaches(self): """ Parse the home and away coaches :returns: ``self`` on success, ``None`` otherwise """ lx_doc = self.html_doc() tr = lx_doc.xpath('//tr[@id="HeadCoaches"]')[0] for i, td in enumerate(tr): txt = td.xpath('.//text()') txt = ex_junk(txt, ['\n','\r']) team = 'away' if i == 0 else 'home' self.coaches[team] = txt[0] return self if self.coaches else None
python
def parse_coaches(self): """ Parse the home and away coaches :returns: ``self`` on success, ``None`` otherwise """ lx_doc = self.html_doc() tr = lx_doc.xpath('//tr[@id="HeadCoaches"]')[0] for i, td in enumerate(tr): txt = td.xpath('.//text()') txt = ex_junk(txt, ['\n','\r']) team = 'away' if i == 0 else 'home' self.coaches[team] = txt[0] return self if self.coaches else None
[ "def", "parse_coaches", "(", "self", ")", ":", "lx_doc", "=", "self", ".", "html_doc", "(", ")", "tr", "=", "lx_doc", ".", "xpath", "(", "'//tr[@id=\"HeadCoaches\"]'", ")", "[", "0", "]", "for", "i", ",", "td", "in", "enumerate", "(", "tr", ")", ":", "txt", "=", "td", ".", "xpath", "(", "'.//text()'", ")", "txt", "=", "ex_junk", "(", "txt", ",", "[", "'\\n'", ",", "'\\r'", "]", ")", "team", "=", "'away'", "if", "i", "==", "0", "else", "'home'", "self", ".", "coaches", "[", "team", "]", "=", "txt", "[", "0", "]", "return", "self", "if", "self", ".", "coaches", "else", "None" ]
Parse the home and away coaches :returns: ``self`` on success, ``None`` otherwise
[ "Parse", "the", "home", "and", "away", "coaches" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/rosterrep.py#L116-L131
train
robhowley/nhlscrapi
nhlscrapi/scrapr/rosterrep.py
RosterRep.parse_officials
def parse_officials(self): """ Parse the officials :returns: ``self`` on success, ``None`` otherwise """ # begin proper body of method lx_doc = self.html_doc() off_parser = opm(self.game_key.season) self.officials = off_parser(lx_doc) return self if self.officials else None
python
def parse_officials(self): """ Parse the officials :returns: ``self`` on success, ``None`` otherwise """ # begin proper body of method lx_doc = self.html_doc() off_parser = opm(self.game_key.season) self.officials = off_parser(lx_doc) return self if self.officials else None
[ "def", "parse_officials", "(", "self", ")", ":", "# begin proper body of method", "lx_doc", "=", "self", ".", "html_doc", "(", ")", "off_parser", "=", "opm", "(", "self", ".", "game_key", ".", "season", ")", "self", ".", "officials", "=", "off_parser", "(", "lx_doc", ")", "return", "self", "if", "self", ".", "officials", "else", "None" ]
Parse the officials :returns: ``self`` on success, ``None`` otherwise
[ "Parse", "the", "officials" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/rosterrep.py#L134-L145
train
michaelherold/pyIsEmail
pyisemail/__init__.py
is_email
def is_email(address, check_dns=False, diagnose=False): """Validate an email address. Keyword arguments: address --- the email address as a string check_dns --- flag for whether to check the DNS status of the domain diagnose --- flag for whether to return True/False or a Diagnosis """ threshold = BaseDiagnosis.CATEGORIES["THRESHOLD"] d = ParserValidator().is_email(address, True) if check_dns is True and d < BaseDiagnosis.CATEGORIES["DNSWARN"]: threshold = BaseDiagnosis.CATEGORIES["VALID"] d = max(d, DNSValidator().is_valid(address.split("@")[1], True)) return d if diagnose else d < threshold
python
def is_email(address, check_dns=False, diagnose=False): """Validate an email address. Keyword arguments: address --- the email address as a string check_dns --- flag for whether to check the DNS status of the domain diagnose --- flag for whether to return True/False or a Diagnosis """ threshold = BaseDiagnosis.CATEGORIES["THRESHOLD"] d = ParserValidator().is_email(address, True) if check_dns is True and d < BaseDiagnosis.CATEGORIES["DNSWARN"]: threshold = BaseDiagnosis.CATEGORIES["VALID"] d = max(d, DNSValidator().is_valid(address.split("@")[1], True)) return d if diagnose else d < threshold
[ "def", "is_email", "(", "address", ",", "check_dns", "=", "False", ",", "diagnose", "=", "False", ")", ":", "threshold", "=", "BaseDiagnosis", ".", "CATEGORIES", "[", "\"THRESHOLD\"", "]", "d", "=", "ParserValidator", "(", ")", ".", "is_email", "(", "address", ",", "True", ")", "if", "check_dns", "is", "True", "and", "d", "<", "BaseDiagnosis", ".", "CATEGORIES", "[", "\"DNSWARN\"", "]", ":", "threshold", "=", "BaseDiagnosis", ".", "CATEGORIES", "[", "\"VALID\"", "]", "d", "=", "max", "(", "d", ",", "DNSValidator", "(", ")", ".", "is_valid", "(", "address", ".", "split", "(", "\"@\"", ")", "[", "1", "]", ",", "True", ")", ")", "return", "d", "if", "diagnose", "else", "d", "<", "threshold" ]
Validate an email address. Keyword arguments: address --- the email address as a string check_dns --- flag for whether to check the DNS status of the domain diagnose --- flag for whether to return True/False or a Diagnosis
[ "Validate", "an", "email", "address", "." ]
dd42d6425c59e5061fc214d42672210dccc64cf5
https://github.com/michaelherold/pyIsEmail/blob/dd42d6425c59e5061fc214d42672210dccc64cf5/pyisemail/__init__.py#L12-L28
train
robhowley/nhlscrapi
nhlscrapi/games/repscrwrap.py
dispatch_loader
def dispatch_loader(scraper, loader_name): """ Decorator that enforces one time loading for scrapers. The one time loading is applied to partial loaders, e.g. only parse and load the home team roster once. This is not meant to be used directly. :param scraper: property name (string) containing an object of type :py:class:`scrapr.ReportLoader` :param loader_name: name of method that does the scraping/parsing :returns: function wrapper """ l = '.'.join([scraper, loader_name]) def wrapper(f): @wraps(f) def wrapped(self, *f_args, **f_kwargs): if not hasattr(self, '_loaded'): self._loaded = { } already_loaded = self._loaded.setdefault(l, False) if not already_loaded: attr = getattr(self, scraper) self._loaded[l] = getattr(attr, loader_name)() is not None return f(self, *f_args, **f_kwargs) return wrapped return wrapper
python
def dispatch_loader(scraper, loader_name): """ Decorator that enforces one time loading for scrapers. The one time loading is applied to partial loaders, e.g. only parse and load the home team roster once. This is not meant to be used directly. :param scraper: property name (string) containing an object of type :py:class:`scrapr.ReportLoader` :param loader_name: name of method that does the scraping/parsing :returns: function wrapper """ l = '.'.join([scraper, loader_name]) def wrapper(f): @wraps(f) def wrapped(self, *f_args, **f_kwargs): if not hasattr(self, '_loaded'): self._loaded = { } already_loaded = self._loaded.setdefault(l, False) if not already_loaded: attr = getattr(self, scraper) self._loaded[l] = getattr(attr, loader_name)() is not None return f(self, *f_args, **f_kwargs) return wrapped return wrapper
[ "def", "dispatch_loader", "(", "scraper", ",", "loader_name", ")", ":", "l", "=", "'.'", ".", "join", "(", "[", "scraper", ",", "loader_name", "]", ")", "def", "wrapper", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wrapped", "(", "self", ",", "*", "f_args", ",", "*", "*", "f_kwargs", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_loaded'", ")", ":", "self", ".", "_loaded", "=", "{", "}", "already_loaded", "=", "self", ".", "_loaded", ".", "setdefault", "(", "l", ",", "False", ")", "if", "not", "already_loaded", ":", "attr", "=", "getattr", "(", "self", ",", "scraper", ")", "self", ".", "_loaded", "[", "l", "]", "=", "getattr", "(", "attr", ",", "loader_name", ")", "(", ")", "is", "not", "None", "return", "f", "(", "self", ",", "*", "f_args", ",", "*", "*", "f_kwargs", ")", "return", "wrapped", "return", "wrapper" ]
Decorator that enforces one time loading for scrapers. The one time loading is applied to partial loaders, e.g. only parse and load the home team roster once. This is not meant to be used directly. :param scraper: property name (string) containing an object of type :py:class:`scrapr.ReportLoader` :param loader_name: name of method that does the scraping/parsing :returns: function wrapper
[ "Decorator", "that", "enforces", "one", "time", "loading", "for", "scrapers", ".", "The", "one", "time", "loading", "is", "applied", "to", "partial", "loaders", "e", ".", "g", ".", "only", "parse", "and", "load", "the", "home", "team", "roster", "once", ".", "This", "is", "not", "meant", "to", "be", "used", "directly", ".", ":", "param", "scraper", ":", "property", "name", "(", "string", ")", "containing", "an", "object", "of", "type", ":", "py", ":", "class", ":", "scrapr", ".", "ReportLoader", ":", "param", "loader_name", ":", "name", "of", "method", "that", "does", "the", "scraping", "/", "parsing", ":", "returns", ":", "function", "wrapper" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/games/repscrwrap.py#L5-L28
train
robhowley/nhlscrapi
nhlscrapi/scrapr/toirep.py
TOIRepBase.parse_shifts
def parse_shifts(self): """ Parse shifts from TOI report :returns: self if successfule else None """ lx_doc = self.html_doc() pl_heads = lx_doc.xpath('//td[contains(@class, "playerHeading")]') for pl in pl_heads: sh_sum = { } pl_text = pl.xpath('text()')[0] num_name = pl_text.replace(',','').split(' ') sh_sum['player_num'] = int(num_name[0]) if num_name[0].isdigit() else -1 sh_sum['player_name'] = { 'first': num_name[2], 'last': num_name[1] } first_shift = pl.xpath('../following-sibling::tr')[1] sh_sum['shifts'], last_shift = self.__player_shifts(first_shift) while ('Per' not in last_shift.xpath('.//text()')): last_shift = last_shift.xpath('following-sibling::tr')[0] per_summ = last_shift.xpath('.//tr')[0] sh_sum['by_period'], last_sum = self.__get_by_per_summ(per_summ) self.by_player[sh_sum['player_num']] = sh_sum return self if self.by_player else None
python
def parse_shifts(self): """ Parse shifts from TOI report :returns: self if successfule else None """ lx_doc = self.html_doc() pl_heads = lx_doc.xpath('//td[contains(@class, "playerHeading")]') for pl in pl_heads: sh_sum = { } pl_text = pl.xpath('text()')[0] num_name = pl_text.replace(',','').split(' ') sh_sum['player_num'] = int(num_name[0]) if num_name[0].isdigit() else -1 sh_sum['player_name'] = { 'first': num_name[2], 'last': num_name[1] } first_shift = pl.xpath('../following-sibling::tr')[1] sh_sum['shifts'], last_shift = self.__player_shifts(first_shift) while ('Per' not in last_shift.xpath('.//text()')): last_shift = last_shift.xpath('following-sibling::tr')[0] per_summ = last_shift.xpath('.//tr')[0] sh_sum['by_period'], last_sum = self.__get_by_per_summ(per_summ) self.by_player[sh_sum['player_num']] = sh_sum return self if self.by_player else None
[ "def", "parse_shifts", "(", "self", ")", ":", "lx_doc", "=", "self", ".", "html_doc", "(", ")", "pl_heads", "=", "lx_doc", ".", "xpath", "(", "'//td[contains(@class, \"playerHeading\")]'", ")", "for", "pl", "in", "pl_heads", ":", "sh_sum", "=", "{", "}", "pl_text", "=", "pl", ".", "xpath", "(", "'text()'", ")", "[", "0", "]", "num_name", "=", "pl_text", ".", "replace", "(", "','", ",", "''", ")", ".", "split", "(", "' '", ")", "sh_sum", "[", "'player_num'", "]", "=", "int", "(", "num_name", "[", "0", "]", ")", "if", "num_name", "[", "0", "]", ".", "isdigit", "(", ")", "else", "-", "1", "sh_sum", "[", "'player_name'", "]", "=", "{", "'first'", ":", "num_name", "[", "2", "]", ",", "'last'", ":", "num_name", "[", "1", "]", "}", "first_shift", "=", "pl", ".", "xpath", "(", "'../following-sibling::tr'", ")", "[", "1", "]", "sh_sum", "[", "'shifts'", "]", ",", "last_shift", "=", "self", ".", "__player_shifts", "(", "first_shift", ")", "while", "(", "'Per'", "not", "in", "last_shift", ".", "xpath", "(", "'.//text()'", ")", ")", ":", "last_shift", "=", "last_shift", ".", "xpath", "(", "'following-sibling::tr'", ")", "[", "0", "]", "per_summ", "=", "last_shift", ".", "xpath", "(", "'.//tr'", ")", "[", "0", "]", "sh_sum", "[", "'by_period'", "]", ",", "last_sum", "=", "self", ".", "__get_by_per_summ", "(", "per_summ", ")", "self", ".", "by_player", "[", "sh_sum", "[", "'player_num'", "]", "]", "=", "sh_sum", "return", "self", "if", "self", ".", "by_player", "else", "None" ]
Parse shifts from TOI report :returns: self if successfule else None
[ "Parse", "shifts", "from", "TOI", "report", ":", "returns", ":", "self", "if", "successfule", "else", "None" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/toirep.py#L130-L159
train
michaelherold/pyIsEmail
pyisemail/validators/dns_validator.py
DNSValidator.is_valid
def is_valid(self, domain, diagnose=False): """Check whether a domain has a valid MX or A record. Keyword arguments: domain --- the domain to check diagnose --- flag to report a diagnosis or a boolean (default False) """ return_status = [ValidDiagnosis()] dns_checked = False # http://tools.ietf.org/html/rfc5321#section-2.3.5 # Names that can be resolved to MX RRs or address (i.e., A or AAAA) # RRs (as discussed in Section 5) are permitted, as are CNAME RRs # whose targets can be resolved, in turn, to MX or address RRs. # # http://tools.ietf.org/html/rfc5321#section-5.1 # The lookup first attempts to locate an MX record associated with # the name. If a CNAME record is found, the resulting name is # processed as if it were the initial name. ... If an empty list of # MXs is returned, the address is treated as if it was associated # with an implicit MX RR, with a preference of 0, pointing to that # host. # # is_email() author's note: We will regard the existence of a CNAME to # be sufficient evidence of the domain's existence. For performance # reasons we will not repeat the DNS lookup for the CNAME's target, but # we will raise a warning because we didn't immediately find an MX # record. 
try: dns.resolver.query(domain, 'MX') dns_checked = True except (dns.resolver.NXDOMAIN, dns.name.NameTooLong): # Domain can't be found in DNS return_status.append(DNSDiagnosis('NO_RECORD')) # Since dns.resolver gives more information than the PHP analog, we # can say that TLDs that throw an NXDOMAIN or NameTooLong error # have been checked if len(domain.split('.')) == 1: dns_checked = True except dns.resolver.NoAnswer: # MX-record for domain can't be found return_status.append(DNSDiagnosis('NO_MX_RECORD')) try: # TODO: See if we can/need to narrow to A / CNAME dns.resolver.query(domain) except dns.resolver.NoAnswer: # No usable records for the domain can be found return_status.append(DNSDiagnosis('NO_RECORD')) except dns.resolver.NoNameservers: return_status.append(DNSDiagnosis('NO_NAMESERVERS')) except (dns.exception.Timeout, dns.resolver.Timeout): return_status.append(DNSDiagnosis('DNS_TIMEDOUT')) # Check for TLD addresses # ----------------------- # TLD addresses are specifically allowed in RFC 5321 but they are # unusual to say the least. We will allocate a separate # status to these addresses on the basis that they are more likely # to be typos than genuine addresses (unless we've already # established that the domain does have an MX record) # # http://tools.ietf.org/html/rfc5321#section-2.3.5 # In the case of a top-level domain used by itself in an address, a # single string is used without any dots. This makes the requirement, # described in more detail below, that only fully-qualified domain # names appear in SMTP transactions on the public Internet, # particularly important where top-level domains are involved. # # TLD format # ---------- # The format of TLDs has changed a number of times. The standards # used by IANA have been largely ignored by ICANN, leading to # confusion over the standards being followed. These are not defined # anywhere, except as a general component of a DNS host name (a label). 
# However, this could potentially lead to 123.123.123.123 being a # valid DNS name (rather than an IP address) and thereby creating # an ambiguity. The most authoritative statement on TLD formats that # the author can find is in a (rejected!) erratum to RFC 1123 # submitted by John Klensin, the author of RFC 5321: # # http://www.rfc-editor.org/errata_search.php?rfc=1123&eid=1353 # However, a valid host name can never have the dotted-decimal # form #.#.#.#, since this change does not permit the highest-level # component label to start with a digit even if it is not # all-numeric. if not dns_checked: atom_list = domain.split(".") if len(atom_list) == 1: return_status.append(RFC5321Diagnosis('TLD')) try: float(atom_list[len(atom_list)-1][0]) return_status.append(RFC5321Diagnosis('TLDNUMERIC')) except ValueError: pass final_status = max(return_status) return final_status if diagnose else final_status == ValidDiagnosis()
python
def is_valid(self, domain, diagnose=False): """Check whether a domain has a valid MX or A record. Keyword arguments: domain --- the domain to check diagnose --- flag to report a diagnosis or a boolean (default False) """ return_status = [ValidDiagnosis()] dns_checked = False # http://tools.ietf.org/html/rfc5321#section-2.3.5 # Names that can be resolved to MX RRs or address (i.e., A or AAAA) # RRs (as discussed in Section 5) are permitted, as are CNAME RRs # whose targets can be resolved, in turn, to MX or address RRs. # # http://tools.ietf.org/html/rfc5321#section-5.1 # The lookup first attempts to locate an MX record associated with # the name. If a CNAME record is found, the resulting name is # processed as if it were the initial name. ... If an empty list of # MXs is returned, the address is treated as if it was associated # with an implicit MX RR, with a preference of 0, pointing to that # host. # # is_email() author's note: We will regard the existence of a CNAME to # be sufficient evidence of the domain's existence. For performance # reasons we will not repeat the DNS lookup for the CNAME's target, but # we will raise a warning because we didn't immediately find an MX # record. 
try: dns.resolver.query(domain, 'MX') dns_checked = True except (dns.resolver.NXDOMAIN, dns.name.NameTooLong): # Domain can't be found in DNS return_status.append(DNSDiagnosis('NO_RECORD')) # Since dns.resolver gives more information than the PHP analog, we # can say that TLDs that throw an NXDOMAIN or NameTooLong error # have been checked if len(domain.split('.')) == 1: dns_checked = True except dns.resolver.NoAnswer: # MX-record for domain can't be found return_status.append(DNSDiagnosis('NO_MX_RECORD')) try: # TODO: See if we can/need to narrow to A / CNAME dns.resolver.query(domain) except dns.resolver.NoAnswer: # No usable records for the domain can be found return_status.append(DNSDiagnosis('NO_RECORD')) except dns.resolver.NoNameservers: return_status.append(DNSDiagnosis('NO_NAMESERVERS')) except (dns.exception.Timeout, dns.resolver.Timeout): return_status.append(DNSDiagnosis('DNS_TIMEDOUT')) # Check for TLD addresses # ----------------------- # TLD addresses are specifically allowed in RFC 5321 but they are # unusual to say the least. We will allocate a separate # status to these addresses on the basis that they are more likely # to be typos than genuine addresses (unless we've already # established that the domain does have an MX record) # # http://tools.ietf.org/html/rfc5321#section-2.3.5 # In the case of a top-level domain used by itself in an address, a # single string is used without any dots. This makes the requirement, # described in more detail below, that only fully-qualified domain # names appear in SMTP transactions on the public Internet, # particularly important where top-level domains are involved. # # TLD format # ---------- # The format of TLDs has changed a number of times. The standards # used by IANA have been largely ignored by ICANN, leading to # confusion over the standards being followed. These are not defined # anywhere, except as a general component of a DNS host name (a label). 
# However, this could potentially lead to 123.123.123.123 being a # valid DNS name (rather than an IP address) and thereby creating # an ambiguity. The most authoritative statement on TLD formats that # the author can find is in a (rejected!) erratum to RFC 1123 # submitted by John Klensin, the author of RFC 5321: # # http://www.rfc-editor.org/errata_search.php?rfc=1123&eid=1353 # However, a valid host name can never have the dotted-decimal # form #.#.#.#, since this change does not permit the highest-level # component label to start with a digit even if it is not # all-numeric. if not dns_checked: atom_list = domain.split(".") if len(atom_list) == 1: return_status.append(RFC5321Diagnosis('TLD')) try: float(atom_list[len(atom_list)-1][0]) return_status.append(RFC5321Diagnosis('TLDNUMERIC')) except ValueError: pass final_status = max(return_status) return final_status if diagnose else final_status == ValidDiagnosis()
[ "def", "is_valid", "(", "self", ",", "domain", ",", "diagnose", "=", "False", ")", ":", "return_status", "=", "[", "ValidDiagnosis", "(", ")", "]", "dns_checked", "=", "False", "# http://tools.ietf.org/html/rfc5321#section-2.3.5", "# Names that can be resolved to MX RRs or address (i.e., A or AAAA)", "# RRs (as discussed in Section 5) are permitted, as are CNAME RRs", "# whose targets can be resolved, in turn, to MX or address RRs.", "#", "# http://tools.ietf.org/html/rfc5321#section-5.1", "# The lookup first attempts to locate an MX record associated with", "# the name. If a CNAME record is found, the resulting name is", "# processed as if it were the initial name. ... If an empty list of", "# MXs is returned, the address is treated as if it was associated", "# with an implicit MX RR, with a preference of 0, pointing to that", "# host.", "#", "# is_email() author's note: We will regard the existence of a CNAME to", "# be sufficient evidence of the domain's existence. For performance", "# reasons we will not repeat the DNS lookup for the CNAME's target, but", "# we will raise a warning because we didn't immediately find an MX", "# record.", "try", ":", "dns", ".", "resolver", ".", "query", "(", "domain", ",", "'MX'", ")", "dns_checked", "=", "True", "except", "(", "dns", ".", "resolver", ".", "NXDOMAIN", ",", "dns", ".", "name", ".", "NameTooLong", ")", ":", "# Domain can't be found in DNS", "return_status", ".", "append", "(", "DNSDiagnosis", "(", "'NO_RECORD'", ")", ")", "# Since dns.resolver gives more information than the PHP analog, we", "# can say that TLDs that throw an NXDOMAIN or NameTooLong error", "# have been checked", "if", "len", "(", "domain", ".", "split", "(", "'.'", ")", ")", "==", "1", ":", "dns_checked", "=", "True", "except", "dns", ".", "resolver", ".", "NoAnswer", ":", "# MX-record for domain can't be found", "return_status", ".", "append", "(", "DNSDiagnosis", "(", "'NO_MX_RECORD'", ")", ")", "try", ":", "# TODO: See if we can/need to 
narrow to A / CNAME", "dns", ".", "resolver", ".", "query", "(", "domain", ")", "except", "dns", ".", "resolver", ".", "NoAnswer", ":", "# No usable records for the domain can be found", "return_status", ".", "append", "(", "DNSDiagnosis", "(", "'NO_RECORD'", ")", ")", "except", "dns", ".", "resolver", ".", "NoNameservers", ":", "return_status", ".", "append", "(", "DNSDiagnosis", "(", "'NO_NAMESERVERS'", ")", ")", "except", "(", "dns", ".", "exception", ".", "Timeout", ",", "dns", ".", "resolver", ".", "Timeout", ")", ":", "return_status", ".", "append", "(", "DNSDiagnosis", "(", "'DNS_TIMEDOUT'", ")", ")", "# Check for TLD addresses", "# -----------------------", "# TLD addresses are specifically allowed in RFC 5321 but they are", "# unusual to say the least. We will allocate a separate", "# status to these addresses on the basis that they are more likely", "# to be typos than genuine addresses (unless we've already", "# established that the domain does have an MX record)", "#", "# http://tools.ietf.org/html/rfc5321#section-2.3.5", "# In the case of a top-level domain used by itself in an address, a", "# single string is used without any dots. This makes the requirement,", "# described in more detail below, that only fully-qualified domain", "# names appear in SMTP transactions on the public Internet,", "# particularly important where top-level domains are involved.", "#", "# TLD format", "# ----------", "# The format of TLDs has changed a number of times. The standards", "# used by IANA have been largely ignored by ICANN, leading to", "# confusion over the standards being followed. These are not defined", "# anywhere, except as a general component of a DNS host name (a label).", "# However, this could potentially lead to 123.123.123.123 being a", "# valid DNS name (rather than an IP address) and thereby creating", "# an ambiguity. The most authoritative statement on TLD formats that", "# the author can find is in a (rejected!) 
erratum to RFC 1123", "# submitted by John Klensin, the author of RFC 5321:", "#", "# http://www.rfc-editor.org/errata_search.php?rfc=1123&eid=1353", "# However, a valid host name can never have the dotted-decimal", "# form #.#.#.#, since this change does not permit the highest-level", "# component label to start with a digit even if it is not", "# all-numeric.", "if", "not", "dns_checked", ":", "atom_list", "=", "domain", ".", "split", "(", "\".\"", ")", "if", "len", "(", "atom_list", ")", "==", "1", ":", "return_status", ".", "append", "(", "RFC5321Diagnosis", "(", "'TLD'", ")", ")", "try", ":", "float", "(", "atom_list", "[", "len", "(", "atom_list", ")", "-", "1", "]", "[", "0", "]", ")", "return_status", ".", "append", "(", "RFC5321Diagnosis", "(", "'TLDNUMERIC'", ")", ")", "except", "ValueError", ":", "pass", "final_status", "=", "max", "(", "return_status", ")", "return", "final_status", "if", "diagnose", "else", "final_status", "==", "ValidDiagnosis", "(", ")" ]
Check whether a domain has a valid MX or A record. Keyword arguments: domain --- the domain to check diagnose --- flag to report a diagnosis or a boolean (default False)
[ "Check", "whether", "a", "domain", "has", "a", "valid", "MX", "or", "A", "record", "." ]
dd42d6425c59e5061fc214d42672210dccc64cf5
https://github.com/michaelherold/pyIsEmail/blob/dd42d6425c59e5061fc214d42672210dccc64cf5/pyisemail/validators/dns_validator.py#L8-L111
train
robhowley/nhlscrapi
nhlscrapi/scrapr/reportloader.py
ReportLoader.html_doc
def html_doc(self): """ :returns: the lxml processed html document :rtype: ``lxml.html.document_fromstring`` output """ if self.__lx_doc is None: cn = NHLCn() if hasattr(cn, self.report_type): html = getattr(cn, self.report_type)(self.game_key) else: raise ValueError('Invalid report type: %s' % self.report_type) if cn.req_err is None: self.__lx_doc = fromstring(html) else: self.req_err = cn.req_err return self.__lx_doc
python
def html_doc(self): """ :returns: the lxml processed html document :rtype: ``lxml.html.document_fromstring`` output """ if self.__lx_doc is None: cn = NHLCn() if hasattr(cn, self.report_type): html = getattr(cn, self.report_type)(self.game_key) else: raise ValueError('Invalid report type: %s' % self.report_type) if cn.req_err is None: self.__lx_doc = fromstring(html) else: self.req_err = cn.req_err return self.__lx_doc
[ "def", "html_doc", "(", "self", ")", ":", "if", "self", ".", "__lx_doc", "is", "None", ":", "cn", "=", "NHLCn", "(", ")", "if", "hasattr", "(", "cn", ",", "self", ".", "report_type", ")", ":", "html", "=", "getattr", "(", "cn", ",", "self", ".", "report_type", ")", "(", "self", ".", "game_key", ")", "else", ":", "raise", "ValueError", "(", "'Invalid report type: %s'", "%", "self", ".", "report_type", ")", "if", "cn", ".", "req_err", "is", "None", ":", "self", ".", "__lx_doc", "=", "fromstring", "(", "html", ")", "else", ":", "self", ".", "req_err", "=", "cn", ".", "req_err", "return", "self", ".", "__lx_doc" ]
:returns: the lxml processed html document :rtype: ``lxml.html.document_fromstring`` output
[ ":", "returns", ":", "the", "lxml", "processed", "html", "document", ":", "rtype", ":", "lxml", ".", "html", ".", "document_fromstring", "output" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/reportloader.py#L52-L71
train
robhowley/nhlscrapi
nhlscrapi/scrapr/reportloader.py
ReportLoader.parse_matchup
def parse_matchup(self): """ Parse the banner matchup meta info for the game. :returns: ``self`` on success or ``None`` """ lx_doc = self.html_doc() try: if not self.matchup: self.matchup = self._fill_meta(lx_doc) return self except: return None
python
def parse_matchup(self): """ Parse the banner matchup meta info for the game. :returns: ``self`` on success or ``None`` """ lx_doc = self.html_doc() try: if not self.matchup: self.matchup = self._fill_meta(lx_doc) return self except: return None
[ "def", "parse_matchup", "(", "self", ")", ":", "lx_doc", "=", "self", ".", "html_doc", "(", ")", "try", ":", "if", "not", "self", ".", "matchup", ":", "self", ".", "matchup", "=", "self", ".", "_fill_meta", "(", "lx_doc", ")", "return", "self", "except", ":", "return", "None" ]
Parse the banner matchup meta info for the game. :returns: ``self`` on success or ``None``
[ "Parse", "the", "banner", "matchup", "meta", "info", "for", "the", "game", ".", ":", "returns", ":", "self", "on", "success", "or", "None" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/reportloader.py#L74-L86
train
robhowley/nhlscrapi
nhlscrapi/scrapr/rtss.py
RTSS.parse_plays_stream
def parse_plays_stream(self): """Generate and yield a stream of parsed plays. Useful for per play processing.""" lx_doc = self.html_doc() if lx_doc is not None: parser = PlayParser(self.game_key.season, self.game_key.game_type) plays = lx_doc.xpath('//tr[@class = "evenColor"]') for p in plays: p_obj = parser.build_play(p) self.plays.append(p_obj) yield p_obj
python
def parse_plays_stream(self): """Generate and yield a stream of parsed plays. Useful for per play processing.""" lx_doc = self.html_doc() if lx_doc is not None: parser = PlayParser(self.game_key.season, self.game_key.game_type) plays = lx_doc.xpath('//tr[@class = "evenColor"]') for p in plays: p_obj = parser.build_play(p) self.plays.append(p_obj) yield p_obj
[ "def", "parse_plays_stream", "(", "self", ")", ":", "lx_doc", "=", "self", ".", "html_doc", "(", ")", "if", "lx_doc", "is", "not", "None", ":", "parser", "=", "PlayParser", "(", "self", ".", "game_key", ".", "season", ",", "self", ".", "game_key", ".", "game_type", ")", "plays", "=", "lx_doc", ".", "xpath", "(", "'//tr[@class = \"evenColor\"]'", ")", "for", "p", "in", "plays", ":", "p_obj", "=", "parser", ".", "build_play", "(", "p", ")", "self", ".", "plays", ".", "append", "(", "p_obj", ")", "yield", "p_obj" ]
Generate and yield a stream of parsed plays. Useful for per play processing.
[ "Generate", "and", "yield", "a", "stream", "of", "parsed", "plays", ".", "Useful", "for", "per", "play", "processing", "." ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/rtss.py#L48-L59
train
robhowley/nhlscrapi
nhlscrapi/scrapr/rtss.py
PlayParser.ColMap
def ColMap(season): """ Returns a dictionary mapping the type of information in the RTSS play row to the appropriate column number. The column locations pre/post 2008 are different. :param season: int for the season number :returns: mapping of RTSS column to info type :rtype: dict, keys are ``'play_num', 'per', 'str', 'time', 'event', 'desc', 'vis', 'home'`` """ if c.MIN_SEASON <= season <= c.MAX_SEASON: return { "play_num": 0, "per": 1, "str": 2, "time": 3, "event": 4, "desc": 5, "vis": 6, "home": 7 } else: raise ValueError("RTSSCol.MAP(season): Invalid season " + str(season))
python
def ColMap(season): """ Returns a dictionary mapping the type of information in the RTSS play row to the appropriate column number. The column locations pre/post 2008 are different. :param season: int for the season number :returns: mapping of RTSS column to info type :rtype: dict, keys are ``'play_num', 'per', 'str', 'time', 'event', 'desc', 'vis', 'home'`` """ if c.MIN_SEASON <= season <= c.MAX_SEASON: return { "play_num": 0, "per": 1, "str": 2, "time": 3, "event": 4, "desc": 5, "vis": 6, "home": 7 } else: raise ValueError("RTSSCol.MAP(season): Invalid season " + str(season))
[ "def", "ColMap", "(", "season", ")", ":", "if", "c", ".", "MIN_SEASON", "<=", "season", "<=", "c", ".", "MAX_SEASON", ":", "return", "{", "\"play_num\"", ":", "0", ",", "\"per\"", ":", "1", ",", "\"str\"", ":", "2", ",", "\"time\"", ":", "3", ",", "\"event\"", ":", "4", ",", "\"desc\"", ":", "5", ",", "\"vis\"", ":", "6", ",", "\"home\"", ":", "7", "}", "else", ":", "raise", "ValueError", "(", "\"RTSSCol.MAP(season): Invalid season \"", "+", "str", "(", "season", ")", ")" ]
Returns a dictionary mapping the type of information in the RTSS play row to the appropriate column number. The column locations pre/post 2008 are different. :param season: int for the season number :returns: mapping of RTSS column to info type :rtype: dict, keys are ``'play_num', 'per', 'str', 'time', 'event', 'desc', 'vis', 'home'``
[ "Returns", "a", "dictionary", "mapping", "the", "type", "of", "information", "in", "the", "RTSS", "play", "row", "to", "the", "appropriate", "column", "number", ".", "The", "column", "locations", "pre", "/", "post", "2008", "are", "different", ".", ":", "param", "season", ":", "int", "for", "the", "season", "number", ":", "returns", ":", "mapping", "of", "RTSS", "column", "to", "info", "type", ":", "rtype", ":", "dict", "keys", "are", "play_num", "per", "str", "time", "event", "desc", "vis", "home" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/rtss.py#L71-L92
train
robhowley/nhlscrapi
nhlscrapi/scrapr/rtss.py
PlayParser.build_play
def build_play(self, pbp_row): """ Parses table row from RTSS. These are the rows tagged with ``<tr class='evenColor' ... >``. Result set contains :py:class:`nhlscrapi.games.playbyplay.Strength` and :py:class:`nhlscrapi.games.events.EventType` objects. Returned play data is in the form .. code:: python { 'play_num': num_of_play 'period': curr_period 'strength': strength_enum 'time': { 'min': min, 'sec': sec } 'vis_on_ice': { 'player_num': player } 'home_on_ice': { 'player_num': player } 'event': event_object } :param pbp_row: table row from RTSS :returns: play data :rtype: dict """ d = pbp_row.findall('./td') c = PlayParser.ColMap(self.season) p = { } to_dig = lambda t: int(t) if t.isdigit() else 0 p['play_num'] = to_int(d[c["play_num"]].text, 0) p['period'] = to_int(d[c["per"]].text, 0) p['strength'] = self.__strength(d[c["str"]].text) time = d[c["time"]].text.split(":") p['time'] = { "min": int(time[0]), "sec": int(time[1]) } skater_tab = d[c["vis"]].xpath("./table") p['vis_on_ice'] = self.__skaters(skater_tab[0][0]) if len(skater_tab) else { } skater_tab = d[c["home"]].xpath("./table") p['home_on_ice'] = self.__skaters(skater_tab[0][0]) if len(skater_tab) else { } p['event'] = event_type_mapper( d[c["event"]].text, period=p['period'], skater_ct=len(p['vis_on_ice']) + len(p['home_on_ice']), game_type=self.game_type ) p['event'].desc = " ".join([t.encode('ascii', 'replace').decode('utf-8') for t in d[c["desc"]].xpath("text()")]) parse_event_desc(p['event'], season=self.season) return p
python
def build_play(self, pbp_row): """ Parses table row from RTSS. These are the rows tagged with ``<tr class='evenColor' ... >``. Result set contains :py:class:`nhlscrapi.games.playbyplay.Strength` and :py:class:`nhlscrapi.games.events.EventType` objects. Returned play data is in the form .. code:: python { 'play_num': num_of_play 'period': curr_period 'strength': strength_enum 'time': { 'min': min, 'sec': sec } 'vis_on_ice': { 'player_num': player } 'home_on_ice': { 'player_num': player } 'event': event_object } :param pbp_row: table row from RTSS :returns: play data :rtype: dict """ d = pbp_row.findall('./td') c = PlayParser.ColMap(self.season) p = { } to_dig = lambda t: int(t) if t.isdigit() else 0 p['play_num'] = to_int(d[c["play_num"]].text, 0) p['period'] = to_int(d[c["per"]].text, 0) p['strength'] = self.__strength(d[c["str"]].text) time = d[c["time"]].text.split(":") p['time'] = { "min": int(time[0]), "sec": int(time[1]) } skater_tab = d[c["vis"]].xpath("./table") p['vis_on_ice'] = self.__skaters(skater_tab[0][0]) if len(skater_tab) else { } skater_tab = d[c["home"]].xpath("./table") p['home_on_ice'] = self.__skaters(skater_tab[0][0]) if len(skater_tab) else { } p['event'] = event_type_mapper( d[c["event"]].text, period=p['period'], skater_ct=len(p['vis_on_ice']) + len(p['home_on_ice']), game_type=self.game_type ) p['event'].desc = " ".join([t.encode('ascii', 'replace').decode('utf-8') for t in d[c["desc"]].xpath("text()")]) parse_event_desc(p['event'], season=self.season) return p
[ "def", "build_play", "(", "self", ",", "pbp_row", ")", ":", "d", "=", "pbp_row", ".", "findall", "(", "'./td'", ")", "c", "=", "PlayParser", ".", "ColMap", "(", "self", ".", "season", ")", "p", "=", "{", "}", "to_dig", "=", "lambda", "t", ":", "int", "(", "t", ")", "if", "t", ".", "isdigit", "(", ")", "else", "0", "p", "[", "'play_num'", "]", "=", "to_int", "(", "d", "[", "c", "[", "\"play_num\"", "]", "]", ".", "text", ",", "0", ")", "p", "[", "'period'", "]", "=", "to_int", "(", "d", "[", "c", "[", "\"per\"", "]", "]", ".", "text", ",", "0", ")", "p", "[", "'strength'", "]", "=", "self", ".", "__strength", "(", "d", "[", "c", "[", "\"str\"", "]", "]", ".", "text", ")", "time", "=", "d", "[", "c", "[", "\"time\"", "]", "]", ".", "text", ".", "split", "(", "\":\"", ")", "p", "[", "'time'", "]", "=", "{", "\"min\"", ":", "int", "(", "time", "[", "0", "]", ")", ",", "\"sec\"", ":", "int", "(", "time", "[", "1", "]", ")", "}", "skater_tab", "=", "d", "[", "c", "[", "\"vis\"", "]", "]", ".", "xpath", "(", "\"./table\"", ")", "p", "[", "'vis_on_ice'", "]", "=", "self", ".", "__skaters", "(", "skater_tab", "[", "0", "]", "[", "0", "]", ")", "if", "len", "(", "skater_tab", ")", "else", "{", "}", "skater_tab", "=", "d", "[", "c", "[", "\"home\"", "]", "]", ".", "xpath", "(", "\"./table\"", ")", "p", "[", "'home_on_ice'", "]", "=", "self", ".", "__skaters", "(", "skater_tab", "[", "0", "]", "[", "0", "]", ")", "if", "len", "(", "skater_tab", ")", "else", "{", "}", "p", "[", "'event'", "]", "=", "event_type_mapper", "(", "d", "[", "c", "[", "\"event\"", "]", "]", ".", "text", ",", "period", "=", "p", "[", "'period'", "]", ",", "skater_ct", "=", "len", "(", "p", "[", "'vis_on_ice'", "]", ")", "+", "len", "(", "p", "[", "'home_on_ice'", "]", ")", ",", "game_type", "=", "self", ".", "game_type", ")", "p", "[", "'event'", "]", ".", "desc", "=", "\" \"", ".", "join", "(", "[", "t", ".", "encode", "(", "'ascii'", ",", "'replace'", ")", ".", "decode", "(", "'utf-8'", ")", "for", 
"t", "in", "d", "[", "c", "[", "\"desc\"", "]", "]", ".", "xpath", "(", "\"text()\"", ")", "]", ")", "parse_event_desc", "(", "p", "[", "'event'", "]", ",", "season", "=", "self", ".", "season", ")", "return", "p" ]
Parses table row from RTSS. These are the rows tagged with ``<tr class='evenColor' ... >``. Result set contains :py:class:`nhlscrapi.games.playbyplay.Strength` and :py:class:`nhlscrapi.games.events.EventType` objects. Returned play data is in the form .. code:: python { 'play_num': num_of_play 'period': curr_period 'strength': strength_enum 'time': { 'min': min, 'sec': sec } 'vis_on_ice': { 'player_num': player } 'home_on_ice': { 'player_num': player } 'event': event_object } :param pbp_row: table row from RTSS :returns: play data :rtype: dict
[ "Parses", "table", "row", "from", "RTSS", ".", "These", "are", "the", "rows", "tagged", "with", "<tr", "class", "=", "evenColor", "...", ">", ".", "Result", "set", "contains", ":", "py", ":", "class", ":", "nhlscrapi", ".", "games", ".", "playbyplay", ".", "Strength", "and", ":", "py", ":", "class", ":", "nhlscrapi", ".", "games", ".", "events", ".", "EventType", "objects", ".", "Returned", "play", "data", "is", "in", "the", "form", "..", "code", "::", "python", "{", "play_num", ":", "num_of_play", "period", ":", "curr_period", "strength", ":", "strength_enum", "time", ":", "{", "min", ":", "min", "sec", ":", "sec", "}", "vis_on_ice", ":", "{", "player_num", ":", "player", "}", "home_on_ice", ":", "{", "player_num", ":", "player", "}", "event", ":", "event_object", "}", ":", "param", "pbp_row", ":", "table", "row", "from", "RTSS", ":", "returns", ":", "play", "data", ":", "rtype", ":", "dict" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/rtss.py#L94-L145
train
robhowley/nhlscrapi
nhlscrapi/scrapr/rtss.py
PlayParser.__skaters
def __skaters(self, tab): """ Constructs dictionary of players on the ice in the provided table at time of play. :param tab: RTSS table of the skaters and goalie on at the time of the play :rtype: dictionary, key = player number, value = [position, name] """ res = { } for td in tab.iterchildren(): if len(td): pl_data = td.xpath("./table/tr") pl = pl_data[0].xpath("./td/font") if pl[0].text.isdigit(): res[int(pl[0].text)] = [s.strip() for s in pl[0].get("title").split("-")][::-1] s = pl[0].get("title").split("-") pos = pl_data[1].getchildren()[0].text return res
python
def __skaters(self, tab): """ Constructs dictionary of players on the ice in the provided table at time of play. :param tab: RTSS table of the skaters and goalie on at the time of the play :rtype: dictionary, key = player number, value = [position, name] """ res = { } for td in tab.iterchildren(): if len(td): pl_data = td.xpath("./table/tr") pl = pl_data[0].xpath("./td/font") if pl[0].text.isdigit(): res[int(pl[0].text)] = [s.strip() for s in pl[0].get("title").split("-")][::-1] s = pl[0].get("title").split("-") pos = pl_data[1].getchildren()[0].text return res
[ "def", "__skaters", "(", "self", ",", "tab", ")", ":", "res", "=", "{", "}", "for", "td", "in", "tab", ".", "iterchildren", "(", ")", ":", "if", "len", "(", "td", ")", ":", "pl_data", "=", "td", ".", "xpath", "(", "\"./table/tr\"", ")", "pl", "=", "pl_data", "[", "0", "]", ".", "xpath", "(", "\"./td/font\"", ")", "if", "pl", "[", "0", "]", ".", "text", ".", "isdigit", "(", ")", ":", "res", "[", "int", "(", "pl", "[", "0", "]", ".", "text", ")", "]", "=", "[", "s", ".", "strip", "(", ")", "for", "s", "in", "pl", "[", "0", "]", ".", "get", "(", "\"title\"", ")", ".", "split", "(", "\"-\"", ")", "]", "[", ":", ":", "-", "1", "]", "s", "=", "pl", "[", "0", "]", ".", "get", "(", "\"title\"", ")", ".", "split", "(", "\"-\"", ")", "pos", "=", "pl_data", "[", "1", "]", ".", "getchildren", "(", ")", "[", "0", "]", ".", "text", "return", "res" ]
Constructs dictionary of players on the ice in the provided table at time of play. :param tab: RTSS table of the skaters and goalie on at the time of the play :rtype: dictionary, key = player number, value = [position, name]
[ "Constructs", "dictionary", "of", "players", "on", "the", "ice", "in", "the", "provided", "table", "at", "time", "of", "play", ".", ":", "param", "tab", ":", "RTSS", "table", "of", "the", "skaters", "and", "goalie", "on", "at", "the", "time", "of", "the", "play", ":", "rtype", ":", "dictionary", "key", "=", "player", "number", "value", "=", "[", "position", "name", "]" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/rtss.py#L147-L166
train
robhowley/nhlscrapi
nhlscrapi/_tools.py
exclude_from
def exclude_from(l, containing = [], equal_to = []): """Exclude elements in list l containing any elements from list ex. Example: >>> l = ['bob', 'r', 'rob\r', '\r\nrobert'] >>> containing = ['\n', '\r'] >>> equal_to = ['r'] >>> exclude_from(l, containing, equal_to) ['bob'] """ cont = lambda li: any(c in li for c in containing) eq = lambda li: any(e == li for e in equal_to) return [li for li in l if not (cont(li) or eq(li))]
python
def exclude_from(l, containing = [], equal_to = []): """Exclude elements in list l containing any elements from list ex. Example: >>> l = ['bob', 'r', 'rob\r', '\r\nrobert'] >>> containing = ['\n', '\r'] >>> equal_to = ['r'] >>> exclude_from(l, containing, equal_to) ['bob'] """ cont = lambda li: any(c in li for c in containing) eq = lambda li: any(e == li for e in equal_to) return [li for li in l if not (cont(li) or eq(li))]
[ "def", "exclude_from", "(", "l", ",", "containing", "=", "[", "]", ",", "equal_to", "=", "[", "]", ")", ":", "cont", "=", "lambda", "li", ":", "any", "(", "c", "in", "li", "for", "c", "in", "containing", ")", "eq", "=", "lambda", "li", ":", "any", "(", "e", "==", "li", "for", "e", "in", "equal_to", ")", "return", "[", "li", "for", "li", "in", "l", "if", "not", "(", "cont", "(", "li", ")", "or", "eq", "(", "li", ")", ")", "]" ]
Exclude elements in list l containing any elements from list ex. Example: >>> l = ['bob', 'r', 'rob\r', '\r\nrobert'] >>> containing = ['\n', '\r'] >>> equal_to = ['r'] >>> exclude_from(l, containing, equal_to) ['bob']
[ "Exclude", "elements", "in", "list", "l", "containing", "any", "elements", "from", "list", "ex", ".", "Example", ":", ">>>", "l", "=", "[", "bob", "r", "rob", "\\", "r", "\\", "r", "\\", "nrobert", "]", ">>>", "containing", "=", "[", "\\", "n", "\\", "r", "]", ">>>", "equal_to", "=", "[", "r", "]", ">>>", "exclude_from", "(", "l", "containing", "equal_to", ")", "[", "bob", "]" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/_tools.py#L23-L35
train
NeuroanatomyAndConnectivity/surfdist
nipype/surfdist_nipype.py
calc_surfdist
def calc_surfdist(surface, labels, annot, reg, origin, target): import nibabel as nib import numpy as np import os from surfdist import load, utils, surfdist import csv """ inputs: surface - surface file (e.g. lh.pial, with full path) labels - label file (e.g. lh.cortex.label, with full path) annot - annot file (e.g. lh.aparc.a2009s.annot, with full path) reg - registration file (lh.sphere.reg) origin - the label from which we calculate distances target - target surface (e.g. fsaverage4) """ # Load stuff surf = nib.freesurfer.read_geometry(surface) cort = np.sort(nib.freesurfer.read_label(labels)) src = load.load_freesurfer_label(annot, origin, cort) # Calculate distances dist = surfdist.dist_calc(surf, cort, src) # Project distances to target trg = nib.freesurfer.read_geometry(target)[0] native = nib.freesurfer.read_geometry(reg)[0] idx_trg_to_native = utils.find_node_match(trg, native)[0] # Get indices in trg space distt = dist[idx_trg_to_native] # Write to file and return file handle filename = os.path.join(os.getcwd(),'distances.csv') distt.tofile(filename,sep=",") return filename
python
def calc_surfdist(surface, labels, annot, reg, origin, target): import nibabel as nib import numpy as np import os from surfdist import load, utils, surfdist import csv """ inputs: surface - surface file (e.g. lh.pial, with full path) labels - label file (e.g. lh.cortex.label, with full path) annot - annot file (e.g. lh.aparc.a2009s.annot, with full path) reg - registration file (lh.sphere.reg) origin - the label from which we calculate distances target - target surface (e.g. fsaverage4) """ # Load stuff surf = nib.freesurfer.read_geometry(surface) cort = np.sort(nib.freesurfer.read_label(labels)) src = load.load_freesurfer_label(annot, origin, cort) # Calculate distances dist = surfdist.dist_calc(surf, cort, src) # Project distances to target trg = nib.freesurfer.read_geometry(target)[0] native = nib.freesurfer.read_geometry(reg)[0] idx_trg_to_native = utils.find_node_match(trg, native)[0] # Get indices in trg space distt = dist[idx_trg_to_native] # Write to file and return file handle filename = os.path.join(os.getcwd(),'distances.csv') distt.tofile(filename,sep=",") return filename
[ "def", "calc_surfdist", "(", "surface", ",", "labels", ",", "annot", ",", "reg", ",", "origin", ",", "target", ")", ":", "import", "nibabel", "as", "nib", "import", "numpy", "as", "np", "import", "os", "from", "surfdist", "import", "load", ",", "utils", ",", "surfdist", "import", "csv", "# Load stuff", "surf", "=", "nib", ".", "freesurfer", ".", "read_geometry", "(", "surface", ")", "cort", "=", "np", ".", "sort", "(", "nib", ".", "freesurfer", ".", "read_label", "(", "labels", ")", ")", "src", "=", "load", ".", "load_freesurfer_label", "(", "annot", ",", "origin", ",", "cort", ")", "# Calculate distances", "dist", "=", "surfdist", ".", "dist_calc", "(", "surf", ",", "cort", ",", "src", ")", "# Project distances to target", "trg", "=", "nib", ".", "freesurfer", ".", "read_geometry", "(", "target", ")", "[", "0", "]", "native", "=", "nib", ".", "freesurfer", ".", "read_geometry", "(", "reg", ")", "[", "0", "]", "idx_trg_to_native", "=", "utils", ".", "find_node_match", "(", "trg", ",", "native", ")", "[", "0", "]", "# Get indices in trg space ", "distt", "=", "dist", "[", "idx_trg_to_native", "]", "# Write to file and return file handle", "filename", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "'distances.csv'", ")", "distt", ".", "tofile", "(", "filename", ",", "sep", "=", "\",\"", ")", "return", "filename" ]
inputs: surface - surface file (e.g. lh.pial, with full path) labels - label file (e.g. lh.cortex.label, with full path) annot - annot file (e.g. lh.aparc.a2009s.annot, with full path) reg - registration file (lh.sphere.reg) origin - the label from which we calculate distances target - target surface (e.g. fsaverage4)
[ "inputs", ":", "surface", "-", "surface", "file", "(", "e", ".", "g", ".", "lh", ".", "pial", "with", "full", "path", ")", "labels", "-", "label", "file", "(", "e", ".", "g", ".", "lh", ".", "cortex", ".", "label", "with", "full", "path", ")", "annot", "-", "annot", "file", "(", "e", ".", "g", ".", "lh", ".", "aparc", ".", "a2009s", ".", "annot", "with", "full", "path", ")", "reg", "-", "registration", "file", "(", "lh", ".", "sphere", ".", "reg", ")", "origin", "-", "the", "label", "from", "which", "we", "calculate", "distances", "target", "-", "target", "surface", "(", "e", ".", "g", ".", "fsaverage4", ")" ]
849fdfbb2822ff1aa530a3b0bc955a4312e3edf1
https://github.com/NeuroanatomyAndConnectivity/surfdist/blob/849fdfbb2822ff1aa530a3b0bc955a4312e3edf1/nipype/surfdist_nipype.py#L20-L56
train
NeuroanatomyAndConnectivity/surfdist
nipype/surfdist_nipype.py
stack_files
def stack_files(files, hemi, source, target): """ This function takes a list of files as input and vstacks them """ import csv import os import numpy as np fname = "sdist_%s_%s_%s.csv" % (hemi, source, target) filename = os.path.join(os.getcwd(),fname) alldist = [] for dfile in files: alldist.append(np.genfromtxt(dfile, delimiter=',')) alldist = np.array(alldist) alldist.tofile(filename,",") return filename
python
def stack_files(files, hemi, source, target): """ This function takes a list of files as input and vstacks them """ import csv import os import numpy as np fname = "sdist_%s_%s_%s.csv" % (hemi, source, target) filename = os.path.join(os.getcwd(),fname) alldist = [] for dfile in files: alldist.append(np.genfromtxt(dfile, delimiter=',')) alldist = np.array(alldist) alldist.tofile(filename,",") return filename
[ "def", "stack_files", "(", "files", ",", "hemi", ",", "source", ",", "target", ")", ":", "import", "csv", "import", "os", "import", "numpy", "as", "np", "fname", "=", "\"sdist_%s_%s_%s.csv\"", "%", "(", "hemi", ",", "source", ",", "target", ")", "filename", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "fname", ")", "alldist", "=", "[", "]", "for", "dfile", "in", "files", ":", "alldist", ".", "append", "(", "np", ".", "genfromtxt", "(", "dfile", ",", "delimiter", "=", "','", ")", ")", "alldist", "=", "np", ".", "array", "(", "alldist", ")", "alldist", ".", "tofile", "(", "filename", ",", "\",\"", ")", "return", "filename" ]
This function takes a list of files as input and vstacks them
[ "This", "function", "takes", "a", "list", "of", "files", "as", "input", "and", "vstacks", "them" ]
849fdfbb2822ff1aa530a3b0bc955a4312e3edf1
https://github.com/NeuroanatomyAndConnectivity/surfdist/blob/849fdfbb2822ff1aa530a3b0bc955a4312e3edf1/nipype/surfdist_nipype.py#L58-L77
train
gcushen/mezzanine-api
mezzanine_api/serializers.py
PostOutputSerializer.get_short_url
def get_short_url(self, obj): """ Get short URL of blog post like '/blog/<slug>/' using ``get_absolute_url`` if available. Removes dependency on reverse URLs of Mezzanine views when deploying Mezzanine only as an API backend. """ try: url = obj.get_absolute_url() except NoReverseMatch: url = '/blog/' + obj.slug return url
python
def get_short_url(self, obj): """ Get short URL of blog post like '/blog/<slug>/' using ``get_absolute_url`` if available. Removes dependency on reverse URLs of Mezzanine views when deploying Mezzanine only as an API backend. """ try: url = obj.get_absolute_url() except NoReverseMatch: url = '/blog/' + obj.slug return url
[ "def", "get_short_url", "(", "self", ",", "obj", ")", ":", "try", ":", "url", "=", "obj", ".", "get_absolute_url", "(", ")", "except", "NoReverseMatch", ":", "url", "=", "'/blog/'", "+", "obj", ".", "slug", "return", "url" ]
Get short URL of blog post like '/blog/<slug>/' using ``get_absolute_url`` if available. Removes dependency on reverse URLs of Mezzanine views when deploying Mezzanine only as an API backend.
[ "Get", "short", "URL", "of", "blog", "post", "like", "/", "blog", "/", "<slug", ">", "/", "using", "get_absolute_url", "if", "available", ".", "Removes", "dependency", "on", "reverse", "URLs", "of", "Mezzanine", "views", "when", "deploying", "Mezzanine", "only", "as", "an", "API", "backend", "." ]
8e9d519c0008f46019302fa3d4bf83c372d4ae21
https://github.com/gcushen/mezzanine-api/blob/8e9d519c0008f46019302fa3d4bf83c372d4ae21/mezzanine_api/serializers.py#L253-L262
train
robhowley/nhlscrapi
nhlscrapi/games/faceoffcomp.py
FaceOffComparison.head_to_head
def head_to_head(self, home_num, away_num): """ Return the head-to-head face-off outcomes between two players. If the matchup didn't happen, ``{ }`` is returned. :param home_num: the number of the home team player :param away_num: the number of the away team player :returns: dict, either ``{ }`` or the following .. code:: python { 'home/away': { 'off/def/neut/all': { 'won': won, 'total': total } } } """ if home_num in self.home_fo and away_num in self.home_fo[home_num]['opps']: h_fo = self.home_fo[home_num]['opps'][away_num] a_fo = self.away_fo[away_num]['opps'][home_num] return { 'home': { k: h_fo[k] for k in self.__zones }, 'away': { k: a_fo[k] for k in self.__zones } } else: return { }
python
def head_to_head(self, home_num, away_num): """ Return the head-to-head face-off outcomes between two players. If the matchup didn't happen, ``{ }`` is returned. :param home_num: the number of the home team player :param away_num: the number of the away team player :returns: dict, either ``{ }`` or the following .. code:: python { 'home/away': { 'off/def/neut/all': { 'won': won, 'total': total } } } """ if home_num in self.home_fo and away_num in self.home_fo[home_num]['opps']: h_fo = self.home_fo[home_num]['opps'][away_num] a_fo = self.away_fo[away_num]['opps'][home_num] return { 'home': { k: h_fo[k] for k in self.__zones }, 'away': { k: a_fo[k] for k in self.__zones } } else: return { }
[ "def", "head_to_head", "(", "self", ",", "home_num", ",", "away_num", ")", ":", "if", "home_num", "in", "self", ".", "home_fo", "and", "away_num", "in", "self", ".", "home_fo", "[", "home_num", "]", "[", "'opps'", "]", ":", "h_fo", "=", "self", ".", "home_fo", "[", "home_num", "]", "[", "'opps'", "]", "[", "away_num", "]", "a_fo", "=", "self", ".", "away_fo", "[", "away_num", "]", "[", "'opps'", "]", "[", "home_num", "]", "return", "{", "'home'", ":", "{", "k", ":", "h_fo", "[", "k", "]", "for", "k", "in", "self", ".", "__zones", "}", ",", "'away'", ":", "{", "k", ":", "a_fo", "[", "k", "]", "for", "k", "in", "self", ".", "__zones", "}", "}", "else", ":", "return", "{", "}" ]
Return the head-to-head face-off outcomes between two players. If the matchup didn't happen, ``{ }`` is returned. :param home_num: the number of the home team player :param away_num: the number of the away team player :returns: dict, either ``{ }`` or the following .. code:: python { 'home/away': { 'off/def/neut/all': { 'won': won, 'total': total } } }
[ "Return", "the", "head", "-", "to", "-", "head", "face", "-", "off", "outcomes", "between", "two", "players", ".", "If", "the", "matchup", "didn", "t", "happen", "{", "}", "is", "returned", ".", ":", "param", "home_num", ":", "the", "number", "of", "the", "home", "team", "player", ":", "param", "away_num", ":", "the", "number", "of", "the", "away", "team", "player", ":", "returns", ":", "dict", "either", "{", "}", "or", "the", "following", "..", "code", "::", "python", "{", "home", "/", "away", ":", "{", "off", "/", "def", "/", "neut", "/", "all", ":", "{", "won", ":", "won", "total", ":", "total", "}", "}", "}" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/games/faceoffcomp.py#L47-L72
train
robhowley/nhlscrapi
nhlscrapi/games/faceoffcomp.py
FaceOffComparison.team_totals
def team_totals(self): """ Returns the overall faceoff win/total breakdown for home and away as :returns: dict, ``{ 'home/away': { 'won': won, 'total': total } }`` """ if self.__team_tots is None: self.__team_tots = self.__comp_tot() return { t: self.__team_tots[t]['all'] for t in [ 'home', 'away' ] }
python
def team_totals(self): """ Returns the overall faceoff win/total breakdown for home and away as :returns: dict, ``{ 'home/away': { 'won': won, 'total': total } }`` """ if self.__team_tots is None: self.__team_tots = self.__comp_tot() return { t: self.__team_tots[t]['all'] for t in [ 'home', 'away' ] }
[ "def", "team_totals", "(", "self", ")", ":", "if", "self", ".", "__team_tots", "is", "None", ":", "self", ".", "__team_tots", "=", "self", ".", "__comp_tot", "(", ")", "return", "{", "t", ":", "self", ".", "__team_tots", "[", "t", "]", "[", "'all'", "]", "for", "t", "in", "[", "'home'", ",", "'away'", "]", "}" ]
Returns the overall faceoff win/total breakdown for home and away as :returns: dict, ``{ 'home/away': { 'won': won, 'total': total } }``
[ "Returns", "the", "overall", "faceoff", "win", "/", "total", "breakdown", "for", "home", "and", "away", "as", ":", "returns", ":", "dict", "{", "home", "/", "away", ":", "{", "won", ":", "won", "total", ":", "total", "}", "}" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/games/faceoffcomp.py#L75-L87
train
robhowley/nhlscrapi
nhlscrapi/games/faceoffcomp.py
FaceOffComparison.by_zone
def by_zone(self): """ Returns the faceoff win/total breakdown by zone for home and away as .. code:: python { 'home/away': { 'off/def/neut/all': { 'won': won, 'total': total } } } :returns: dict """ if self.__team_tots is None: self.__team_tots = self.__comp_tot() return { t: { z: self.__team_tots[t][z] for z in self.__zones if z != 'all' } for t in [ 'home', 'away' ] }
python
def by_zone(self): """ Returns the faceoff win/total breakdown by zone for home and away as .. code:: python { 'home/away': { 'off/def/neut/all': { 'won': won, 'total': total } } } :returns: dict """ if self.__team_tots is None: self.__team_tots = self.__comp_tot() return { t: { z: self.__team_tots[t][z] for z in self.__zones if z != 'all' } for t in [ 'home', 'away' ] }
[ "def", "by_zone", "(", "self", ")", ":", "if", "self", ".", "__team_tots", "is", "None", ":", "self", ".", "__team_tots", "=", "self", ".", "__comp_tot", "(", ")", "return", "{", "t", ":", "{", "z", ":", "self", ".", "__team_tots", "[", "t", "]", "[", "z", "]", "for", "z", "in", "self", ".", "__zones", "if", "z", "!=", "'all'", "}", "for", "t", "in", "[", "'home'", ",", "'away'", "]", "}" ]
Returns the faceoff win/total breakdown by zone for home and away as .. code:: python { 'home/away': { 'off/def/neut/all': { 'won': won, 'total': total } } } :returns: dict
[ "Returns", "the", "faceoff", "win", "/", "total", "breakdown", "by", "zone", "for", "home", "and", "away", "as", "..", "code", "::", "python", "{", "home", "/", "away", ":", "{", "off", "/", "def", "/", "neut", "/", "all", ":", "{", "won", ":", "won", "total", ":", "total", "}", "}", "}", ":", "returns", ":", "dict" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/games/faceoffcomp.py#L90-L113
train
robhowley/nhlscrapi
nhlscrapi/games/faceoffcomp.py
FaceOffComparison.fo_pct
def fo_pct(self): """ Get the by team overall face-off win %. :returns: dict, ``{ 'home': %, 'away': % }`` """ tots = self.team_totals return { t: tots[t]['won']/(1.0*tots[t]['total']) if tots[t]['total'] else 0.0 for t in [ 'home', 'away' ] }
python
def fo_pct(self): """ Get the by team overall face-off win %. :returns: dict, ``{ 'home': %, 'away': % }`` """ tots = self.team_totals return { t: tots[t]['won']/(1.0*tots[t]['total']) if tots[t]['total'] else 0.0 for t in [ 'home', 'away' ] }
[ "def", "fo_pct", "(", "self", ")", ":", "tots", "=", "self", ".", "team_totals", "return", "{", "t", ":", "tots", "[", "t", "]", "[", "'won'", "]", "/", "(", "1.0", "*", "tots", "[", "t", "]", "[", "'total'", "]", ")", "if", "tots", "[", "t", "]", "[", "'total'", "]", "else", "0.0", "for", "t", "in", "[", "'home'", ",", "'away'", "]", "}" ]
Get the by team overall face-off win %. :returns: dict, ``{ 'home': %, 'away': % }``
[ "Get", "the", "by", "team", "overall", "face", "-", "off", "win", "%", ".", ":", "returns", ":", "dict", "{", "home", ":", "%", "away", ":", "%", "}" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/games/faceoffcomp.py#L116-L126
train
robhowley/nhlscrapi
nhlscrapi/games/faceoffcomp.py
FaceOffComparison.fo_pct_by_zone
def fo_pct_by_zone(self): """ Get the by team face-off win % by zone. Format is :returns: dict ``{ 'home/away': { 'off/def/neut': % } }`` """ bz = self.by_zone return { t: { z: bz[t][z]['won']/(1.0*bz[t][z]['total']) if bz[t][z]['total'] else 0.0 for z in self.__zones if z != 'all' } for t in [ 'home', 'away' ] }
python
def fo_pct_by_zone(self): """ Get the by team face-off win % by zone. Format is :returns: dict ``{ 'home/away': { 'off/def/neut': % } }`` """ bz = self.by_zone return { t: { z: bz[t][z]['won']/(1.0*bz[t][z]['total']) if bz[t][z]['total'] else 0.0 for z in self.__zones if z != 'all' } for t in [ 'home', 'away' ] }
[ "def", "fo_pct_by_zone", "(", "self", ")", ":", "bz", "=", "self", ".", "by_zone", "return", "{", "t", ":", "{", "z", ":", "bz", "[", "t", "]", "[", "z", "]", "[", "'won'", "]", "/", "(", "1.0", "*", "bz", "[", "t", "]", "[", "z", "]", "[", "'total'", "]", ")", "if", "bz", "[", "t", "]", "[", "z", "]", "[", "'total'", "]", "else", "0.0", "for", "z", "in", "self", ".", "__zones", "if", "z", "!=", "'all'", "}", "for", "t", "in", "[", "'home'", ",", "'away'", "]", "}" ]
Get the by team face-off win % by zone. Format is :returns: dict ``{ 'home/away': { 'off/def/neut': % } }``
[ "Get", "the", "by", "team", "face", "-", "off", "win", "%", "by", "zone", ".", "Format", "is", ":", "returns", ":", "dict", "{", "home", "/", "away", ":", "{", "off", "/", "def", "/", "neut", ":", "%", "}", "}" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/games/faceoffcomp.py#L129-L144
train
robhowley/nhlscrapi
nhlscrapi/games/cumstats.py
TeamIncrementor.update
def update(self, play): """ Update the accumulator with the current play :returns: new tally :rtype: dict, ``{ 'period': per, 'time': clock, 'team': cumul, 'play': play }`` """ new_tally = { } #if any(isinstance(play.event, te) for te in self.trigger_event_types): if self._count_play(play): # the team who made the play / triggered the event team = self._get_team(play) try: self.total[team] += 1 except: self.total[team] = 1 self.teams.append(team) for i in range(len(self.tally)): self.tally[i][team] = 0 try: new_tally = { k:v for k,v in self.tally[len(self.tally)-1].items() } new_tally['period'] = play.period new_tally['time'] = play.time new_tally[team] += 1 new_tally['play'] = play except: new_tally = { 'period': play.period, 'time': play.time, team: 1, 'play': play } self.tally.append(new_tally) return new_tally
python
def update(self, play): """ Update the accumulator with the current play :returns: new tally :rtype: dict, ``{ 'period': per, 'time': clock, 'team': cumul, 'play': play }`` """ new_tally = { } #if any(isinstance(play.event, te) for te in self.trigger_event_types): if self._count_play(play): # the team who made the play / triggered the event team = self._get_team(play) try: self.total[team] += 1 except: self.total[team] = 1 self.teams.append(team) for i in range(len(self.tally)): self.tally[i][team] = 0 try: new_tally = { k:v for k,v in self.tally[len(self.tally)-1].items() } new_tally['period'] = play.period new_tally['time'] = play.time new_tally[team] += 1 new_tally['play'] = play except: new_tally = { 'period': play.period, 'time': play.time, team: 1, 'play': play } self.tally.append(new_tally) return new_tally
[ "def", "update", "(", "self", ",", "play", ")", ":", "new_tally", "=", "{", "}", "#if any(isinstance(play.event, te) for te in self.trigger_event_types):", "if", "self", ".", "_count_play", "(", "play", ")", ":", "# the team who made the play / triggered the event", "team", "=", "self", ".", "_get_team", "(", "play", ")", "try", ":", "self", ".", "total", "[", "team", "]", "+=", "1", "except", ":", "self", ".", "total", "[", "team", "]", "=", "1", "self", ".", "teams", ".", "append", "(", "team", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "tally", ")", ")", ":", "self", ".", "tally", "[", "i", "]", "[", "team", "]", "=", "0", "try", ":", "new_tally", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "self", ".", "tally", "[", "len", "(", "self", ".", "tally", ")", "-", "1", "]", ".", "items", "(", ")", "}", "new_tally", "[", "'period'", "]", "=", "play", ".", "period", "new_tally", "[", "'time'", "]", "=", "play", ".", "time", "new_tally", "[", "team", "]", "+=", "1", "new_tally", "[", "'play'", "]", "=", "play", "except", ":", "new_tally", "=", "{", "'period'", ":", "play", ".", "period", ",", "'time'", ":", "play", ".", "time", ",", "team", ":", "1", ",", "'play'", ":", "play", "}", "self", ".", "tally", ".", "append", "(", "new_tally", ")", "return", "new_tally" ]
Update the accumulator with the current play :returns: new tally :rtype: dict, ``{ 'period': per, 'time': clock, 'team': cumul, 'play': play }``
[ "Update", "the", "accumulator", "with", "the", "current", "play", ":", "returns", ":", "new", "tally", ":", "rtype", ":", "dict", "{", "period", ":", "per", "time", ":", "clock", "team", ":", "cumul", "play", ":", "play", "}" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/games/cumstats.py#L55-L93
train
robhowley/nhlscrapi
nhlscrapi/games/cumstats.py
Corsi.share
def share(self): """ The Cori-share (% of shot attempts) for each team :returns: dict, ``{ 'home_name': %, 'away_name': % }`` """ tot = sum(self.total.values()) return { k: v/float(tot) for k,v in self.total.items() }
python
def share(self): """ The Cori-share (% of shot attempts) for each team :returns: dict, ``{ 'home_name': %, 'away_name': % }`` """ tot = sum(self.total.values()) return { k: v/float(tot) for k,v in self.total.items() }
[ "def", "share", "(", "self", ")", ":", "tot", "=", "sum", "(", "self", ".", "total", ".", "values", "(", ")", ")", "return", "{", "k", ":", "v", "/", "float", "(", "tot", ")", "for", "k", ",", "v", "in", "self", ".", "total", ".", "items", "(", ")", "}" ]
The Cori-share (% of shot attempts) for each team :returns: dict, ``{ 'home_name': %, 'away_name': % }``
[ "The", "Cori", "-", "share", "(", "%", "of", "shot", "attempts", ")", "for", "each", "team", ":", "returns", ":", "dict", "{", "home_name", ":", "%", "away_name", ":", "%", "}" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/games/cumstats.py#L171-L178
train
robhowley/nhlscrapi
nhlscrapi/games/playbyplay.py
PlayByPlay.compute_stats
def compute_stats(self): """ Compute the stats defined in ``self.cum_stats``. :returns: collection of all computed :py:class:`.AccumulateStats` :rtype: dict """ if not self.__have_stats: if self.init_cs_teams and self.cum_stats: self.__init_cs_teams() for play in self._rep_reader.parse_plays_stream(): p = Play(**play) self.__wrapped_plays.append(p) if self.cum_stats: self.__process(p, self.cum_stats, 'update') self.__have_stats = True return self.cum_stats
python
def compute_stats(self): """ Compute the stats defined in ``self.cum_stats``. :returns: collection of all computed :py:class:`.AccumulateStats` :rtype: dict """ if not self.__have_stats: if self.init_cs_teams and self.cum_stats: self.__init_cs_teams() for play in self._rep_reader.parse_plays_stream(): p = Play(**play) self.__wrapped_plays.append(p) if self.cum_stats: self.__process(p, self.cum_stats, 'update') self.__have_stats = True return self.cum_stats
[ "def", "compute_stats", "(", "self", ")", ":", "if", "not", "self", ".", "__have_stats", ":", "if", "self", ".", "init_cs_teams", "and", "self", ".", "cum_stats", ":", "self", ".", "__init_cs_teams", "(", ")", "for", "play", "in", "self", ".", "_rep_reader", ".", "parse_plays_stream", "(", ")", ":", "p", "=", "Play", "(", "*", "*", "play", ")", "self", ".", "__wrapped_plays", ".", "append", "(", "p", ")", "if", "self", ".", "cum_stats", ":", "self", ".", "__process", "(", "p", ",", "self", ".", "cum_stats", ",", "'update'", ")", "self", ".", "__have_stats", "=", "True", "return", "self", ".", "cum_stats" ]
Compute the stats defined in ``self.cum_stats``. :returns: collection of all computed :py:class:`.AccumulateStats` :rtype: dict
[ "Compute", "the", "stats", "defined", "in", "self", ".", "cum_stats", ".", ":", "returns", ":", "collection", "of", "all", "computed", ":", "py", ":", "class", ":", ".", "AccumulateStats", ":", "rtype", ":", "dict" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/games/playbyplay.py#L84-L102
train
robhowley/nhlscrapi
nhlscrapi/scrapr/nhlreq.py
NHLCn.__html_rep
def __html_rep(self, game_key, rep_code): """Retrieves the nhl html reports for the specified game and report code""" seas, gt, num = game_key.to_tuple() url = [ self.__domain, "scores/htmlreports/", str(seas-1), str(seas), "/", rep_code, "0", str(gt), ("%04i" % (num)), ".HTM" ] url = ''.join(url) return self.__open(url)
python
def __html_rep(self, game_key, rep_code): """Retrieves the nhl html reports for the specified game and report code""" seas, gt, num = game_key.to_tuple() url = [ self.__domain, "scores/htmlreports/", str(seas-1), str(seas), "/", rep_code, "0", str(gt), ("%04i" % (num)), ".HTM" ] url = ''.join(url) return self.__open(url)
[ "def", "__html_rep", "(", "self", ",", "game_key", ",", "rep_code", ")", ":", "seas", ",", "gt", ",", "num", "=", "game_key", ".", "to_tuple", "(", ")", "url", "=", "[", "self", ".", "__domain", ",", "\"scores/htmlreports/\"", ",", "str", "(", "seas", "-", "1", ")", ",", "str", "(", "seas", ")", ",", "\"/\"", ",", "rep_code", ",", "\"0\"", ",", "str", "(", "gt", ")", ",", "(", "\"%04i\"", "%", "(", "num", ")", ")", ",", "\".HTM\"", "]", "url", "=", "''", ".", "join", "(", "url", ")", "return", "self", ".", "__open", "(", "url", ")" ]
Retrieves the nhl html reports for the specified game and report code
[ "Retrieves", "the", "nhl", "html", "reports", "for", "the", "specified", "game", "and", "report", "code" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/nhlreq.py#L17-L24
train
michaelherold/pyIsEmail
pyisemail/validators/parser_validator.py
to_char
def to_char(token): """Transforms the ASCII control character symbols to their real char. Note: If the token is not an ASCII control character symbol, just return the token. Keyword arguments: token -- the token to transform """ if ord(token) in _range(9216, 9229 + 1): token = _unichr(ord(token) - 9216) return token
python
def to_char(token): """Transforms the ASCII control character symbols to their real char. Note: If the token is not an ASCII control character symbol, just return the token. Keyword arguments: token -- the token to transform """ if ord(token) in _range(9216, 9229 + 1): token = _unichr(ord(token) - 9216) return token
[ "def", "to_char", "(", "token", ")", ":", "if", "ord", "(", "token", ")", "in", "_range", "(", "9216", ",", "9229", "+", "1", ")", ":", "token", "=", "_unichr", "(", "ord", "(", "token", ")", "-", "9216", ")", "return", "token" ]
Transforms the ASCII control character symbols to their real char. Note: If the token is not an ASCII control character symbol, just return the token. Keyword arguments: token -- the token to transform
[ "Transforms", "the", "ASCII", "control", "character", "symbols", "to", "their", "real", "char", "." ]
dd42d6425c59e5061fc214d42672210dccc64cf5
https://github.com/michaelherold/pyIsEmail/blob/dd42d6425c59e5061fc214d42672210dccc64cf5/pyisemail/validators/parser_validator.py#L48-L61
train
michaelherold/pyIsEmail
pyisemail/validators/parser_validator.py
ParserValidator.is_email
def is_email(self, address, diagnose=False): """Check that an address address conforms to RFCs 5321, 5322 and others. More specifically, see the follow RFCs: * http://tools.ietf.org/html/rfc5321 * http://tools.ietf.org/html/rfc5322 * http://tools.ietf.org/html/rfc4291#section-2.2 * http://tools.ietf.org/html/rfc1123#section-2.1 * http://tools.ietf.org/html/rfc3696) (guidance only) Keyword arguments: address -- address to check. diagnose -- flag to report a diagnosis or a boolean (default False) """ threshold = BaseDiagnosis.CATEGORIES['VALID'] return_status = [ValidDiagnosis()] parse_data = {} # Parse the address into components, character by character raw_length = len(address) context = Context.LOCALPART # Where we are context_stack = [context] # Where we've been context_prior = Context.LOCALPART # Where we just came from token = '' # The current character token_prior = '' # The previous character parse_data[Context.LOCALPART] = '' # The address' components parse_data[Context.DOMAIN] = '' atom_list = { Context.LOCALPART: [''], Context.DOMAIN: [''] } # The address' dot-atoms element_count = 0 element_len = 0 hyphen_flag = False # Hyphen cannot occur at the end of a subdomain end_or_die = False # CFWS can only appear at the end of an element skip = False # Skip flag that simulates i++ crlf_count = -1 # crlf_count = -1 == !isset(crlf_count) for i in _range(raw_length): # Skip simulates the use of ++ operator if skip: skip = False continue token = address[i] token = to_char(token) # Switch to simulate decrementing; needed for FWS repeat = True while repeat: repeat = False # ------------------------------------------------------- # Local part # ------------------------------------------------------- if context == Context.LOCALPART: # http://tools.ietf.org/html/rfc5322#section-3.4.1 # local-part = dot-atom / quoted-string / # obs-local-part # # dot-atom = [CFWS] dot-atom-text [CFWS] # # dot-atom-text = 1*atext *("." 
1*atext) # # quoted-string = [CFWS] # DQUOTE *([FWS] qcontent) [FWS] DQUOTE # [CFWS] # # obs-local-part = word *("." word) # # word = atom / quoted-string # # atom = [CFWS] 1*atext [CFWS] if token == Char.OPENPARENTHESIS: if element_len == 0: # Comments are OK at the beginning of an element if element_count == 0: return_status.append(CFWSDiagnosis('COMMENT')) else: return_status.append( DeprecatedDiagnosis('COMMENT')) else: return_status.append(CFWSDiagnosis('COMMENT')) # We can't start a comment in the middle of an # element, so this better be the end end_or_die = True context_stack.append(context) context = Context.COMMENT elif token == Char.DOT: if element_len == 0: # Another dot, already? Fatal error if element_count == 0: return_status.append( InvalidDiagnosis('DOT_START')) else: return_status.append( InvalidDiagnosis('CONSECUTIVEDOTS')) else: # The entire local-part can be a quoted string for # RFC 5321. If it's just one atom that is quoted # then it's an RFC 5322 obsolete form if end_or_die: return_status.append( DeprecatedDiagnosis('LOCALPART')) # CFWS & quoted strings are OK again now we're at # the beginning of an element (although they are # obsolete forms) end_or_die = False element_len = 0 element_count += 1 parse_data[Context.LOCALPART] += token atom_list[Context.LOCALPART].append('') elif token == Char.DQUOTE: if element_len == 0: # The entire local-part can be a quoted string for # RFC 5321. 
If it's just one atom that is quoted # then it's an RFC 5322 obsolete form if element_count == 0: return_status.append( RFC5321Diagnosis('QUOTEDSTRING')) else: return_status.append( DeprecatedDiagnosis('LOCALPART')) parse_data[Context.LOCALPART] += token atom_list[Context.LOCALPART][element_count] += token element_len += 1 end_or_die = True context_stack.append(context) context = Context.QUOTEDSTRING else: # Fatal error return_status.append( InvalidDiagnosis('EXPECTING_ATEXT')) # Folding White Space (FWS) elif token in [Char.CR, Char.SP, Char.HTAB]: # Skip simulates the use of ++ operator if the latter # check doesn't short-circuit if token == Char.CR: skip = True if (i+1 == raw_length or to_char(address[i+1]) != Char.LF): return_status.append( InvalidDiagnosis('CR_NO_LF')) break if element_len == 0: if element_count == 0: return_status.append(CFWSDiagnosis('FWS')) else: return_status.append( DeprecatedDiagnosis('FWS')) else: # We can't start FWS in the middle of an element, # so this better be the end end_or_die = True context_stack.append(context) context = Context.FWS token_prior = token # @ elif token == Char.AT: # At this point we should have a valid local-part if len(context_stack) != 1: # pragma: no cover if diagnose: return InvalidDiagnosis('BAD_PARSE') else: return False if parse_data[Context.LOCALPART] == '': # Fatal error return_status.append( InvalidDiagnosis('NOLOCALPART')) elif element_len == 0: # Fatal error return_status.append(InvalidDiagnosis('DOT_END')) # http://tools.ietf.org/html/rfc5321#section-4.5.3.1.1 # The maximum total length of a user name or other # local-part is 64 octets. elif len(parse_data[Context.LOCALPART]) > 64: return_status.append( RFC5322Diagnosis('LOCAL_TOOLONG')) # http://tools.ietf.org/html/rfc5322#section-3.4.1 # Comments and folding white space # SHOULD NOT be used around the "@" in the addr-spec. # # http://tools.ietf.org/html/rfc2119 # 4. 
SHOULD NOT This phrase, or the phrase "NOT # RECOMMENDED" mean that there may exist valid # reasons in particular circumstances when the # particular behavior is acceptable or even useful, # but the full implications should be understood and # the case carefully weighed before implementing any # behavior described with this label. elif context_prior in [Context.COMMENT, Context.FWS]: return_status.append( DeprecatedDiagnosis('CFWS_NEAR_AT')) # Clear everything down for the domain parsing context = Context.DOMAIN context_stack = [] element_count = 0 element_len = 0 # CFWS can only appear at the end of the element end_or_die = False # atext else: # http://tools.ietf.org/html/rfc5322#section-3.2.3 # atext = ALPHA / DIGIT / ; Printable US-ASCII # "!" / "#" / ; characters not # "$" / "%" / ; including specials. # "&" / "'" / ; Used for atoms. # "*" / "+" / # "-" / "/" / # "=" / "?" / # "^" / "_" / # "`" / "{" / # "|" / "}" / # "~" if end_or_die: # We have encountered atext where it is no longer # valid if context_prior in [Context.COMMENT, Context.FWS]: return_status.append( InvalidDiagnosis('ATEXT_AFTER_CFWS')) elif context_prior == Context.QUOTEDSTRING: return_status.append( InvalidDiagnosis('ATEXT_AFTER_QS')) else: # pragma: no cover if diagnose: return InvalidDiagnosis('BAD_PARSE') else: return False else: context_prior = context o = ord(token) if (o < 33 or o > 126 or o == 10 or token in Char.SPECIALS): return_status.append( InvalidDiagnosis('EXPECTING_ATEXT')) parse_data[Context.LOCALPART] += token atom_list[Context.LOCALPART][element_count] += token element_len += 1 # ------------------------------------------------------- # Domain # ------------------------------------------------------- elif context == Context.DOMAIN: # http://tools.ietf.org/html/rfc5322#section-3.4.1 # domain = dot-atom / domain-literal / obs-domain # # dot-atom = [CFWS] dot-atom-text [CFWS] # # dot-atom-text = 1*atext *("." 
1*atext) # # domain-literal = [CFWS] # "[" *([FWS] dtext) [FWS] "]" # [CFWS] # # dtext = %d33-90 / ; Printable US-ASCII # %d94-126 / ; characters not # obs-dtext ; including [, ], or \ # # obs-domain = atom *("." atom) # # atom = [CFWS] 1*atext [CFWS] # # # http://tools.ietf.org/html/rfc5321#section-4.1.2 # Mailbox = Local-part # "@" # ( Domain / address-literal ) # # Domain = sub-domain *("." sub-domain) # # address-literal = "[" ( IPv4-address-literal / # IPv6-address-literal / # General-address-literal ) "]" # ; See Section 4.1.3 # # http://tools.ietf.org/html/rfc5322#section-3.4.1 # Note: A liberal syntax for the domain portion of # addr-spec is given here. However, the domain portion # contains addressing information specified by and # used in other protocols (e.g., RFC 1034, RFC 1035, # RFC 1123, RFC5321). It is therefore incumbent upon # implementations to conform to the syntax of # addresse for the context in which they are used. # is_email() author's note: it's not clear how to interpret # this in the context of a general address address # validator. The conclusion I have reached is this: # "addressing information" must comply with RFC 5321 (and # in turn RFC 1035), anything that is "semantically # invisible" must comply only with RFC 5322. # Comment if token == Char.OPENPARENTHESIS: if element_len == 0: # Comments at the start of the domain are # deprecated in the text # Comments at the start of a subdomain are # obs-domain # (http://tools.ietf.org/html/rfc5322#section-3.4.1) if element_count == 0: return_status.append( DeprecatedDiagnosis('CFWS_NEAR_AT')) else: return_status.append( DeprecatedDiagnosis('COMMENT')) else: return_status.append(CFWSDiagnosis('COMMENT')) # We can't start a comment in the middle of an # element, so this better be the end end_or_die = True context_stack.append(context) context = Context.COMMENT # Next dot-atom element elif token == Char.DOT: if element_len == 0: # Another dot, already? 
Fatal error if element_count == 0: return_status.append( InvalidDiagnosis('DOT_START')) else: return_status.append( InvalidDiagnosis('CONSECUTIVEDOTS')) elif hyphen_flag: # Previous subdomain ended in a hyphen. Fatal error return_status.append( InvalidDiagnosis('DOMAINHYPHENEND')) else: # Nowhere in RFC 5321 does it say explicitly that # the domain part of a Mailbox must be a valid # domain according to the DNS standards set out in # RFC 1035, but this *is* implied in several # places. For instance, wherever the idea of host # routing is discussed the RFC says that the domain # must be looked up in the DNS. This would be # nonsense unless the domain was designed to be a # valid DNS domain. Hence we must conclude that the # RFC 1035 restriction on label length also applies # to RFC 5321 domains. # # http://tools.ietf.org/html/rfc1035#section-2.3.4 # labels 63 octets or less if element_len > 63: return_status.append( RFC5322Diagnosis('LABEL_TOOLONG')) # CFWS is OK again now we're at the beginning of an # element (although it may be obsolete CFWS) end_or_die = False element_len = 0 element_count += 1 atom_list[Context.DOMAIN].append('') parse_data[Context.DOMAIN] += token # Domain literal elif token == Char.OPENSQBRACKET: if parse_data[Context.DOMAIN] == '': # Domain literal must be the only component end_or_die = True element_len += 1 context_stack.append(context) context = Context.LITERAL parse_data[Context.DOMAIN] += token atom_list[Context.DOMAIN][element_count] += token parse_data['literal'] = '' else: # Fatal error return_status.append( InvalidDiagnosis('EXPECTING_ATEXT')) # Folding White Space (FWS) elif token in [Char.CR, Char.SP, Char.HTAB]: # Skip simulates the use of ++ operator if the latter # check doesn't short-circuit if token == Char.CR: skip = True if i+1 == raw_length or (to_char(address[i + 1]) != Char.LF): # Fatal error return_status.append( InvalidDiagnosis('CR_NO_LF')) break if element_len == 0: if element_count == 0: return_status.append( 
DeprecatedDiagnosis('CFWS_NEAR_AT')) else: return_status.append( DeprecatedDiagnosis('FWS')) else: return_status.append(CFWSDiagnosis('FWS')) # We can't start FWS in the middle of an element, # so this better be the end end_or_die = True context_stack.append(context) context = Context.FWS token_prior = token # atext else: # RFC 5322 allows any atext... # http://tools.ietf.org/html/rfc5322#section-3.2.3 # atext = ALPHA / DIGIT / ; Printable US-ASCII # "!" / "#" / ; characters not # "$" / "%" / ; including specials. # "&" / "'" / ; Used for atoms. # "*" / "+" / # "-" / "/" / # "=" / "?" / # "^" / "_" / # "`" / "{" / # "|" / "}" / # "~" # But RFC 5321 only allows letter-digit-hyphen to # comply with DNS rules (RFCs 1034 & 1123) # http://tools.ietf.org/html/rfc5321#section-4.1.2 # sub-domain = Let-dig [Ldh-str] # # Let-dig = ALPHA / DIGIT # # Ldh-str = *( ALPHA / DIGIT / "-" ) Let-dig # if end_or_die: # We have encountered atext where it is no longer # valid if context_prior in [Context.COMMENT, Context.FWS]: return_status.append( InvalidDiagnosis('ATEXT_AFTER_CFWS')) elif context_prior == Context.LITERAL: return_status.append( InvalidDiagnosis('ATEXT_AFTER_DOMLIT')) else: # pragma: no cover if diagnose: return InvalidDiagnosis('BAD_PARSE') else: return False o = ord(token) # Assume this token isn't a hyphen unless we discover # it is hyphen_flag = False if o < 33 or o > 126 or token in Char.SPECIALS: # Fatal error return_status.append( InvalidDiagnosis('EXPECTING_ATEXT')) elif token == Char.HYPHEN: if element_len == 0: # Hyphens can't be at the beginning of a # subdomain # Fatal error return_status.append( InvalidDiagnosis('DOMAINHYPHENSTART')) hyphen_flag = True elif not (47 < o < 58 or 64 < o < 91 or 96 < o < 123): # Not an RFC 5321 subdomain, but still OK by RFC # 5322 return_status.append(RFC5322Diagnosis('DOMAIN')) parse_data[Context.DOMAIN] += token atom_list[Context.DOMAIN][element_count] += token element_len += 1 # 
------------------------------------------------------- # Domain literal # ------------------------------------------------------- elif context == Context.LITERAL: # http://tools.ietf.org/html/rfc5322#section-3.4.1 # domain-literal = [CFWS] # "[" *([FWS] dtext) [FWS] "]" # [CFWS] # # dtext = %d33-90 / ; Printable US-ASCII # %d94-126 / ; characters not # obs-dtext ; including [, ], or \ # # obs-dtext = obs-NO-WS-CTL / quoted-pair # End of domain literal if token == Char.CLOSESQBRACKET: if (max(return_status) < BaseDiagnosis.CATEGORIES['DEPREC']): # Could be a valid RFC 5321 address literal, so # let's check # # http://tools.ietf.org/html/rfc5321#section-4.1.2 # address-literal = "[" ( IPv4-address-literal / # IPv6-address-literal / # General-address-literal ) "]" # ; See Section 4.1.3 # # http://tools.ietf.org/html/rfc5321#section-4.1.3 # IPv4-address-literal = Snum 3("." Snum) # # IPv6-address-literal = "IPv6:" IPv6-addr # # General-address-literal = Standardized-tag ":" # 1*dcontent # # Standardized-tag = Ldh-str # ; Standardized-tag MUST be # ; specified in a # ; Standards-Track RFC and # ; registered with IANA # # dcontent = %d33-90 / ; Printable US-ASCII # %d94-126 ; excl. "[", "\", "]" # # Snum = 1*3DIGIT # ; representing a decimal integer # ; value in the range 0-255 # # IPv6-addr = IPv6-full / IPv6-comp / # IPv6v4-full / IPv6v4-comp # # IPv6-hex = 1*4HEXDIG # # IPv6-full = IPv6-hex 7(":" IPv6-hex) # # IPv6-comp = [IPv6-hex *5(":" IPv6-hex)] # "::" # [IPv6-hex *5(":" IPv6-hex)] # ; The "::" represents at least 2 # ; 16-bit groups of zeros. No more # ; than 6 groups in addition to # ; the "::" may be present. # # IPv6v4-full = IPv6-hex 5(":" IPv6-hex) ":" # IPv4-address-literal # # IPv6v4-comp = [IPv6-hex *3(":" IPv6-hex)] # "::" # [IPv6-hex *3(":" IPv6-hex) ":"] # IPv4-address-literal # ; The "::" represents at least 2 # ; 16-bit groups of zeros. No more # ; than 4 groups in addition to # ; the "::" and # ; IPv4-address-literal may be # ; present. 
max_groups = 8 index = False address_literal = parse_data['literal'] # Extract IPv4 part from the end of the # address-literal (if there is one) regex = ( r"\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.)" r"{3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$" ) match_ip = re.search(regex, address_literal) if match_ip: index = address_literal.rfind( match_ip.group(0)) if index != 0: # Convert IPv4 part to IPv6 format for # further testing address_literal = ( address_literal[0:index] + '0:0') if index == 0 and index is not False: # Nothing there except a valid IPv4 address return_status.append( RFC5321Diagnosis('ADDRESSLITERAL')) elif not address_literal.startswith(Char.IPV6TAG): return_status.append( RFC5322Diagnosis('DOMAINLITERAL')) else: ipv6 = address_literal[5:] # Revision 2.7: Daniel Marschall's new IPv6 # testing strategy match_ip = ipv6.split(Char.COLON) grp_count = len(match_ip) index = ipv6.find(Char.DOUBLECOLON) if index == -1: # We need exactly the right number of # groups if grp_count != max_groups: return_status.append( RFC5322Diagnosis('IPV6_GRPCOUNT')) else: if index != ipv6.rfind(Char.DOUBLECOLON): return_status.append( RFC5322Diagnosis('IPV6_2X2XCOLON')) else: if index in [0, len(ipv6) - 2]: # RFC 4291 allows :: at the start # or end of an address with 7 other # groups in addition max_groups += 1 if grp_count > max_groups: return_status.append( RFC5322Diagnosis( 'IPV6_MAXGRPS')) elif grp_count == max_groups: # Eliding a single "::" return_status.append( RFC5321Diagnosis( 'IPV6DEPRECATED')) # Revision 2.7: Daniel Marschall's new IPv6 # testing strategy if (ipv6[0] == Char.COLON and ipv6[1] != Char.COLON): # Address starts with a single colon return_status.append( RFC5322Diagnosis('IPV6_COLONSTRT')) elif (ipv6[-1] == Char.COLON and ipv6[-2] != Char.COLON): # Address ends with a single colon return_status.append( RFC5322Diagnosis('IPV6_COLONEND')) elif ([re.match(r"^[0-9A-Fa-f]{0,4}$", i) for i in match_ip].count(None) != 0): # Check for unmatched 
characters return_status.append( RFC5322Diagnosis('IPV6_BADCHAR')) else: return_status.append( RFC5321Diagnosis('ADDRESSLITERAL')) else: return_status.append( RFC5322Diagnosis('DOMAINLITERAL')) parse_data[Context.DOMAIN] += token atom_list[Context.DOMAIN][element_count] += token element_len += 1 context_prior = context context = context_stack.pop() elif token == Char.BACKSLASH: return_status.append( RFC5322Diagnosis('DOMLIT_OBSDTEXT')) context_stack.append(context) context = Context.QUOTEDPAIR # Folding White Space (FWS) elif token in [Char.CR, Char.SP, Char.HTAB]: # Skip simulates the use of ++ operator if the latter # check doesn't short-circuit if token == Char.CR: skip = True if (i+1 == raw_length or to_char(address[i+1]) != Char.LF): return_status.append( InvalidDiagnosis('CR_NO_LF')) break return_status.append(CFWSDiagnosis('FWS')) context_stack.append(context) context = Context.FWS token_prior = token # dtext else: # http://tools.ietf.org/html/rfc5322#section-3.4.1 # dtext = %d33-90 / ; Printable US-ASCII # %d94-126 / ; characters not # obs-dtext ; including [, ], or \ # # obs-dtext = obs-NO-WS-CTL / quoted-pair # # obs-NO-WS-CTL = %d1-8 / ; US-ASCII control # %d11 / ; characters that do # %d12 / ; not include the # %d14-31 / ; carriage return, line # %d127 ; feed, and white space # ; characters o = ord(token) # CR, LF, SP & HTAB have already been parsed above if o > 127 or o == 0 or token == Char.OPENSQBRACKET: # Fatal error return_status.append( InvalidDiagnosis('EXPECTING_DTEXT')) break elif o < 33 or o == 127: return_status.append( RFC5322Diagnosis('DOMLIT_OBSDTEXT')) parse_data['literal'] += token parse_data[Context.DOMAIN] += token atom_list[Context.DOMAIN][element_count] += token element_len += 1 # ------------------------------------------------------- # Quoted string # ------------------------------------------------------- elif context == Context.QUOTEDSTRING: # http://tools.ietf.org/html/rfc5322#section-3.2.4 # quoted-string = [CFWS] # DQUOTE 
*([FWS] qcontent) [FWS] DQUOTE # [CFWS] # # qcontent = qtext / quoted-pair # Quoted pair if token == Char.BACKSLASH: context_stack.append(context) context = Context.QUOTEDPAIR # Folding White Space (FWS) # Inside a quoted string, spaces are allow as regular # characters. It's only FWS if we include HTAB or CRLF elif token in [Char.CR, Char.HTAB]: # Skip simulates the use of ++ operator if the latter # check doesn't short-circuit if token == Char.CR: skip = True if (i+1 == raw_length or to_char(address[i+1]) != Char.LF): return_status.append( InvalidDiagnosis('CR_NO_LF')) break # http://tools.ietf.org/html/rfc5322#section-3.2.2 # Runs of FWS, comment, or CFWS that occur between # lexical tokens in a structured header field are # semantically interpreted as a single space # character. # http://tools.ietf.org/html/rfc5322#section-3.2.4 # the CRLF in any FWS/CFWS that appears within the # quoted string [is] semantically "invisible" and # therefore not part of the quoted-string parse_data[Context.LOCALPART] += Char.SP atom_list[Context.LOCALPART][element_count] += Char.SP element_len += 1 return_status.append(CFWSDiagnosis('FWS')) context_stack.append(context) context = Context.FWS token_prior = token # End of quoted string elif token == Char.DQUOTE: parse_data[Context.LOCALPART] += token atom_list[Context.LOCALPART][element_count] += token element_len += 1 context_prior = context context = context_stack.pop() # qtext else: # http://tools.ietf.org/html/rfc5322#section-3.2.4 # qtext = %d33 / ; Printable US-ASCII # %d35-91 / ; characters not # %d93-126 / ; including "\" or # obs-qtext ; the quote # ; character # # obs-qtext = obs-NO-WS-CTL # # obs-NO-WS-CTL = %d1-8 / ; US-ASCII control # %d11 / ; characters that do # %d12 / ; not include the CR, # %d14-31 / ; LF, and white space # %d127 ; characters o = ord(token) if o > 127 or o == 0 or o == 10: # Fatal error return_status.append( InvalidDiagnosis('EXPECTING_QTEXT')) elif o < 32 or o == 127: return_status.append( 
DeprecatedDiagnosis('QTEXT')) parse_data[Context.LOCALPART] += token atom_list[Context.LOCALPART][element_count] += token element_len += 1 # ------------------------------------------------------- # Quoted pair # ------------------------------------------------------- elif context == Context.QUOTEDPAIR: # http://tools.ietf.org/html/rfc5322#section-3.2.1 # quoted-pair = ("\" (VCHAR / WSP)) / obs-qp # # VCHAR = %d33-126 ; visible (printing) # ; characters # # WSP = SP / HTAB ; white space # # obs-qp = "\" (%d0 / obs-NO-WS-CTL / LF / CR) # # obs-NO-WS-CTL = %d1-8 / ; US-ASCII control # %d11 / ; characters that do not # %d12 / ; include the carriage # %d14-31 / ; return, line feed, and # %d127 ; white space characters # # i.e. obs-qp = "\" (%d0-8, %d10-31 / %d127) o = ord(token) if o > 127: # Fatal error return_status.append( InvalidDiagnosis('EXPECTING_QPAIR')) elif (o < 31 and o != 9) or o == 127: # SP & HTAB are allowed return_status.append(DeprecatedDiagnosis('QP')) # At this point we know where this qpair occurred so # we could check to see if the character actually # needed to be quoted at all. # http://tools.ietf.org/html/rfc5321#section-4.1.2 # the sending system SHOULD transmit the # form that uses the minimum quoting possible. 
context_prior = context context = context_stack.pop() # End of qpair token = Char.BACKSLASH + token if context == Context.COMMENT: pass elif context == Context.QUOTEDSTRING: parse_data[Context.LOCALPART] += token atom_list[Context.LOCALPART][element_count] += token # The maximum sizes specified by RFC 5321 are octet # counts, so we must include the backslash element_len += 2 elif context == Context.LITERAL: parse_data[Context.DOMAIN] += token atom_list[Context.DOMAIN][element_count] += token # The maximum sizes specified by RFC 5321 are octet # counts, so we must include the backslash element_len += 2 else: # pragma: no cover if diagnose: return InvalidDiagnosis('BAD_PARSE') else: return False # ------------------------------------------------------- # Comment # ------------------------------------------------------- elif context == Context.COMMENT: # http://tools.ietf.org/html/rfc5322#section-3.2.2 # comment = "(" *([FWS] ccontent) [FWS] ")" # # ccontent = ctext / quoted-pair / comment # Nested comment if token == Char.OPENPARENTHESIS: # Nested comments are OK context_stack.append(context) context = Context.COMMENT # End of comment elif token == Char.CLOSEPARENTHESIS: context_prior = context context = context_stack.pop() # Quoted pair elif token == Char.BACKSLASH: context_stack.append(context) context = Context.QUOTEDPAIR # Folding White Space (FWS) elif token in [Char.CR, Char.SP, Char.HTAB]: # Skip simulates the use of ++ operator if the latter # check doesn't short-circuit if token == Char.CR: skip = True if (i+1 == raw_length or to_char(address[i+1]) != Char.LF): return_status.append( InvalidDiagnosis('CR_NO_LF')) break return_status.append(CFWSDiagnosis('FWS')) context_stack.append(context) context = Context.FWS token_prior = token # ctext else: # http://tools.ietf.org/html/rfc5322#section-3.2.3 # ctext = %d33-39 / ; Printable US- # %d42-91 / ; ASCII characters # %d93-126 / ; not including # obs-ctext ; "(", ")", or "\" # # obs-ctext = obs-NO-WS-CTL # # 
obs-NO-WS-CTL = %d1-8 / ; US-ASCII control # %d11 / ; characters that # %d12 / ; do not include # %d14-31 / ; the CR, LF, and # ; white space # ; characters o = ord(token) if o > 127 or o == 0 or o == 10: # Fatal error return_status.append( InvalidDiagnosis('EXPECTING_CTEXT')) break elif o < 32 or o == 127: return_status.append(DeprecatedDiagnosis('CTEXT')) # ------------------------------------------------------- # Folding White Space (FWS) # ------------------------------------------------------- elif context == Context.FWS: # http://tools.ietf.org/html/rfc5322#section-3.2.2 # FWS = ([*WSP CRLF] 1*WSP) / obs-FWS # ; Folding white space # # But note the erratum: # http://www.rfc-editor.org/errata_search.php?rfc=5322&eid=1908 # In the obsolete syntax, any amount of folding white # space MAY be inserted where the obs-FWS rule is # allowed. This creates the possibility of having two # consecutive "folds" in a line, and therefore the # possibility that a line which makes up a folded header # field could be composed entirely of white space. 
# # obs-FWS = 1*([CRLF] WSP) if token_prior == Char.CR: if token == Char.CR: # Fatal error return_status.append( InvalidDiagnosis('FWS_CRLF_X2')) break if crlf_count != -1: crlf_count += 1 if crlf_count > 1: # Multiple folds = obsolete FWS return_status.append( DeprecatedDiagnosis('FWS')) else: crlf_count = 1 # Skip simulates the use of ++ operator if the latter # check doesn't short-circuit if token == Char.CR: skip = True if (i+1 == raw_length or to_char(address[i+1]) != Char.LF): return_status.append(InvalidDiagnosis('CR_NO_LF')) break elif token in [Char.SP, Char.HTAB]: pass else: if token_prior == Char.CR: # Fatal error return_status.append( InvalidDiagnosis('FWS_CRLF_END')) break if crlf_count != -1: crlf_count = -1 context_prior = context # End of FWS context = context_stack.pop() # Look at this token again in the parent context repeat = True token_prior = token # ------------------------------------------------------- # A context we aren't expecting # ------------------------------------------------------- else: # pragma: no cover if diagnose: return InvalidDiagnosis('BAD_PARSE') else: return False # No point in going on if we've got a fatal error if max(return_status) > BaseDiagnosis.CATEGORIES['RFC5322']: break # Some simple final tests if max(return_status) < BaseDiagnosis.CATEGORIES['RFC5322']: if context == Context.QUOTEDSTRING: # Fatal error return_status.append(InvalidDiagnosis('UNCLOSEDQUOTEDSTR')) elif context == Context.QUOTEDPAIR: # Fatal error return_status.append(InvalidDiagnosis('BACKSLASHEND')) elif context == Context.COMMENT: # Fatal error return_status.append(InvalidDiagnosis('UNCLOSEDCOMMENT')) elif context == Context.LITERAL: # Fatal error return_status.append(InvalidDiagnosis('UNCLOSEDDOMLIT')) elif token == Char.CR: # Fatal error return_status.append(InvalidDiagnosis('FWS_CRLF_END')) elif parse_data[Context.DOMAIN] == '': # Fatal error return_status.append(InvalidDiagnosis('NODOMAIN')) elif element_len == 0: # Fatal error 
return_status.append(InvalidDiagnosis('DOT_END')) elif hyphen_flag: # Fatal error return_status.append(InvalidDiagnosis('DOMAINHYPHENEND')) # http://tools.ietf.org/html/rfc5321#section-4.5.3.1.2 # The maximum total length of a domain name or number is 255 octets elif len(parse_data[Context.DOMAIN]) > 255: return_status.append(RFC5322Diagnosis('DOMAIN_TOOLONG')) # http://tools.ietf.org/html/rfc5321#section-4.1.2 # Forward-path = Path # # Path = "<" [ A-d-l ":" ] Mailbox ">" # # http://tools.ietf.org/html/rfc5321#section-4.5.3.1.3 # The maximum total length of a reverse-path or forward-path is # 256 octets (including the punctuation and element separators). # # Thus, even without (obsolete) routing information, the Mailbox # can only be 254 characters long. This is confirmed by this # verified erratum to RFC 3696: # # http://www.rfc-editor.org/errata_search.php?rfc=3696&eid=1690 # However, there is a restriction in RFC 2821 on the length of an # address in MAIL and RCPT commands of 254 characters. Since # addresses that do not fit in those fields are not normally # useful, the upper limit on address lengths should normally be # considered to be 254. elif len(parse_data[Context.LOCALPART] + Char.AT + parse_data[Context.DOMAIN]) > 254: return_status.append(RFC5322Diagnosis('TOOLONG')) # http://tools.ietf.org/html/rfc1035#section-2.3.4 # labels 63 octets or less elif element_len > 63: return_status.append(RFC5322Diagnosis('LABEL_TOOLONG')) return_status = list(set(return_status)) final_status = max(return_status) if len(return_status) != 1: # Remove redundant ValidDiagnosis return_status.pop(0) parse_data['status'] = return_status if final_status < threshold: final_status = ValidDiagnosis() if diagnose: return final_status else: return final_status < BaseDiagnosis.CATEGORIES['THRESHOLD']
python
def is_email(self, address, diagnose=False):
    """Check that an email address conforms to RFCs 5321, 5322 and others.

    More specifically, see the following RFCs:

        * http://tools.ietf.org/html/rfc5321
        * http://tools.ietf.org/html/rfc5322
        * http://tools.ietf.org/html/rfc4291#section-2.2
        * http://tools.ietf.org/html/rfc1123#section-2.1
        * http://tools.ietf.org/html/rfc3696 (guidance only)

    The address is parsed character by character with a small state
    machine (contexts: local part, domain, domain literal, quoted
    string, quoted pair, comment, folding white space). Each anomaly
    encountered is appended to ``return_status`` as a diagnosis object;
    the most severe diagnosis decides the final verdict.

    Keyword arguments:
    address  -- address to check.
    diagnose -- flag to report a diagnosis or a boolean (default False)

    Returns either a diagnosis object (when ``diagnose`` is True) or a
    boolean indicating whether the address is acceptable (i.e. its worst
    diagnosis is below the 'THRESHOLD' category).
    """
    threshold = BaseDiagnosis.CATEGORIES['VALID']
    return_status = [ValidDiagnosis()]
    parse_data = {}

    # Parse the address into components, character by character
    raw_length = len(address)
    context = Context.LOCALPART          # Where we are
    context_stack = [context]            # Where we've been
    context_prior = Context.LOCALPART    # Where we just came from
    token = ''                           # The current character
    token_prior = ''                     # The previous character
    parse_data[Context.LOCALPART] = ''   # The address' components
    parse_data[Context.DOMAIN] = ''
    atom_list = {
        Context.LOCALPART: [''],
        Context.DOMAIN: ['']
    }                                    # The address' dot-atoms
    element_count = 0
    element_len = 0
    hyphen_flag = False   # Hyphen cannot occur at the end of a subdomain
    end_or_die = False    # CFWS can only appear at the end of an element
    skip = False          # Skip flag that simulates i++
    crlf_count = -1       # crlf_count = -1 == !isset(crlf_count)

    for i in _range(raw_length):
        # Skip simulates the use of ++ operator
        if skip:
            skip = False
            continue

        token = address[i]
        token = to_char(token)

        # Switch to simulate decrementing; needed for FWS
        repeat = True

        while repeat:
            repeat = False

            # -------------------------------------------------------
            # Local part
            # -------------------------------------------------------
            if context == Context.LOCALPART:
                # http://tools.ietf.org/html/rfc5322#section-3.4.1
                #   local-part     = dot-atom / quoted-string /
                #                    obs-local-part
                #   dot-atom       = [CFWS] dot-atom-text [CFWS]
                #   dot-atom-text  = 1*atext *("." 1*atext)
                #   quoted-string  = [CFWS]
                #                    DQUOTE *([FWS] qcontent) [FWS] DQUOTE
                #                    [CFWS]
                #   obs-local-part = word *("." word)
                #   word           = atom / quoted-string
                #   atom           = [CFWS] 1*atext [CFWS]
                if token == Char.OPENPARENTHESIS:
                    if element_len == 0:
                        # Comments are OK at the beginning of an element
                        if element_count == 0:
                            return_status.append(CFWSDiagnosis('COMMENT'))
                        else:
                            return_status.append(
                                DeprecatedDiagnosis('COMMENT'))
                    else:
                        return_status.append(CFWSDiagnosis('COMMENT'))
                        # We can't start a comment in the middle of an
                        # element, so this better be the end
                        end_or_die = True

                    context_stack.append(context)
                    context = Context.COMMENT
                elif token == Char.DOT:
                    if element_len == 0:
                        # Another dot, already? Fatal error
                        if element_count == 0:
                            return_status.append(
                                InvalidDiagnosis('DOT_START'))
                        else:
                            return_status.append(
                                InvalidDiagnosis('CONSECUTIVEDOTS'))
                    else:
                        # The entire local-part can be a quoted string for
                        # RFC 5321. If it's just one atom that is quoted
                        # then it's an RFC 5322 obsolete form
                        if end_or_die:
                            return_status.append(
                                DeprecatedDiagnosis('LOCALPART'))

                        # CFWS & quoted strings are OK again now we're at
                        # the beginning of an element (although they are
                        # obsolete forms)
                        end_or_die = False
                        element_len = 0
                        element_count += 1
                        parse_data[Context.LOCALPART] += token
                        atom_list[Context.LOCALPART].append('')
                elif token == Char.DQUOTE:
                    if element_len == 0:
                        # The entire local-part can be a quoted string for
                        # RFC 5321. If it's just one atom that is quoted
                        # then it's an RFC 5322 obsolete form
                        if element_count == 0:
                            return_status.append(
                                RFC5321Diagnosis('QUOTEDSTRING'))
                        else:
                            return_status.append(
                                DeprecatedDiagnosis('LOCALPART'))

                        parse_data[Context.LOCALPART] += token
                        atom_list[Context.LOCALPART][element_count] += token
                        element_len += 1
                        end_or_die = True
                        context_stack.append(context)
                        context = Context.QUOTEDSTRING
                    else:
                        # Fatal error: a quote may not appear mid-atom
                        return_status.append(
                            InvalidDiagnosis('EXPECTING_ATEXT'))
                # Folding White Space (FWS)
                elif token in [Char.CR, Char.SP, Char.HTAB]:
                    # Skip simulates the use of ++ operator if the latter
                    # check doesn't short-circuit
                    if token == Char.CR:
                        skip = True

                        if (i+1 == raw_length or
                                to_char(address[i+1]) != Char.LF):
                            return_status.append(
                                InvalidDiagnosis('CR_NO_LF'))
                            break

                    if element_len == 0:
                        if element_count == 0:
                            return_status.append(CFWSDiagnosis('FWS'))
                        else:
                            return_status.append(
                                DeprecatedDiagnosis('FWS'))
                    else:
                        # We can't start FWS in the middle of an element,
                        # so this better be the end
                        end_or_die = True

                    context_stack.append(context)
                    context = Context.FWS
                    token_prior = token
                # @
                elif token == Char.AT:
                    # At this point we should have a valid local-part
                    if len(context_stack) != 1:  # pragma: no cover
                        if diagnose:
                            return InvalidDiagnosis('BAD_PARSE')
                        else:
                            return False

                    if parse_data[Context.LOCALPART] == '':
                        # Fatal error
                        return_status.append(
                            InvalidDiagnosis('NOLOCALPART'))
                    elif element_len == 0:
                        # Fatal error
                        return_status.append(InvalidDiagnosis('DOT_END'))
                    # http://tools.ietf.org/html/rfc5321#section-4.5.3.1.1
                    # The maximum total length of a user name or other
                    # local-part is 64 octets.
                    elif len(parse_data[Context.LOCALPART]) > 64:
                        return_status.append(
                            RFC5322Diagnosis('LOCAL_TOOLONG'))
                    # http://tools.ietf.org/html/rfc5322#section-3.4.1
                    # Comments and folding white space SHOULD NOT be used
                    # around the "@" in the addr-spec.
                    #
                    # http://tools.ietf.org/html/rfc2119
                    # 4. SHOULD NOT: there may exist valid reasons in
                    # particular circumstances when the particular
                    # behavior is acceptable or even useful, but the full
                    # implications should be understood and the case
                    # carefully weighed before implementing any behavior
                    # described with this label.
                    elif context_prior in [Context.COMMENT, Context.FWS]:
                        return_status.append(
                            DeprecatedDiagnosis('CFWS_NEAR_AT'))

                    # Clear everything down for the domain parsing
                    context = Context.DOMAIN
                    context_stack = []
                    element_count = 0
                    element_len = 0
                    # CFWS can only appear at the end of the element
                    end_or_die = False
                # atext
                else:
                    # http://tools.ietf.org/html/rfc5322#section-3.2.3
                    #   atext = ALPHA / DIGIT /  ; Printable US-ASCII
                    #           "!" / "#" /      ; characters not
                    #           "$" / "%" /      ; including specials.
                    #           "&" / "'" /      ; Used for atoms.
                    #           "*" / "+" /
                    #           "-" / "/" /
                    #           "=" / "?" /
                    #           "^" / "_" /
                    #           "`" / "{" /
                    #           "|" / "}" /
                    #           "~"
                    if end_or_die:
                        # We have encountered atext where it is no longer
                        # valid
                        if context_prior in [Context.COMMENT, Context.FWS]:
                            return_status.append(
                                InvalidDiagnosis('ATEXT_AFTER_CFWS'))
                        elif context_prior == Context.QUOTEDSTRING:
                            return_status.append(
                                InvalidDiagnosis('ATEXT_AFTER_QS'))
                        else:  # pragma: no cover
                            if diagnose:
                                return InvalidDiagnosis('BAD_PARSE')
                            else:
                                return False
                    else:
                        context_prior = context
                        o = ord(token)

                        if (o < 33 or o > 126 or o == 10 or
                                token in Char.SPECIALS):
                            return_status.append(
                                InvalidDiagnosis('EXPECTING_ATEXT'))

                        parse_data[Context.LOCALPART] += token
                        atom_list[Context.LOCALPART][element_count] += token
                        element_len += 1
            # -------------------------------------------------------
            # Domain
            # -------------------------------------------------------
            elif context == Context.DOMAIN:
                # http://tools.ietf.org/html/rfc5322#section-3.4.1
                #   domain         = dot-atom / domain-literal / obs-domain
                #   dot-atom       = [CFWS] dot-atom-text [CFWS]
                #   dot-atom-text  = 1*atext *("." 1*atext)
                #   domain-literal = [CFWS]
                #                    "[" *([FWS] dtext) [FWS] "]"
                #                    [CFWS]
                #   dtext          = %d33-90 /  ; Printable US-ASCII
                #                    %d94-126 / ; characters not
                #                    obs-dtext  ; including [, ], or \
                #   obs-domain     = atom *("." atom)
                #   atom           = [CFWS] 1*atext [CFWS]
                #
                # http://tools.ietf.org/html/rfc5321#section-4.1.2
                #   Mailbox         = Local-part
                #                     "@"
                #                     ( Domain / address-literal )
                #   Domain          = sub-domain *("." sub-domain)
                #   address-literal = "[" ( IPv4-address-literal /
                #                           IPv6-address-literal /
                #                           General-address-literal ) "]"
                #                     ; See Section 4.1.3
                #
                # http://tools.ietf.org/html/rfc5322#section-3.4.1
                #   Note: A liberal syntax for the domain portion of
                #   addr-spec is given here. However, the domain portion
                #   contains addressing information specified by and used
                #   in other protocols (e.g., RFC 1034, RFC 1035, RFC
                #   1123, RFC 5321). It is therefore incumbent upon
                #   implementations to conform to the syntax of addresses
                #   for the context in which they are used.
                #
                # is_email() author's note: it's not clear how to
                # interpret this in the context of a general address
                # validator. The conclusion reached is this: "addressing
                # information" must comply with RFC 5321 (and in turn RFC
                # 1035), anything that is "semantically invisible" must
                # comply only with RFC 5322.

                # Comment
                if token == Char.OPENPARENTHESIS:
                    if element_len == 0:
                        # Comments at the start of the domain are
                        # deprecated in the text.
                        # Comments at the start of a subdomain are
                        # obs-domain
                        # (http://tools.ietf.org/html/rfc5322#section-3.4.1)
                        if element_count == 0:
                            return_status.append(
                                DeprecatedDiagnosis('CFWS_NEAR_AT'))
                        else:
                            return_status.append(
                                DeprecatedDiagnosis('COMMENT'))
                    else:
                        return_status.append(CFWSDiagnosis('COMMENT'))
                        # We can't start a comment in the middle of an
                        # element, so this better be the end
                        end_or_die = True

                    context_stack.append(context)
                    context = Context.COMMENT
                # Next dot-atom element
                elif token == Char.DOT:
                    if element_len == 0:
                        # Another dot, already? Fatal error
                        if element_count == 0:
                            return_status.append(
                                InvalidDiagnosis('DOT_START'))
                        else:
                            return_status.append(
                                InvalidDiagnosis('CONSECUTIVEDOTS'))
                    elif hyphen_flag:
                        # Previous subdomain ended in a hyphen. Fatal error
                        return_status.append(
                            InvalidDiagnosis('DOMAINHYPHENEND'))
                    else:
                        # Nowhere in RFC 5321 does it say explicitly that
                        # the domain part of a Mailbox must be a valid
                        # domain according to the DNS standards set out in
                        # RFC 1035, but this *is* implied in several
                        # places. For instance, wherever the idea of host
                        # routing is discussed the RFC says that the
                        # domain must be looked up in the DNS. This would
                        # be nonsense unless the domain was designed to be
                        # a valid DNS domain. Hence we must conclude that
                        # the RFC 1035 restriction on label length also
                        # applies to RFC 5321 domains.
                        #
                        # http://tools.ietf.org/html/rfc1035#section-2.3.4
                        # labels 63 octets or less
                        if element_len > 63:
                            return_status.append(
                                RFC5322Diagnosis('LABEL_TOOLONG'))

                        # CFWS is OK again now we're at the beginning of
                        # an element (although it may be obsolete CFWS)
                        end_or_die = False
                        element_len = 0
                        element_count += 1
                        atom_list[Context.DOMAIN].append('')
                        parse_data[Context.DOMAIN] += token
                # Domain literal
                elif token == Char.OPENSQBRACKET:
                    if parse_data[Context.DOMAIN] == '':
                        # Domain literal must be the only component
                        end_or_die = True
                        element_len += 1
                        context_stack.append(context)
                        context = Context.LITERAL
                        parse_data[Context.DOMAIN] += token
                        atom_list[Context.DOMAIN][element_count] += token
                        parse_data['literal'] = ''
                    else:
                        # Fatal error
                        return_status.append(
                            InvalidDiagnosis('EXPECTING_ATEXT'))
                # Folding White Space (FWS)
                elif token in [Char.CR, Char.SP, Char.HTAB]:
                    # Skip simulates the use of ++ operator if the latter
                    # check doesn't short-circuit
                    if token == Char.CR:
                        skip = True

                        if i+1 == raw_length or (to_char(address[i + 1]) !=
                                                 Char.LF):
                            # Fatal error
                            return_status.append(
                                InvalidDiagnosis('CR_NO_LF'))
                            break

                    if element_len == 0:
                        if element_count == 0:
                            return_status.append(
                                DeprecatedDiagnosis('CFWS_NEAR_AT'))
                        else:
                            return_status.append(
                                DeprecatedDiagnosis('FWS'))
                    else:
                        return_status.append(CFWSDiagnosis('FWS'))
                        # We can't start FWS in the middle of an element,
                        # so this better be the end
                        end_or_die = True

                    context_stack.append(context)
                    context = Context.FWS
                    token_prior = token
                # atext
                else:
                    # RFC 5322 allows any atext...
                    # http://tools.ietf.org/html/rfc5322#section-3.2.3
                    #   atext = ALPHA / DIGIT / "!" / "#" / "$" / "%" /
                    #           "&" / "'" / "*" / "+" / "-" / "/" / "=" /
                    #           "?" / "^" / "_" / "`" / "{" / "|" / "}" /
                    #           "~"
                    #
                    # But RFC 5321 only allows letter-digit-hyphen to
                    # comply with DNS rules (RFCs 1034 & 1123)
                    # http://tools.ietf.org/html/rfc5321#section-4.1.2
                    #   sub-domain = Let-dig [Ldh-str]
                    #   Let-dig    = ALPHA / DIGIT
                    #   Ldh-str    = *( ALPHA / DIGIT / "-" ) Let-dig
                    if end_or_die:
                        # We have encountered atext where it is no longer
                        # valid
                        if context_prior in [Context.COMMENT, Context.FWS]:
                            return_status.append(
                                InvalidDiagnosis('ATEXT_AFTER_CFWS'))
                        elif context_prior == Context.LITERAL:
                            return_status.append(
                                InvalidDiagnosis('ATEXT_AFTER_DOMLIT'))
                        else:  # pragma: no cover
                            if diagnose:
                                return InvalidDiagnosis('BAD_PARSE')
                            else:
                                return False

                    o = ord(token)
                    # Assume this token isn't a hyphen unless we discover
                    # it is
                    hyphen_flag = False

                    if o < 33 or o > 126 or token in Char.SPECIALS:
                        # Fatal error
                        return_status.append(
                            InvalidDiagnosis('EXPECTING_ATEXT'))
                    elif token == Char.HYPHEN:
                        if element_len == 0:
                            # Hyphens can't be at the beginning of a
                            # subdomain
                            # Fatal error
                            return_status.append(
                                InvalidDiagnosis('DOMAINHYPHENSTART'))

                        hyphen_flag = True
                    elif not (47 < o < 58 or 64 < o < 91 or 96 < o < 123):
                        # Not an RFC 5321 subdomain, but still OK by RFC
                        # 5322
                        return_status.append(RFC5322Diagnosis('DOMAIN'))

                    parse_data[Context.DOMAIN] += token
                    atom_list[Context.DOMAIN][element_count] += token
                    element_len += 1
            # -------------------------------------------------------
            # Domain literal
            # -------------------------------------------------------
            elif context == Context.LITERAL:
                # http://tools.ietf.org/html/rfc5322#section-3.4.1
                #   domain-literal = [CFWS]
                #                    "[" *([FWS] dtext) [FWS] "]"
                #                    [CFWS]
                #   dtext          = %d33-90 /  ; Printable US-ASCII
                #                    %d94-126 / ; characters not
                #                    obs-dtext  ; including [, ], or \
                #   obs-dtext      = obs-NO-WS-CTL / quoted-pair

                # End of domain literal
                if token == Char.CLOSESQBRACKET:
                    if (max(return_status) <
                            BaseDiagnosis.CATEGORIES['DEPREC']):
                        # Could be a valid RFC 5321 address literal, so
                        # let's check
                        #
                        # http://tools.ietf.org/html/rfc5321#section-4.1.2
                        #   address-literal = "[" (
                        #       IPv4-address-literal /
                        #       IPv6-address-literal /
                        #       General-address-literal ) "]"
                        #
                        # http://tools.ietf.org/html/rfc5321#section-4.1.3
                        #   IPv4-address-literal = Snum 3("." Snum)
                        #   IPv6-address-literal = "IPv6:" IPv6-addr
                        #   Snum       = 1*3DIGIT  ; 0-255
                        #   IPv6-addr  = IPv6-full / IPv6-comp /
                        #                IPv6v4-full / IPv6v4-comp
                        #   IPv6-hex   = 1*4HEXDIG
                        #   IPv6-full  = IPv6-hex 7(":" IPv6-hex)
                        #   IPv6-comp  = [IPv6-hex *5(":" IPv6-hex)] "::"
                        #                [IPv6-hex *5(":" IPv6-hex)]
                        #                ; "::" elides >= 2 zero groups;
                        #                ; at most 6 other groups
                        #   IPv6v4-full = IPv6-hex 5(":" IPv6-hex) ":"
                        #                 IPv4-address-literal
                        #   IPv6v4-comp = [IPv6-hex *3(":" IPv6-hex)] "::"
                        #                 [IPv6-hex *3(":" IPv6-hex) ":"]
                        #                 IPv4-address-literal
                        #                 ; "::" elides >= 2 zero groups;
                        #                 ; at most 4 other groups
                        max_groups = 8
                        index = False
                        address_literal = parse_data['literal']

                        # Extract IPv4 part from the end of the
                        # address-literal (if there is one)
                        regex = (
                            r"\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.)"
                            r"{3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
                        )
                        match_ip = re.search(regex, address_literal)

                        if match_ip:
                            index = address_literal.rfind(
                                match_ip.group(0))

                            if index != 0:
                                # Convert IPv4 part to IPv6 format for
                                # further testing
                                address_literal = (
                                    address_literal[0:index] + '0:0')

                        if index == 0 and index is not False:
                            # Nothing there except a valid IPv4 address
                            return_status.append(
                                RFC5321Diagnosis('ADDRESSLITERAL'))
                        elif not address_literal.startswith(Char.IPV6TAG):
                            return_status.append(
                                RFC5322Diagnosis('DOMAINLITERAL'))
                        else:
                            # Strip the "IPv6:" tag before group checks
                            ipv6 = address_literal[5:]
                            # Revision 2.7: Daniel Marschall's new IPv6
                            # testing strategy
                            match_ip = ipv6.split(Char.COLON)
                            grp_count = len(match_ip)
                            index = ipv6.find(Char.DOUBLECOLON)

                            if index == -1:
                                # We need exactly the right number of
                                # groups
                                if grp_count != max_groups:
                                    return_status.append(
                                        RFC5322Diagnosis('IPV6_GRPCOUNT'))
                            else:
                                if index != ipv6.rfind(Char.DOUBLECOLON):
                                    return_status.append(
                                        RFC5322Diagnosis('IPV6_2X2XCOLON'))
                                else:
                                    if index in [0, len(ipv6) - 2]:
                                        # RFC 4291 allows :: at the start
                                        # or end of an address with 7
                                        # other groups in addition
                                        max_groups += 1

                                    if grp_count > max_groups:
                                        return_status.append(
                                            RFC5322Diagnosis(
                                                'IPV6_MAXGRPS'))
                                    elif grp_count == max_groups:
                                        # Eliding a single "::"
                                        return_status.append(
                                            RFC5321Diagnosis(
                                                'IPV6DEPRECATED'))

                            # Revision 2.7: Daniel Marschall's new IPv6
                            # testing strategy
                            if (ipv6[0] == Char.COLON and
                                    ipv6[1] != Char.COLON):
                                # Address starts with a single colon
                                return_status.append(
                                    RFC5322Diagnosis('IPV6_COLONSTRT'))
                            elif (ipv6[-1] == Char.COLON and
                                    ipv6[-2] != Char.COLON):
                                # Address ends with a single colon
                                return_status.append(
                                    RFC5322Diagnosis('IPV6_COLONEND'))
                            elif ([re.match(r"^[0-9A-Fa-f]{0,4}$", i)
                                   for i in match_ip].count(None) != 0):
                                # Check for unmatched characters
                                return_status.append(
                                    RFC5322Diagnosis('IPV6_BADCHAR'))
                            else:
                                return_status.append(
                                    RFC5321Diagnosis('ADDRESSLITERAL'))
                    else:
                        return_status.append(
                            RFC5322Diagnosis('DOMAINLITERAL'))

                    parse_data[Context.DOMAIN] += token
                    atom_list[Context.DOMAIN][element_count] += token
                    element_len += 1
                    context_prior = context
                    context = context_stack.pop()
                elif token == Char.BACKSLASH:
                    return_status.append(
                        RFC5322Diagnosis('DOMLIT_OBSDTEXT'))
                    context_stack.append(context)
                    context = Context.QUOTEDPAIR
                # Folding White Space (FWS)
                elif token in [Char.CR, Char.SP, Char.HTAB]:
                    # Skip simulates the use of ++ operator if the latter
                    # check doesn't short-circuit
                    if token == Char.CR:
                        skip = True

                        if (i+1 == raw_length or
                                to_char(address[i+1]) != Char.LF):
                            return_status.append(
                                InvalidDiagnosis('CR_NO_LF'))
                            break

                    return_status.append(CFWSDiagnosis('FWS'))
                    context_stack.append(context)
                    context = Context.FWS
                    token_prior = token
                # dtext
                else:
                    # http://tools.ietf.org/html/rfc5322#section-3.4.1
                    #   dtext         = %d33-90 /  ; Printable US-ASCII
                    #                   %d94-126 / ; characters not
                    #                   obs-dtext  ; including [, ], or \
                    #   obs-dtext     = obs-NO-WS-CTL / quoted-pair
                    #   obs-NO-WS-CTL = %d1-8 /   ; US-ASCII control
                    #                   %d11 /    ; characters that do
                    #                   %d12 /    ; not include the
                    #                   %d14-31 / ; carriage return, line
                    #                   %d127     ; feed, and white space
                    #                             ; characters
                    o = ord(token)

                    # CR, LF, SP & HTAB have already been parsed above
                    if o > 127 or o == 0 or token == Char.OPENSQBRACKET:
                        # Fatal error
                        return_status.append(
                            InvalidDiagnosis('EXPECTING_DTEXT'))
                        break
                    elif o < 33 or o == 127:
                        return_status.append(
                            RFC5322Diagnosis('DOMLIT_OBSDTEXT'))

                    parse_data['literal'] += token
                    parse_data[Context.DOMAIN] += token
                    atom_list[Context.DOMAIN][element_count] += token
                    element_len += 1
            # -------------------------------------------------------
            # Quoted string
            # -------------------------------------------------------
            elif context == Context.QUOTEDSTRING:
                # http://tools.ietf.org/html/rfc5322#section-3.2.4
                #   quoted-string = [CFWS]
                #                   DQUOTE *([FWS] qcontent) [FWS] DQUOTE
                #                   [CFWS]
                #   qcontent      = qtext / quoted-pair

                # Quoted pair
                if token == Char.BACKSLASH:
                    context_stack.append(context)
                    context = Context.QUOTEDPAIR
                # Folding White Space (FWS)
                # Inside a quoted string, spaces are allowed as regular
                # characters. It's only FWS if we include HTAB or CRLF
                elif token in [Char.CR, Char.HTAB]:
                    # Skip simulates the use of ++ operator if the latter
                    # check doesn't short-circuit
                    if token == Char.CR:
                        skip = True

                        if (i+1 == raw_length or
                                to_char(address[i+1]) != Char.LF):
                            return_status.append(
                                InvalidDiagnosis('CR_NO_LF'))
                            break

                    # http://tools.ietf.org/html/rfc5322#section-3.2.2
                    #   Runs of FWS, comment, or CFWS that occur between
                    #   lexical tokens in a structured header field are
                    #   semantically interpreted as a single space
                    #   character.
                    # http://tools.ietf.org/html/rfc5322#section-3.2.4
                    #   the CRLF in any FWS/CFWS that appears within the
                    #   quoted string [is] semantically "invisible" and
                    #   therefore not part of the quoted-string
                    parse_data[Context.LOCALPART] += Char.SP
                    atom_list[Context.LOCALPART][element_count] += Char.SP
                    element_len += 1

                    return_status.append(CFWSDiagnosis('FWS'))
                    context_stack.append(context)
                    context = Context.FWS
                    token_prior = token
                # End of quoted string
                elif token == Char.DQUOTE:
                    parse_data[Context.LOCALPART] += token
                    atom_list[Context.LOCALPART][element_count] += token
                    element_len += 1
                    context_prior = context
                    context = context_stack.pop()
                # qtext
                else:
                    # http://tools.ietf.org/html/rfc5322#section-3.2.4
                    #   qtext         = %d33 /     ; Printable US-ASCII
                    #                   %d35-91 /  ; characters not
                    #                   %d93-126 / ; including "\" or
                    #                   obs-qtext  ; the quote character
                    #   obs-qtext     = obs-NO-WS-CTL
                    #   obs-NO-WS-CTL = %d1-8 /   ; US-ASCII control
                    #                   %d11 /    ; characters that do
                    #                   %d12 /    ; not include the CR,
                    #                   %d14-31 / ; LF, and white space
                    #                   %d127     ; characters
                    o = ord(token)

                    if o > 127 or o == 0 or o == 10:
                        # Fatal error
                        return_status.append(
                            InvalidDiagnosis('EXPECTING_QTEXT'))
                    elif o < 32 or o == 127:
                        return_status.append(
                            DeprecatedDiagnosis('QTEXT'))

                    parse_data[Context.LOCALPART] += token
                    atom_list[Context.LOCALPART][element_count] += token
                    element_len += 1
            # -------------------------------------------------------
            # Quoted pair
            # -------------------------------------------------------
            elif context == Context.QUOTEDPAIR:
                # http://tools.ietf.org/html/rfc5322#section-3.2.1
                #   quoted-pair   = ("\" (VCHAR / WSP)) / obs-qp
                #   VCHAR         = %d33-126  ; visible (printing) chars
                #   WSP           = SP / HTAB ; white space
                #   obs-qp        = "\" (%d0 / obs-NO-WS-CTL / LF / CR)
                #   obs-NO-WS-CTL = %d1-8 /   ; US-ASCII control
                #                   %d11 /    ; characters that do not
                #                   %d12 /    ; include the carriage
                #                   %d14-31 / ; return, line feed, and
                #                   %d127     ; white space characters
                #
                # i.e. obs-qp = "\" (%d0-8, %d10-31 / %d127)
                o = ord(token)

                if o > 127:
                    # Fatal error
                    return_status.append(
                        InvalidDiagnosis('EXPECTING_QPAIR'))
                elif (o < 31 and o != 9) or o == 127:
                    # SP & HTAB are allowed
                    return_status.append(DeprecatedDiagnosis('QP'))

                # At this point we know where this qpair occurred so we
                # could check to see if the character actually needed to
                # be quoted at all.
                # http://tools.ietf.org/html/rfc5321#section-4.1.2
                #   the sending system SHOULD transmit the form that uses
                #   the minimum quoting possible.
                context_prior = context
                context = context_stack.pop()  # End of qpair
                token = Char.BACKSLASH + token

                if context == Context.COMMENT:
                    pass
                elif context == Context.QUOTEDSTRING:
                    parse_data[Context.LOCALPART] += token
                    atom_list[Context.LOCALPART][element_count] += token
                    # The maximum sizes specified by RFC 5321 are octet
                    # counts, so we must include the backslash
                    element_len += 2
                elif context == Context.LITERAL:
                    parse_data[Context.DOMAIN] += token
                    atom_list[Context.DOMAIN][element_count] += token
                    # The maximum sizes specified by RFC 5321 are octet
                    # counts, so we must include the backslash
                    element_len += 2
                else:  # pragma: no cover
                    if diagnose:
                        return InvalidDiagnosis('BAD_PARSE')
                    else:
                        return False
            # -------------------------------------------------------
            # Comment
            # -------------------------------------------------------
            elif context == Context.COMMENT:
                # http://tools.ietf.org/html/rfc5322#section-3.2.2
                #   comment  = "(" *([FWS] ccontent) [FWS] ")"
                #   ccontent = ctext / quoted-pair / comment

                # Nested comment
                if token == Char.OPENPARENTHESIS:
                    # Nested comments are OK
                    context_stack.append(context)
                    context = Context.COMMENT
                # End of comment
                elif token == Char.CLOSEPARENTHESIS:
                    context_prior = context
                    context = context_stack.pop()
                # Quoted pair
                elif token == Char.BACKSLASH:
                    context_stack.append(context)
                    context = Context.QUOTEDPAIR
                # Folding White Space (FWS)
                elif token in [Char.CR, Char.SP, Char.HTAB]:
                    # Skip simulates the use of ++ operator if the latter
                    # check doesn't short-circuit
                    if token == Char.CR:
                        skip = True

                        if (i+1 == raw_length or
                                to_char(address[i+1]) != Char.LF):
                            return_status.append(
                                InvalidDiagnosis('CR_NO_LF'))
                            break

                    return_status.append(CFWSDiagnosis('FWS'))
                    context_stack.append(context)
                    context = Context.FWS
                    token_prior = token
                # ctext
                else:
                    # http://tools.ietf.org/html/rfc5322#section-3.2.3
                    #   ctext         = %d33-39 /  ; Printable US-ASCII
                    #                   %d42-91 /  ; characters not
                    #                   %d93-126 / ; including "(", ")",
                    #                   obs-ctext  ; or "\"
                    #   obs-ctext     = obs-NO-WS-CTL
                    #   obs-NO-WS-CTL = %d1-8 /   ; US-ASCII control
                    #                   %d11 /    ; characters that do
                    #                   %d12 /    ; not include the CR,
                    #                   %d14-31   ; LF, and white space
                    #                             ; characters
                    o = ord(token)

                    if o > 127 or o == 0 or o == 10:
                        # Fatal error
                        return_status.append(
                            InvalidDiagnosis('EXPECTING_CTEXT'))
                        break
                    elif o < 32 or o == 127:
                        return_status.append(DeprecatedDiagnosis('CTEXT'))
            # -------------------------------------------------------
            # Folding White Space (FWS)
            # -------------------------------------------------------
            elif context == Context.FWS:
                # http://tools.ietf.org/html/rfc5322#section-3.2.2
                #   FWS = ([*WSP CRLF] 1*WSP) / obs-FWS
                #         ; Folding white space
                #
                # But note the erratum:
                # http://www.rfc-editor.org/errata_search.php?rfc=5322&eid=1908
                #   In the obsolete syntax, any amount of folding white
                #   space MAY be inserted where the obs-FWS rule is
                #   allowed. This creates the possibility of having two
                #   consecutive "folds" in a line, and therefore the
                #   possibility that a line which makes up a folded
                #   header field could be composed entirely of white
                #   space.
                #
                #   obs-FWS = 1*([CRLF] WSP)
                if token_prior == Char.CR:
                    if token == Char.CR:
                        # Fatal error
                        return_status.append(
                            InvalidDiagnosis('FWS_CRLF_X2'))
                        break

                    if crlf_count != -1:
                        crlf_count += 1
                        if crlf_count > 1:
                            # Multiple folds = obsolete FWS
                            return_status.append(
                                DeprecatedDiagnosis('FWS'))
                    else:
                        crlf_count = 1

                # Skip simulates the use of ++ operator if the latter
                # check doesn't short-circuit
                if token == Char.CR:
                    skip = True

                    if (i+1 == raw_length or
                            to_char(address[i+1]) != Char.LF):
                        return_status.append(InvalidDiagnosis('CR_NO_LF'))
                        break
                elif token in [Char.SP, Char.HTAB]:
                    pass
                else:
                    if token_prior == Char.CR:
                        # Fatal error
                        return_status.append(
                            InvalidDiagnosis('FWS_CRLF_END'))
                        break

                    if crlf_count != -1:
                        crlf_count = -1

                    context_prior = context
                    context = context_stack.pop()  # End of FWS

                    # Look at this token again in the parent context
                    repeat = True

                token_prior = token
            # -------------------------------------------------------
            # A context we aren't expecting
            # -------------------------------------------------------
            else:  # pragma: no cover
                if diagnose:
                    return InvalidDiagnosis('BAD_PARSE')
                else:
                    return False

        # No point in going on if we've got a fatal error
        if max(return_status) > BaseDiagnosis.CATEGORIES['RFC5322']:
            break

    # Some simple final tests
    if max(return_status) < BaseDiagnosis.CATEGORIES['RFC5322']:
        if context == Context.QUOTEDSTRING:
            # Fatal error
            return_status.append(InvalidDiagnosis('UNCLOSEDQUOTEDSTR'))
        elif context == Context.QUOTEDPAIR:
            # Fatal error
            return_status.append(InvalidDiagnosis('BACKSLASHEND'))
        elif context == Context.COMMENT:
            # Fatal error
            return_status.append(InvalidDiagnosis('UNCLOSEDCOMMENT'))
        elif context == Context.LITERAL:
            # Fatal error
            return_status.append(InvalidDiagnosis('UNCLOSEDDOMLIT'))
        elif token == Char.CR:
            # Fatal error
            return_status.append(InvalidDiagnosis('FWS_CRLF_END'))
        elif parse_data[Context.DOMAIN] == '':
            # Fatal error
            return_status.append(InvalidDiagnosis('NODOMAIN'))
        elif element_len == 0:
            # Fatal error
            return_status.append(InvalidDiagnosis('DOT_END'))
        elif hyphen_flag:
            # Fatal error
            return_status.append(InvalidDiagnosis('DOMAINHYPHENEND'))
        # http://tools.ietf.org/html/rfc5321#section-4.5.3.1.2
        # The maximum total length of a domain name or number is 255
        # octets
        elif len(parse_data[Context.DOMAIN]) > 255:
            return_status.append(RFC5322Diagnosis('DOMAIN_TOOLONG'))
        # http://tools.ietf.org/html/rfc5321#section-4.1.2
        #   Forward-path = Path
        #   Path         = "<" [ A-d-l ":" ] Mailbox ">"
        #
        # http://tools.ietf.org/html/rfc5321#section-4.5.3.1.3
        #   The maximum total length of a reverse-path or forward-path
        #   is 256 octets (including the punctuation and element
        #   separators).
        #
        # Thus, even without (obsolete) routing information, the Mailbox
        # can only be 254 characters long. This is confirmed by this
        # verified erratum to RFC 3696:
        #
        # http://www.rfc-editor.org/errata_search.php?rfc=3696&eid=1690
        #   However, there is a restriction in RFC 2821 on the length of
        #   an address in MAIL and RCPT commands of 254 characters.
        #   Since addresses that do not fit in those fields are not
        #   normally useful, the upper limit on address lengths should
        #   normally be considered to be 254.
        elif len(parse_data[Context.LOCALPART] + Char.AT +
                 parse_data[Context.DOMAIN]) > 254:
            return_status.append(RFC5322Diagnosis('TOOLONG'))
        # http://tools.ietf.org/html/rfc1035#section-2.3.4
        # labels 63 octets or less
        elif element_len > 63:
            return_status.append(RFC5322Diagnosis('LABEL_TOOLONG'))

    # Deduplicate diagnoses; the worst one decides the verdict
    return_status = list(set(return_status))
    final_status = max(return_status)

    if len(return_status) != 1:
        # Remove redundant ValidDiagnosis
        return_status.pop(0)

    parse_data['status'] = return_status

    if final_status < threshold:
        final_status = ValidDiagnosis()

    if diagnose:
        return final_status
    else:
        return final_status < BaseDiagnosis.CATEGORIES['THRESHOLD']
[ "def", "is_email", "(", "self", ",", "address", ",", "diagnose", "=", "False", ")", ":", "threshold", "=", "BaseDiagnosis", ".", "CATEGORIES", "[", "'VALID'", "]", "return_status", "=", "[", "ValidDiagnosis", "(", ")", "]", "parse_data", "=", "{", "}", "# Parse the address into components, character by character", "raw_length", "=", "len", "(", "address", ")", "context", "=", "Context", ".", "LOCALPART", "# Where we are", "context_stack", "=", "[", "context", "]", "# Where we've been", "context_prior", "=", "Context", ".", "LOCALPART", "# Where we just came from", "token", "=", "''", "# The current character", "token_prior", "=", "''", "# The previous character", "parse_data", "[", "Context", ".", "LOCALPART", "]", "=", "''", "# The address' components", "parse_data", "[", "Context", ".", "DOMAIN", "]", "=", "''", "atom_list", "=", "{", "Context", ".", "LOCALPART", ":", "[", "''", "]", ",", "Context", ".", "DOMAIN", ":", "[", "''", "]", "}", "# The address' dot-atoms", "element_count", "=", "0", "element_len", "=", "0", "hyphen_flag", "=", "False", "# Hyphen cannot occur at the end of a subdomain", "end_or_die", "=", "False", "# CFWS can only appear at the end of an element", "skip", "=", "False", "# Skip flag that simulates i++", "crlf_count", "=", "-", "1", "# crlf_count = -1 == !isset(crlf_count)", "for", "i", "in", "_range", "(", "raw_length", ")", ":", "# Skip simulates the use of ++ operator", "if", "skip", ":", "skip", "=", "False", "continue", "token", "=", "address", "[", "i", "]", "token", "=", "to_char", "(", "token", ")", "# Switch to simulate decrementing; needed for FWS", "repeat", "=", "True", "while", "repeat", ":", "repeat", "=", "False", "# -------------------------------------------------------", "# Local part", "# -------------------------------------------------------", "if", "context", "==", "Context", ".", "LOCALPART", ":", "# http://tools.ietf.org/html/rfc5322#section-3.4.1", "# local-part = dot-atom / quoted-string /", "# obs-local-part", 
"#", "# dot-atom = [CFWS] dot-atom-text [CFWS]", "#", "# dot-atom-text = 1*atext *(\".\" 1*atext)", "#", "# quoted-string = [CFWS]", "# DQUOTE *([FWS] qcontent) [FWS] DQUOTE", "# [CFWS]", "#", "# obs-local-part = word *(\".\" word)", "#", "# word = atom / quoted-string", "#", "# atom = [CFWS] 1*atext [CFWS]", "if", "token", "==", "Char", ".", "OPENPARENTHESIS", ":", "if", "element_len", "==", "0", ":", "# Comments are OK at the beginning of an element", "if", "element_count", "==", "0", ":", "return_status", ".", "append", "(", "CFWSDiagnosis", "(", "'COMMENT'", ")", ")", "else", ":", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'COMMENT'", ")", ")", "else", ":", "return_status", ".", "append", "(", "CFWSDiagnosis", "(", "'COMMENT'", ")", ")", "# We can't start a comment in the middle of an", "# element, so this better be the end", "end_or_die", "=", "True", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "COMMENT", "elif", "token", "==", "Char", ".", "DOT", ":", "if", "element_len", "==", "0", ":", "# Another dot, already? Fatal error", "if", "element_count", "==", "0", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'DOT_START'", ")", ")", "else", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'CONSECUTIVEDOTS'", ")", ")", "else", ":", "# The entire local-part can be a quoted string for", "# RFC 5321. 
If it's just one atom that is quoted", "# then it's an RFC 5322 obsolete form", "if", "end_or_die", ":", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'LOCALPART'", ")", ")", "# CFWS & quoted strings are OK again now we're at", "# the beginning of an element (although they are", "# obsolete forms)", "end_or_die", "=", "False", "element_len", "=", "0", "element_count", "+=", "1", "parse_data", "[", "Context", ".", "LOCALPART", "]", "+=", "token", "atom_list", "[", "Context", ".", "LOCALPART", "]", ".", "append", "(", "''", ")", "elif", "token", "==", "Char", ".", "DQUOTE", ":", "if", "element_len", "==", "0", ":", "# The entire local-part can be a quoted string for", "# RFC 5321. If it's just one atom that is quoted", "# then it's an RFC 5322 obsolete form", "if", "element_count", "==", "0", ":", "return_status", ".", "append", "(", "RFC5321Diagnosis", "(", "'QUOTEDSTRING'", ")", ")", "else", ":", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'LOCALPART'", ")", ")", "parse_data", "[", "Context", ".", "LOCALPART", "]", "+=", "token", "atom_list", "[", "Context", ".", "LOCALPART", "]", "[", "element_count", "]", "+=", "token", "element_len", "+=", "1", "end_or_die", "=", "True", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "QUOTEDSTRING", "else", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'EXPECTING_ATEXT'", ")", ")", "# Folding White Space (FWS)", "elif", "token", "in", "[", "Char", ".", "CR", ",", "Char", ".", "SP", ",", "Char", ".", "HTAB", "]", ":", "# Skip simulates the use of ++ operator if the latter", "# check doesn't short-circuit", "if", "token", "==", "Char", ".", "CR", ":", "skip", "=", "True", "if", "(", "i", "+", "1", "==", "raw_length", "or", "to_char", "(", "address", "[", "i", "+", "1", "]", ")", "!=", "Char", ".", "LF", ")", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'CR_NO_LF'", ")", ")", "break", "if", 
"element_len", "==", "0", ":", "if", "element_count", "==", "0", ":", "return_status", ".", "append", "(", "CFWSDiagnosis", "(", "'FWS'", ")", ")", "else", ":", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'FWS'", ")", ")", "else", ":", "# We can't start FWS in the middle of an element,", "# so this better be the end", "end_or_die", "=", "True", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "FWS", "token_prior", "=", "token", "# @", "elif", "token", "==", "Char", ".", "AT", ":", "# At this point we should have a valid local-part", "if", "len", "(", "context_stack", ")", "!=", "1", ":", "# pragma: no cover", "if", "diagnose", ":", "return", "InvalidDiagnosis", "(", "'BAD_PARSE'", ")", "else", ":", "return", "False", "if", "parse_data", "[", "Context", ".", "LOCALPART", "]", "==", "''", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'NOLOCALPART'", ")", ")", "elif", "element_len", "==", "0", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'DOT_END'", ")", ")", "# http://tools.ietf.org/html/rfc5321#section-4.5.3.1.1", "# The maximum total length of a user name or other", "# local-part is 64 octets.", "elif", "len", "(", "parse_data", "[", "Context", ".", "LOCALPART", "]", ")", ">", "64", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'LOCAL_TOOLONG'", ")", ")", "# http://tools.ietf.org/html/rfc5322#section-3.4.1", "# Comments and folding white space", "# SHOULD NOT be used around the \"@\" in the addr-spec.", "#", "# http://tools.ietf.org/html/rfc2119", "# 4. 
SHOULD NOT This phrase, or the phrase \"NOT", "# RECOMMENDED\" mean that there may exist valid", "# reasons in particular circumstances when the", "# particular behavior is acceptable or even useful,", "# but the full implications should be understood and", "# the case carefully weighed before implementing any", "# behavior described with this label.", "elif", "context_prior", "in", "[", "Context", ".", "COMMENT", ",", "Context", ".", "FWS", "]", ":", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'CFWS_NEAR_AT'", ")", ")", "# Clear everything down for the domain parsing", "context", "=", "Context", ".", "DOMAIN", "context_stack", "=", "[", "]", "element_count", "=", "0", "element_len", "=", "0", "# CFWS can only appear at the end of the element", "end_or_die", "=", "False", "# atext", "else", ":", "# http://tools.ietf.org/html/rfc5322#section-3.2.3", "# atext = ALPHA / DIGIT / ; Printable US-ASCII", "# \"!\" / \"#\" / ; characters not", "# \"$\" / \"%\" / ; including specials.", "# \"&\" / \"'\" / ; Used for atoms.", "# \"*\" / \"+\" /", "# \"-\" / \"/\" /", "# \"=\" / \"?\" /", "# \"^\" / \"_\" /", "# \"`\" / \"{\" /", "# \"|\" / \"}\" /", "# \"~\"", "if", "end_or_die", ":", "# We have encountered atext where it is no longer", "# valid", "if", "context_prior", "in", "[", "Context", ".", "COMMENT", ",", "Context", ".", "FWS", "]", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'ATEXT_AFTER_CFWS'", ")", ")", "elif", "context_prior", "==", "Context", ".", "QUOTEDSTRING", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'ATEXT_AFTER_QS'", ")", ")", "else", ":", "# pragma: no cover", "if", "diagnose", ":", "return", "InvalidDiagnosis", "(", "'BAD_PARSE'", ")", "else", ":", "return", "False", "else", ":", "context_prior", "=", "context", "o", "=", "ord", "(", "token", ")", "if", "(", "o", "<", "33", "or", "o", ">", "126", "or", "o", "==", "10", "or", "token", "in", "Char", ".", "SPECIALS", ")", ":", 
"return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'EXPECTING_ATEXT'", ")", ")", "parse_data", "[", "Context", ".", "LOCALPART", "]", "+=", "token", "atom_list", "[", "Context", ".", "LOCALPART", "]", "[", "element_count", "]", "+=", "token", "element_len", "+=", "1", "# -------------------------------------------------------", "# Domain", "# -------------------------------------------------------", "elif", "context", "==", "Context", ".", "DOMAIN", ":", "# http://tools.ietf.org/html/rfc5322#section-3.4.1", "# domain = dot-atom / domain-literal / obs-domain", "#", "# dot-atom = [CFWS] dot-atom-text [CFWS]", "#", "# dot-atom-text = 1*atext *(\".\" 1*atext)", "#", "# domain-literal = [CFWS]", "# \"[\" *([FWS] dtext) [FWS] \"]\"", "# [CFWS]", "#", "# dtext = %d33-90 / ; Printable US-ASCII", "# %d94-126 / ; characters not", "# obs-dtext ; including [, ], or \\", "#", "# obs-domain = atom *(\".\" atom)", "#", "# atom = [CFWS] 1*atext [CFWS]", "#", "#", "# http://tools.ietf.org/html/rfc5321#section-4.1.2", "# Mailbox = Local-part", "# \"@\"", "# ( Domain / address-literal )", "#", "# Domain = sub-domain *(\".\" sub-domain)", "#", "# address-literal = \"[\" ( IPv4-address-literal /", "# IPv6-address-literal /", "# General-address-literal ) \"]\"", "# ; See Section 4.1.3", "#", "# http://tools.ietf.org/html/rfc5322#section-3.4.1", "# Note: A liberal syntax for the domain portion of", "# addr-spec is given here. However, the domain portion", "# contains addressing information specified by and", "# used in other protocols (e.g., RFC 1034, RFC 1035,", "# RFC 1123, RFC5321). It is therefore incumbent upon", "# implementations to conform to the syntax of", "# addresse for the context in which they are used.", "# is_email() author's note: it's not clear how to interpret", "# this in the context of a general address address", "# validator. 
The conclusion I have reached is this:", "# \"addressing information\" must comply with RFC 5321 (and", "# in turn RFC 1035), anything that is \"semantically", "# invisible\" must comply only with RFC 5322.", "# Comment", "if", "token", "==", "Char", ".", "OPENPARENTHESIS", ":", "if", "element_len", "==", "0", ":", "# Comments at the start of the domain are", "# deprecated in the text", "# Comments at the start of a subdomain are", "# obs-domain", "# (http://tools.ietf.org/html/rfc5322#section-3.4.1)", "if", "element_count", "==", "0", ":", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'CFWS_NEAR_AT'", ")", ")", "else", ":", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'COMMENT'", ")", ")", "else", ":", "return_status", ".", "append", "(", "CFWSDiagnosis", "(", "'COMMENT'", ")", ")", "# We can't start a comment in the middle of an", "# element, so this better be the end", "end_or_die", "=", "True", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "COMMENT", "# Next dot-atom element", "elif", "token", "==", "Char", ".", "DOT", ":", "if", "element_len", "==", "0", ":", "# Another dot, already? Fatal error", "if", "element_count", "==", "0", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'DOT_START'", ")", ")", "else", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'CONSECUTIVEDOTS'", ")", ")", "elif", "hyphen_flag", ":", "# Previous subdomain ended in a hyphen. Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'DOMAINHYPHENEND'", ")", ")", "else", ":", "# Nowhere in RFC 5321 does it say explicitly that", "# the domain part of a Mailbox must be a valid", "# domain according to the DNS standards set out in", "# RFC 1035, but this *is* implied in several", "# places. For instance, wherever the idea of host", "# routing is discussed the RFC says that the domain", "# must be looked up in the DNS. 
This would be", "# nonsense unless the domain was designed to be a", "# valid DNS domain. Hence we must conclude that the", "# RFC 1035 restriction on label length also applies", "# to RFC 5321 domains.", "#", "# http://tools.ietf.org/html/rfc1035#section-2.3.4", "# labels 63 octets or less", "if", "element_len", ">", "63", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'LABEL_TOOLONG'", ")", ")", "# CFWS is OK again now we're at the beginning of an", "# element (although it may be obsolete CFWS)", "end_or_die", "=", "False", "element_len", "=", "0", "element_count", "+=", "1", "atom_list", "[", "Context", ".", "DOMAIN", "]", ".", "append", "(", "''", ")", "parse_data", "[", "Context", ".", "DOMAIN", "]", "+=", "token", "# Domain literal", "elif", "token", "==", "Char", ".", "OPENSQBRACKET", ":", "if", "parse_data", "[", "Context", ".", "DOMAIN", "]", "==", "''", ":", "# Domain literal must be the only component", "end_or_die", "=", "True", "element_len", "+=", "1", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "LITERAL", "parse_data", "[", "Context", ".", "DOMAIN", "]", "+=", "token", "atom_list", "[", "Context", ".", "DOMAIN", "]", "[", "element_count", "]", "+=", "token", "parse_data", "[", "'literal'", "]", "=", "''", "else", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'EXPECTING_ATEXT'", ")", ")", "# Folding White Space (FWS)", "elif", "token", "in", "[", "Char", ".", "CR", ",", "Char", ".", "SP", ",", "Char", ".", "HTAB", "]", ":", "# Skip simulates the use of ++ operator if the latter", "# check doesn't short-circuit", "if", "token", "==", "Char", ".", "CR", ":", "skip", "=", "True", "if", "i", "+", "1", "==", "raw_length", "or", "(", "to_char", "(", "address", "[", "i", "+", "1", "]", ")", "!=", "Char", ".", "LF", ")", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'CR_NO_LF'", ")", ")", "break", "if", "element_len", 
"==", "0", ":", "if", "element_count", "==", "0", ":", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'CFWS_NEAR_AT'", ")", ")", "else", ":", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'FWS'", ")", ")", "else", ":", "return_status", ".", "append", "(", "CFWSDiagnosis", "(", "'FWS'", ")", ")", "# We can't start FWS in the middle of an element,", "# so this better be the end", "end_or_die", "=", "True", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "FWS", "token_prior", "=", "token", "# atext", "else", ":", "# RFC 5322 allows any atext...", "# http://tools.ietf.org/html/rfc5322#section-3.2.3", "# atext = ALPHA / DIGIT / ; Printable US-ASCII", "# \"!\" / \"#\" / ; characters not", "# \"$\" / \"%\" / ; including specials.", "# \"&\" / \"'\" / ; Used for atoms.", "# \"*\" / \"+\" /", "# \"-\" / \"/\" /", "# \"=\" / \"?\" /", "# \"^\" / \"_\" /", "# \"`\" / \"{\" /", "# \"|\" / \"}\" /", "# \"~\"", "# But RFC 5321 only allows letter-digit-hyphen to", "# comply with DNS rules (RFCs 1034 & 1123)", "# http://tools.ietf.org/html/rfc5321#section-4.1.2", "# sub-domain = Let-dig [Ldh-str]", "#", "# Let-dig = ALPHA / DIGIT", "#", "# Ldh-str = *( ALPHA / DIGIT / \"-\" ) Let-dig", "#", "if", "end_or_die", ":", "# We have encountered atext where it is no longer", "# valid", "if", "context_prior", "in", "[", "Context", ".", "COMMENT", ",", "Context", ".", "FWS", "]", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'ATEXT_AFTER_CFWS'", ")", ")", "elif", "context_prior", "==", "Context", ".", "LITERAL", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'ATEXT_AFTER_DOMLIT'", ")", ")", "else", ":", "# pragma: no cover", "if", "diagnose", ":", "return", "InvalidDiagnosis", "(", "'BAD_PARSE'", ")", "else", ":", "return", "False", "o", "=", "ord", "(", "token", ")", "# Assume this token isn't a hyphen unless we discover", "# it is", "hyphen_flag", "=", "False", "if", "o", 
"<", "33", "or", "o", ">", "126", "or", "token", "in", "Char", ".", "SPECIALS", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'EXPECTING_ATEXT'", ")", ")", "elif", "token", "==", "Char", ".", "HYPHEN", ":", "if", "element_len", "==", "0", ":", "# Hyphens can't be at the beginning of a", "# subdomain", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'DOMAINHYPHENSTART'", ")", ")", "hyphen_flag", "=", "True", "elif", "not", "(", "47", "<", "o", "<", "58", "or", "64", "<", "o", "<", "91", "or", "96", "<", "o", "<", "123", ")", ":", "# Not an RFC 5321 subdomain, but still OK by RFC", "# 5322", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'DOMAIN'", ")", ")", "parse_data", "[", "Context", ".", "DOMAIN", "]", "+=", "token", "atom_list", "[", "Context", ".", "DOMAIN", "]", "[", "element_count", "]", "+=", "token", "element_len", "+=", "1", "# -------------------------------------------------------", "# Domain literal", "# -------------------------------------------------------", "elif", "context", "==", "Context", ".", "LITERAL", ":", "# http://tools.ietf.org/html/rfc5322#section-3.4.1", "# domain-literal = [CFWS]", "# \"[\" *([FWS] dtext) [FWS] \"]\"", "# [CFWS]", "#", "# dtext = %d33-90 / ; Printable US-ASCII", "# %d94-126 / ; characters not", "# obs-dtext ; including [, ], or \\", "#", "# obs-dtext = obs-NO-WS-CTL / quoted-pair", "# End of domain literal", "if", "token", "==", "Char", ".", "CLOSESQBRACKET", ":", "if", "(", "max", "(", "return_status", ")", "<", "BaseDiagnosis", ".", "CATEGORIES", "[", "'DEPREC'", "]", ")", ":", "# Could be a valid RFC 5321 address literal, so", "# let's check", "#", "# http://tools.ietf.org/html/rfc5321#section-4.1.2", "# address-literal = \"[\" ( IPv4-address-literal /", "# IPv6-address-literal /", "# General-address-literal ) \"]\"", "# ; See Section 4.1.3", "#", "# http://tools.ietf.org/html/rfc5321#section-4.1.3", "# IPv4-address-literal = Snum 
3(\".\" Snum)", "#", "# IPv6-address-literal = \"IPv6:\" IPv6-addr", "#", "# General-address-literal = Standardized-tag \":\"", "# 1*dcontent", "#", "# Standardized-tag = Ldh-str", "# ; Standardized-tag MUST be", "# ; specified in a", "# ; Standards-Track RFC and", "# ; registered with IANA", "#", "# dcontent = %d33-90 / ; Printable US-ASCII", "# %d94-126 ; excl. \"[\", \"\\\", \"]\"", "#", "# Snum = 1*3DIGIT", "# ; representing a decimal integer", "# ; value in the range 0-255", "#", "# IPv6-addr = IPv6-full / IPv6-comp /", "# IPv6v4-full / IPv6v4-comp", "#", "# IPv6-hex = 1*4HEXDIG", "#", "# IPv6-full = IPv6-hex 7(\":\" IPv6-hex)", "#", "# IPv6-comp = [IPv6-hex *5(\":\" IPv6-hex)]", "# \"::\"", "# [IPv6-hex *5(\":\" IPv6-hex)]", "# ; The \"::\" represents at least 2", "# ; 16-bit groups of zeros. No more", "# ; than 6 groups in addition to", "# ; the \"::\" may be present.", "#", "# IPv6v4-full = IPv6-hex 5(\":\" IPv6-hex) \":\"", "# IPv4-address-literal", "#", "# IPv6v4-comp = [IPv6-hex *3(\":\" IPv6-hex)]", "# \"::\"", "# [IPv6-hex *3(\":\" IPv6-hex) \":\"]", "# IPv4-address-literal", "# ; The \"::\" represents at least 2", "# ; 16-bit groups of zeros. 
No more", "# ; than 4 groups in addition to", "# ; the \"::\" and", "# ; IPv4-address-literal may be", "# ; present.", "max_groups", "=", "8", "index", "=", "False", "address_literal", "=", "parse_data", "[", "'literal'", "]", "# Extract IPv4 part from the end of the", "# address-literal (if there is one)", "regex", "=", "(", "r\"\\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.)\"", "r\"{3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$\"", ")", "match_ip", "=", "re", ".", "search", "(", "regex", ",", "address_literal", ")", "if", "match_ip", ":", "index", "=", "address_literal", ".", "rfind", "(", "match_ip", ".", "group", "(", "0", ")", ")", "if", "index", "!=", "0", ":", "# Convert IPv4 part to IPv6 format for", "# further testing", "address_literal", "=", "(", "address_literal", "[", "0", ":", "index", "]", "+", "'0:0'", ")", "if", "index", "==", "0", "and", "index", "is", "not", "False", ":", "# Nothing there except a valid IPv4 address", "return_status", ".", "append", "(", "RFC5321Diagnosis", "(", "'ADDRESSLITERAL'", ")", ")", "elif", "not", "address_literal", ".", "startswith", "(", "Char", ".", "IPV6TAG", ")", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'DOMAINLITERAL'", ")", ")", "else", ":", "ipv6", "=", "address_literal", "[", "5", ":", "]", "# Revision 2.7: Daniel Marschall's new IPv6", "# testing strategy", "match_ip", "=", "ipv6", ".", "split", "(", "Char", ".", "COLON", ")", "grp_count", "=", "len", "(", "match_ip", ")", "index", "=", "ipv6", ".", "find", "(", "Char", ".", "DOUBLECOLON", ")", "if", "index", "==", "-", "1", ":", "# We need exactly the right number of", "# groups", "if", "grp_count", "!=", "max_groups", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'IPV6_GRPCOUNT'", ")", ")", "else", ":", "if", "index", "!=", "ipv6", ".", "rfind", "(", "Char", ".", "DOUBLECOLON", ")", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'IPV6_2X2XCOLON'", ")", ")", "else", ":", "if", "index", 
"in", "[", "0", ",", "len", "(", "ipv6", ")", "-", "2", "]", ":", "# RFC 4291 allows :: at the start", "# or end of an address with 7 other", "# groups in addition", "max_groups", "+=", "1", "if", "grp_count", ">", "max_groups", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'IPV6_MAXGRPS'", ")", ")", "elif", "grp_count", "==", "max_groups", ":", "# Eliding a single \"::\"", "return_status", ".", "append", "(", "RFC5321Diagnosis", "(", "'IPV6DEPRECATED'", ")", ")", "# Revision 2.7: Daniel Marschall's new IPv6", "# testing strategy", "if", "(", "ipv6", "[", "0", "]", "==", "Char", ".", "COLON", "and", "ipv6", "[", "1", "]", "!=", "Char", ".", "COLON", ")", ":", "# Address starts with a single colon", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'IPV6_COLONSTRT'", ")", ")", "elif", "(", "ipv6", "[", "-", "1", "]", "==", "Char", ".", "COLON", "and", "ipv6", "[", "-", "2", "]", "!=", "Char", ".", "COLON", ")", ":", "# Address ends with a single colon", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'IPV6_COLONEND'", ")", ")", "elif", "(", "[", "re", ".", "match", "(", "r\"^[0-9A-Fa-f]{0,4}$\"", ",", "i", ")", "for", "i", "in", "match_ip", "]", ".", "count", "(", "None", ")", "!=", "0", ")", ":", "# Check for unmatched characters", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'IPV6_BADCHAR'", ")", ")", "else", ":", "return_status", ".", "append", "(", "RFC5321Diagnosis", "(", "'ADDRESSLITERAL'", ")", ")", "else", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'DOMAINLITERAL'", ")", ")", "parse_data", "[", "Context", ".", "DOMAIN", "]", "+=", "token", "atom_list", "[", "Context", ".", "DOMAIN", "]", "[", "element_count", "]", "+=", "token", "element_len", "+=", "1", "context_prior", "=", "context", "context", "=", "context_stack", ".", "pop", "(", ")", "elif", "token", "==", "Char", ".", "BACKSLASH", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", 
"'DOMLIT_OBSDTEXT'", ")", ")", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "QUOTEDPAIR", "# Folding White Space (FWS)", "elif", "token", "in", "[", "Char", ".", "CR", ",", "Char", ".", "SP", ",", "Char", ".", "HTAB", "]", ":", "# Skip simulates the use of ++ operator if the latter", "# check doesn't short-circuit", "if", "token", "==", "Char", ".", "CR", ":", "skip", "=", "True", "if", "(", "i", "+", "1", "==", "raw_length", "or", "to_char", "(", "address", "[", "i", "+", "1", "]", ")", "!=", "Char", ".", "LF", ")", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'CR_NO_LF'", ")", ")", "break", "return_status", ".", "append", "(", "CFWSDiagnosis", "(", "'FWS'", ")", ")", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "FWS", "token_prior", "=", "token", "# dtext", "else", ":", "# http://tools.ietf.org/html/rfc5322#section-3.4.1", "# dtext = %d33-90 / ; Printable US-ASCII", "# %d94-126 / ; characters not", "# obs-dtext ; including [, ], or \\", "#", "# obs-dtext = obs-NO-WS-CTL / quoted-pair", "#", "# obs-NO-WS-CTL = %d1-8 / ; US-ASCII control", "# %d11 / ; characters that do", "# %d12 / ; not include the", "# %d14-31 / ; carriage return, line", "# %d127 ; feed, and white space", "# ; characters", "o", "=", "ord", "(", "token", ")", "# CR, LF, SP & HTAB have already been parsed above", "if", "o", ">", "127", "or", "o", "==", "0", "or", "token", "==", "Char", ".", "OPENSQBRACKET", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'EXPECTING_DTEXT'", ")", ")", "break", "elif", "o", "<", "33", "or", "o", "==", "127", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'DOMLIT_OBSDTEXT'", ")", ")", "parse_data", "[", "'literal'", "]", "+=", "token", "parse_data", "[", "Context", ".", "DOMAIN", "]", "+=", "token", "atom_list", "[", "Context", ".", "DOMAIN", "]", "[", "element_count", "]", "+=", "token", "element_len", "+=", 
"1", "# -------------------------------------------------------", "# Quoted string", "# -------------------------------------------------------", "elif", "context", "==", "Context", ".", "QUOTEDSTRING", ":", "# http://tools.ietf.org/html/rfc5322#section-3.2.4", "# quoted-string = [CFWS]", "# DQUOTE *([FWS] qcontent) [FWS] DQUOTE", "# [CFWS]", "#", "# qcontent = qtext / quoted-pair", "# Quoted pair", "if", "token", "==", "Char", ".", "BACKSLASH", ":", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "QUOTEDPAIR", "# Folding White Space (FWS)", "# Inside a quoted string, spaces are allow as regular", "# characters. It's only FWS if we include HTAB or CRLF", "elif", "token", "in", "[", "Char", ".", "CR", ",", "Char", ".", "HTAB", "]", ":", "# Skip simulates the use of ++ operator if the latter", "# check doesn't short-circuit", "if", "token", "==", "Char", ".", "CR", ":", "skip", "=", "True", "if", "(", "i", "+", "1", "==", "raw_length", "or", "to_char", "(", "address", "[", "i", "+", "1", "]", ")", "!=", "Char", ".", "LF", ")", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'CR_NO_LF'", ")", ")", "break", "# http://tools.ietf.org/html/rfc5322#section-3.2.2", "# Runs of FWS, comment, or CFWS that occur between", "# lexical tokens in a structured header field are", "# semantically interpreted as a single space", "# character.", "# http://tools.ietf.org/html/rfc5322#section-3.2.4", "# the CRLF in any FWS/CFWS that appears within the", "# quoted string [is] semantically \"invisible\" and", "# therefore not part of the quoted-string", "parse_data", "[", "Context", ".", "LOCALPART", "]", "+=", "Char", ".", "SP", "atom_list", "[", "Context", ".", "LOCALPART", "]", "[", "element_count", "]", "+=", "Char", ".", "SP", "element_len", "+=", "1", "return_status", ".", "append", "(", "CFWSDiagnosis", "(", "'FWS'", ")", ")", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "FWS", 
"token_prior", "=", "token", "# End of quoted string", "elif", "token", "==", "Char", ".", "DQUOTE", ":", "parse_data", "[", "Context", ".", "LOCALPART", "]", "+=", "token", "atom_list", "[", "Context", ".", "LOCALPART", "]", "[", "element_count", "]", "+=", "token", "element_len", "+=", "1", "context_prior", "=", "context", "context", "=", "context_stack", ".", "pop", "(", ")", "# qtext", "else", ":", "# http://tools.ietf.org/html/rfc5322#section-3.2.4", "# qtext = %d33 / ; Printable US-ASCII", "# %d35-91 / ; characters not", "# %d93-126 / ; including \"\\\" or", "# obs-qtext ; the quote", "# ; character", "#", "# obs-qtext = obs-NO-WS-CTL", "#", "# obs-NO-WS-CTL = %d1-8 / ; US-ASCII control", "# %d11 / ; characters that do", "# %d12 / ; not include the CR,", "# %d14-31 / ; LF, and white space", "# %d127 ; characters", "o", "=", "ord", "(", "token", ")", "if", "o", ">", "127", "or", "o", "==", "0", "or", "o", "==", "10", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'EXPECTING_QTEXT'", ")", ")", "elif", "o", "<", "32", "or", "o", "==", "127", ":", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'QTEXT'", ")", ")", "parse_data", "[", "Context", ".", "LOCALPART", "]", "+=", "token", "atom_list", "[", "Context", ".", "LOCALPART", "]", "[", "element_count", "]", "+=", "token", "element_len", "+=", "1", "# -------------------------------------------------------", "# Quoted pair", "# -------------------------------------------------------", "elif", "context", "==", "Context", ".", "QUOTEDPAIR", ":", "# http://tools.ietf.org/html/rfc5322#section-3.2.1", "# quoted-pair = (\"\\\" (VCHAR / WSP)) / obs-qp", "#", "# VCHAR = %d33-126 ; visible (printing)", "# ; characters", "#", "# WSP = SP / HTAB ; white space", "#", "# obs-qp = \"\\\" (%d0 / obs-NO-WS-CTL / LF / CR)", "#", "# obs-NO-WS-CTL = %d1-8 / ; US-ASCII control", "# %d11 / ; characters that do not", "# %d12 / ; include the carriage", "# %d14-31 / ; return, line 
feed, and", "# %d127 ; white space characters", "#", "# i.e. obs-qp = \"\\\" (%d0-8, %d10-31 / %d127)", "o", "=", "ord", "(", "token", ")", "if", "o", ">", "127", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'EXPECTING_QPAIR'", ")", ")", "elif", "(", "o", "<", "31", "and", "o", "!=", "9", ")", "or", "o", "==", "127", ":", "# SP & HTAB are allowed", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'QP'", ")", ")", "# At this point we know where this qpair occurred so", "# we could check to see if the character actually", "# needed to be quoted at all.", "# http://tools.ietf.org/html/rfc5321#section-4.1.2", "# the sending system SHOULD transmit the", "# form that uses the minimum quoting possible.", "context_prior", "=", "context", "context", "=", "context_stack", ".", "pop", "(", ")", "# End of qpair", "token", "=", "Char", ".", "BACKSLASH", "+", "token", "if", "context", "==", "Context", ".", "COMMENT", ":", "pass", "elif", "context", "==", "Context", ".", "QUOTEDSTRING", ":", "parse_data", "[", "Context", ".", "LOCALPART", "]", "+=", "token", "atom_list", "[", "Context", ".", "LOCALPART", "]", "[", "element_count", "]", "+=", "token", "# The maximum sizes specified by RFC 5321 are octet", "# counts, so we must include the backslash", "element_len", "+=", "2", "elif", "context", "==", "Context", ".", "LITERAL", ":", "parse_data", "[", "Context", ".", "DOMAIN", "]", "+=", "token", "atom_list", "[", "Context", ".", "DOMAIN", "]", "[", "element_count", "]", "+=", "token", "# The maximum sizes specified by RFC 5321 are octet", "# counts, so we must include the backslash", "element_len", "+=", "2", "else", ":", "# pragma: no cover", "if", "diagnose", ":", "return", "InvalidDiagnosis", "(", "'BAD_PARSE'", ")", "else", ":", "return", "False", "# -------------------------------------------------------", "# Comment", "# -------------------------------------------------------", "elif", "context", "==", "Context", ".", 
"COMMENT", ":", "# http://tools.ietf.org/html/rfc5322#section-3.2.2", "# comment = \"(\" *([FWS] ccontent) [FWS] \")\"", "#", "# ccontent = ctext / quoted-pair / comment", "# Nested comment", "if", "token", "==", "Char", ".", "OPENPARENTHESIS", ":", "# Nested comments are OK", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "COMMENT", "# End of comment", "elif", "token", "==", "Char", ".", "CLOSEPARENTHESIS", ":", "context_prior", "=", "context", "context", "=", "context_stack", ".", "pop", "(", ")", "# Quoted pair", "elif", "token", "==", "Char", ".", "BACKSLASH", ":", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "QUOTEDPAIR", "# Folding White Space (FWS)", "elif", "token", "in", "[", "Char", ".", "CR", ",", "Char", ".", "SP", ",", "Char", ".", "HTAB", "]", ":", "# Skip simulates the use of ++ operator if the latter", "# check doesn't short-circuit", "if", "token", "==", "Char", ".", "CR", ":", "skip", "=", "True", "if", "(", "i", "+", "1", "==", "raw_length", "or", "to_char", "(", "address", "[", "i", "+", "1", "]", ")", "!=", "Char", ".", "LF", ")", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'CR_NO_LF'", ")", ")", "break", "return_status", ".", "append", "(", "CFWSDiagnosis", "(", "'FWS'", ")", ")", "context_stack", ".", "append", "(", "context", ")", "context", "=", "Context", ".", "FWS", "token_prior", "=", "token", "# ctext", "else", ":", "# http://tools.ietf.org/html/rfc5322#section-3.2.3", "# ctext = %d33-39 / ; Printable US-", "# %d42-91 / ; ASCII characters", "# %d93-126 / ; not including", "# obs-ctext ; \"(\", \")\", or \"\\\"", "#", "# obs-ctext = obs-NO-WS-CTL", "#", "# obs-NO-WS-CTL = %d1-8 / ; US-ASCII control", "# %d11 / ; characters that", "# %d12 / ; do not include", "# %d14-31 / ; the CR, LF, and", "# ; white space", "# ; characters", "o", "=", "ord", "(", "token", ")", "if", "o", ">", "127", "or", "o", "==", "0", "or", "o", "==", "10", ":", "# 
Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'EXPECTING_CTEXT'", ")", ")", "break", "elif", "o", "<", "32", "or", "o", "==", "127", ":", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'CTEXT'", ")", ")", "# -------------------------------------------------------", "# Folding White Space (FWS)", "# -------------------------------------------------------", "elif", "context", "==", "Context", ".", "FWS", ":", "# http://tools.ietf.org/html/rfc5322#section-3.2.2", "# FWS = ([*WSP CRLF] 1*WSP) / obs-FWS", "# ; Folding white space", "#", "# But note the erratum:", "# http://www.rfc-editor.org/errata_search.php?rfc=5322&eid=1908", "# In the obsolete syntax, any amount of folding white", "# space MAY be inserted where the obs-FWS rule is", "# allowed. This creates the possibility of having two", "# consecutive \"folds\" in a line, and therefore the", "# possibility that a line which makes up a folded header", "# field could be composed entirely of white space.", "#", "# obs-FWS = 1*([CRLF] WSP)", "if", "token_prior", "==", "Char", ".", "CR", ":", "if", "token", "==", "Char", ".", "CR", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'FWS_CRLF_X2'", ")", ")", "break", "if", "crlf_count", "!=", "-", "1", ":", "crlf_count", "+=", "1", "if", "crlf_count", ">", "1", ":", "# Multiple folds = obsolete FWS", "return_status", ".", "append", "(", "DeprecatedDiagnosis", "(", "'FWS'", ")", ")", "else", ":", "crlf_count", "=", "1", "# Skip simulates the use of ++ operator if the latter", "# check doesn't short-circuit", "if", "token", "==", "Char", ".", "CR", ":", "skip", "=", "True", "if", "(", "i", "+", "1", "==", "raw_length", "or", "to_char", "(", "address", "[", "i", "+", "1", "]", ")", "!=", "Char", ".", "LF", ")", ":", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'CR_NO_LF'", ")", ")", "break", "elif", "token", "in", "[", "Char", ".", "SP", ",", "Char", ".", "HTAB", "]", ":", 
"pass", "else", ":", "if", "token_prior", "==", "Char", ".", "CR", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'FWS_CRLF_END'", ")", ")", "break", "if", "crlf_count", "!=", "-", "1", ":", "crlf_count", "=", "-", "1", "context_prior", "=", "context", "# End of FWS", "context", "=", "context_stack", ".", "pop", "(", ")", "# Look at this token again in the parent context", "repeat", "=", "True", "token_prior", "=", "token", "# -------------------------------------------------------", "# A context we aren't expecting", "# -------------------------------------------------------", "else", ":", "# pragma: no cover", "if", "diagnose", ":", "return", "InvalidDiagnosis", "(", "'BAD_PARSE'", ")", "else", ":", "return", "False", "# No point in going on if we've got a fatal error", "if", "max", "(", "return_status", ")", ">", "BaseDiagnosis", ".", "CATEGORIES", "[", "'RFC5322'", "]", ":", "break", "# Some simple final tests", "if", "max", "(", "return_status", ")", "<", "BaseDiagnosis", ".", "CATEGORIES", "[", "'RFC5322'", "]", ":", "if", "context", "==", "Context", ".", "QUOTEDSTRING", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'UNCLOSEDQUOTEDSTR'", ")", ")", "elif", "context", "==", "Context", ".", "QUOTEDPAIR", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'BACKSLASHEND'", ")", ")", "elif", "context", "==", "Context", ".", "COMMENT", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'UNCLOSEDCOMMENT'", ")", ")", "elif", "context", "==", "Context", ".", "LITERAL", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'UNCLOSEDDOMLIT'", ")", ")", "elif", "token", "==", "Char", ".", "CR", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'FWS_CRLF_END'", ")", ")", "elif", "parse_data", "[", "Context", ".", "DOMAIN", "]", "==", "''", ":", "# Fatal error", 
"return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'NODOMAIN'", ")", ")", "elif", "element_len", "==", "0", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'DOT_END'", ")", ")", "elif", "hyphen_flag", ":", "# Fatal error", "return_status", ".", "append", "(", "InvalidDiagnosis", "(", "'DOMAINHYPHENEND'", ")", ")", "# http://tools.ietf.org/html/rfc5321#section-4.5.3.1.2", "# The maximum total length of a domain name or number is 255 octets", "elif", "len", "(", "parse_data", "[", "Context", ".", "DOMAIN", "]", ")", ">", "255", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'DOMAIN_TOOLONG'", ")", ")", "# http://tools.ietf.org/html/rfc5321#section-4.1.2", "# Forward-path = Path", "#", "# Path = \"<\" [ A-d-l \":\" ] Mailbox \">\"", "#", "# http://tools.ietf.org/html/rfc5321#section-4.5.3.1.3", "# The maximum total length of a reverse-path or forward-path is", "# 256 octets (including the punctuation and element separators).", "#", "# Thus, even without (obsolete) routing information, the Mailbox", "# can only be 254 characters long. This is confirmed by this", "# verified erratum to RFC 3696:", "#", "# http://www.rfc-editor.org/errata_search.php?rfc=3696&eid=1690", "# However, there is a restriction in RFC 2821 on the length of an", "# address in MAIL and RCPT commands of 254 characters. 
Since", "# addresses that do not fit in those fields are not normally", "# useful, the upper limit on address lengths should normally be", "# considered to be 254.", "elif", "len", "(", "parse_data", "[", "Context", ".", "LOCALPART", "]", "+", "Char", ".", "AT", "+", "parse_data", "[", "Context", ".", "DOMAIN", "]", ")", ">", "254", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'TOOLONG'", ")", ")", "# http://tools.ietf.org/html/rfc1035#section-2.3.4", "# labels 63 octets or less", "elif", "element_len", ">", "63", ":", "return_status", ".", "append", "(", "RFC5322Diagnosis", "(", "'LABEL_TOOLONG'", ")", ")", "return_status", "=", "list", "(", "set", "(", "return_status", ")", ")", "final_status", "=", "max", "(", "return_status", ")", "if", "len", "(", "return_status", ")", "!=", "1", ":", "# Remove redundant ValidDiagnosis", "return_status", ".", "pop", "(", "0", ")", "parse_data", "[", "'status'", "]", "=", "return_status", "if", "final_status", "<", "threshold", ":", "final_status", "=", "ValidDiagnosis", "(", ")", "if", "diagnose", ":", "return", "final_status", "else", ":", "return", "final_status", "<", "BaseDiagnosis", ".", "CATEGORIES", "[", "'THRESHOLD'", "]" ]
Check that an address address conforms to RFCs 5321, 5322 and others. More specifically, see the follow RFCs: * http://tools.ietf.org/html/rfc5321 * http://tools.ietf.org/html/rfc5322 * http://tools.ietf.org/html/rfc4291#section-2.2 * http://tools.ietf.org/html/rfc1123#section-2.1 * http://tools.ietf.org/html/rfc3696) (guidance only) Keyword arguments: address -- address to check. diagnose -- flag to report a diagnosis or a boolean (default False)
[ "Check", "that", "an", "address", "address", "conforms", "to", "RFCs", "5321", "5322", "and", "others", "." ]
dd42d6425c59e5061fc214d42672210dccc64cf5
https://github.com/michaelherold/pyIsEmail/blob/dd42d6425c59e5061fc214d42672210dccc64cf5/pyisemail/validators/parser_validator.py#L65-L1127
train
NeuroanatomyAndConnectivity/surfdist
surfdist/load.py
load_freesurfer_label
def load_freesurfer_label(annot_input, label_name, cortex=None): """ Get source node list for a specified freesurfer label. Inputs ------- annot_input : freesurfer annotation label file label_name : freesurfer label name cortex : not used """ if cortex is not None: print("Warning: cortex is not used to load the freesurfer label") labels, color_table, names = nib.freesurfer.read_annot(annot_input) names = [i.decode('utf-8') for i in names] label_value = names.index(label_name) label_nodes = np.array(np.where(np.in1d(labels, label_value)), dtype=np.int32) return label_nodes
python
def load_freesurfer_label(annot_input, label_name, cortex=None): """ Get source node list for a specified freesurfer label. Inputs ------- annot_input : freesurfer annotation label file label_name : freesurfer label name cortex : not used """ if cortex is not None: print("Warning: cortex is not used to load the freesurfer label") labels, color_table, names = nib.freesurfer.read_annot(annot_input) names = [i.decode('utf-8') for i in names] label_value = names.index(label_name) label_nodes = np.array(np.where(np.in1d(labels, label_value)), dtype=np.int32) return label_nodes
[ "def", "load_freesurfer_label", "(", "annot_input", ",", "label_name", ",", "cortex", "=", "None", ")", ":", "if", "cortex", "is", "not", "None", ":", "print", "(", "\"Warning: cortex is not used to load the freesurfer label\"", ")", "labels", ",", "color_table", ",", "names", "=", "nib", ".", "freesurfer", ".", "read_annot", "(", "annot_input", ")", "names", "=", "[", "i", ".", "decode", "(", "'utf-8'", ")", "for", "i", "in", "names", "]", "label_value", "=", "names", ".", "index", "(", "label_name", ")", "label_nodes", "=", "np", ".", "array", "(", "np", ".", "where", "(", "np", ".", "in1d", "(", "labels", ",", "label_value", ")", ")", ",", "dtype", "=", "np", ".", "int32", ")", "return", "label_nodes" ]
Get source node list for a specified freesurfer label. Inputs ------- annot_input : freesurfer annotation label file label_name : freesurfer label name cortex : not used
[ "Get", "source", "node", "list", "for", "a", "specified", "freesurfer", "label", "." ]
849fdfbb2822ff1aa530a3b0bc955a4312e3edf1
https://github.com/NeuroanatomyAndConnectivity/surfdist/blob/849fdfbb2822ff1aa530a3b0bc955a4312e3edf1/surfdist/load.py#L5-L24
train
NeuroanatomyAndConnectivity/surfdist
surfdist/load.py
get_freesurfer_label
def get_freesurfer_label(annot_input, verbose = True): """ Print freesurfer label names. """ labels, color_table, names = nib.freesurfer.read_annot(annot_input) if verbose: print(names) return names
python
def get_freesurfer_label(annot_input, verbose = True): """ Print freesurfer label names. """ labels, color_table, names = nib.freesurfer.read_annot(annot_input) if verbose: print(names) return names
[ "def", "get_freesurfer_label", "(", "annot_input", ",", "verbose", "=", "True", ")", ":", "labels", ",", "color_table", ",", "names", "=", "nib", ".", "freesurfer", ".", "read_annot", "(", "annot_input", ")", "if", "verbose", ":", "print", "(", "names", ")", "return", "names" ]
Print freesurfer label names.
[ "Print", "freesurfer", "label", "names", "." ]
849fdfbb2822ff1aa530a3b0bc955a4312e3edf1
https://github.com/NeuroanatomyAndConnectivity/surfdist/blob/849fdfbb2822ff1aa530a3b0bc955a4312e3edf1/surfdist/load.py#L27-L34
train
NeuroanatomyAndConnectivity/surfdist
surfdist/viz.py
viz
def viz(coords, faces, stat_map=None, elev=0, azim=0, cmap='coolwarm', threshold=None, alpha='auto', bg_map=None, bg_on_stat=False, figsize=None, **kwargs): ''' Visualize results on cortical surface using matplotlib. Inputs ------- coords : numpy array of shape (n_nodes,3), each row specifying the x,y,z coordinates of one node of surface mesh faces : numpy array of shape (n_faces, 3), each row specifying the indices of the three nodes building one node of the surface mesh stat_map : numpy array of shape (n_nodes,) containing the values to be visualized for each node. elev, azim : integers, elevation and azimuth parameters specifying the view on the 3D plot. For Freesurfer surfaces elev=0, azim=0 will give a lateral view for the right and a medial view for the left hemisphere, elev=0, azim=180 will give a medial view for the right and lateral view for the left hemisphere. cmap : Matplotlib colormap, the color range will me forced to be symmetric. Colormaps can be specified as string or colormap object. threshold : float, threshold to be applied to the map, will be applied in positive and negative direction, i.e. values < -abs(threshold) and > abs(threshold) will be shown. alpha : float, determines the opacity of the background mesh, in'auto' mode alpha defaults to .5 when no background map is given, to 1 otherwise. bg_map : numpy array of shape (n_nodes,) to be plotted underneath the statistical map. Specifying a sulcal depth map as bg_map results in realistic shadowing of the surface. bg_on_stat : boolean, specifies whether the statistical map should be multiplied with the background map for shadowing. Otherwise, only areas that are not covered by the statsitical map after thresholding will show shadows. figsize : tuple of intergers, dimensions of the figure that is produced. 
Output ------ Matplotlib figure object ''' import numpy as np import matplotlib.pyplot as plt import matplotlib.tri as tri from mpl_toolkits.mplot3d import Axes3D # load mesh and derive axes limits faces = np.array(faces, dtype=int) limits = [coords.min(), coords.max()] # set alpha if in auto mode if alpha == 'auto': if bg_map is None: alpha = .5 else: alpha = 1 # if cmap is given as string, translate to matplotlib cmap if type(cmap) == str: cmap = plt.cm.get_cmap(cmap) # initiate figure and 3d axes if figsize is not None: fig = plt.figure(figsize=figsize) else: fig = plt.figure() ax = fig.add_subplot(111, projection='3d', xlim=limits, ylim=limits) ax.view_init(elev=elev, azim=azim) ax.set_axis_off() # plot mesh without data p3dcollec = ax.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2], triangles=faces, linewidth=0., antialiased=False, color='white') # If depth_map and/or stat_map are provided, map these onto the surface # set_facecolors function of Poly3DCollection is used as passing the # facecolors argument to plot_trisurf does not seem to work if bg_map is not None or stat_map is not None: face_colors = np.ones((faces.shape[0], 4)) face_colors[:, :3] = .5*face_colors[:, :3] if bg_map is not None: bg_data = bg_map if bg_data.shape[0] != coords.shape[0]: raise ValueError('The bg_map does not have the same number ' 'of vertices as the mesh.') bg_faces = np.mean(bg_data[faces], axis=1) bg_faces = bg_faces - bg_faces.min() bg_faces = bg_faces / bg_faces.max() face_colors = plt.cm.gray_r(bg_faces) # modify alpha values of background face_colors[:, 3] = alpha*face_colors[:, 3] if stat_map is not None: stat_map_data = stat_map stat_map_faces = np.mean(stat_map_data[faces], axis=1) # Ensure symmetric colour range, based on Nilearn helper function: # https://github.com/nilearn/nilearn/blob/master/nilearn/plotting/img_plotting.py#L52 vmax = max(-np.nanmin(stat_map_faces), np.nanmax(stat_map_faces)) vmin = -vmax if threshold is not None: kept_indices = 
np.where(abs(stat_map_faces) >= threshold)[0] stat_map_faces = stat_map_faces - vmin stat_map_faces = stat_map_faces / (vmax-vmin) if bg_on_stat: face_colors[kept_indices] = cmap(stat_map_faces[kept_indices]) * face_colors[kept_indices] else: face_colors[kept_indices] = cmap(stat_map_faces[kept_indices]) else: stat_map_faces = stat_map_faces - vmin stat_map_faces = stat_map_faces / (vmax-vmin) if bg_on_stat: face_colors = cmap(stat_map_faces) * face_colors else: face_colors = cmap(stat_map_faces) p3dcollec.set_facecolors(face_colors) return fig, ax
python
def viz(coords, faces, stat_map=None, elev=0, azim=0, cmap='coolwarm', threshold=None, alpha='auto', bg_map=None, bg_on_stat=False, figsize=None, **kwargs): ''' Visualize results on cortical surface using matplotlib. Inputs ------- coords : numpy array of shape (n_nodes,3), each row specifying the x,y,z coordinates of one node of surface mesh faces : numpy array of shape (n_faces, 3), each row specifying the indices of the three nodes building one node of the surface mesh stat_map : numpy array of shape (n_nodes,) containing the values to be visualized for each node. elev, azim : integers, elevation and azimuth parameters specifying the view on the 3D plot. For Freesurfer surfaces elev=0, azim=0 will give a lateral view for the right and a medial view for the left hemisphere, elev=0, azim=180 will give a medial view for the right and lateral view for the left hemisphere. cmap : Matplotlib colormap, the color range will me forced to be symmetric. Colormaps can be specified as string or colormap object. threshold : float, threshold to be applied to the map, will be applied in positive and negative direction, i.e. values < -abs(threshold) and > abs(threshold) will be shown. alpha : float, determines the opacity of the background mesh, in'auto' mode alpha defaults to .5 when no background map is given, to 1 otherwise. bg_map : numpy array of shape (n_nodes,) to be plotted underneath the statistical map. Specifying a sulcal depth map as bg_map results in realistic shadowing of the surface. bg_on_stat : boolean, specifies whether the statistical map should be multiplied with the background map for shadowing. Otherwise, only areas that are not covered by the statsitical map after thresholding will show shadows. figsize : tuple of intergers, dimensions of the figure that is produced. 
Output ------ Matplotlib figure object ''' import numpy as np import matplotlib.pyplot as plt import matplotlib.tri as tri from mpl_toolkits.mplot3d import Axes3D # load mesh and derive axes limits faces = np.array(faces, dtype=int) limits = [coords.min(), coords.max()] # set alpha if in auto mode if alpha == 'auto': if bg_map is None: alpha = .5 else: alpha = 1 # if cmap is given as string, translate to matplotlib cmap if type(cmap) == str: cmap = plt.cm.get_cmap(cmap) # initiate figure and 3d axes if figsize is not None: fig = plt.figure(figsize=figsize) else: fig = plt.figure() ax = fig.add_subplot(111, projection='3d', xlim=limits, ylim=limits) ax.view_init(elev=elev, azim=azim) ax.set_axis_off() # plot mesh without data p3dcollec = ax.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2], triangles=faces, linewidth=0., antialiased=False, color='white') # If depth_map and/or stat_map are provided, map these onto the surface # set_facecolors function of Poly3DCollection is used as passing the # facecolors argument to plot_trisurf does not seem to work if bg_map is not None or stat_map is not None: face_colors = np.ones((faces.shape[0], 4)) face_colors[:, :3] = .5*face_colors[:, :3] if bg_map is not None: bg_data = bg_map if bg_data.shape[0] != coords.shape[0]: raise ValueError('The bg_map does not have the same number ' 'of vertices as the mesh.') bg_faces = np.mean(bg_data[faces], axis=1) bg_faces = bg_faces - bg_faces.min() bg_faces = bg_faces / bg_faces.max() face_colors = plt.cm.gray_r(bg_faces) # modify alpha values of background face_colors[:, 3] = alpha*face_colors[:, 3] if stat_map is not None: stat_map_data = stat_map stat_map_faces = np.mean(stat_map_data[faces], axis=1) # Ensure symmetric colour range, based on Nilearn helper function: # https://github.com/nilearn/nilearn/blob/master/nilearn/plotting/img_plotting.py#L52 vmax = max(-np.nanmin(stat_map_faces), np.nanmax(stat_map_faces)) vmin = -vmax if threshold is not None: kept_indices = 
np.where(abs(stat_map_faces) >= threshold)[0] stat_map_faces = stat_map_faces - vmin stat_map_faces = stat_map_faces / (vmax-vmin) if bg_on_stat: face_colors[kept_indices] = cmap(stat_map_faces[kept_indices]) * face_colors[kept_indices] else: face_colors[kept_indices] = cmap(stat_map_faces[kept_indices]) else: stat_map_faces = stat_map_faces - vmin stat_map_faces = stat_map_faces / (vmax-vmin) if bg_on_stat: face_colors = cmap(stat_map_faces) * face_colors else: face_colors = cmap(stat_map_faces) p3dcollec.set_facecolors(face_colors) return fig, ax
[ "def", "viz", "(", "coords", ",", "faces", ",", "stat_map", "=", "None", ",", "elev", "=", "0", ",", "azim", "=", "0", ",", "cmap", "=", "'coolwarm'", ",", "threshold", "=", "None", ",", "alpha", "=", "'auto'", ",", "bg_map", "=", "None", ",", "bg_on_stat", "=", "False", ",", "figsize", "=", "None", ",", "*", "*", "kwargs", ")", ":", "import", "numpy", "as", "np", "import", "matplotlib", ".", "pyplot", "as", "plt", "import", "matplotlib", ".", "tri", "as", "tri", "from", "mpl_toolkits", ".", "mplot3d", "import", "Axes3D", "# load mesh and derive axes limits", "faces", "=", "np", ".", "array", "(", "faces", ",", "dtype", "=", "int", ")", "limits", "=", "[", "coords", ".", "min", "(", ")", ",", "coords", ".", "max", "(", ")", "]", "# set alpha if in auto mode", "if", "alpha", "==", "'auto'", ":", "if", "bg_map", "is", "None", ":", "alpha", "=", ".5", "else", ":", "alpha", "=", "1", "# if cmap is given as string, translate to matplotlib cmap", "if", "type", "(", "cmap", ")", "==", "str", ":", "cmap", "=", "plt", ".", "cm", ".", "get_cmap", "(", "cmap", ")", "# initiate figure and 3d axes", "if", "figsize", "is", "not", "None", ":", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "figsize", ")", "else", ":", "fig", "=", "plt", ".", "figure", "(", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ",", "projection", "=", "'3d'", ",", "xlim", "=", "limits", ",", "ylim", "=", "limits", ")", "ax", ".", "view_init", "(", "elev", "=", "elev", ",", "azim", "=", "azim", ")", "ax", ".", "set_axis_off", "(", ")", "# plot mesh without data", "p3dcollec", "=", "ax", ".", "plot_trisurf", "(", "coords", "[", ":", ",", "0", "]", ",", "coords", "[", ":", ",", "1", "]", ",", "coords", "[", ":", ",", "2", "]", ",", "triangles", "=", "faces", ",", "linewidth", "=", "0.", ",", "antialiased", "=", "False", ",", "color", "=", "'white'", ")", "# If depth_map and/or stat_map are provided, map these onto the surface", "# set_facecolors function of Poly3DCollection is used as 
passing the", "# facecolors argument to plot_trisurf does not seem to work", "if", "bg_map", "is", "not", "None", "or", "stat_map", "is", "not", "None", ":", "face_colors", "=", "np", ".", "ones", "(", "(", "faces", ".", "shape", "[", "0", "]", ",", "4", ")", ")", "face_colors", "[", ":", ",", ":", "3", "]", "=", ".5", "*", "face_colors", "[", ":", ",", ":", "3", "]", "if", "bg_map", "is", "not", "None", ":", "bg_data", "=", "bg_map", "if", "bg_data", ".", "shape", "[", "0", "]", "!=", "coords", ".", "shape", "[", "0", "]", ":", "raise", "ValueError", "(", "'The bg_map does not have the same number '", "'of vertices as the mesh.'", ")", "bg_faces", "=", "np", ".", "mean", "(", "bg_data", "[", "faces", "]", ",", "axis", "=", "1", ")", "bg_faces", "=", "bg_faces", "-", "bg_faces", ".", "min", "(", ")", "bg_faces", "=", "bg_faces", "/", "bg_faces", ".", "max", "(", ")", "face_colors", "=", "plt", ".", "cm", ".", "gray_r", "(", "bg_faces", ")", "# modify alpha values of background", "face_colors", "[", ":", ",", "3", "]", "=", "alpha", "*", "face_colors", "[", ":", ",", "3", "]", "if", "stat_map", "is", "not", "None", ":", "stat_map_data", "=", "stat_map", "stat_map_faces", "=", "np", ".", "mean", "(", "stat_map_data", "[", "faces", "]", ",", "axis", "=", "1", ")", "# Ensure symmetric colour range, based on Nilearn helper function:", "# https://github.com/nilearn/nilearn/blob/master/nilearn/plotting/img_plotting.py#L52", "vmax", "=", "max", "(", "-", "np", ".", "nanmin", "(", "stat_map_faces", ")", ",", "np", ".", "nanmax", "(", "stat_map_faces", ")", ")", "vmin", "=", "-", "vmax", "if", "threshold", "is", "not", "None", ":", "kept_indices", "=", "np", ".", "where", "(", "abs", "(", "stat_map_faces", ")", ">=", "threshold", ")", "[", "0", "]", "stat_map_faces", "=", "stat_map_faces", "-", "vmin", "stat_map_faces", "=", "stat_map_faces", "/", "(", "vmax", "-", "vmin", ")", "if", "bg_on_stat", ":", "face_colors", "[", "kept_indices", "]", "=", "cmap", "(", 
"stat_map_faces", "[", "kept_indices", "]", ")", "*", "face_colors", "[", "kept_indices", "]", "else", ":", "face_colors", "[", "kept_indices", "]", "=", "cmap", "(", "stat_map_faces", "[", "kept_indices", "]", ")", "else", ":", "stat_map_faces", "=", "stat_map_faces", "-", "vmin", "stat_map_faces", "=", "stat_map_faces", "/", "(", "vmax", "-", "vmin", ")", "if", "bg_on_stat", ":", "face_colors", "=", "cmap", "(", "stat_map_faces", ")", "*", "face_colors", "else", ":", "face_colors", "=", "cmap", "(", "stat_map_faces", ")", "p3dcollec", ".", "set_facecolors", "(", "face_colors", ")", "return", "fig", ",", "ax" ]
Visualize results on cortical surface using matplotlib. Inputs ------- coords : numpy array of shape (n_nodes,3), each row specifying the x,y,z coordinates of one node of surface mesh faces : numpy array of shape (n_faces, 3), each row specifying the indices of the three nodes building one node of the surface mesh stat_map : numpy array of shape (n_nodes,) containing the values to be visualized for each node. elev, azim : integers, elevation and azimuth parameters specifying the view on the 3D plot. For Freesurfer surfaces elev=0, azim=0 will give a lateral view for the right and a medial view for the left hemisphere, elev=0, azim=180 will give a medial view for the right and lateral view for the left hemisphere. cmap : Matplotlib colormap, the color range will me forced to be symmetric. Colormaps can be specified as string or colormap object. threshold : float, threshold to be applied to the map, will be applied in positive and negative direction, i.e. values < -abs(threshold) and > abs(threshold) will be shown. alpha : float, determines the opacity of the background mesh, in'auto' mode alpha defaults to .5 when no background map is given, to 1 otherwise. bg_map : numpy array of shape (n_nodes,) to be plotted underneath the statistical map. Specifying a sulcal depth map as bg_map results in realistic shadowing of the surface. bg_on_stat : boolean, specifies whether the statistical map should be multiplied with the background map for shadowing. Otherwise, only areas that are not covered by the statsitical map after thresholding will show shadows. figsize : tuple of intergers, dimensions of the figure that is produced. Output ------ Matplotlib figure object
[ "Visualize", "results", "on", "cortical", "surface", "using", "matplotlib", "." ]
849fdfbb2822ff1aa530a3b0bc955a4312e3edf1
https://github.com/NeuroanatomyAndConnectivity/surfdist/blob/849fdfbb2822ff1aa530a3b0bc955a4312e3edf1/surfdist/viz.py#L1-L128
train
NeuroanatomyAndConnectivity/surfdist
surfdist/utils.py
surf_keep_cortex
def surf_keep_cortex(surf, cortex): """ Remove medial wall from cortical surface to ensure that shortest paths are only calculated through the cortex. Inputs ------- surf : Tuple containing two numpy arrays of shape (n_nodes,3). Each node of the first array specifies the x, y, z coordinates one node of the surface mesh. Each node of the second array specifies the indices of the three nodes building one triangle of the surface mesh. (e.g. the output from nibabel.freesurfer.io.read_geometry) cortex : Array with indices of vertices included in within the cortex. (e.g. the output from nibabel.freesurfer.io.read_label) """ # split surface into vertices and triangles vertices, triangles = surf # keep only the vertices within the cortex label cortex_vertices = np.array(vertices[cortex], dtype=np.float64) # keep only the triangles within the cortex label cortex_triangles = triangles_keep_cortex(triangles, cortex) return cortex_vertices, cortex_triangles
python
def surf_keep_cortex(surf, cortex): """ Remove medial wall from cortical surface to ensure that shortest paths are only calculated through the cortex. Inputs ------- surf : Tuple containing two numpy arrays of shape (n_nodes,3). Each node of the first array specifies the x, y, z coordinates one node of the surface mesh. Each node of the second array specifies the indices of the three nodes building one triangle of the surface mesh. (e.g. the output from nibabel.freesurfer.io.read_geometry) cortex : Array with indices of vertices included in within the cortex. (e.g. the output from nibabel.freesurfer.io.read_label) """ # split surface into vertices and triangles vertices, triangles = surf # keep only the vertices within the cortex label cortex_vertices = np.array(vertices[cortex], dtype=np.float64) # keep only the triangles within the cortex label cortex_triangles = triangles_keep_cortex(triangles, cortex) return cortex_vertices, cortex_triangles
[ "def", "surf_keep_cortex", "(", "surf", ",", "cortex", ")", ":", "# split surface into vertices and triangles", "vertices", ",", "triangles", "=", "surf", "# keep only the vertices within the cortex label", "cortex_vertices", "=", "np", ".", "array", "(", "vertices", "[", "cortex", "]", ",", "dtype", "=", "np", ".", "float64", ")", "# keep only the triangles within the cortex label", "cortex_triangles", "=", "triangles_keep_cortex", "(", "triangles", ",", "cortex", ")", "return", "cortex_vertices", ",", "cortex_triangles" ]
Remove medial wall from cortical surface to ensure that shortest paths are only calculated through the cortex. Inputs ------- surf : Tuple containing two numpy arrays of shape (n_nodes,3). Each node of the first array specifies the x, y, z coordinates one node of the surface mesh. Each node of the second array specifies the indices of the three nodes building one triangle of the surface mesh. (e.g. the output from nibabel.freesurfer.io.read_geometry) cortex : Array with indices of vertices included in within the cortex. (e.g. the output from nibabel.freesurfer.io.read_label)
[ "Remove", "medial", "wall", "from", "cortical", "surface", "to", "ensure", "that", "shortest", "paths", "are", "only", "calculated", "through", "the", "cortex", "." ]
849fdfbb2822ff1aa530a3b0bc955a4312e3edf1
https://github.com/NeuroanatomyAndConnectivity/surfdist/blob/849fdfbb2822ff1aa530a3b0bc955a4312e3edf1/surfdist/utils.py#L5-L28
train
NeuroanatomyAndConnectivity/surfdist
surfdist/utils.py
triangles_keep_cortex
def triangles_keep_cortex(triangles, cortex): """ Remove triangles with nodes not contained in the cortex label array """ # for or each face/triangle keep only those that only contain nodes within the list of cortex nodes input_shape = triangles.shape triangle_is_in_cortex = np.all(np.reshape(np.in1d(triangles.ravel(), cortex), input_shape), axis=1) cortex_triangles_old = np.array(triangles[triangle_is_in_cortex], dtype=np.int32) # reassign node index before outputting triangles new_index = np.digitize(cortex_triangles_old.ravel(), cortex, right=True) cortex_triangles = np.array(np.arange(len(cortex))[new_index].reshape(cortex_triangles_old.shape), dtype=np.int32) return cortex_triangles
python
def triangles_keep_cortex(triangles, cortex): """ Remove triangles with nodes not contained in the cortex label array """ # for or each face/triangle keep only those that only contain nodes within the list of cortex nodes input_shape = triangles.shape triangle_is_in_cortex = np.all(np.reshape(np.in1d(triangles.ravel(), cortex), input_shape), axis=1) cortex_triangles_old = np.array(triangles[triangle_is_in_cortex], dtype=np.int32) # reassign node index before outputting triangles new_index = np.digitize(cortex_triangles_old.ravel(), cortex, right=True) cortex_triangles = np.array(np.arange(len(cortex))[new_index].reshape(cortex_triangles_old.shape), dtype=np.int32) return cortex_triangles
[ "def", "triangles_keep_cortex", "(", "triangles", ",", "cortex", ")", ":", "# for or each face/triangle keep only those that only contain nodes within the list of cortex nodes", "input_shape", "=", "triangles", ".", "shape", "triangle_is_in_cortex", "=", "np", ".", "all", "(", "np", ".", "reshape", "(", "np", ".", "in1d", "(", "triangles", ".", "ravel", "(", ")", ",", "cortex", ")", ",", "input_shape", ")", ",", "axis", "=", "1", ")", "cortex_triangles_old", "=", "np", ".", "array", "(", "triangles", "[", "triangle_is_in_cortex", "]", ",", "dtype", "=", "np", ".", "int32", ")", "# reassign node index before outputting triangles", "new_index", "=", "np", ".", "digitize", "(", "cortex_triangles_old", ".", "ravel", "(", ")", ",", "cortex", ",", "right", "=", "True", ")", "cortex_triangles", "=", "np", ".", "array", "(", "np", ".", "arange", "(", "len", "(", "cortex", ")", ")", "[", "new_index", "]", ".", "reshape", "(", "cortex_triangles_old", ".", "shape", ")", ",", "dtype", "=", "np", ".", "int32", ")", "return", "cortex_triangles" ]
Remove triangles with nodes not contained in the cortex label array
[ "Remove", "triangles", "with", "nodes", "not", "contained", "in", "the", "cortex", "label", "array" ]
849fdfbb2822ff1aa530a3b0bc955a4312e3edf1
https://github.com/NeuroanatomyAndConnectivity/surfdist/blob/849fdfbb2822ff1aa530a3b0bc955a4312e3edf1/surfdist/utils.py#L31-L46
train
NeuroanatomyAndConnectivity/surfdist
surfdist/utils.py
translate_src
def translate_src(src, cortex): """ Convert source nodes to new surface (without medial wall). """ src_new = np.array(np.where(np.in1d(cortex, src))[0], dtype=np.int32) return src_new
python
def translate_src(src, cortex): """ Convert source nodes to new surface (without medial wall). """ src_new = np.array(np.where(np.in1d(cortex, src))[0], dtype=np.int32) return src_new
[ "def", "translate_src", "(", "src", ",", "cortex", ")", ":", "src_new", "=", "np", ".", "array", "(", "np", ".", "where", "(", "np", ".", "in1d", "(", "cortex", ",", "src", ")", ")", "[", "0", "]", ",", "dtype", "=", "np", ".", "int32", ")", "return", "src_new" ]
Convert source nodes to new surface (without medial wall).
[ "Convert", "source", "nodes", "to", "new", "surface", "(", "without", "medial", "wall", ")", "." ]
849fdfbb2822ff1aa530a3b0bc955a4312e3edf1
https://github.com/NeuroanatomyAndConnectivity/surfdist/blob/849fdfbb2822ff1aa530a3b0bc955a4312e3edf1/surfdist/utils.py#L49-L55
train
NeuroanatomyAndConnectivity/surfdist
surfdist/utils.py
recort
def recort(input_data, surf, cortex): """ Return data values to space of full cortex (including medial wall), with medial wall equal to zero. """ data = np.zeros(len(surf[0])) data[cortex] = input_data return data
python
def recort(input_data, surf, cortex): """ Return data values to space of full cortex (including medial wall), with medial wall equal to zero. """ data = np.zeros(len(surf[0])) data[cortex] = input_data return data
[ "def", "recort", "(", "input_data", ",", "surf", ",", "cortex", ")", ":", "data", "=", "np", ".", "zeros", "(", "len", "(", "surf", "[", "0", "]", ")", ")", "data", "[", "cortex", "]", "=", "input_data", "return", "data" ]
Return data values to space of full cortex (including medial wall), with medial wall equal to zero.
[ "Return", "data", "values", "to", "space", "of", "full", "cortex", "(", "including", "medial", "wall", ")", "with", "medial", "wall", "equal", "to", "zero", "." ]
849fdfbb2822ff1aa530a3b0bc955a4312e3edf1
https://github.com/NeuroanatomyAndConnectivity/surfdist/blob/849fdfbb2822ff1aa530a3b0bc955a4312e3edf1/surfdist/utils.py#L57-L63
train
NeuroanatomyAndConnectivity/surfdist
surfdist/utils.py
find_node_match
def find_node_match(simple_vertices, complex_vertices): """ Thanks to juhuntenburg. Functions taken from https://github.com/juhuntenburg/brainsurfacescripts Finds those points on the complex mesh that correspond best to the simple mesh while forcing a one-to-one mapping. """ import scipy.spatial # make array for writing in final voronoi seed indices voronoi_seed_idx = np.zeros((simple_vertices.shape[0],), dtype='int64')-1 missing = np.where(voronoi_seed_idx == -1)[0].shape[0] mapping_single = np.zeros_like(voronoi_seed_idx) neighbours = 0 col = 0 while missing != 0: neighbours += 100 # find nearest neighbours inaccuracy, mapping = scipy.spatial.KDTree( complex_vertices).query(simple_vertices, k=neighbours) # go through columns of nearest neighbours until unique mapping is # achieved, if not before end of neighbours, extend number of # neighbours while col < neighbours: # find all missing voronoi seed indices missing_idx = np.where(voronoi_seed_idx == -1)[0] missing = missing_idx.shape[0] if missing == 0: break else: # for missing entries fill in next neighbour mapping_single[missing_idx] = np.copy( mapping[missing_idx, col]) # find unique values in mapping_single unique, double_idx = np.unique( mapping_single, return_inverse=True) # empty voronoi seed index voronoi_seed_idx = np.zeros( (simple_vertices.shape[0],), dtype='int64')-1 # fill voronoi seed idx with unique values for u in range(unique.shape[0]): # find the indices of this value in mapping entries = np.where(double_idx == u)[0] # set the first entry to the value voronoi_seed_idx[entries[0]] = unique[u] # go to next column col += 1 return voronoi_seed_idx, inaccuracy
python
def find_node_match(simple_vertices, complex_vertices): """ Thanks to juhuntenburg. Functions taken from https://github.com/juhuntenburg/brainsurfacescripts Finds those points on the complex mesh that correspond best to the simple mesh while forcing a one-to-one mapping. """ import scipy.spatial # make array for writing in final voronoi seed indices voronoi_seed_idx = np.zeros((simple_vertices.shape[0],), dtype='int64')-1 missing = np.where(voronoi_seed_idx == -1)[0].shape[0] mapping_single = np.zeros_like(voronoi_seed_idx) neighbours = 0 col = 0 while missing != 0: neighbours += 100 # find nearest neighbours inaccuracy, mapping = scipy.spatial.KDTree( complex_vertices).query(simple_vertices, k=neighbours) # go through columns of nearest neighbours until unique mapping is # achieved, if not before end of neighbours, extend number of # neighbours while col < neighbours: # find all missing voronoi seed indices missing_idx = np.where(voronoi_seed_idx == -1)[0] missing = missing_idx.shape[0] if missing == 0: break else: # for missing entries fill in next neighbour mapping_single[missing_idx] = np.copy( mapping[missing_idx, col]) # find unique values in mapping_single unique, double_idx = np.unique( mapping_single, return_inverse=True) # empty voronoi seed index voronoi_seed_idx = np.zeros( (simple_vertices.shape[0],), dtype='int64')-1 # fill voronoi seed idx with unique values for u in range(unique.shape[0]): # find the indices of this value in mapping entries = np.where(double_idx == u)[0] # set the first entry to the value voronoi_seed_idx[entries[0]] = unique[u] # go to next column col += 1 return voronoi_seed_idx, inaccuracy
[ "def", "find_node_match", "(", "simple_vertices", ",", "complex_vertices", ")", ":", "import", "scipy", ".", "spatial", "# make array for writing in final voronoi seed indices", "voronoi_seed_idx", "=", "np", ".", "zeros", "(", "(", "simple_vertices", ".", "shape", "[", "0", "]", ",", ")", ",", "dtype", "=", "'int64'", ")", "-", "1", "missing", "=", "np", ".", "where", "(", "voronoi_seed_idx", "==", "-", "1", ")", "[", "0", "]", ".", "shape", "[", "0", "]", "mapping_single", "=", "np", ".", "zeros_like", "(", "voronoi_seed_idx", ")", "neighbours", "=", "0", "col", "=", "0", "while", "missing", "!=", "0", ":", "neighbours", "+=", "100", "# find nearest neighbours", "inaccuracy", ",", "mapping", "=", "scipy", ".", "spatial", ".", "KDTree", "(", "complex_vertices", ")", ".", "query", "(", "simple_vertices", ",", "k", "=", "neighbours", ")", "# go through columns of nearest neighbours until unique mapping is", "# achieved, if not before end of neighbours, extend number of", "# neighbours", "while", "col", "<", "neighbours", ":", "# find all missing voronoi seed indices", "missing_idx", "=", "np", ".", "where", "(", "voronoi_seed_idx", "==", "-", "1", ")", "[", "0", "]", "missing", "=", "missing_idx", ".", "shape", "[", "0", "]", "if", "missing", "==", "0", ":", "break", "else", ":", "# for missing entries fill in next neighbour", "mapping_single", "[", "missing_idx", "]", "=", "np", ".", "copy", "(", "mapping", "[", "missing_idx", ",", "col", "]", ")", "# find unique values in mapping_single", "unique", ",", "double_idx", "=", "np", ".", "unique", "(", "mapping_single", ",", "return_inverse", "=", "True", ")", "# empty voronoi seed index", "voronoi_seed_idx", "=", "np", ".", "zeros", "(", "(", "simple_vertices", ".", "shape", "[", "0", "]", ",", ")", ",", "dtype", "=", "'int64'", ")", "-", "1", "# fill voronoi seed idx with unique values", "for", "u", "in", "range", "(", "unique", ".", "shape", "[", "0", "]", ")", ":", "# find the indices of this value in mapping", 
"entries", "=", "np", ".", "where", "(", "double_idx", "==", "u", ")", "[", "0", "]", "# set the first entry to the value", "voronoi_seed_idx", "[", "entries", "[", "0", "]", "]", "=", "unique", "[", "u", "]", "# go to next column", "col", "+=", "1", "return", "voronoi_seed_idx", ",", "inaccuracy" ]
Thanks to juhuntenburg. Functions taken from https://github.com/juhuntenburg/brainsurfacescripts Finds those points on the complex mesh that correspond best to the simple mesh while forcing a one-to-one mapping.
[ "Thanks", "to", "juhuntenburg", ".", "Functions", "taken", "from", "https", ":", "//", "github", ".", "com", "/", "juhuntenburg", "/", "brainsurfacescripts" ]
849fdfbb2822ff1aa530a3b0bc955a4312e3edf1
https://github.com/NeuroanatomyAndConnectivity/surfdist/blob/849fdfbb2822ff1aa530a3b0bc955a4312e3edf1/surfdist/utils.py#L66-L119
train
robhowley/nhlscrapi
nhlscrapi/scrapr/eventsummrep.py
EventSummRep.parse
def parse(self): """ Retreive and parse Event Summary report for the given :py:class:`nhlscrapi.games.game.GameKey` :returns: ``self`` on success, ``None`` otherwise """ try: return super(EventSummRep, self).parse() \ .parse_away_shots() \ .parse_home_shots() \ .parse_away_fo() \ .parse_home_fo() \ .parse_away_by_player() \ .parse_home_by_player() except: return None
python
def parse(self): """ Retreive and parse Event Summary report for the given :py:class:`nhlscrapi.games.game.GameKey` :returns: ``self`` on success, ``None`` otherwise """ try: return super(EventSummRep, self).parse() \ .parse_away_shots() \ .parse_home_shots() \ .parse_away_fo() \ .parse_home_fo() \ .parse_away_by_player() \ .parse_home_by_player() except: return None
[ "def", "parse", "(", "self", ")", ":", "try", ":", "return", "super", "(", "EventSummRep", ",", "self", ")", ".", "parse", "(", ")", ".", "parse_away_shots", "(", ")", ".", "parse_home_shots", "(", ")", ".", "parse_away_fo", "(", ")", ".", "parse_home_fo", "(", ")", ".", "parse_away_by_player", "(", ")", ".", "parse_home_by_player", "(", ")", "except", ":", "return", "None" ]
Retreive and parse Event Summary report for the given :py:class:`nhlscrapi.games.game.GameKey` :returns: ``self`` on success, ``None`` otherwise
[ "Retreive", "and", "parse", "Event", "Summary", "report", "for", "the", "given", ":", "py", ":", "class", ":", "nhlscrapi", ".", "games", ".", "game", ".", "GameKey", ":", "returns", ":", "self", "on", "success", "None", "otherwise" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/eventsummrep.py#L86-L101
train
robhowley/nhlscrapi
nhlscrapi/scrapr/eventsummrep.py
EventSummRep.parse_home_shots
def parse_home_shots(self): """ Parse shot info for home team. :returns: ``self`` on success, ``None`` otherwise """ try: self.__set_shot_tables() self.shots['home'] = self.__parse_shot_tables( self.__home_top, self.__home_bot ) return self except: return None
python
def parse_home_shots(self): """ Parse shot info for home team. :returns: ``self`` on success, ``None`` otherwise """ try: self.__set_shot_tables() self.shots['home'] = self.__parse_shot_tables( self.__home_top, self.__home_bot ) return self except: return None
[ "def", "parse_home_shots", "(", "self", ")", ":", "try", ":", "self", ".", "__set_shot_tables", "(", ")", "self", ".", "shots", "[", "'home'", "]", "=", "self", ".", "__parse_shot_tables", "(", "self", ".", "__home_top", ",", "self", ".", "__home_bot", ")", "return", "self", "except", ":", "return", "None" ]
Parse shot info for home team. :returns: ``self`` on success, ``None`` otherwise
[ "Parse", "shot", "info", "for", "home", "team", ".", ":", "returns", ":", "self", "on", "success", "None", "otherwise" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/eventsummrep.py#L149-L163
train
robhowley/nhlscrapi
nhlscrapi/scrapr/eventsummrep.py
EventSummRep.parse_away_shots
def parse_away_shots(self): """ Parse shot info for away team. :returns: ``self`` on success, ``None`` otherwise """ try: self.__set_shot_tables() self.shots['away'] = self.__parse_shot_tables( self.__aw_top, self.__aw_bot ) return self except: return None
python
def parse_away_shots(self): """ Parse shot info for away team. :returns: ``self`` on success, ``None`` otherwise """ try: self.__set_shot_tables() self.shots['away'] = self.__parse_shot_tables( self.__aw_top, self.__aw_bot ) return self except: return None
[ "def", "parse_away_shots", "(", "self", ")", ":", "try", ":", "self", ".", "__set_shot_tables", "(", ")", "self", ".", "shots", "[", "'away'", "]", "=", "self", ".", "__parse_shot_tables", "(", "self", ".", "__aw_top", ",", "self", ".", "__aw_bot", ")", "return", "self", "except", ":", "return", "None" ]
Parse shot info for away team. :returns: ``self`` on success, ``None`` otherwise
[ "Parse", "shot", "info", "for", "away", "team", ".", ":", "returns", ":", "self", "on", "success", "None", "otherwise" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/eventsummrep.py#L165-L179
train
robhowley/nhlscrapi
nhlscrapi/scrapr/eventsummrep.py
EventSummRep.parse_home_fo
def parse_home_fo(self): """ Parse face-off info for home team. :returns: ``self`` on success, ``None`` otherwise """ try: self.__set_fo_tables() self.face_offs['home'] = self.__parse_fo_table(self.__home_fo) return self except: return None
python
def parse_home_fo(self): """ Parse face-off info for home team. :returns: ``self`` on success, ``None`` otherwise """ try: self.__set_fo_tables() self.face_offs['home'] = self.__parse_fo_table(self.__home_fo) return self except: return None
[ "def", "parse_home_fo", "(", "self", ")", ":", "try", ":", "self", ".", "__set_fo_tables", "(", ")", "self", ".", "face_offs", "[", "'home'", "]", "=", "self", ".", "__parse_fo_table", "(", "self", ".", "__home_fo", ")", "return", "self", "except", ":", "return", "None" ]
Parse face-off info for home team. :returns: ``self`` on success, ``None`` otherwise
[ "Parse", "face", "-", "off", "info", "for", "home", "team", ".", ":", "returns", ":", "self", "on", "success", "None", "otherwise" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/eventsummrep.py#L181-L192
train
robhowley/nhlscrapi
nhlscrapi/scrapr/eventsummrep.py
EventSummRep.parse_away_fo
def parse_away_fo(self): """ Parse face-off info for away team. :returns: ``self`` on success, ``None`` otherwise """ try: self.__set_fo_tables() self.face_offs['away'] = self.__parse_fo_table(self.__away_fo) return self except: return None
python
def parse_away_fo(self): """ Parse face-off info for away team. :returns: ``self`` on success, ``None`` otherwise """ try: self.__set_fo_tables() self.face_offs['away'] = self.__parse_fo_table(self.__away_fo) return self except: return None
[ "def", "parse_away_fo", "(", "self", ")", ":", "try", ":", "self", ".", "__set_fo_tables", "(", ")", "self", ".", "face_offs", "[", "'away'", "]", "=", "self", ".", "__parse_fo_table", "(", "self", ".", "__away_fo", ")", "return", "self", "except", ":", "return", "None" ]
Parse face-off info for away team. :returns: ``self`` on success, ``None`` otherwise
[ "Parse", "face", "-", "off", "info", "for", "away", "team", ".", ":", "returns", ":", "self", "on", "success", "None", "otherwise" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/eventsummrep.py#L194-L205
train
NeuroanatomyAndConnectivity/surfdist
surfdist/surfdist.py
dist_calc
def dist_calc(surf, cortex, source_nodes): """ Calculate exact geodesic distance along cortical surface from set of source nodes. "dist_type" specifies whether to calculate "min", "mean", "median", or "max" distance values from a region-of-interest. If running only on single node, defaults to "min". """ cortex_vertices, cortex_triangles = surf_keep_cortex(surf, cortex) translated_source_nodes = translate_src(source_nodes, cortex) data = gdist.compute_gdist(cortex_vertices, cortex_triangles, source_indices = translated_source_nodes) dist = recort(data, surf, cortex) del data return dist
python
def dist_calc(surf, cortex, source_nodes): """ Calculate exact geodesic distance along cortical surface from set of source nodes. "dist_type" specifies whether to calculate "min", "mean", "median", or "max" distance values from a region-of-interest. If running only on single node, defaults to "min". """ cortex_vertices, cortex_triangles = surf_keep_cortex(surf, cortex) translated_source_nodes = translate_src(source_nodes, cortex) data = gdist.compute_gdist(cortex_vertices, cortex_triangles, source_indices = translated_source_nodes) dist = recort(data, surf, cortex) del data return dist
[ "def", "dist_calc", "(", "surf", ",", "cortex", ",", "source_nodes", ")", ":", "cortex_vertices", ",", "cortex_triangles", "=", "surf_keep_cortex", "(", "surf", ",", "cortex", ")", "translated_source_nodes", "=", "translate_src", "(", "source_nodes", ",", "cortex", ")", "data", "=", "gdist", ".", "compute_gdist", "(", "cortex_vertices", ",", "cortex_triangles", ",", "source_indices", "=", "translated_source_nodes", ")", "dist", "=", "recort", "(", "data", ",", "surf", ",", "cortex", ")", "del", "data", "return", "dist" ]
Calculate exact geodesic distance along cortical surface from set of source nodes. "dist_type" specifies whether to calculate "min", "mean", "median", or "max" distance values from a region-of-interest. If running only on single node, defaults to "min".
[ "Calculate", "exact", "geodesic", "distance", "along", "cortical", "surface", "from", "set", "of", "source", "nodes", ".", "dist_type", "specifies", "whether", "to", "calculate", "min", "mean", "median", "or", "max", "distance", "values", "from", "a", "region", "-", "of", "-", "interest", ".", "If", "running", "only", "on", "single", "node", "defaults", "to", "min", "." ]
849fdfbb2822ff1aa530a3b0bc955a4312e3edf1
https://github.com/NeuroanatomyAndConnectivity/surfdist/blob/849fdfbb2822ff1aa530a3b0bc955a4312e3edf1/surfdist/surfdist.py#L6-L20
train
NeuroanatomyAndConnectivity/surfdist
surfdist/surfdist.py
zone_calc
def zone_calc(surf, cortex, src): """ Calculate closest nodes to each source node using exact geodesic distance along the cortical surface. """ cortex_vertices, cortex_triangles = surf_keep_cortex(surf, cortex) dist_vals = np.zeros((len(source_nodes), len(cortex_vertices))) for x in range(len(source_nodes)): translated_source_nodes = translate_src(source_nodes[x], cortex) dist_vals[x, :] = gdist.compute_gdist(cortex_vertices, cortex_triangles, source_indices = translated_source_nodes) data = np.argsort(dist_vals, axis=0)[0, :] + 1 zone = recort(data, surf, cortex) del data return zone
python
def zone_calc(surf, cortex, src): """ Calculate closest nodes to each source node using exact geodesic distance along the cortical surface. """ cortex_vertices, cortex_triangles = surf_keep_cortex(surf, cortex) dist_vals = np.zeros((len(source_nodes), len(cortex_vertices))) for x in range(len(source_nodes)): translated_source_nodes = translate_src(source_nodes[x], cortex) dist_vals[x, :] = gdist.compute_gdist(cortex_vertices, cortex_triangles, source_indices = translated_source_nodes) data = np.argsort(dist_vals, axis=0)[0, :] + 1 zone = recort(data, surf, cortex) del data return zone
[ "def", "zone_calc", "(", "surf", ",", "cortex", ",", "src", ")", ":", "cortex_vertices", ",", "cortex_triangles", "=", "surf_keep_cortex", "(", "surf", ",", "cortex", ")", "dist_vals", "=", "np", ".", "zeros", "(", "(", "len", "(", "source_nodes", ")", ",", "len", "(", "cortex_vertices", ")", ")", ")", "for", "x", "in", "range", "(", "len", "(", "source_nodes", ")", ")", ":", "translated_source_nodes", "=", "translate_src", "(", "source_nodes", "[", "x", "]", ",", "cortex", ")", "dist_vals", "[", "x", ",", ":", "]", "=", "gdist", ".", "compute_gdist", "(", "cortex_vertices", ",", "cortex_triangles", ",", "source_indices", "=", "translated_source_nodes", ")", "data", "=", "np", ".", "argsort", "(", "dist_vals", ",", "axis", "=", "0", ")", "[", "0", ",", ":", "]", "+", "1", "zone", "=", "recort", "(", "data", ",", "surf", ",", "cortex", ")", "del", "data", "return", "zone" ]
Calculate closest nodes to each source node using exact geodesic distance along the cortical surface.
[ "Calculate", "closest", "nodes", "to", "each", "source", "node", "using", "exact", "geodesic", "distance", "along", "the", "cortical", "surface", "." ]
849fdfbb2822ff1aa530a3b0bc955a4312e3edf1
https://github.com/NeuroanatomyAndConnectivity/surfdist/blob/849fdfbb2822ff1aa530a3b0bc955a4312e3edf1/surfdist/surfdist.py#L23-L43
train
NeuroanatomyAndConnectivity/surfdist
surfdist/surfdist.py
dist_calc_matrix
def dist_calc_matrix(surf, cortex, labels, exceptions = ['Unknown', 'Medial_wall'], verbose = True): """ Calculate exact geodesic distance along cortical surface from set of source nodes. "labels" specifies the freesurfer label file to use. All values will be used other than those specified in "exceptions" (default: 'Unknown' and 'Medial_Wall'). returns: dist_mat: symmetrical nxn matrix of minimum distance between pairs of labels rois: label names in order of n """ cortex_vertices, cortex_triangles = surf_keep_cortex(surf, cortex) # remove exceptions from label list: label_list = sd.load.get_freesurfer_label(labels, verbose = False) rs = np.where([a not in exceptions for a in label_list])[0] rois = [label_list[r] for r in rs] if verbose: print("# of regions: " + str(len(rois))) # calculate distance from each region to all nodes: dist_roi = [] for roi in rois: source_nodes = sd.load.load_freesurfer_label(labels, roi) translated_source_nodes = translate_src(source_nodes, cortex) dist_roi.append(gdist.compute_gdist(cortex_vertices, cortex_triangles, source_indices = translated_source_nodes)) if verbose: print(roi) dist_roi = np.array(dist_roi) # Calculate min distance per region: dist_mat = [] for roi in rois: source_nodes = sd.load.load_freesurfer_label(labels, roi) translated_source_nodes = translate_src(source_nodes, cortex) dist_mat.append(np.min(dist_roi[:,translated_source_nodes], axis = 1)) dist_mat = np.array(dist_mat) return dist_mat, rois
python
def dist_calc_matrix(surf, cortex, labels, exceptions = ['Unknown', 'Medial_wall'], verbose = True): """ Calculate exact geodesic distance along cortical surface from set of source nodes. "labels" specifies the freesurfer label file to use. All values will be used other than those specified in "exceptions" (default: 'Unknown' and 'Medial_Wall'). returns: dist_mat: symmetrical nxn matrix of minimum distance between pairs of labels rois: label names in order of n """ cortex_vertices, cortex_triangles = surf_keep_cortex(surf, cortex) # remove exceptions from label list: label_list = sd.load.get_freesurfer_label(labels, verbose = False) rs = np.where([a not in exceptions for a in label_list])[0] rois = [label_list[r] for r in rs] if verbose: print("# of regions: " + str(len(rois))) # calculate distance from each region to all nodes: dist_roi = [] for roi in rois: source_nodes = sd.load.load_freesurfer_label(labels, roi) translated_source_nodes = translate_src(source_nodes, cortex) dist_roi.append(gdist.compute_gdist(cortex_vertices, cortex_triangles, source_indices = translated_source_nodes)) if verbose: print(roi) dist_roi = np.array(dist_roi) # Calculate min distance per region: dist_mat = [] for roi in rois: source_nodes = sd.load.load_freesurfer_label(labels, roi) translated_source_nodes = translate_src(source_nodes, cortex) dist_mat.append(np.min(dist_roi[:,translated_source_nodes], axis = 1)) dist_mat = np.array(dist_mat) return dist_mat, rois
[ "def", "dist_calc_matrix", "(", "surf", ",", "cortex", ",", "labels", ",", "exceptions", "=", "[", "'Unknown'", ",", "'Medial_wall'", "]", ",", "verbose", "=", "True", ")", ":", "cortex_vertices", ",", "cortex_triangles", "=", "surf_keep_cortex", "(", "surf", ",", "cortex", ")", "# remove exceptions from label list:", "label_list", "=", "sd", ".", "load", ".", "get_freesurfer_label", "(", "labels", ",", "verbose", "=", "False", ")", "rs", "=", "np", ".", "where", "(", "[", "a", "not", "in", "exceptions", "for", "a", "in", "label_list", "]", ")", "[", "0", "]", "rois", "=", "[", "label_list", "[", "r", "]", "for", "r", "in", "rs", "]", "if", "verbose", ":", "print", "(", "\"# of regions: \"", "+", "str", "(", "len", "(", "rois", ")", ")", ")", "# calculate distance from each region to all nodes:", "dist_roi", "=", "[", "]", "for", "roi", "in", "rois", ":", "source_nodes", "=", "sd", ".", "load", ".", "load_freesurfer_label", "(", "labels", ",", "roi", ")", "translated_source_nodes", "=", "translate_src", "(", "source_nodes", ",", "cortex", ")", "dist_roi", ".", "append", "(", "gdist", ".", "compute_gdist", "(", "cortex_vertices", ",", "cortex_triangles", ",", "source_indices", "=", "translated_source_nodes", ")", ")", "if", "verbose", ":", "print", "(", "roi", ")", "dist_roi", "=", "np", ".", "array", "(", "dist_roi", ")", "# Calculate min distance per region:", "dist_mat", "=", "[", "]", "for", "roi", "in", "rois", ":", "source_nodes", "=", "sd", ".", "load", ".", "load_freesurfer_label", "(", "labels", ",", "roi", ")", "translated_source_nodes", "=", "translate_src", "(", "source_nodes", ",", "cortex", ")", "dist_mat", ".", "append", "(", "np", ".", "min", "(", "dist_roi", "[", ":", ",", "translated_source_nodes", "]", ",", "axis", "=", "1", ")", ")", "dist_mat", "=", "np", ".", "array", "(", "dist_mat", ")", "return", "dist_mat", ",", "rois" ]
Calculate exact geodesic distance along cortical surface from set of source nodes. "labels" specifies the freesurfer label file to use. All values will be used other than those specified in "exceptions" (default: 'Unknown' and 'Medial_Wall'). returns: dist_mat: symmetrical nxn matrix of minimum distance between pairs of labels rois: label names in order of n
[ "Calculate", "exact", "geodesic", "distance", "along", "cortical", "surface", "from", "set", "of", "source", "nodes", ".", "labels", "specifies", "the", "freesurfer", "label", "file", "to", "use", ".", "All", "values", "will", "be", "used", "other", "than", "those", "specified", "in", "exceptions", "(", "default", ":", "Unknown", "and", "Medial_Wall", ")", "." ]
849fdfbb2822ff1aa530a3b0bc955a4312e3edf1
https://github.com/NeuroanatomyAndConnectivity/surfdist/blob/849fdfbb2822ff1aa530a3b0bc955a4312e3edf1/surfdist/surfdist.py#L46-L85
train
robhowley/nhlscrapi
nhlscrapi/scrapr/faceoffrep.py
FaceOffRep.parse
def parse(self): """ Retreive and parse Play by Play data for the given nhlscrapi.GameKey :returns: ``self`` on success, ``None`` otherwise """ try: return ( super(FaceOffRep, self).parse() and self.parse_home_face_offs() and self.parse_away_face_offs() ) except: return None
python
def parse(self): """ Retreive and parse Play by Play data for the given nhlscrapi.GameKey :returns: ``self`` on success, ``None`` otherwise """ try: return ( super(FaceOffRep, self).parse() and self.parse_home_face_offs() and self.parse_away_face_offs() ) except: return None
[ "def", "parse", "(", "self", ")", ":", "try", ":", "return", "(", "super", "(", "FaceOffRep", ",", "self", ")", ".", "parse", "(", ")", "and", "self", ".", "parse_home_face_offs", "(", ")", "and", "self", ".", "parse_away_face_offs", "(", ")", ")", "except", ":", "return", "None" ]
Retreive and parse Play by Play data for the given nhlscrapi.GameKey :returns: ``self`` on success, ``None`` otherwise
[ "Retreive", "and", "parse", "Play", "by", "Play", "data", "for", "the", "given", "nhlscrapi", ".", "GameKey", ":", "returns", ":", "self", "on", "success", "None", "otherwise" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/faceoffrep.py#L53-L66
train
robhowley/nhlscrapi
nhlscrapi/scrapr/faceoffrep.py
FaceOffRep.parse_home_face_offs
def parse_home_face_offs(self): """ Parse only the home faceoffs :returns: ``self`` on success, ``None`` otherwise """ self.__set_team_docs() self.face_offs['home'] = FaceOffRep.__read_team_doc(self.__home_doc) return self
python
def parse_home_face_offs(self): """ Parse only the home faceoffs :returns: ``self`` on success, ``None`` otherwise """ self.__set_team_docs() self.face_offs['home'] = FaceOffRep.__read_team_doc(self.__home_doc) return self
[ "def", "parse_home_face_offs", "(", "self", ")", ":", "self", ".", "__set_team_docs", "(", ")", "self", ".", "face_offs", "[", "'home'", "]", "=", "FaceOffRep", ".", "__read_team_doc", "(", "self", ".", "__home_doc", ")", "return", "self" ]
Parse only the home faceoffs :returns: ``self`` on success, ``None`` otherwise
[ "Parse", "only", "the", "home", "faceoffs", ":", "returns", ":", "self", "on", "success", "None", "otherwise" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/faceoffrep.py#L68-L76
train
robhowley/nhlscrapi
nhlscrapi/scrapr/faceoffrep.py
FaceOffRep.parse_away_face_offs
def parse_away_face_offs(self): """ Parse only the away faceoffs :returns: ``self`` on success, ``None`` otherwise """ self.__set_team_docs() self.face_offs['away'] = FaceOffRep.__read_team_doc(self.__vis_doc) return self
python
def parse_away_face_offs(self): """ Parse only the away faceoffs :returns: ``self`` on success, ``None`` otherwise """ self.__set_team_docs() self.face_offs['away'] = FaceOffRep.__read_team_doc(self.__vis_doc) return self
[ "def", "parse_away_face_offs", "(", "self", ")", ":", "self", ".", "__set_team_docs", "(", ")", "self", ".", "face_offs", "[", "'away'", "]", "=", "FaceOffRep", ".", "__read_team_doc", "(", "self", ".", "__vis_doc", ")", "return", "self" ]
Parse only the away faceoffs :returns: ``self`` on success, ``None`` otherwise
[ "Parse", "only", "the", "away", "faceoffs", ":", "returns", ":", "self", "on", "success", "None", "otherwise" ]
2273683497ff27b0e92c8d1557ff0ce962dbf43b
https://github.com/robhowley/nhlscrapi/blob/2273683497ff27b0e92c8d1557ff0ce962dbf43b/nhlscrapi/scrapr/faceoffrep.py#L78-L86
train
linkedin/Zopkio
zopkio/utils.py
load_module
def load_module(filename): """ Loads a module by filename """ basename = os.path.basename(filename) path = os.path.dirname(filename) sys.path.append(path) # TODO(tlan) need to figure out how to handle errors thrown here return __import__(os.path.splitext(basename)[0])
python
def load_module(filename): """ Loads a module by filename """ basename = os.path.basename(filename) path = os.path.dirname(filename) sys.path.append(path) # TODO(tlan) need to figure out how to handle errors thrown here return __import__(os.path.splitext(basename)[0])
[ "def", "load_module", "(", "filename", ")", ":", "basename", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", "path", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "sys", ".", "path", ".", "append", "(", "path", ")", "# TODO(tlan) need to figure out how to handle errors thrown here", "return", "__import__", "(", "os", ".", "path", ".", "splitext", "(", "basename", ")", "[", "0", "]", ")" ]
Loads a module by filename
[ "Loads", "a", "module", "by", "filename" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/utils.py#L58-L66
train
linkedin/Zopkio
zopkio/utils.py
make_machine_mapping
def make_machine_mapping(machine_list): """ Convert the machine list argument from a list of names into a mapping of logical names to physical hosts. This is similar to the _parse_configs function but separated to provide the opportunity for extension and additional checking of machine access """ if machine_list is None: return {} else: mapping = {} for pair in machine_list: if (constants.MACHINE_SEPARATOR not in pair) or (pair.count(constants.MACHINE_SEPARATOR) != 1): raise ValueError("machine pairs must be passed as two strings separted by a %s", constants.MACHINE_SEPARATOR) (logical, physical) = pair.split(constants.MACHINE_SEPARATOR) # add checks for reachability mapping[logical] = physical return mapping
python
def make_machine_mapping(machine_list): """ Convert the machine list argument from a list of names into a mapping of logical names to physical hosts. This is similar to the _parse_configs function but separated to provide the opportunity for extension and additional checking of machine access """ if machine_list is None: return {} else: mapping = {} for pair in machine_list: if (constants.MACHINE_SEPARATOR not in pair) or (pair.count(constants.MACHINE_SEPARATOR) != 1): raise ValueError("machine pairs must be passed as two strings separted by a %s", constants.MACHINE_SEPARATOR) (logical, physical) = pair.split(constants.MACHINE_SEPARATOR) # add checks for reachability mapping[logical] = physical return mapping
[ "def", "make_machine_mapping", "(", "machine_list", ")", ":", "if", "machine_list", "is", "None", ":", "return", "{", "}", "else", ":", "mapping", "=", "{", "}", "for", "pair", "in", "machine_list", ":", "if", "(", "constants", ".", "MACHINE_SEPARATOR", "not", "in", "pair", ")", "or", "(", "pair", ".", "count", "(", "constants", ".", "MACHINE_SEPARATOR", ")", "!=", "1", ")", ":", "raise", "ValueError", "(", "\"machine pairs must be passed as two strings separted by a %s\"", ",", "constants", ".", "MACHINE_SEPARATOR", ")", "(", "logical", ",", "physical", ")", "=", "pair", ".", "split", "(", "constants", ".", "MACHINE_SEPARATOR", ")", "# add checks for reachability", "mapping", "[", "logical", "]", "=", "physical", "return", "mapping" ]
Convert the machine list argument from a list of names into a mapping of logical names to physical hosts. This is similar to the _parse_configs function but separated to provide the opportunity for extension and additional checking of machine access
[ "Convert", "the", "machine", "list", "argument", "from", "a", "list", "of", "names", "into", "a", "mapping", "of", "logical", "names", "to", "physical", "hosts", ".", "This", "is", "similar", "to", "the", "_parse_configs", "function", "but", "separated", "to", "provide", "the", "opportunity", "for", "extension", "and", "additional", "checking", "of", "machine", "access" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/utils.py#L79-L95
train
linkedin/Zopkio
zopkio/utils.py
parse_config_list
def parse_config_list(config_list): """ Parse a list of configuration properties separated by '=' """ if config_list is None: return {} else: mapping = {} for pair in config_list: if (constants.CONFIG_SEPARATOR not in pair) or (pair.count(constants.CONFIG_SEPARATOR) != 1): raise ValueError("configs must be passed as two strings separted by a %s", constants.CONFIG_SEPARATOR) (config, value) = pair.split(constants.CONFIG_SEPARATOR) mapping[config] = value return mapping
python
def parse_config_list(config_list): """ Parse a list of configuration properties separated by '=' """ if config_list is None: return {} else: mapping = {} for pair in config_list: if (constants.CONFIG_SEPARATOR not in pair) or (pair.count(constants.CONFIG_SEPARATOR) != 1): raise ValueError("configs must be passed as two strings separted by a %s", constants.CONFIG_SEPARATOR) (config, value) = pair.split(constants.CONFIG_SEPARATOR) mapping[config] = value return mapping
[ "def", "parse_config_list", "(", "config_list", ")", ":", "if", "config_list", "is", "None", ":", "return", "{", "}", "else", ":", "mapping", "=", "{", "}", "for", "pair", "in", "config_list", ":", "if", "(", "constants", ".", "CONFIG_SEPARATOR", "not", "in", "pair", ")", "or", "(", "pair", ".", "count", "(", "constants", ".", "CONFIG_SEPARATOR", ")", "!=", "1", ")", ":", "raise", "ValueError", "(", "\"configs must be passed as two strings separted by a %s\"", ",", "constants", ".", "CONFIG_SEPARATOR", ")", "(", "config", ",", "value", ")", "=", "pair", ".", "split", "(", "constants", ".", "CONFIG_SEPARATOR", ")", "mapping", "[", "config", "]", "=", "value", "return", "mapping" ]
Parse a list of configuration properties separated by '='
[ "Parse", "a", "list", "of", "configuration", "properties", "separated", "by", "=" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/utils.py#L98-L111
train
linkedin/Zopkio
zopkio/utils.py
parse_config_file
def parse_config_file(config_file_path): """ Parse a configuration file. Currently only supports .json, .py and properties separated by '=' :param config_file_path: :return: a dict of the configuration properties """ extension = os.path.splitext(config_file_path)[1] if extension == '.pyc': raise ValueError("Skipping .pyc file as config") if extension == '.json': with open(config_file_path) as config_file: try: mapping = json.load(config_file) except ValueError as e: logger.error("Did not load json configs", e) raise SyntaxError('Unable to parse config file:%s due to malformed JSON. Aborting' %(config_file_path)) elif extension == '.py': mapping = {} file_dict = load_module(config_file_path) for attr_name in dir(file_dict): if not (attr_name.startswith('_') or attr_name.startswith('__')): attr = getattr(file_dict, attr_name) if type(attr) is dict: mapping.update(attr) else: with open(config_file_path) as config_file: lines = [line.rstrip() for line in config_file if line.rstrip() != "" and not line.startswith("#")] mapping = parse_config_list(lines) return mapping
python
def parse_config_file(config_file_path): """ Parse a configuration file. Currently only supports .json, .py and properties separated by '=' :param config_file_path: :return: a dict of the configuration properties """ extension = os.path.splitext(config_file_path)[1] if extension == '.pyc': raise ValueError("Skipping .pyc file as config") if extension == '.json': with open(config_file_path) as config_file: try: mapping = json.load(config_file) except ValueError as e: logger.error("Did not load json configs", e) raise SyntaxError('Unable to parse config file:%s due to malformed JSON. Aborting' %(config_file_path)) elif extension == '.py': mapping = {} file_dict = load_module(config_file_path) for attr_name in dir(file_dict): if not (attr_name.startswith('_') or attr_name.startswith('__')): attr = getattr(file_dict, attr_name) if type(attr) is dict: mapping.update(attr) else: with open(config_file_path) as config_file: lines = [line.rstrip() for line in config_file if line.rstrip() != "" and not line.startswith("#")] mapping = parse_config_list(lines) return mapping
[ "def", "parse_config_file", "(", "config_file_path", ")", ":", "extension", "=", "os", ".", "path", ".", "splitext", "(", "config_file_path", ")", "[", "1", "]", "if", "extension", "==", "'.pyc'", ":", "raise", "ValueError", "(", "\"Skipping .pyc file as config\"", ")", "if", "extension", "==", "'.json'", ":", "with", "open", "(", "config_file_path", ")", "as", "config_file", ":", "try", ":", "mapping", "=", "json", ".", "load", "(", "config_file", ")", "except", "ValueError", "as", "e", ":", "logger", ".", "error", "(", "\"Did not load json configs\"", ",", "e", ")", "raise", "SyntaxError", "(", "'Unable to parse config file:%s due to malformed JSON. Aborting'", "%", "(", "config_file_path", ")", ")", "elif", "extension", "==", "'.py'", ":", "mapping", "=", "{", "}", "file_dict", "=", "load_module", "(", "config_file_path", ")", "for", "attr_name", "in", "dir", "(", "file_dict", ")", ":", "if", "not", "(", "attr_name", ".", "startswith", "(", "'_'", ")", "or", "attr_name", ".", "startswith", "(", "'__'", ")", ")", ":", "attr", "=", "getattr", "(", "file_dict", ",", "attr_name", ")", "if", "type", "(", "attr", ")", "is", "dict", ":", "mapping", ".", "update", "(", "attr", ")", "else", ":", "with", "open", "(", "config_file_path", ")", "as", "config_file", ":", "lines", "=", "[", "line", ".", "rstrip", "(", ")", "for", "line", "in", "config_file", "if", "line", ".", "rstrip", "(", ")", "!=", "\"\"", "and", "not", "line", ".", "startswith", "(", "\"#\"", ")", "]", "mapping", "=", "parse_config_list", "(", "lines", ")", "return", "mapping" ]
Parse a configuration file. Currently only supports .json, .py and properties separated by '=' :param config_file_path: :return: a dict of the configuration properties
[ "Parse", "a", "configuration", "file", ".", "Currently", "only", "supports", ".", "json", ".", "py", "and", "properties", "separated", "by", "=", ":", "param", "config_file_path", ":", ":", "return", ":", "a", "dict", "of", "the", "configuration", "properties" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/utils.py#L114-L143
train
linkedin/Zopkio
zopkio/remote_host_helper.py
exec_with_env
def exec_with_env(ssh, command, msg='', env={}, **kwargs): """ :param ssh: :param command: :param msg: :param env: :param synch: :return: """ bash_profile_command = "source .bash_profile > /dev/null 2> /dev/null;" env_command = build_os_environment_string(env) new_command = bash_profile_command + env_command + command if kwargs.get('sync', True): return better_exec_command(ssh, new_command, msg) else: return ssh.exec_command(new_command)
python
def exec_with_env(ssh, command, msg='', env={}, **kwargs): """ :param ssh: :param command: :param msg: :param env: :param synch: :return: """ bash_profile_command = "source .bash_profile > /dev/null 2> /dev/null;" env_command = build_os_environment_string(env) new_command = bash_profile_command + env_command + command if kwargs.get('sync', True): return better_exec_command(ssh, new_command, msg) else: return ssh.exec_command(new_command)
[ "def", "exec_with_env", "(", "ssh", ",", "command", ",", "msg", "=", "''", ",", "env", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "bash_profile_command", "=", "\"source .bash_profile > /dev/null 2> /dev/null;\"", "env_command", "=", "build_os_environment_string", "(", "env", ")", "new_command", "=", "bash_profile_command", "+", "env_command", "+", "command", "if", "kwargs", ".", "get", "(", "'sync'", ",", "True", ")", ":", "return", "better_exec_command", "(", "ssh", ",", "new_command", ",", "msg", ")", "else", ":", "return", "ssh", ".", "exec_command", "(", "new_command", ")" ]
:param ssh: :param command: :param msg: :param env: :param synch: :return:
[ ":", "param", "ssh", ":", ":", "param", "command", ":", ":", "param", "msg", ":", ":", "param", "env", ":", ":", "param", "synch", ":", ":", "return", ":" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/remote_host_helper.py#L72-L88
train
linkedin/Zopkio
zopkio/remote_host_helper.py
better_exec_command
def better_exec_command(ssh, command, msg): """Uses paramiko to execute a command but handles failure by raising a ParamikoError if the command fails. Note that unlike paramiko.SSHClient.exec_command this is not asynchronous because we wait until the exit status is known :Parameter ssh: a paramiko SSH Client :Parameter command: the command to execute :Parameter msg: message to print on failure :Returns (paramiko.Channel) the underlying channel so that the caller can extract stdout or send to stdin :Raises SSHException: if paramiko would raise an SSHException :Raises ParamikoError: if the command produces output to stderr """ chan = ssh.get_transport().open_session() chan.exec_command(command) exit_status = chan.recv_exit_status() if exit_status != 0: msg_str = chan.recv_stderr(1024) err_msgs = [] while len(msg_str) > 0: err_msgs.append(msg_str) msg_str = chan.recv_stderr(1024) err_msg = ''.join(err_msgs) logger.error(err_msg) raise ParamikoError(msg, err_msg) return chan
python
def better_exec_command(ssh, command, msg): """Uses paramiko to execute a command but handles failure by raising a ParamikoError if the command fails. Note that unlike paramiko.SSHClient.exec_command this is not asynchronous because we wait until the exit status is known :Parameter ssh: a paramiko SSH Client :Parameter command: the command to execute :Parameter msg: message to print on failure :Returns (paramiko.Channel) the underlying channel so that the caller can extract stdout or send to stdin :Raises SSHException: if paramiko would raise an SSHException :Raises ParamikoError: if the command produces output to stderr """ chan = ssh.get_transport().open_session() chan.exec_command(command) exit_status = chan.recv_exit_status() if exit_status != 0: msg_str = chan.recv_stderr(1024) err_msgs = [] while len(msg_str) > 0: err_msgs.append(msg_str) msg_str = chan.recv_stderr(1024) err_msg = ''.join(err_msgs) logger.error(err_msg) raise ParamikoError(msg, err_msg) return chan
[ "def", "better_exec_command", "(", "ssh", ",", "command", ",", "msg", ")", ":", "chan", "=", "ssh", ".", "get_transport", "(", ")", ".", "open_session", "(", ")", "chan", ".", "exec_command", "(", "command", ")", "exit_status", "=", "chan", ".", "recv_exit_status", "(", ")", "if", "exit_status", "!=", "0", ":", "msg_str", "=", "chan", ".", "recv_stderr", "(", "1024", ")", "err_msgs", "=", "[", "]", "while", "len", "(", "msg_str", ")", ">", "0", ":", "err_msgs", ".", "append", "(", "msg_str", ")", "msg_str", "=", "chan", ".", "recv_stderr", "(", "1024", ")", "err_msg", "=", "''", ".", "join", "(", "err_msgs", ")", "logger", ".", "error", "(", "err_msg", ")", "raise", "ParamikoError", "(", "msg", ",", "err_msg", ")", "return", "chan" ]
Uses paramiko to execute a command but handles failure by raising a ParamikoError if the command fails. Note that unlike paramiko.SSHClient.exec_command this is not asynchronous because we wait until the exit status is known :Parameter ssh: a paramiko SSH Client :Parameter command: the command to execute :Parameter msg: message to print on failure :Returns (paramiko.Channel) the underlying channel so that the caller can extract stdout or send to stdin :Raises SSHException: if paramiko would raise an SSHException :Raises ParamikoError: if the command produces output to stderr
[ "Uses", "paramiko", "to", "execute", "a", "command", "but", "handles", "failure", "by", "raising", "a", "ParamikoError", "if", "the", "command", "fails", ".", "Note", "that", "unlike", "paramiko", ".", "SSHClient", ".", "exec_command", "this", "is", "not", "asynchronous", "because", "we", "wait", "until", "the", "exit", "status", "is", "known", ":", "Parameter", "ssh", ":", "a", "paramiko", "SSH", "Client", ":", "Parameter", "command", ":", "the", "command", "to", "execute", ":", "Parameter", "msg", ":", "message", "to", "print", "on", "failure", ":", "Returns", "(", "paramiko", ".", "Channel", ")", "the", "underlying", "channel", "so", "that", "the", "caller", "can", "extract", "stdout", "or", "send", "to", "stdin", ":", "Raises", "SSHException", ":", "if", "paramiko", "would", "raise", "an", "SSHException", ":", "Raises", "ParamikoError", ":", "if", "the", "command", "produces", "output", "to", "stderr" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/remote_host_helper.py#L90-L116
train
linkedin/Zopkio
zopkio/remote_host_helper.py
log_output
def log_output(chan): """ logs the output from a remote command the input should be an open channel in the case of synchronous better_exec_command otherwise this will not log anything and simply return to the caller :param chan: :return: """ if hasattr(chan, "recv"): str = chan.recv(1024) msgs = [] while len(str) > 0: msgs.append(str) str = chan.recv(1024) msg = ''.join(msgs).strip() if len(msg) > 0: logger.info(msg)
python
def log_output(chan): """ logs the output from a remote command the input should be an open channel in the case of synchronous better_exec_command otherwise this will not log anything and simply return to the caller :param chan: :return: """ if hasattr(chan, "recv"): str = chan.recv(1024) msgs = [] while len(str) > 0: msgs.append(str) str = chan.recv(1024) msg = ''.join(msgs).strip() if len(msg) > 0: logger.info(msg)
[ "def", "log_output", "(", "chan", ")", ":", "if", "hasattr", "(", "chan", ",", "\"recv\"", ")", ":", "str", "=", "chan", ".", "recv", "(", "1024", ")", "msgs", "=", "[", "]", "while", "len", "(", "str", ")", ">", "0", ":", "msgs", ".", "append", "(", "str", ")", "str", "=", "chan", ".", "recv", "(", "1024", ")", "msg", "=", "''", ".", "join", "(", "msgs", ")", ".", "strip", "(", ")", "if", "len", "(", "msg", ")", ">", "0", ":", "logger", ".", "info", "(", "msg", ")" ]
logs the output from a remote command the input should be an open channel in the case of synchronous better_exec_command otherwise this will not log anything and simply return to the caller :param chan: :return:
[ "logs", "the", "output", "from", "a", "remote", "command", "the", "input", "should", "be", "an", "open", "channel", "in", "the", "case", "of", "synchronous", "better_exec_command", "otherwise", "this", "will", "not", "log", "anything", "and", "simply", "return", "to", "the", "caller", ":", "param", "chan", ":", ":", "return", ":" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/remote_host_helper.py#L118-L134
train
linkedin/Zopkio
zopkio/remote_host_helper.py
copy_dir
def copy_dir(ftp, filename, outputdir, prefix, pattern=''): """ Recursively copy a directory flattens the output into a single directory but prefixes the files with the path from the original input directory :param ftp: :param filename: :param outputdir: :param prefix: :param pattern: a regex pattern for files to match (by default matches everything) :return: """ try: mode = ftp.stat(filename).st_mode except IOError, e: if e.errno == errno.ENOENT: logger.error("Log file " + filename + " does not exist") pass else: if mode & stat.S_IFREG: if re.match(pattern, filename) is not None: new_file = os.path.join(outputdir, "{0}-{1}".format(prefix, os.path.basename(filename))) ftp.get(filename, new_file) elif mode & stat.S_IFDIR: for f in ftp.listdir(filename): copy_dir(ftp, os.path.join(filename, f), outputdir, "{0}_{1}".format(prefix, os.path.basename(filename)), pattern)
python
def copy_dir(ftp, filename, outputdir, prefix, pattern=''): """ Recursively copy a directory flattens the output into a single directory but prefixes the files with the path from the original input directory :param ftp: :param filename: :param outputdir: :param prefix: :param pattern: a regex pattern for files to match (by default matches everything) :return: """ try: mode = ftp.stat(filename).st_mode except IOError, e: if e.errno == errno.ENOENT: logger.error("Log file " + filename + " does not exist") pass else: if mode & stat.S_IFREG: if re.match(pattern, filename) is not None: new_file = os.path.join(outputdir, "{0}-{1}".format(prefix, os.path.basename(filename))) ftp.get(filename, new_file) elif mode & stat.S_IFDIR: for f in ftp.listdir(filename): copy_dir(ftp, os.path.join(filename, f), outputdir, "{0}_{1}".format(prefix, os.path.basename(filename)), pattern)
[ "def", "copy_dir", "(", "ftp", ",", "filename", ",", "outputdir", ",", "prefix", ",", "pattern", "=", "''", ")", ":", "try", ":", "mode", "=", "ftp", ".", "stat", "(", "filename", ")", ".", "st_mode", "except", "IOError", ",", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "ENOENT", ":", "logger", ".", "error", "(", "\"Log file \"", "+", "filename", "+", "\" does not exist\"", ")", "pass", "else", ":", "if", "mode", "&", "stat", ".", "S_IFREG", ":", "if", "re", ".", "match", "(", "pattern", ",", "filename", ")", "is", "not", "None", ":", "new_file", "=", "os", ".", "path", ".", "join", "(", "outputdir", ",", "\"{0}-{1}\"", ".", "format", "(", "prefix", ",", "os", ".", "path", ".", "basename", "(", "filename", ")", ")", ")", "ftp", ".", "get", "(", "filename", ",", "new_file", ")", "elif", "mode", "&", "stat", ".", "S_IFDIR", ":", "for", "f", "in", "ftp", ".", "listdir", "(", "filename", ")", ":", "copy_dir", "(", "ftp", ",", "os", ".", "path", ".", "join", "(", "filename", ",", "f", ")", ",", "outputdir", ",", "\"{0}_{1}\"", ".", "format", "(", "prefix", ",", "os", ".", "path", ".", "basename", "(", "filename", ")", ")", ",", "pattern", ")" ]
Recursively copy a directory flattens the output into a single directory but prefixes the files with the path from the original input directory :param ftp: :param filename: :param outputdir: :param prefix: :param pattern: a regex pattern for files to match (by default matches everything) :return:
[ "Recursively", "copy", "a", "directory", "flattens", "the", "output", "into", "a", "single", "directory", "but", "prefixes", "the", "files", "with", "the", "path", "from", "the", "original", "input", "directory", ":", "param", "ftp", ":", ":", "param", "filename", ":", ":", "param", "outputdir", ":", ":", "param", "prefix", ":", ":", "param", "pattern", ":", "a", "regex", "pattern", "for", "files", "to", "match", "(", "by", "default", "matches", "everything", ")", ":", "return", ":" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/remote_host_helper.py#L137-L162
train
linkedin/Zopkio
zopkio/remote_host_helper.py
open_remote_file
def open_remote_file(hostname, filename, mode='r', bufsize=-1, username=None, password=None): """ :param hostname: :param filename: :return: """ with get_ssh_client(hostname, username=username, password=password) as ssh: sftp = None f = None try: sftp = ssh.open_sftp() f = sftp.open(filename, mode, bufsize) yield f finally: if f is not None: f.close() if sftp is not None: sftp.close()
python
def open_remote_file(hostname, filename, mode='r', bufsize=-1, username=None, password=None): """ :param hostname: :param filename: :return: """ with get_ssh_client(hostname, username=username, password=password) as ssh: sftp = None f = None try: sftp = ssh.open_sftp() f = sftp.open(filename, mode, bufsize) yield f finally: if f is not None: f.close() if sftp is not None: sftp.close()
[ "def", "open_remote_file", "(", "hostname", ",", "filename", ",", "mode", "=", "'r'", ",", "bufsize", "=", "-", "1", ",", "username", "=", "None", ",", "password", "=", "None", ")", ":", "with", "get_ssh_client", "(", "hostname", ",", "username", "=", "username", ",", "password", "=", "password", ")", "as", "ssh", ":", "sftp", "=", "None", "f", "=", "None", "try", ":", "sftp", "=", "ssh", ".", "open_sftp", "(", ")", "f", "=", "sftp", ".", "open", "(", "filename", ",", "mode", ",", "bufsize", ")", "yield", "f", "finally", ":", "if", "f", "is", "not", "None", ":", "f", ".", "close", "(", ")", "if", "sftp", "is", "not", "None", ":", "sftp", ".", "close", "(", ")" ]
:param hostname: :param filename: :return:
[ ":", "param", "hostname", ":", ":", "param", "filename", ":", ":", "return", ":" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/remote_host_helper.py#L166-L184
train
linkedin/Zopkio
zopkio/deployer.py
Deployer.deploy
def deploy(self, unique_id, configs=None): """Deploys the service to the host. This should at least perform the same actions as install and start but may perform additional tasks as needed. :Parameter unique_id: the name of the process :Parameter configs: a mao of configs the deployer may use to modify the deployment """ self.install(unique_id, configs) self.start(unique_id, configs)
python
def deploy(self, unique_id, configs=None): """Deploys the service to the host. This should at least perform the same actions as install and start but may perform additional tasks as needed. :Parameter unique_id: the name of the process :Parameter configs: a mao of configs the deployer may use to modify the deployment """ self.install(unique_id, configs) self.start(unique_id, configs)
[ "def", "deploy", "(", "self", ",", "unique_id", ",", "configs", "=", "None", ")", ":", "self", ".", "install", "(", "unique_id", ",", "configs", ")", "self", ".", "start", "(", "unique_id", ",", "configs", ")" ]
Deploys the service to the host. This should at least perform the same actions as install and start but may perform additional tasks as needed. :Parameter unique_id: the name of the process :Parameter configs: a mao of configs the deployer may use to modify the deployment
[ "Deploys", "the", "service", "to", "the", "host", ".", "This", "should", "at", "least", "perform", "the", "same", "actions", "as", "install", "and", "start", "but", "may", "perform", "additional", "tasks", "as", "needed", "." ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/deployer.py#L81-L89
train
linkedin/Zopkio
zopkio/deployer.py
Deployer.undeploy
def undeploy(self, unique_id, configs=None): """Undeploys the service. This should at least perform the same actions as stop and uninstall but may perform additional tasks as needed. :Parameter unique_id: the name of the process :Parameter configs: a map of configs the deployer may use """ self.stop(unique_id, configs) self.uninstall(unique_id, configs)
python
def undeploy(self, unique_id, configs=None): """Undeploys the service. This should at least perform the same actions as stop and uninstall but may perform additional tasks as needed. :Parameter unique_id: the name of the process :Parameter configs: a map of configs the deployer may use """ self.stop(unique_id, configs) self.uninstall(unique_id, configs)
[ "def", "undeploy", "(", "self", ",", "unique_id", ",", "configs", "=", "None", ")", ":", "self", ".", "stop", "(", "unique_id", ",", "configs", ")", "self", ".", "uninstall", "(", "unique_id", ",", "configs", ")" ]
Undeploys the service. This should at least perform the same actions as stop and uninstall but may perform additional tasks as needed. :Parameter unique_id: the name of the process :Parameter configs: a map of configs the deployer may use
[ "Undeploys", "the", "service", ".", "This", "should", "at", "least", "perform", "the", "same", "actions", "as", "stop", "and", "uninstall", "but", "may", "perform", "additional", "tasks", "as", "needed", "." ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/deployer.py#L91-L99
train
linkedin/Zopkio
zopkio/deployer.py
Deployer.soft_bounce
def soft_bounce(self, unique_id, configs=None): """ Performs a soft bounce (stop and start) for the specified process :Parameter unique_id: the name of the process """ self.stop(unique_id, configs) self.start(unique_id, configs)
python
def soft_bounce(self, unique_id, configs=None): """ Performs a soft bounce (stop and start) for the specified process :Parameter unique_id: the name of the process """ self.stop(unique_id, configs) self.start(unique_id, configs)
[ "def", "soft_bounce", "(", "self", ",", "unique_id", ",", "configs", "=", "None", ")", ":", "self", ".", "stop", "(", "unique_id", ",", "configs", ")", "self", ".", "start", "(", "unique_id", ",", "configs", ")" ]
Performs a soft bounce (stop and start) for the specified process :Parameter unique_id: the name of the process
[ "Performs", "a", "soft", "bounce", "(", "stop", "and", "start", ")", "for", "the", "specified", "process" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/deployer.py#L148-L154
train
linkedin/Zopkio
zopkio/deployer.py
Deployer.hard_bounce
def hard_bounce(self, unique_id, configs=None): """ Performs a hard bounce (kill and start) for the specified process :Parameter unique_id: the name of the process """ self.kill(unique_id, configs) self.start(unique_id, configs)
python
def hard_bounce(self, unique_id, configs=None): """ Performs a hard bounce (kill and start) for the specified process :Parameter unique_id: the name of the process """ self.kill(unique_id, configs) self.start(unique_id, configs)
[ "def", "hard_bounce", "(", "self", ",", "unique_id", ",", "configs", "=", "None", ")", ":", "self", ".", "kill", "(", "unique_id", ",", "configs", ")", "self", ".", "start", "(", "unique_id", ",", "configs", ")" ]
Performs a hard bounce (kill and start) for the specified process :Parameter unique_id: the name of the process
[ "Performs", "a", "hard", "bounce", "(", "kill", "and", "start", ")", "for", "the", "specified", "process" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/deployer.py#L156-L162
train
linkedin/Zopkio
zopkio/deployer.py
Deployer.sleep
def sleep(self, unique_id, delay, configs=None): """ Pauses the process for the specified delay and then resumes it :Parameter unique_id: the name of the process :Parameter delay: delay time in seconds """ self.pause(unique_id, configs) time.sleep(delay) self.resume(unique_id, configs)
python
def sleep(self, unique_id, delay, configs=None): """ Pauses the process for the specified delay and then resumes it :Parameter unique_id: the name of the process :Parameter delay: delay time in seconds """ self.pause(unique_id, configs) time.sleep(delay) self.resume(unique_id, configs)
[ "def", "sleep", "(", "self", ",", "unique_id", ",", "delay", ",", "configs", "=", "None", ")", ":", "self", ".", "pause", "(", "unique_id", ",", "configs", ")", "time", ".", "sleep", "(", "delay", ")", "self", ".", "resume", "(", "unique_id", ",", "configs", ")" ]
Pauses the process for the specified delay and then resumes it :Parameter unique_id: the name of the process :Parameter delay: delay time in seconds
[ "Pauses", "the", "process", "for", "the", "specified", "delay", "and", "then", "resumes", "it" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/deployer.py#L164-L172
train
linkedin/Zopkio
zopkio/deployer.py
Deployer.pause
def pause(self, unique_id, configs=None): """ Issues a sigstop for the specified process :Parameter unique_id: the name of the process """ pids = self.get_pid(unique_id, configs) if pids != constants.PROCESS_NOT_RUNNING_PID: pid_str = ' '.join(str(pid) for pid in pids) hostname = self.processes[unique_id].hostname with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh: better_exec_command(ssh, "kill -SIGSTOP {0}".format(pid_str), "PAUSING PROCESS {0}".format(unique_id))
python
def pause(self, unique_id, configs=None): """ Issues a sigstop for the specified process :Parameter unique_id: the name of the process """ pids = self.get_pid(unique_id, configs) if pids != constants.PROCESS_NOT_RUNNING_PID: pid_str = ' '.join(str(pid) for pid in pids) hostname = self.processes[unique_id].hostname with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh: better_exec_command(ssh, "kill -SIGSTOP {0}".format(pid_str), "PAUSING PROCESS {0}".format(unique_id))
[ "def", "pause", "(", "self", ",", "unique_id", ",", "configs", "=", "None", ")", ":", "pids", "=", "self", ".", "get_pid", "(", "unique_id", ",", "configs", ")", "if", "pids", "!=", "constants", ".", "PROCESS_NOT_RUNNING_PID", ":", "pid_str", "=", "' '", ".", "join", "(", "str", "(", "pid", ")", "for", "pid", "in", "pids", ")", "hostname", "=", "self", ".", "processes", "[", "unique_id", "]", ".", "hostname", "with", "get_ssh_client", "(", "hostname", ",", "username", "=", "runtime", ".", "get_username", "(", ")", ",", "password", "=", "runtime", ".", "get_password", "(", ")", ")", "as", "ssh", ":", "better_exec_command", "(", "ssh", ",", "\"kill -SIGSTOP {0}\"", ".", "format", "(", "pid_str", ")", ",", "\"PAUSING PROCESS {0}\"", ".", "format", "(", "unique_id", ")", ")" ]
Issues a sigstop for the specified process :Parameter unique_id: the name of the process
[ "Issues", "a", "sigstop", "for", "the", "specified", "process" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/deployer.py#L174-L184
train
linkedin/Zopkio
zopkio/deployer.py
Deployer._send_signal
def _send_signal(self, unique_id, signalno, configs): """ Issues a signal for the specified process :Parameter unique_id: the name of the process """ pids = self.get_pid(unique_id, configs) if pids != constants.PROCESS_NOT_RUNNING_PID: pid_str = ' '.join(str(pid) for pid in pids) hostname = self.processes[unique_id].hostname msg= Deployer._signalnames.get(signalno,"SENDING SIGNAL %s TO"%signalno) with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh: better_exec_command(ssh, "kill -{0} {1}".format(signalno, pid_str), "{0} PROCESS {1}".format(msg, unique_id))
python
def _send_signal(self, unique_id, signalno, configs): """ Issues a signal for the specified process :Parameter unique_id: the name of the process """ pids = self.get_pid(unique_id, configs) if pids != constants.PROCESS_NOT_RUNNING_PID: pid_str = ' '.join(str(pid) for pid in pids) hostname = self.processes[unique_id].hostname msg= Deployer._signalnames.get(signalno,"SENDING SIGNAL %s TO"%signalno) with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh: better_exec_command(ssh, "kill -{0} {1}".format(signalno, pid_str), "{0} PROCESS {1}".format(msg, unique_id))
[ "def", "_send_signal", "(", "self", ",", "unique_id", ",", "signalno", ",", "configs", ")", ":", "pids", "=", "self", ".", "get_pid", "(", "unique_id", ",", "configs", ")", "if", "pids", "!=", "constants", ".", "PROCESS_NOT_RUNNING_PID", ":", "pid_str", "=", "' '", ".", "join", "(", "str", "(", "pid", ")", "for", "pid", "in", "pids", ")", "hostname", "=", "self", ".", "processes", "[", "unique_id", "]", ".", "hostname", "msg", "=", "Deployer", ".", "_signalnames", ".", "get", "(", "signalno", ",", "\"SENDING SIGNAL %s TO\"", "%", "signalno", ")", "with", "get_ssh_client", "(", "hostname", ",", "username", "=", "runtime", ".", "get_username", "(", ")", ",", "password", "=", "runtime", ".", "get_password", "(", ")", ")", "as", "ssh", ":", "better_exec_command", "(", "ssh", ",", "\"kill -{0} {1}\"", ".", "format", "(", "signalno", ",", "pid_str", ")", ",", "\"{0} PROCESS {1}\"", ".", "format", "(", "msg", ",", "unique_id", ")", ")" ]
Issues a signal for the specified process :Parameter unique_id: the name of the process
[ "Issues", "a", "signal", "for", "the", "specified", "process" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/deployer.py#L186-L197
train
linkedin/Zopkio
zopkio/deployer.py
Deployer.resume
def resume(self, unique_id, configs=None): """ Issues a sigcont for the specified process :Parameter unique_id: the name of the process """ self._send_signal(unique_id, signal.SIGCONT,configs)
python
def resume(self, unique_id, configs=None): """ Issues a sigcont for the specified process :Parameter unique_id: the name of the process """ self._send_signal(unique_id, signal.SIGCONT,configs)
[ "def", "resume", "(", "self", ",", "unique_id", ",", "configs", "=", "None", ")", ":", "self", ".", "_send_signal", "(", "unique_id", ",", "signal", ".", "SIGCONT", ",", "configs", ")" ]
Issues a sigcont for the specified process :Parameter unique_id: the name of the process
[ "Issues", "a", "sigcont", "for", "the", "specified", "process" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/deployer.py#L200-L205
train
linkedin/Zopkio
zopkio/deployer.py
Deployer.kill
def kill(self, unique_id, configs=None): """ Issues a kill -9 to the specified process calls the deployers get_pid function for the process. If no pid_file/pid_keyword is specified a generic grep of ps aux command is executed on remote machine based on process parameters which may not be reliable if more process are running with similar name :Parameter unique_id: the name of the process """ self._send_signal(unique_id, signal.SIGKILL, configs)
python
def kill(self, unique_id, configs=None): """ Issues a kill -9 to the specified process calls the deployers get_pid function for the process. If no pid_file/pid_keyword is specified a generic grep of ps aux command is executed on remote machine based on process parameters which may not be reliable if more process are running with similar name :Parameter unique_id: the name of the process """ self._send_signal(unique_id, signal.SIGKILL, configs)
[ "def", "kill", "(", "self", ",", "unique_id", ",", "configs", "=", "None", ")", ":", "self", ".", "_send_signal", "(", "unique_id", ",", "signal", ".", "SIGKILL", ",", "configs", ")" ]
Issues a kill -9 to the specified process calls the deployers get_pid function for the process. If no pid_file/pid_keyword is specified a generic grep of ps aux command is executed on remote machine based on process parameters which may not be reliable if more process are running with similar name :Parameter unique_id: the name of the process
[ "Issues", "a", "kill", "-", "9", "to", "the", "specified", "process", "calls", "the", "deployers", "get_pid", "function", "for", "the", "process", ".", "If", "no", "pid_file", "/", "pid_keyword", "is", "specified", "a", "generic", "grep", "of", "ps", "aux", "command", "is", "executed", "on", "remote", "machine", "based", "on", "process", "parameters", "which", "may", "not", "be", "reliable", "if", "more", "process", "are", "running", "with", "similar", "name" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/deployer.py#L207-L215
train
linkedin/Zopkio
zopkio/deployer.py
Deployer.terminate
def terminate(self, unique_id, configs=None): """ Issues a kill -15 to the specified process :Parameter unique_id: the name of the process """ self._send_signal(unique_id, signal.SIGTERM, configs)
python
def terminate(self, unique_id, configs=None): """ Issues a kill -15 to the specified process :Parameter unique_id: the name of the process """ self._send_signal(unique_id, signal.SIGTERM, configs)
[ "def", "terminate", "(", "self", ",", "unique_id", ",", "configs", "=", "None", ")", ":", "self", ".", "_send_signal", "(", "unique_id", ",", "signal", ".", "SIGTERM", ",", "configs", ")" ]
Issues a kill -15 to the specified process :Parameter unique_id: the name of the process
[ "Issues", "a", "kill", "-", "15", "to", "the", "specified", "process" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/deployer.py#L217-L222
train
linkedin/Zopkio
zopkio/deployer.py
Deployer.hangup
def hangup(self, unique_id, configs=None): """ Issue a signal to hangup the specified process :Parameter unique_id: the name of the process """ self._send_signal(unique_id, signal.SIGHUP, configs)
python
def hangup(self, unique_id, configs=None): """ Issue a signal to hangup the specified process :Parameter unique_id: the name of the process """ self._send_signal(unique_id, signal.SIGHUP, configs)
[ "def", "hangup", "(", "self", ",", "unique_id", ",", "configs", "=", "None", ")", ":", "self", ".", "_send_signal", "(", "unique_id", ",", "signal", ".", "SIGHUP", ",", "configs", ")" ]
Issue a signal to hangup the specified process :Parameter unique_id: the name of the process
[ "Issue", "a", "signal", "to", "hangup", "the", "specified", "process" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/deployer.py#L224-L230
train
linkedin/Zopkio
zopkio/deployer.py
Deployer.get_logs
def get_logs(self, unique_id, logs, directory, pattern=constants.FILTER_NAME_ALLOW_NONE): """deprecated name for fetch_logs""" self.fetch_logs(unique_id, logs, directory, pattern)
python
def get_logs(self, unique_id, logs, directory, pattern=constants.FILTER_NAME_ALLOW_NONE): """deprecated name for fetch_logs""" self.fetch_logs(unique_id, logs, directory, pattern)
[ "def", "get_logs", "(", "self", ",", "unique_id", ",", "logs", ",", "directory", ",", "pattern", "=", "constants", ".", "FILTER_NAME_ALLOW_NONE", ")", ":", "self", ".", "fetch_logs", "(", "unique_id", ",", "logs", ",", "directory", ",", "pattern", ")" ]
deprecated name for fetch_logs
[ "deprecated", "name", "for", "fetch_logs" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/deployer.py#L232-L234
train
linkedin/Zopkio
zopkio/deployer.py
Deployer.fetch_logs
def fetch_logs(self, unique_id, logs, directory, pattern=constants.FILTER_NAME_ALLOW_NONE): """ Copies logs from the remote host that the process is running on to the provided directory :Parameter unique_id the unique_id of the process in question :Parameter logs a list of logs given by absolute path from the remote host :Parameter directory the local directory to store the copied logs :Parameter pattern a pattern to apply to files to restrict the set of logs copied """ hostname = self.processes[unique_id].hostname install_path = self.processes[unique_id].install_path self.fetch_logs_from_host(hostname, install_path, unique_id, logs, directory, pattern)
python
def fetch_logs(self, unique_id, logs, directory, pattern=constants.FILTER_NAME_ALLOW_NONE): """ Copies logs from the remote host that the process is running on to the provided directory :Parameter unique_id the unique_id of the process in question :Parameter logs a list of logs given by absolute path from the remote host :Parameter directory the local directory to store the copied logs :Parameter pattern a pattern to apply to files to restrict the set of logs copied """ hostname = self.processes[unique_id].hostname install_path = self.processes[unique_id].install_path self.fetch_logs_from_host(hostname, install_path, unique_id, logs, directory, pattern)
[ "def", "fetch_logs", "(", "self", ",", "unique_id", ",", "logs", ",", "directory", ",", "pattern", "=", "constants", ".", "FILTER_NAME_ALLOW_NONE", ")", ":", "hostname", "=", "self", ".", "processes", "[", "unique_id", "]", ".", "hostname", "install_path", "=", "self", ".", "processes", "[", "unique_id", "]", ".", "install_path", "self", ".", "fetch_logs_from_host", "(", "hostname", ",", "install_path", ",", "unique_id", ",", "logs", ",", "directory", ",", "pattern", ")" ]
Copies logs from the remote host that the process is running on to the provided directory :Parameter unique_id the unique_id of the process in question :Parameter logs a list of logs given by absolute path from the remote host :Parameter directory the local directory to store the copied logs :Parameter pattern a pattern to apply to files to restrict the set of logs copied
[ "Copies", "logs", "from", "the", "remote", "host", "that", "the", "process", "is", "running", "on", "to", "the", "provided", "directory" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/deployer.py#L236-L246
train
linkedin/Zopkio
zopkio/deployer.py
Deployer.fetch_logs_from_host
def fetch_logs_from_host(hostname, install_path, prefix, logs, directory, pattern): """ Static method Copies logs from specified host on the specified install path :Parameter hostname the remote host from where we need to fetch the logs :Parameter install_path path where the app is installed :Parameter prefix prefix used to copy logs. Generall the unique_id of process :Parameter logs a list of logs given by absolute path from the remote host :Parameter directory the local directory to store the copied logs :Parameter pattern a pattern to apply to files to restrict the set of logs copied """ if hostname is not None: with get_sftp_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ftp: for f in logs: try: mode = ftp.stat(f).st_mode except IOError, e: if e.errno == errno.ENOENT: logger.error("Log file " + f + " does not exist on " + hostname) pass else: copy_dir(ftp, f, directory, prefix) if install_path is not None: copy_dir(ftp, install_path, directory, prefix, pattern)
python
def fetch_logs_from_host(hostname, install_path, prefix, logs, directory, pattern): """ Static method Copies logs from specified host on the specified install path :Parameter hostname the remote host from where we need to fetch the logs :Parameter install_path path where the app is installed :Parameter prefix prefix used to copy logs. Generall the unique_id of process :Parameter logs a list of logs given by absolute path from the remote host :Parameter directory the local directory to store the copied logs :Parameter pattern a pattern to apply to files to restrict the set of logs copied """ if hostname is not None: with get_sftp_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ftp: for f in logs: try: mode = ftp.stat(f).st_mode except IOError, e: if e.errno == errno.ENOENT: logger.error("Log file " + f + " does not exist on " + hostname) pass else: copy_dir(ftp, f, directory, prefix) if install_path is not None: copy_dir(ftp, install_path, directory, prefix, pattern)
[ "def", "fetch_logs_from_host", "(", "hostname", ",", "install_path", ",", "prefix", ",", "logs", ",", "directory", ",", "pattern", ")", ":", "if", "hostname", "is", "not", "None", ":", "with", "get_sftp_client", "(", "hostname", ",", "username", "=", "runtime", ".", "get_username", "(", ")", ",", "password", "=", "runtime", ".", "get_password", "(", ")", ")", "as", "ftp", ":", "for", "f", "in", "logs", ":", "try", ":", "mode", "=", "ftp", ".", "stat", "(", "f", ")", ".", "st_mode", "except", "IOError", ",", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "ENOENT", ":", "logger", ".", "error", "(", "\"Log file \"", "+", "f", "+", "\" does not exist on \"", "+", "hostname", ")", "pass", "else", ":", "copy_dir", "(", "ftp", ",", "f", ",", "directory", ",", "prefix", ")", "if", "install_path", "is", "not", "None", ":", "copy_dir", "(", "ftp", ",", "install_path", ",", "directory", ",", "prefix", ",", "pattern", ")" ]
Static method Copies logs from specified host on the specified install path :Parameter hostname the remote host from where we need to fetch the logs :Parameter install_path path where the app is installed :Parameter prefix prefix used to copy logs. Generall the unique_id of process :Parameter logs a list of logs given by absolute path from the remote host :Parameter directory the local directory to store the copied logs :Parameter pattern a pattern to apply to files to restrict the set of logs copied
[ "Static", "method", "Copies", "logs", "from", "specified", "host", "on", "the", "specified", "install", "path" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/deployer.py#L249-L271
train
linkedin/Zopkio
zopkio/junit_reporter.py
Reporter.generate
def generate(self): """ Generates the report """ self._setup() for config_name in self.report_info.config_to_test_names_map.keys(): config_dir = os.path.join(self.report_info.resource_dir, config_name) utils.makedirs(config_dir) testsuite = self._generate_junit_xml(config_name) with open(os.path.join(self.report_info.junit_xml_path, 'zopkio_junit_reports.xml'), 'w') as file: TestSuite.to_file(file, [testsuite], prettyprint=False)
python
def generate(self): """ Generates the report """ self._setup() for config_name in self.report_info.config_to_test_names_map.keys(): config_dir = os.path.join(self.report_info.resource_dir, config_name) utils.makedirs(config_dir) testsuite = self._generate_junit_xml(config_name) with open(os.path.join(self.report_info.junit_xml_path, 'zopkio_junit_reports.xml'), 'w') as file: TestSuite.to_file(file, [testsuite], prettyprint=False)
[ "def", "generate", "(", "self", ")", ":", "self", ".", "_setup", "(", ")", "for", "config_name", "in", "self", ".", "report_info", ".", "config_to_test_names_map", ".", "keys", "(", ")", ":", "config_dir", "=", "os", ".", "path", ".", "join", "(", "self", ".", "report_info", ".", "resource_dir", ",", "config_name", ")", "utils", ".", "makedirs", "(", "config_dir", ")", "testsuite", "=", "self", ".", "_generate_junit_xml", "(", "config_name", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "report_info", ".", "junit_xml_path", ",", "'zopkio_junit_reports.xml'", ")", ",", "'w'", ")", "as", "file", ":", "TestSuite", ".", "to_file", "(", "file", ",", "[", "testsuite", "]", ",", "prettyprint", "=", "False", ")" ]
Generates the report
[ "Generates", "the", "report" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/junit_reporter.py#L81-L91
train
linkedin/Zopkio
zopkio/adhoc_deployer.py
SSHDeployer.install
def install(self, unique_id, configs=None): """ Copies the executable to the remote machine under install path. Inspects the configs for the possible keys 'hostname': the host to install on 'install_path': the location on the remote host 'executable': the executable to copy 'no_copy': if this config is passed in and true then this method will not copy the executable assuming that it is already installed 'post_install_cmds': an optional list of commands that should be executed on the remote machine after the executable has been installed. If no_copy is set to true, then the post install commands will not be run. If the unique_id is already installed on a different host, this will perform the cleanup action first. If either 'install_path' or 'executable' are provided the new value will become the default. :param unique_id: :param configs: :return: """ # the following is necessay to set the configs for this function as the combination of the # default configurations and the parameter with the parameter superceding the defaults but # not modifying the defaults if configs is None: configs = {} tmp = self.default_configs.copy() tmp.update(configs) configs = tmp hostname = None is_tarfile = False is_zipfile = False if unique_id in self.processes and 'hostname' in configs: self.uninstall(unique_id, configs) hostname = configs['hostname'] elif 'hostname' in configs: hostname = configs['hostname'] elif unique_id not in self.processes: # we have not installed this unique_id before and no hostname is provided in the configs so raise an error raise DeploymentError("hostname was not provided for unique_id: " + unique_id) env = configs.get("env", {}) install_path = configs.get('install_path') or self.default_configs.get('install_path') pid_file = configs.get('pid_file') or self.default_configs.get('pid_file') if install_path is None: logger.error("install_path was not provided for unique_id: " + unique_id) raise DeploymentError("install_path was not provided for unique_id: " + 
unique_id) if not configs.get('no_copy', False): with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh: log_output(better_exec_command(ssh, "mkdir -p {0}".format(install_path), "Failed to create path {0}".format(install_path))) log_output(better_exec_command(ssh, "chmod 755 {0}".format(install_path), "Failed to make path {0} writeable".format(install_path))) executable = configs.get('executable') or self.default_configs.get('executable') if executable is None: logger.error("executable was not provided for unique_id: " + unique_id) raise DeploymentError("executable was not provided for unique_id: " + unique_id) #if the executable is in remote location copy to local machine copy_from_remote_location = False; if (":" in executable): copy_from_remote_location = True if ("http" not in executable): remote_location_server = executable.split(":")[0] remote_file_path = executable.split(":")[1] remote_file_name = os.path.basename(remote_file_path) local_temp_file_name = os.path.join(configs.get("tmp_dir","/tmp"),remote_file_name) if not os.path.exists(local_temp_file_name): with get_sftp_client(remote_location_server,username=runtime.get_username(), password=runtime.get_password()) as ftp: try: ftp.get(remote_file_path, local_temp_file_name) executable = local_temp_file_name except: raise DeploymentError("Unable to load file from remote server " + executable) #use urllib for http copy else: remote_file_name = executable.split("/")[-1] local_temp_file_name = os.path.join(configs.get("tmp_dir","/tmp"),remote_file_name) if not os.path.exists(local_temp_file_name): try: urllib.urlretrieve (executable, local_temp_file_name) except: raise DeploymentError("Unable to load file from remote server " + executable) executable = local_temp_file_name try: exec_name = os.path.basename(executable) install_location = os.path.join(install_path, exec_name) with get_sftp_client(hostname, username=runtime.get_username(), 
password=runtime.get_password()) as ftp: ftp.put(executable, install_location) except: raise DeploymentError("Unable to copy executable to install_location:" + install_location) finally: #Track if its a tarfile or zipfile before deleting it in case the copy to remote location fails is_tarfile = tarfile.is_tarfile(executable) is_zipfile = zipfile.is_zipfile(executable) if (copy_from_remote_location and not configs.get('cache',False)): os.remove(executable) # only supports tar and zip (because those modules are provided by Python's standard library) if configs.get('extract', False) or self.default_configs.get('extract', False): if is_tarfile: log_output(better_exec_command(ssh, "tar -xf {0} -C {1}".format(install_location, install_path), "Failed to extract tarfile {0}".format(exec_name))) elif is_zipfile: log_output(better_exec_command(ssh, "unzip -o {0} -d {1}".format(install_location, install_path), "Failed to extract zipfile {0}".format(exec_name))) else: logger.error(executable + " is not a supported filetype for extracting") raise DeploymentError(executable + " is not a supported filetype for extracting") post_install_cmds = configs.get('post_install_cmds', False) or self.default_configs.get('post_install_cmds', []) for cmd in post_install_cmds: relative_cmd = "cd {0}; {1}".format(install_path, cmd) log_output(exec_with_env(ssh, relative_cmd, msg="Failed to execute post install command: {0}".format(relative_cmd), env=env)) self.processes[unique_id] = Process(unique_id, self.service_name, hostname, install_path) self.processes[unique_id].pid_file = pid_file
python
def install(self, unique_id, configs=None): """ Copies the executable to the remote machine under install path. Inspects the configs for the possible keys 'hostname': the host to install on 'install_path': the location on the remote host 'executable': the executable to copy 'no_copy': if this config is passed in and true then this method will not copy the executable assuming that it is already installed 'post_install_cmds': an optional list of commands that should be executed on the remote machine after the executable has been installed. If no_copy is set to true, then the post install commands will not be run. If the unique_id is already installed on a different host, this will perform the cleanup action first. If either 'install_path' or 'executable' are provided the new value will become the default. :param unique_id: :param configs: :return: """ # the following is necessay to set the configs for this function as the combination of the # default configurations and the parameter with the parameter superceding the defaults but # not modifying the defaults if configs is None: configs = {} tmp = self.default_configs.copy() tmp.update(configs) configs = tmp hostname = None is_tarfile = False is_zipfile = False if unique_id in self.processes and 'hostname' in configs: self.uninstall(unique_id, configs) hostname = configs['hostname'] elif 'hostname' in configs: hostname = configs['hostname'] elif unique_id not in self.processes: # we have not installed this unique_id before and no hostname is provided in the configs so raise an error raise DeploymentError("hostname was not provided for unique_id: " + unique_id) env = configs.get("env", {}) install_path = configs.get('install_path') or self.default_configs.get('install_path') pid_file = configs.get('pid_file') or self.default_configs.get('pid_file') if install_path is None: logger.error("install_path was not provided for unique_id: " + unique_id) raise DeploymentError("install_path was not provided for unique_id: " + 
unique_id) if not configs.get('no_copy', False): with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh: log_output(better_exec_command(ssh, "mkdir -p {0}".format(install_path), "Failed to create path {0}".format(install_path))) log_output(better_exec_command(ssh, "chmod 755 {0}".format(install_path), "Failed to make path {0} writeable".format(install_path))) executable = configs.get('executable') or self.default_configs.get('executable') if executable is None: logger.error("executable was not provided for unique_id: " + unique_id) raise DeploymentError("executable was not provided for unique_id: " + unique_id) #if the executable is in remote location copy to local machine copy_from_remote_location = False; if (":" in executable): copy_from_remote_location = True if ("http" not in executable): remote_location_server = executable.split(":")[0] remote_file_path = executable.split(":")[1] remote_file_name = os.path.basename(remote_file_path) local_temp_file_name = os.path.join(configs.get("tmp_dir","/tmp"),remote_file_name) if not os.path.exists(local_temp_file_name): with get_sftp_client(remote_location_server,username=runtime.get_username(), password=runtime.get_password()) as ftp: try: ftp.get(remote_file_path, local_temp_file_name) executable = local_temp_file_name except: raise DeploymentError("Unable to load file from remote server " + executable) #use urllib for http copy else: remote_file_name = executable.split("/")[-1] local_temp_file_name = os.path.join(configs.get("tmp_dir","/tmp"),remote_file_name) if not os.path.exists(local_temp_file_name): try: urllib.urlretrieve (executable, local_temp_file_name) except: raise DeploymentError("Unable to load file from remote server " + executable) executable = local_temp_file_name try: exec_name = os.path.basename(executable) install_location = os.path.join(install_path, exec_name) with get_sftp_client(hostname, username=runtime.get_username(), 
password=runtime.get_password()) as ftp: ftp.put(executable, install_location) except: raise DeploymentError("Unable to copy executable to install_location:" + install_location) finally: #Track if its a tarfile or zipfile before deleting it in case the copy to remote location fails is_tarfile = tarfile.is_tarfile(executable) is_zipfile = zipfile.is_zipfile(executable) if (copy_from_remote_location and not configs.get('cache',False)): os.remove(executable) # only supports tar and zip (because those modules are provided by Python's standard library) if configs.get('extract', False) or self.default_configs.get('extract', False): if is_tarfile: log_output(better_exec_command(ssh, "tar -xf {0} -C {1}".format(install_location, install_path), "Failed to extract tarfile {0}".format(exec_name))) elif is_zipfile: log_output(better_exec_command(ssh, "unzip -o {0} -d {1}".format(install_location, install_path), "Failed to extract zipfile {0}".format(exec_name))) else: logger.error(executable + " is not a supported filetype for extracting") raise DeploymentError(executable + " is not a supported filetype for extracting") post_install_cmds = configs.get('post_install_cmds', False) or self.default_configs.get('post_install_cmds', []) for cmd in post_install_cmds: relative_cmd = "cd {0}; {1}".format(install_path, cmd) log_output(exec_with_env(ssh, relative_cmd, msg="Failed to execute post install command: {0}".format(relative_cmd), env=env)) self.processes[unique_id] = Process(unique_id, self.service_name, hostname, install_path) self.processes[unique_id].pid_file = pid_file
[ "def", "install", "(", "self", ",", "unique_id", ",", "configs", "=", "None", ")", ":", "# the following is necessay to set the configs for this function as the combination of the", "# default configurations and the parameter with the parameter superceding the defaults but", "# not modifying the defaults", "if", "configs", "is", "None", ":", "configs", "=", "{", "}", "tmp", "=", "self", ".", "default_configs", ".", "copy", "(", ")", "tmp", ".", "update", "(", "configs", ")", "configs", "=", "tmp", "hostname", "=", "None", "is_tarfile", "=", "False", "is_zipfile", "=", "False", "if", "unique_id", "in", "self", ".", "processes", "and", "'hostname'", "in", "configs", ":", "self", ".", "uninstall", "(", "unique_id", ",", "configs", ")", "hostname", "=", "configs", "[", "'hostname'", "]", "elif", "'hostname'", "in", "configs", ":", "hostname", "=", "configs", "[", "'hostname'", "]", "elif", "unique_id", "not", "in", "self", ".", "processes", ":", "# we have not installed this unique_id before and no hostname is provided in the configs so raise an error", "raise", "DeploymentError", "(", "\"hostname was not provided for unique_id: \"", "+", "unique_id", ")", "env", "=", "configs", ".", "get", "(", "\"env\"", ",", "{", "}", ")", "install_path", "=", "configs", ".", "get", "(", "'install_path'", ")", "or", "self", ".", "default_configs", ".", "get", "(", "'install_path'", ")", "pid_file", "=", "configs", ".", "get", "(", "'pid_file'", ")", "or", "self", ".", "default_configs", ".", "get", "(", "'pid_file'", ")", "if", "install_path", "is", "None", ":", "logger", ".", "error", "(", "\"install_path was not provided for unique_id: \"", "+", "unique_id", ")", "raise", "DeploymentError", "(", "\"install_path was not provided for unique_id: \"", "+", "unique_id", ")", "if", "not", "configs", ".", "get", "(", "'no_copy'", ",", "False", ")", ":", "with", "get_ssh_client", "(", "hostname", ",", "username", "=", "runtime", ".", "get_username", "(", ")", ",", "password", "=", 
"runtime", ".", "get_password", "(", ")", ")", "as", "ssh", ":", "log_output", "(", "better_exec_command", "(", "ssh", ",", "\"mkdir -p {0}\"", ".", "format", "(", "install_path", ")", ",", "\"Failed to create path {0}\"", ".", "format", "(", "install_path", ")", ")", ")", "log_output", "(", "better_exec_command", "(", "ssh", ",", "\"chmod 755 {0}\"", ".", "format", "(", "install_path", ")", ",", "\"Failed to make path {0} writeable\"", ".", "format", "(", "install_path", ")", ")", ")", "executable", "=", "configs", ".", "get", "(", "'executable'", ")", "or", "self", ".", "default_configs", ".", "get", "(", "'executable'", ")", "if", "executable", "is", "None", ":", "logger", ".", "error", "(", "\"executable was not provided for unique_id: \"", "+", "unique_id", ")", "raise", "DeploymentError", "(", "\"executable was not provided for unique_id: \"", "+", "unique_id", ")", "#if the executable is in remote location copy to local machine", "copy_from_remote_location", "=", "False", "if", "(", "\":\"", "in", "executable", ")", ":", "copy_from_remote_location", "=", "True", "if", "(", "\"http\"", "not", "in", "executable", ")", ":", "remote_location_server", "=", "executable", ".", "split", "(", "\":\"", ")", "[", "0", "]", "remote_file_path", "=", "executable", ".", "split", "(", "\":\"", ")", "[", "1", "]", "remote_file_name", "=", "os", ".", "path", ".", "basename", "(", "remote_file_path", ")", "local_temp_file_name", "=", "os", ".", "path", ".", "join", "(", "configs", ".", "get", "(", "\"tmp_dir\"", ",", "\"/tmp\"", ")", ",", "remote_file_name", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "local_temp_file_name", ")", ":", "with", "get_sftp_client", "(", "remote_location_server", ",", "username", "=", "runtime", ".", "get_username", "(", ")", ",", "password", "=", "runtime", ".", "get_password", "(", ")", ")", "as", "ftp", ":", "try", ":", "ftp", ".", "get", "(", "remote_file_path", ",", "local_temp_file_name", ")", "executable", "=", 
"local_temp_file_name", "except", ":", "raise", "DeploymentError", "(", "\"Unable to load file from remote server \"", "+", "executable", ")", "#use urllib for http copy", "else", ":", "remote_file_name", "=", "executable", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "local_temp_file_name", "=", "os", ".", "path", ".", "join", "(", "configs", ".", "get", "(", "\"tmp_dir\"", ",", "\"/tmp\"", ")", ",", "remote_file_name", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "local_temp_file_name", ")", ":", "try", ":", "urllib", ".", "urlretrieve", "(", "executable", ",", "local_temp_file_name", ")", "except", ":", "raise", "DeploymentError", "(", "\"Unable to load file from remote server \"", "+", "executable", ")", "executable", "=", "local_temp_file_name", "try", ":", "exec_name", "=", "os", ".", "path", ".", "basename", "(", "executable", ")", "install_location", "=", "os", ".", "path", ".", "join", "(", "install_path", ",", "exec_name", ")", "with", "get_sftp_client", "(", "hostname", ",", "username", "=", "runtime", ".", "get_username", "(", ")", ",", "password", "=", "runtime", ".", "get_password", "(", ")", ")", "as", "ftp", ":", "ftp", ".", "put", "(", "executable", ",", "install_location", ")", "except", ":", "raise", "DeploymentError", "(", "\"Unable to copy executable to install_location:\"", "+", "install_location", ")", "finally", ":", "#Track if its a tarfile or zipfile before deleting it in case the copy to remote location fails", "is_tarfile", "=", "tarfile", ".", "is_tarfile", "(", "executable", ")", "is_zipfile", "=", "zipfile", ".", "is_zipfile", "(", "executable", ")", "if", "(", "copy_from_remote_location", "and", "not", "configs", ".", "get", "(", "'cache'", ",", "False", ")", ")", ":", "os", ".", "remove", "(", "executable", ")", "# only supports tar and zip (because those modules are provided by Python's standard library)", "if", "configs", ".", "get", "(", "'extract'", ",", "False", ")", "or", "self", ".", "default_configs", 
".", "get", "(", "'extract'", ",", "False", ")", ":", "if", "is_tarfile", ":", "log_output", "(", "better_exec_command", "(", "ssh", ",", "\"tar -xf {0} -C {1}\"", ".", "format", "(", "install_location", ",", "install_path", ")", ",", "\"Failed to extract tarfile {0}\"", ".", "format", "(", "exec_name", ")", ")", ")", "elif", "is_zipfile", ":", "log_output", "(", "better_exec_command", "(", "ssh", ",", "\"unzip -o {0} -d {1}\"", ".", "format", "(", "install_location", ",", "install_path", ")", ",", "\"Failed to extract zipfile {0}\"", ".", "format", "(", "exec_name", ")", ")", ")", "else", ":", "logger", ".", "error", "(", "executable", "+", "\" is not a supported filetype for extracting\"", ")", "raise", "DeploymentError", "(", "executable", "+", "\" is not a supported filetype for extracting\"", ")", "post_install_cmds", "=", "configs", ".", "get", "(", "'post_install_cmds'", ",", "False", ")", "or", "self", ".", "default_configs", ".", "get", "(", "'post_install_cmds'", ",", "[", "]", ")", "for", "cmd", "in", "post_install_cmds", ":", "relative_cmd", "=", "\"cd {0}; {1}\"", ".", "format", "(", "install_path", ",", "cmd", ")", "log_output", "(", "exec_with_env", "(", "ssh", ",", "relative_cmd", ",", "msg", "=", "\"Failed to execute post install command: {0}\"", ".", "format", "(", "relative_cmd", ")", ",", "env", "=", "env", ")", ")", "self", ".", "processes", "[", "unique_id", "]", "=", "Process", "(", "unique_id", ",", "self", ".", "service_name", ",", "hostname", ",", "install_path", ")", "self", ".", "processes", "[", "unique_id", "]", ".", "pid_file", "=", "pid_file" ]
Copies the executable to the remote machine under install path. Inspects the configs for the possible keys 'hostname': the host to install on 'install_path': the location on the remote host 'executable': the executable to copy 'no_copy': if this config is passed in and true then this method will not copy the executable assuming that it is already installed 'post_install_cmds': an optional list of commands that should be executed on the remote machine after the executable has been installed. If no_copy is set to true, then the post install commands will not be run. If the unique_id is already installed on a different host, this will perform the cleanup action first. If either 'install_path' or 'executable' are provided the new value will become the default. :param unique_id: :param configs: :return:
[ "Copies", "the", "executable", "to", "the", "remote", "machine", "under", "install", "path", ".", "Inspects", "the", "configs", "for", "the", "possible", "keys", "hostname", ":", "the", "host", "to", "install", "on", "install_path", ":", "the", "location", "on", "the", "remote", "host", "executable", ":", "the", "executable", "to", "copy", "no_copy", ":", "if", "this", "config", "is", "passed", "in", "and", "true", "then", "this", "method", "will", "not", "copy", "the", "executable", "assuming", "that", "it", "is", "already", "installed", "post_install_cmds", ":", "an", "optional", "list", "of", "commands", "that", "should", "be", "executed", "on", "the", "remote", "machine", "after", "the", "executable", "has", "been", "installed", ".", "If", "no_copy", "is", "set", "to", "true", "then", "the", "post", "install", "commands", "will", "not", "be", "run", "." ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/adhoc_deployer.py#L78-L197
train
linkedin/Zopkio
zopkio/adhoc_deployer.py
SSHDeployer.start
def start(self, unique_id, configs=None): """ Start the service. If `unique_id` has already been installed the deployer will start the service on that host. Otherwise this will call install with the configs. Within the context of this function, only four configs are considered 'start_command': the command to run (if provided will replace the default) 'args': a list of args that can be passed to the command 'sync': if the command is synchronous or asynchronous defaults to asynchronous 'delay': a delay in seconds that might be needed regardless of whether the command returns before the service can be started :param unique_id: :param configs: :return: if the command is executed synchronously return the underlying paramiko channel which can be used to get the stdout otherwise return the triple stdin, stdout, stderr """ # the following is necessay to set the configs for this function as the combination of the # default configurations and the parameter with the parameter superceding the defaults but # not modifying the defaults if configs is None: configs = {} tmp = self.default_configs.copy() tmp.update(configs) configs = tmp logger.debug("starting " + unique_id) # do not start if already started if self.get_pid(unique_id, configs) is not constants.PROCESS_NOT_RUNNING_PID: return None if unique_id not in self.processes: self.install(unique_id, configs) hostname = self.processes[unique_id].hostname install_path = self.processes[unique_id].install_path # order of precedence for start_command and args from highest to lowest: # 1. configs # 2. from Process # 3. 
from Deployer start_command = configs.get('start_command') or self.processes[unique_id].start_command or self.default_configs.get('start_command') pid_file = configs.get('pid_file') or self.default_configs.get('pid_file') if start_command is None: logger.error("start_command was not provided for unique_id: " + unique_id) raise DeploymentError("start_command was not provided for unique_id: " + unique_id) args = configs.get('args') or self.processes[unique_id].args or self.default_configs.get('args') if args is not None: full_start_command = "{0} {1}".format(start_command, ' '.join(args)) else: full_start_command = start_command command = "cd {0}; {1}".format(install_path, full_start_command) env = configs.get("env", {}) with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh: exec_with_env(ssh, command, msg="Failed to start", env=env, sync=configs.get('sync', False)) self.processes[unique_id].start_command = start_command self.processes[unique_id].args = args # For cases where user pases it with start command if self.processes[unique_id].pid_file is None: self.processes[unique_id].pid_file = pid_file if 'delay' in configs: time.sleep(configs['delay'])
python
def start(self, unique_id, configs=None): """ Start the service. If `unique_id` has already been installed the deployer will start the service on that host. Otherwise this will call install with the configs. Within the context of this function, only four configs are considered 'start_command': the command to run (if provided will replace the default) 'args': a list of args that can be passed to the command 'sync': if the command is synchronous or asynchronous defaults to asynchronous 'delay': a delay in seconds that might be needed regardless of whether the command returns before the service can be started :param unique_id: :param configs: :return: if the command is executed synchronously return the underlying paramiko channel which can be used to get the stdout otherwise return the triple stdin, stdout, stderr """ # the following is necessay to set the configs for this function as the combination of the # default configurations and the parameter with the parameter superceding the defaults but # not modifying the defaults if configs is None: configs = {} tmp = self.default_configs.copy() tmp.update(configs) configs = tmp logger.debug("starting " + unique_id) # do not start if already started if self.get_pid(unique_id, configs) is not constants.PROCESS_NOT_RUNNING_PID: return None if unique_id not in self.processes: self.install(unique_id, configs) hostname = self.processes[unique_id].hostname install_path = self.processes[unique_id].install_path # order of precedence for start_command and args from highest to lowest: # 1. configs # 2. from Process # 3. 
from Deployer start_command = configs.get('start_command') or self.processes[unique_id].start_command or self.default_configs.get('start_command') pid_file = configs.get('pid_file') or self.default_configs.get('pid_file') if start_command is None: logger.error("start_command was not provided for unique_id: " + unique_id) raise DeploymentError("start_command was not provided for unique_id: " + unique_id) args = configs.get('args') or self.processes[unique_id].args or self.default_configs.get('args') if args is not None: full_start_command = "{0} {1}".format(start_command, ' '.join(args)) else: full_start_command = start_command command = "cd {0}; {1}".format(install_path, full_start_command) env = configs.get("env", {}) with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh: exec_with_env(ssh, command, msg="Failed to start", env=env, sync=configs.get('sync', False)) self.processes[unique_id].start_command = start_command self.processes[unique_id].args = args # For cases where user pases it with start command if self.processes[unique_id].pid_file is None: self.processes[unique_id].pid_file = pid_file if 'delay' in configs: time.sleep(configs['delay'])
[ "def", "start", "(", "self", ",", "unique_id", ",", "configs", "=", "None", ")", ":", "# the following is necessay to set the configs for this function as the combination of the", "# default configurations and the parameter with the parameter superceding the defaults but", "# not modifying the defaults", "if", "configs", "is", "None", ":", "configs", "=", "{", "}", "tmp", "=", "self", ".", "default_configs", ".", "copy", "(", ")", "tmp", ".", "update", "(", "configs", ")", "configs", "=", "tmp", "logger", ".", "debug", "(", "\"starting \"", "+", "unique_id", ")", "# do not start if already started", "if", "self", ".", "get_pid", "(", "unique_id", ",", "configs", ")", "is", "not", "constants", ".", "PROCESS_NOT_RUNNING_PID", ":", "return", "None", "if", "unique_id", "not", "in", "self", ".", "processes", ":", "self", ".", "install", "(", "unique_id", ",", "configs", ")", "hostname", "=", "self", ".", "processes", "[", "unique_id", "]", ".", "hostname", "install_path", "=", "self", ".", "processes", "[", "unique_id", "]", ".", "install_path", "# order of precedence for start_command and args from highest to lowest:", "# 1. configs", "# 2. from Process", "# 3. 
from Deployer", "start_command", "=", "configs", ".", "get", "(", "'start_command'", ")", "or", "self", ".", "processes", "[", "unique_id", "]", ".", "start_command", "or", "self", ".", "default_configs", ".", "get", "(", "'start_command'", ")", "pid_file", "=", "configs", ".", "get", "(", "'pid_file'", ")", "or", "self", ".", "default_configs", ".", "get", "(", "'pid_file'", ")", "if", "start_command", "is", "None", ":", "logger", ".", "error", "(", "\"start_command was not provided for unique_id: \"", "+", "unique_id", ")", "raise", "DeploymentError", "(", "\"start_command was not provided for unique_id: \"", "+", "unique_id", ")", "args", "=", "configs", ".", "get", "(", "'args'", ")", "or", "self", ".", "processes", "[", "unique_id", "]", ".", "args", "or", "self", ".", "default_configs", ".", "get", "(", "'args'", ")", "if", "args", "is", "not", "None", ":", "full_start_command", "=", "\"{0} {1}\"", ".", "format", "(", "start_command", ",", "' '", ".", "join", "(", "args", ")", ")", "else", ":", "full_start_command", "=", "start_command", "command", "=", "\"cd {0}; {1}\"", ".", "format", "(", "install_path", ",", "full_start_command", ")", "env", "=", "configs", ".", "get", "(", "\"env\"", ",", "{", "}", ")", "with", "get_ssh_client", "(", "hostname", ",", "username", "=", "runtime", ".", "get_username", "(", ")", ",", "password", "=", "runtime", ".", "get_password", "(", ")", ")", "as", "ssh", ":", "exec_with_env", "(", "ssh", ",", "command", ",", "msg", "=", "\"Failed to start\"", ",", "env", "=", "env", ",", "sync", "=", "configs", ".", "get", "(", "'sync'", ",", "False", ")", ")", "self", ".", "processes", "[", "unique_id", "]", ".", "start_command", "=", "start_command", "self", ".", "processes", "[", "unique_id", "]", ".", "args", "=", "args", "# For cases where user pases it with start command", "if", "self", ".", "processes", "[", "unique_id", "]", ".", "pid_file", "is", "None", ":", "self", ".", "processes", "[", "unique_id", "]", ".", "pid_file", 
"=", "pid_file", "if", "'delay'", "in", "configs", ":", "time", ".", "sleep", "(", "configs", "[", "'delay'", "]", ")" ]
Start the service. If `unique_id` has already been installed the deployer will start the service on that host. Otherwise this will call install with the configs. Within the context of this function, only four configs are considered 'start_command': the command to run (if provided will replace the default) 'args': a list of args that can be passed to the command 'sync': if the command is synchronous or asynchronous defaults to asynchronous 'delay': a delay in seconds that might be needed regardless of whether the command returns before the service can be started :param unique_id: :param configs: :return: if the command is executed synchronously return the underlying paramiko channel which can be used to get the stdout otherwise return the triple stdin, stdout, stderr
[ "Start", "the", "service", ".", "If", "unique_id", "has", "already", "been", "installed", "the", "deployer", "will", "start", "the", "service", "on", "that", "host", ".", "Otherwise", "this", "will", "call", "install", "with", "the", "configs", ".", "Within", "the", "context", "of", "this", "function", "only", "four", "configs", "are", "considered", "start_command", ":", "the", "command", "to", "run", "(", "if", "provided", "will", "replace", "the", "default", ")", "args", ":", "a", "list", "of", "args", "that", "can", "be", "passed", "to", "the", "command", "sync", ":", "if", "the", "command", "is", "synchronous", "or", "asynchronous", "defaults", "to", "asynchronous", "delay", ":", "a", "delay", "in", "seconds", "that", "might", "be", "needed", "regardless", "of", "whether", "the", "command", "returns", "before", "the", "service", "can", "be", "started" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/adhoc_deployer.py#L199-L262
train
linkedin/Zopkio
zopkio/adhoc_deployer.py
SSHDeployer.stop
def stop(self, unique_id, configs=None): """Stop the service. If the deployer has not started a service with`unique_id` the deployer will raise an Exception There are two configs that will be considered: 'terminate_only': if this config is passed in then this method is the same as terminate(unique_id) (this is also the behavior if stop_command is None and not overridden) 'stop_command': overrides the default stop_command :param unique_id: :param configs: :return: """ # the following is necessay to set the configs for this function as the combination of the # default configurations and the parameter with the parameter superceding the defaults but # not modifying the defaults if configs is None: configs = {} tmp = self.default_configs.copy() tmp.update(configs) configs = tmp logger.debug("stopping " + unique_id) if unique_id in self.processes: hostname = self.processes[unique_id].hostname else: logger.error("Can't stop {0}: process not known".format(unique_id)) raise DeploymentError("Can't stop {0}: process not known".format(unique_id)) if configs.get('terminate_only', False): self.terminate(unique_id, configs) else: stop_command = configs.get('stop_command') or self.default_configs.get('stop_command') env = configs.get("env", {}) if stop_command is not None: install_path = self.processes[unique_id].install_path with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh: log_output(exec_with_env(ssh, "cd {0}; {1}".format(install_path, stop_command), msg="Failed to stop {0}".format(unique_id), env=env)) else: self.terminate(unique_id, configs) if 'delay' in configs: time.sleep(configs['delay'])
python
def stop(self, unique_id, configs=None): """Stop the service. If the deployer has not started a service with`unique_id` the deployer will raise an Exception There are two configs that will be considered: 'terminate_only': if this config is passed in then this method is the same as terminate(unique_id) (this is also the behavior if stop_command is None and not overridden) 'stop_command': overrides the default stop_command :param unique_id: :param configs: :return: """ # the following is necessay to set the configs for this function as the combination of the # default configurations and the parameter with the parameter superceding the defaults but # not modifying the defaults if configs is None: configs = {} tmp = self.default_configs.copy() tmp.update(configs) configs = tmp logger.debug("stopping " + unique_id) if unique_id in self.processes: hostname = self.processes[unique_id].hostname else: logger.error("Can't stop {0}: process not known".format(unique_id)) raise DeploymentError("Can't stop {0}: process not known".format(unique_id)) if configs.get('terminate_only', False): self.terminate(unique_id, configs) else: stop_command = configs.get('stop_command') or self.default_configs.get('stop_command') env = configs.get("env", {}) if stop_command is not None: install_path = self.processes[unique_id].install_path with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh: log_output(exec_with_env(ssh, "cd {0}; {1}".format(install_path, stop_command), msg="Failed to stop {0}".format(unique_id), env=env)) else: self.terminate(unique_id, configs) if 'delay' in configs: time.sleep(configs['delay'])
[ "def", "stop", "(", "self", ",", "unique_id", ",", "configs", "=", "None", ")", ":", "# the following is necessay to set the configs for this function as the combination of the", "# default configurations and the parameter with the parameter superceding the defaults but", "# not modifying the defaults", "if", "configs", "is", "None", ":", "configs", "=", "{", "}", "tmp", "=", "self", ".", "default_configs", ".", "copy", "(", ")", "tmp", ".", "update", "(", "configs", ")", "configs", "=", "tmp", "logger", ".", "debug", "(", "\"stopping \"", "+", "unique_id", ")", "if", "unique_id", "in", "self", ".", "processes", ":", "hostname", "=", "self", ".", "processes", "[", "unique_id", "]", ".", "hostname", "else", ":", "logger", ".", "error", "(", "\"Can't stop {0}: process not known\"", ".", "format", "(", "unique_id", ")", ")", "raise", "DeploymentError", "(", "\"Can't stop {0}: process not known\"", ".", "format", "(", "unique_id", ")", ")", "if", "configs", ".", "get", "(", "'terminate_only'", ",", "False", ")", ":", "self", ".", "terminate", "(", "unique_id", ",", "configs", ")", "else", ":", "stop_command", "=", "configs", ".", "get", "(", "'stop_command'", ")", "or", "self", ".", "default_configs", ".", "get", "(", "'stop_command'", ")", "env", "=", "configs", ".", "get", "(", "\"env\"", ",", "{", "}", ")", "if", "stop_command", "is", "not", "None", ":", "install_path", "=", "self", ".", "processes", "[", "unique_id", "]", ".", "install_path", "with", "get_ssh_client", "(", "hostname", ",", "username", "=", "runtime", ".", "get_username", "(", ")", ",", "password", "=", "runtime", ".", "get_password", "(", ")", ")", "as", "ssh", ":", "log_output", "(", "exec_with_env", "(", "ssh", ",", "\"cd {0}; {1}\"", ".", "format", "(", "install_path", ",", "stop_command", ")", ",", "msg", "=", "\"Failed to stop {0}\"", ".", "format", "(", "unique_id", ")", ",", "env", "=", "env", ")", ")", "else", ":", "self", ".", "terminate", "(", "unique_id", ",", "configs", ")", "if", 
"'delay'", "in", "configs", ":", "time", ".", "sleep", "(", "configs", "[", "'delay'", "]", ")" ]
Stop the service. If the deployer has not started a service with`unique_id` the deployer will raise an Exception There are two configs that will be considered: 'terminate_only': if this config is passed in then this method is the same as terminate(unique_id) (this is also the behavior if stop_command is None and not overridden) 'stop_command': overrides the default stop_command :param unique_id: :param configs: :return:
[ "Stop", "the", "service", ".", "If", "the", "deployer", "has", "not", "started", "a", "service", "with", "unique_id", "the", "deployer", "will", "raise", "an", "Exception", "There", "are", "two", "configs", "that", "will", "be", "considered", ":", "terminate_only", ":", "if", "this", "config", "is", "passed", "in", "then", "this", "method", "is", "the", "same", "as", "terminate", "(", "unique_id", ")", "(", "this", "is", "also", "the", "behavior", "if", "stop_command", "is", "None", "and", "not", "overridden", ")", "stop_command", ":", "overrides", "the", "default", "stop_command" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/adhoc_deployer.py#L264-L306
train
linkedin/Zopkio
zopkio/adhoc_deployer.py
SSHDeployer.uninstall
def uninstall(self, unique_id, configs=None): """uninstall the service. If the deployer has not started a service with `unique_id` this will raise a DeploymentError. This considers one config: 'additional_directories': a list of directories to remove in addition to those provided in the constructor plus the install path. This will update the directories to remove but does not override it :param unique_id: :param configs: :return: """ # the following is necessay to set the configs for this function as the combination of the # default configurations and the parameter with the parameter superceding the defaults but # not modifying the defaults if configs is None: configs = {} tmp = self.default_configs.copy() tmp.update(configs) configs = tmp if unique_id in self.processes: hostname = self.processes[unique_id].hostname else: logger.error("Can't uninstall {0}: process not known".format(unique_id)) raise DeploymentError("Can't uninstall {0}: process not known".format(unique_id)) install_path = self.processes[unique_id].install_path directories_to_remove = self.default_configs.get('directories_to_clean', []) directories_to_remove.extend(configs.get('additional_directories', [])) if install_path not in directories_to_remove: directories_to_remove.append(install_path) with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh: for directory_to_remove in directories_to_remove: log_output(better_exec_command(ssh, "rm -rf {0}".format(directory_to_remove), "Failed to remove {0}".format(directory_to_remove)))
python
def uninstall(self, unique_id, configs=None): """uninstall the service. If the deployer has not started a service with `unique_id` this will raise a DeploymentError. This considers one config: 'additional_directories': a list of directories to remove in addition to those provided in the constructor plus the install path. This will update the directories to remove but does not override it :param unique_id: :param configs: :return: """ # the following is necessay to set the configs for this function as the combination of the # default configurations and the parameter with the parameter superceding the defaults but # not modifying the defaults if configs is None: configs = {} tmp = self.default_configs.copy() tmp.update(configs) configs = tmp if unique_id in self.processes: hostname = self.processes[unique_id].hostname else: logger.error("Can't uninstall {0}: process not known".format(unique_id)) raise DeploymentError("Can't uninstall {0}: process not known".format(unique_id)) install_path = self.processes[unique_id].install_path directories_to_remove = self.default_configs.get('directories_to_clean', []) directories_to_remove.extend(configs.get('additional_directories', [])) if install_path not in directories_to_remove: directories_to_remove.append(install_path) with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh: for directory_to_remove in directories_to_remove: log_output(better_exec_command(ssh, "rm -rf {0}".format(directory_to_remove), "Failed to remove {0}".format(directory_to_remove)))
[ "def", "uninstall", "(", "self", ",", "unique_id", ",", "configs", "=", "None", ")", ":", "# the following is necessay to set the configs for this function as the combination of the", "# default configurations and the parameter with the parameter superceding the defaults but", "# not modifying the defaults", "if", "configs", "is", "None", ":", "configs", "=", "{", "}", "tmp", "=", "self", ".", "default_configs", ".", "copy", "(", ")", "tmp", ".", "update", "(", "configs", ")", "configs", "=", "tmp", "if", "unique_id", "in", "self", ".", "processes", ":", "hostname", "=", "self", ".", "processes", "[", "unique_id", "]", ".", "hostname", "else", ":", "logger", ".", "error", "(", "\"Can't uninstall {0}: process not known\"", ".", "format", "(", "unique_id", ")", ")", "raise", "DeploymentError", "(", "\"Can't uninstall {0}: process not known\"", ".", "format", "(", "unique_id", ")", ")", "install_path", "=", "self", ".", "processes", "[", "unique_id", "]", ".", "install_path", "directories_to_remove", "=", "self", ".", "default_configs", ".", "get", "(", "'directories_to_clean'", ",", "[", "]", ")", "directories_to_remove", ".", "extend", "(", "configs", ".", "get", "(", "'additional_directories'", ",", "[", "]", ")", ")", "if", "install_path", "not", "in", "directories_to_remove", ":", "directories_to_remove", ".", "append", "(", "install_path", ")", "with", "get_ssh_client", "(", "hostname", ",", "username", "=", "runtime", ".", "get_username", "(", ")", ",", "password", "=", "runtime", ".", "get_password", "(", ")", ")", "as", "ssh", ":", "for", "directory_to_remove", "in", "directories_to_remove", ":", "log_output", "(", "better_exec_command", "(", "ssh", ",", "\"rm -rf {0}\"", ".", "format", "(", "directory_to_remove", ")", ",", "\"Failed to remove {0}\"", ".", "format", "(", "directory_to_remove", ")", ")", ")" ]
uninstall the service. If the deployer has not started a service with `unique_id` this will raise a DeploymentError. This considers one config: 'additional_directories': a list of directories to remove in addition to those provided in the constructor plus the install path. This will update the directories to remove but does not override it :param unique_id: :param configs: :return:
[ "uninstall", "the", "service", ".", "If", "the", "deployer", "has", "not", "started", "a", "service", "with", "unique_id", "this", "will", "raise", "a", "DeploymentError", ".", "This", "considers", "one", "config", ":", "additional_directories", ":", "a", "list", "of", "directories", "to", "remove", "in", "addition", "to", "those", "provided", "in", "the", "constructor", "plus", "the", "install", "path", ".", "This", "will", "update", "the", "directories", "to", "remove", "but", "does", "not", "override", "it", ":", "param", "unique_id", ":", ":", "param", "configs", ":", ":", "return", ":" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/adhoc_deployer.py#L308-L340
train
linkedin/Zopkio
zopkio/adhoc_deployer.py
SSHDeployer.get_pid
def get_pid(self, unique_id, configs=None): """Gets the pid of the process with `unique_id`. If the deployer does not know of a process with `unique_id` then it should return a value of constants.PROCESS_NOT_RUNNING_PID """ RECV_BLOCK_SIZE = 16 # the following is necessay to set the configs for this function as the combination of the # default configurations and the parameter with the parameter superceding the defaults but # not modifying the defaults if configs is None: configs = {} tmp = self.default_configs.copy() tmp.update(configs) configs = tmp if unique_id in self.processes: hostname = self.processes[unique_id].hostname else: return constants.PROCESS_NOT_RUNNING_PID if self.processes[unique_id].start_command is None: return constants.PROCESS_NOT_RUNNING_PID if self.processes[unique_id].pid_file is not None: with open_remote_file(hostname, self.processes[unique_id].pid_file, username=runtime.get_username(), password=runtime.get_password()) as pid_file: full_output = pid_file.read() elif 'pid_file' in configs.keys(): with open_remote_file(hostname, configs['pid_file'], username=runtime.get_username(), password=runtime.get_password()) as pid_file: full_output = pid_file.read() else: pid_keyword = self.processes[unique_id].start_command if self.processes[unique_id].args is not None: pid_keyword = "{0} {1}".format(pid_keyword, ' '.join(self.processes[unique_id].args)) pid_keyword = configs.get('pid_keyword', pid_keyword) # TODO(jehrlich): come up with a simpler approach to this pid_command = "ps aux | grep '{0}' | grep -v grep | tr -s ' ' | cut -d ' ' -f 2 | grep -Eo '[0-9]+'".format(pid_keyword) pid_command = configs.get('pid_command', pid_command) non_failing_command = "{0}; if [ $? 
-le 1 ]; then true; else false; fi;".format(pid_command) env = configs.get("env", {}) with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh: chan = exec_with_env(ssh, non_failing_command, msg="Failed to get PID", env=env) output = chan.recv(RECV_BLOCK_SIZE) full_output = output while len(output) > 0: output = chan.recv(RECV_BLOCK_SIZE) full_output += output if len(full_output) > 0: pids = [int(pid_str) for pid_str in full_output.split('\n') if pid_str.isdigit()] if len(pids) > 0: return pids return constants.PROCESS_NOT_RUNNING_PID
python
def get_pid(self, unique_id, configs=None): """Gets the pid of the process with `unique_id`. If the deployer does not know of a process with `unique_id` then it should return a value of constants.PROCESS_NOT_RUNNING_PID """ RECV_BLOCK_SIZE = 16 # the following is necessay to set the configs for this function as the combination of the # default configurations and the parameter with the parameter superceding the defaults but # not modifying the defaults if configs is None: configs = {} tmp = self.default_configs.copy() tmp.update(configs) configs = tmp if unique_id in self.processes: hostname = self.processes[unique_id].hostname else: return constants.PROCESS_NOT_RUNNING_PID if self.processes[unique_id].start_command is None: return constants.PROCESS_NOT_RUNNING_PID if self.processes[unique_id].pid_file is not None: with open_remote_file(hostname, self.processes[unique_id].pid_file, username=runtime.get_username(), password=runtime.get_password()) as pid_file: full_output = pid_file.read() elif 'pid_file' in configs.keys(): with open_remote_file(hostname, configs['pid_file'], username=runtime.get_username(), password=runtime.get_password()) as pid_file: full_output = pid_file.read() else: pid_keyword = self.processes[unique_id].start_command if self.processes[unique_id].args is not None: pid_keyword = "{0} {1}".format(pid_keyword, ' '.join(self.processes[unique_id].args)) pid_keyword = configs.get('pid_keyword', pid_keyword) # TODO(jehrlich): come up with a simpler approach to this pid_command = "ps aux | grep '{0}' | grep -v grep | tr -s ' ' | cut -d ' ' -f 2 | grep -Eo '[0-9]+'".format(pid_keyword) pid_command = configs.get('pid_command', pid_command) non_failing_command = "{0}; if [ $? 
-le 1 ]; then true; else false; fi;".format(pid_command) env = configs.get("env", {}) with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh: chan = exec_with_env(ssh, non_failing_command, msg="Failed to get PID", env=env) output = chan.recv(RECV_BLOCK_SIZE) full_output = output while len(output) > 0: output = chan.recv(RECV_BLOCK_SIZE) full_output += output if len(full_output) > 0: pids = [int(pid_str) for pid_str in full_output.split('\n') if pid_str.isdigit()] if len(pids) > 0: return pids return constants.PROCESS_NOT_RUNNING_PID
[ "def", "get_pid", "(", "self", ",", "unique_id", ",", "configs", "=", "None", ")", ":", "RECV_BLOCK_SIZE", "=", "16", "# the following is necessay to set the configs for this function as the combination of the", "# default configurations and the parameter with the parameter superceding the defaults but", "# not modifying the defaults", "if", "configs", "is", "None", ":", "configs", "=", "{", "}", "tmp", "=", "self", ".", "default_configs", ".", "copy", "(", ")", "tmp", ".", "update", "(", "configs", ")", "configs", "=", "tmp", "if", "unique_id", "in", "self", ".", "processes", ":", "hostname", "=", "self", ".", "processes", "[", "unique_id", "]", ".", "hostname", "else", ":", "return", "constants", ".", "PROCESS_NOT_RUNNING_PID", "if", "self", ".", "processes", "[", "unique_id", "]", ".", "start_command", "is", "None", ":", "return", "constants", ".", "PROCESS_NOT_RUNNING_PID", "if", "self", ".", "processes", "[", "unique_id", "]", ".", "pid_file", "is", "not", "None", ":", "with", "open_remote_file", "(", "hostname", ",", "self", ".", "processes", "[", "unique_id", "]", ".", "pid_file", ",", "username", "=", "runtime", ".", "get_username", "(", ")", ",", "password", "=", "runtime", ".", "get_password", "(", ")", ")", "as", "pid_file", ":", "full_output", "=", "pid_file", ".", "read", "(", ")", "elif", "'pid_file'", "in", "configs", ".", "keys", "(", ")", ":", "with", "open_remote_file", "(", "hostname", ",", "configs", "[", "'pid_file'", "]", ",", "username", "=", "runtime", ".", "get_username", "(", ")", ",", "password", "=", "runtime", ".", "get_password", "(", ")", ")", "as", "pid_file", ":", "full_output", "=", "pid_file", ".", "read", "(", ")", "else", ":", "pid_keyword", "=", "self", ".", "processes", "[", "unique_id", "]", ".", "start_command", "if", "self", ".", "processes", "[", "unique_id", "]", ".", "args", "is", "not", "None", ":", "pid_keyword", "=", "\"{0} {1}\"", ".", "format", "(", "pid_keyword", ",", "' '", ".", "join", "(", "self", ".", 
"processes", "[", "unique_id", "]", ".", "args", ")", ")", "pid_keyword", "=", "configs", ".", "get", "(", "'pid_keyword'", ",", "pid_keyword", ")", "# TODO(jehrlich): come up with a simpler approach to this", "pid_command", "=", "\"ps aux | grep '{0}' | grep -v grep | tr -s ' ' | cut -d ' ' -f 2 | grep -Eo '[0-9]+'\"", ".", "format", "(", "pid_keyword", ")", "pid_command", "=", "configs", ".", "get", "(", "'pid_command'", ",", "pid_command", ")", "non_failing_command", "=", "\"{0}; if [ $? -le 1 ]; then true; else false; fi;\"", ".", "format", "(", "pid_command", ")", "env", "=", "configs", ".", "get", "(", "\"env\"", ",", "{", "}", ")", "with", "get_ssh_client", "(", "hostname", ",", "username", "=", "runtime", ".", "get_username", "(", ")", ",", "password", "=", "runtime", ".", "get_password", "(", ")", ")", "as", "ssh", ":", "chan", "=", "exec_with_env", "(", "ssh", ",", "non_failing_command", ",", "msg", "=", "\"Failed to get PID\"", ",", "env", "=", "env", ")", "output", "=", "chan", ".", "recv", "(", "RECV_BLOCK_SIZE", ")", "full_output", "=", "output", "while", "len", "(", "output", ")", ">", "0", ":", "output", "=", "chan", ".", "recv", "(", "RECV_BLOCK_SIZE", ")", "full_output", "+=", "output", "if", "len", "(", "full_output", ")", ">", "0", ":", "pids", "=", "[", "int", "(", "pid_str", ")", "for", "pid_str", "in", "full_output", ".", "split", "(", "'\\n'", ")", "if", "pid_str", ".", "isdigit", "(", ")", "]", "if", "len", "(", "pids", ")", ">", "0", ":", "return", "pids", "return", "constants", ".", "PROCESS_NOT_RUNNING_PID" ]
Gets the pid of the process with `unique_id`. If the deployer does not know of a process with `unique_id` then it should return a value of constants.PROCESS_NOT_RUNNING_PID
[ "Gets", "the", "pid", "of", "the", "process", "with", "unique_id", ".", "If", "the", "deployer", "does", "not", "know", "of", "a", "process", "with", "unique_id", "then", "it", "should", "return", "a", "value", "of", "constants", ".", "PROCESS_NOT_RUNNING_PID" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/adhoc_deployer.py#L342-L394
train
linkedin/Zopkio
zopkio/adhoc_deployer.py
SSHDeployer.get_host
def get_host(self, unique_id): """Gets the host of the process with `unique_id`. If the deployer does not know of a process with `unique_id` then it should return a value of SOME_SENTINAL_VALUE :Parameter unique_id: the name of the process :raises NameError if the name is not valid process """ if unique_id in self.processes: return self.processes[unique_id].hostname logger.error("{0} not a known process".format(unique_id)) raise NameError("{0} not a known process".format(unique_id))
python
def get_host(self, unique_id): """Gets the host of the process with `unique_id`. If the deployer does not know of a process with `unique_id` then it should return a value of SOME_SENTINAL_VALUE :Parameter unique_id: the name of the process :raises NameError if the name is not valid process """ if unique_id in self.processes: return self.processes[unique_id].hostname logger.error("{0} not a known process".format(unique_id)) raise NameError("{0} not a known process".format(unique_id))
[ "def", "get_host", "(", "self", ",", "unique_id", ")", ":", "if", "unique_id", "in", "self", ".", "processes", ":", "return", "self", ".", "processes", "[", "unique_id", "]", ".", "hostname", "logger", ".", "error", "(", "\"{0} not a known process\"", ".", "format", "(", "unique_id", ")", ")", "raise", "NameError", "(", "\"{0} not a known process\"", ".", "format", "(", "unique_id", ")", ")" ]
Gets the host of the process with `unique_id`. If the deployer does not know of a process with `unique_id` then it should return a value of SOME_SENTINAL_VALUE :Parameter unique_id: the name of the process :raises NameError if the name is not valid process
[ "Gets", "the", "host", "of", "the", "process", "with", "unique_id", ".", "If", "the", "deployer", "does", "not", "know", "of", "a", "process", "with", "unique_id", "then", "it", "should", "return", "a", "value", "of", "SOME_SENTINAL_VALUE" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/adhoc_deployer.py#L396-L406
train
linkedin/Zopkio
zopkio/adhoc_deployer.py
SSHDeployer.kill_all_process
def kill_all_process(self): """ Terminates all the running processes. By default it is set to false. Users can set to true in config once the method to get_pid is done deterministically either using pid_file or an accurate keyword """ if (runtime.get_active_config("cleanup_pending_process",False)): for process in self.get_processes(): self.terminate(process.unique_id)
python
def kill_all_process(self): """ Terminates all the running processes. By default it is set to false. Users can set to true in config once the method to get_pid is done deterministically either using pid_file or an accurate keyword """ if (runtime.get_active_config("cleanup_pending_process",False)): for process in self.get_processes(): self.terminate(process.unique_id)
[ "def", "kill_all_process", "(", "self", ")", ":", "if", "(", "runtime", ".", "get_active_config", "(", "\"cleanup_pending_process\"", ",", "False", ")", ")", ":", "for", "process", "in", "self", ".", "get_processes", "(", ")", ":", "self", ".", "terminate", "(", "process", ".", "unique_id", ")" ]
Terminates all the running processes. By default it is set to false. Users can set to true in config once the method to get_pid is done deterministically either using pid_file or an accurate keyword
[ "Terminates", "all", "the", "running", "processes", ".", "By", "default", "it", "is", "set", "to", "false", ".", "Users", "can", "set", "to", "true", "in", "config", "once", "the", "method", "to", "get_pid", "is", "done", "deterministically", "either", "using", "pid_file", "or", "an", "accurate", "keyword" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/adhoc_deployer.py#L415-L423
train
linkedin/Zopkio
zopkio/__main__.py
string_to_level
def string_to_level(log_level): """ Converts a string to the corresponding log level """ if (log_level.strip().upper() == "DEBUG"): return logging.DEBUG if (log_level.strip().upper() == "INFO"): return logging.INFO if (log_level.strip().upper() == "WARNING"): return logging.WARNING if (log_level.strip().upper() == "ERROR"): return logging.ERROR
python
def string_to_level(log_level): """ Converts a string to the corresponding log level """ if (log_level.strip().upper() == "DEBUG"): return logging.DEBUG if (log_level.strip().upper() == "INFO"): return logging.INFO if (log_level.strip().upper() == "WARNING"): return logging.WARNING if (log_level.strip().upper() == "ERROR"): return logging.ERROR
[ "def", "string_to_level", "(", "log_level", ")", ":", "if", "(", "log_level", ".", "strip", "(", ")", ".", "upper", "(", ")", "==", "\"DEBUG\"", ")", ":", "return", "logging", ".", "DEBUG", "if", "(", "log_level", ".", "strip", "(", ")", ".", "upper", "(", ")", "==", "\"INFO\"", ")", ":", "return", "logging", ".", "INFO", "if", "(", "log_level", ".", "strip", "(", ")", ".", "upper", "(", ")", "==", "\"WARNING\"", ")", ":", "return", "logging", ".", "WARNING", "if", "(", "log_level", ".", "strip", "(", ")", ".", "upper", "(", ")", "==", "\"ERROR\"", ")", ":", "return", "logging", ".", "ERROR" ]
Converts a string to the corresponding log level
[ "Converts", "a", "string", "to", "the", "corresponding", "log", "level" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/__main__.py#L54-L65
train
linkedin/Zopkio
zopkio/__main__.py
main
def main(): """ Parse command line arguments and then run the test suite """ parser = argparse.ArgumentParser(description='A distributed test framework') parser.add_argument('testfile', help='The file that is used to determine the test suite run') parser.add_argument('--test-only', nargs='*', dest='test_list', help='run only the named tests to help debug broken tests') parser.add_argument('--machine-list', nargs='*', dest='machine_list', help='''mapping of logical host names to physical names allowing the same test suite to run on different hardware, each argument is a pair of logical name and physical name separated by a =''') parser.add_argument('--config-overrides', nargs='*', dest='config_overrides', help='''config overrides at execution time, each argument is a config with its value separated by a =. This has the highest priority of all configs''') parser.add_argument('-d', '--output-dir', dest='output_dir', help='''Directory to write output files and logs. Defaults to the current directory.''') parser.add_argument("--log-level", dest="log_level", help="Log level (default INFO)", default="INFO") parser.add_argument("--console-log-level", dest="console_level", help="Console Log level (default ERROR)", default="ERROR") parser.add_argument("--nopassword", action='store_true', dest="nopassword", help="Disable password prompt") parser.add_argument("--user", dest="user", help="user to run the test as (defaults to current user)") args = parser.parse_args() try: call_main(args) except ValueError: #We only sys.exit here, as call_main is used as part of a unit test #and should not exit the system sys.exit(1)
python
def main(): """ Parse command line arguments and then run the test suite """ parser = argparse.ArgumentParser(description='A distributed test framework') parser.add_argument('testfile', help='The file that is used to determine the test suite run') parser.add_argument('--test-only', nargs='*', dest='test_list', help='run only the named tests to help debug broken tests') parser.add_argument('--machine-list', nargs='*', dest='machine_list', help='''mapping of logical host names to physical names allowing the same test suite to run on different hardware, each argument is a pair of logical name and physical name separated by a =''') parser.add_argument('--config-overrides', nargs='*', dest='config_overrides', help='''config overrides at execution time, each argument is a config with its value separated by a =. This has the highest priority of all configs''') parser.add_argument('-d', '--output-dir', dest='output_dir', help='''Directory to write output files and logs. Defaults to the current directory.''') parser.add_argument("--log-level", dest="log_level", help="Log level (default INFO)", default="INFO") parser.add_argument("--console-log-level", dest="console_level", help="Console Log level (default ERROR)", default="ERROR") parser.add_argument("--nopassword", action='store_true', dest="nopassword", help="Disable password prompt") parser.add_argument("--user", dest="user", help="user to run the test as (defaults to current user)") args = parser.parse_args() try: call_main(args) except ValueError: #We only sys.exit here, as call_main is used as part of a unit test #and should not exit the system sys.exit(1)
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'A distributed test framework'", ")", "parser", ".", "add_argument", "(", "'testfile'", ",", "help", "=", "'The file that is used to determine the test suite run'", ")", "parser", ".", "add_argument", "(", "'--test-only'", ",", "nargs", "=", "'*'", ",", "dest", "=", "'test_list'", ",", "help", "=", "'run only the named tests to help debug broken tests'", ")", "parser", ".", "add_argument", "(", "'--machine-list'", ",", "nargs", "=", "'*'", ",", "dest", "=", "'machine_list'", ",", "help", "=", "'''mapping of logical host names to physical names allowing the same\n test suite to run on different hardware, each argument is a pair\n of logical name and physical name separated by a ='''", ")", "parser", ".", "add_argument", "(", "'--config-overrides'", ",", "nargs", "=", "'*'", ",", "dest", "=", "'config_overrides'", ",", "help", "=", "'''config overrides at execution time, each argument is a config with\n its value separated by a =. This has the highest priority of all\n configs'''", ")", "parser", ".", "add_argument", "(", "'-d'", ",", "'--output-dir'", ",", "dest", "=", "'output_dir'", ",", "help", "=", "'''Directory to write output files and logs. 
Defaults to the current\n directory.'''", ")", "parser", ".", "add_argument", "(", "\"--log-level\"", ",", "dest", "=", "\"log_level\"", ",", "help", "=", "\"Log level (default INFO)\"", ",", "default", "=", "\"INFO\"", ")", "parser", ".", "add_argument", "(", "\"--console-log-level\"", ",", "dest", "=", "\"console_level\"", ",", "help", "=", "\"Console Log level (default ERROR)\"", ",", "default", "=", "\"ERROR\"", ")", "parser", ".", "add_argument", "(", "\"--nopassword\"", ",", "action", "=", "'store_true'", ",", "dest", "=", "\"nopassword\"", ",", "help", "=", "\"Disable password prompt\"", ")", "parser", ".", "add_argument", "(", "\"--user\"", ",", "dest", "=", "\"user\"", ",", "help", "=", "\"user to run the test as (defaults to current user)\"", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "try", ":", "call_main", "(", "args", ")", "except", "ValueError", ":", "#We only sys.exit here, as call_main is used as part of a unit test", "#and should not exit the system", "sys", ".", "exit", "(", "1", ")" ]
Parse command line arguments and then run the test suite
[ "Parse", "command", "line", "arguments", "and", "then", "run", "the", "test", "suite" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/__main__.py#L67-L105
train
linkedin/Zopkio
zopkio/runtime.py
reset_all
def reset_all(): """ Clear relevant globals to start fresh :return: """ global _username global _password global _active_config global _active_tests global _machine_names global _deployers reset_deployers() reset_collector() _username = None _password = None _active_config = None _active_tests = {} _machine_names = defaultdict()
python
def reset_all(): """ Clear relevant globals to start fresh :return: """ global _username global _password global _active_config global _active_tests global _machine_names global _deployers reset_deployers() reset_collector() _username = None _password = None _active_config = None _active_tests = {} _machine_names = defaultdict()
[ "def", "reset_all", "(", ")", ":", "global", "_username", "global", "_password", "global", "_active_config", "global", "_active_tests", "global", "_machine_names", "global", "_deployers", "reset_deployers", "(", ")", "reset_collector", "(", ")", "_username", "=", "None", "_password", "=", "None", "_active_config", "=", "None", "_active_tests", "=", "{", "}", "_machine_names", "=", "defaultdict", "(", ")" ]
Clear relevant globals to start fresh :return:
[ "Clear", "relevant", "globals", "to", "start", "fresh", ":", "return", ":" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/runtime.py#L163-L180
train
linkedin/Zopkio
zopkio/runtime.py
get_active_config
def get_active_config(config_option, default=None): """ gets the config value associated with the config_option or returns an empty string if the config is not found :param config_option: :param default: if not None, will be used :return: value of config. If key is not in config, then default will be used if default is not set to None. Otherwise, KeyError is thrown. """ return _active_config.mapping[config_option] if default is None else _active_config.mapping.get(config_option, default)
python
def get_active_config(config_option, default=None): """ gets the config value associated with the config_option or returns an empty string if the config is not found :param config_option: :param default: if not None, will be used :return: value of config. If key is not in config, then default will be used if default is not set to None. Otherwise, KeyError is thrown. """ return _active_config.mapping[config_option] if default is None else _active_config.mapping.get(config_option, default)
[ "def", "get_active_config", "(", "config_option", ",", "default", "=", "None", ")", ":", "return", "_active_config", ".", "mapping", "[", "config_option", "]", "if", "default", "is", "None", "else", "_active_config", ".", "mapping", ".", "get", "(", "config_option", ",", "default", ")" ]
gets the config value associated with the config_option or returns an empty string if the config is not found :param config_option: :param default: if not None, will be used :return: value of config. If key is not in config, then default will be used if default is not set to None. Otherwise, KeyError is thrown.
[ "gets", "the", "config", "value", "associated", "with", "the", "config_option", "or", "returns", "an", "empty", "string", "if", "the", "config", "is", "not", "found", ":", "param", "config_option", ":", ":", "param", "default", ":", "if", "not", "None", "will", "be", "used", ":", "return", ":", "value", "of", "config", ".", "If", "key", "is", "not", "in", "config", "then", "default", "will", "be", "used", "if", "default", "is", "not", "set", "to", "None", ".", "Otherwise", "KeyError", "is", "thrown", "." ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/runtime.py#L196-L204
train
linkedin/Zopkio
zopkio/html_reporter.py
Reporter.generate
def generate(self): """ Generates the report """ self._setup() header_html = self._generate_header() footer_html = self._generate_footer() results_topbar_html = self._generate_topbar("results") summary_topbar_html = self._generate_topbar("summary") logs_topbar_html = self._generate_topbar("logs") diff_topbar_html = self._generate_topbar("diff") summary_body_html = self._generate_summary_body() diff_body_html = self._generate_diff_body() summary_html = header_html + summary_topbar_html + summary_body_html + footer_html diff_html = header_html + diff_topbar_html + diff_body_html+ footer_html Reporter._make_file(summary_html, self.report_info.home_page) Reporter._make_file(diff_html,self.report_info.diff_page) log_body_html = self._generate_log_body() log_html = header_html + logs_topbar_html + log_body_html+footer_html Reporter._make_file(log_html, self.report_info.log_page) for config_name in self.report_info.config_to_test_names_map.keys(): config_dir = os.path.join(self.report_info.resource_dir, config_name) utils.makedirs(config_dir) config_body_html = self._generate_config_body(config_name) config_html = header_html + results_topbar_html + config_body_html + footer_html config_file = os.path.join(config_dir, config_name + self.report_info.report_file_sfx) Reporter._make_file(config_html, config_file) for test_name in self.data_source.get_test_names(config_name): test_body_html = self._generate_test_body(config_name, test_name) test_html = header_html + results_topbar_html + test_body_html + footer_html test_file = os.path.join(config_dir, test_name + self.report_info.report_file_sfx) Reporter._make_file(test_html, test_file)
python
def generate(self): """ Generates the report """ self._setup() header_html = self._generate_header() footer_html = self._generate_footer() results_topbar_html = self._generate_topbar("results") summary_topbar_html = self._generate_topbar("summary") logs_topbar_html = self._generate_topbar("logs") diff_topbar_html = self._generate_topbar("diff") summary_body_html = self._generate_summary_body() diff_body_html = self._generate_diff_body() summary_html = header_html + summary_topbar_html + summary_body_html + footer_html diff_html = header_html + diff_topbar_html + diff_body_html+ footer_html Reporter._make_file(summary_html, self.report_info.home_page) Reporter._make_file(diff_html,self.report_info.diff_page) log_body_html = self._generate_log_body() log_html = header_html + logs_topbar_html + log_body_html+footer_html Reporter._make_file(log_html, self.report_info.log_page) for config_name in self.report_info.config_to_test_names_map.keys(): config_dir = os.path.join(self.report_info.resource_dir, config_name) utils.makedirs(config_dir) config_body_html = self._generate_config_body(config_name) config_html = header_html + results_topbar_html + config_body_html + footer_html config_file = os.path.join(config_dir, config_name + self.report_info.report_file_sfx) Reporter._make_file(config_html, config_file) for test_name in self.data_source.get_test_names(config_name): test_body_html = self._generate_test_body(config_name, test_name) test_html = header_html + results_topbar_html + test_body_html + footer_html test_file = os.path.join(config_dir, test_name + self.report_info.report_file_sfx) Reporter._make_file(test_html, test_file)
[ "def", "generate", "(", "self", ")", ":", "self", ".", "_setup", "(", ")", "header_html", "=", "self", ".", "_generate_header", "(", ")", "footer_html", "=", "self", ".", "_generate_footer", "(", ")", "results_topbar_html", "=", "self", ".", "_generate_topbar", "(", "\"results\"", ")", "summary_topbar_html", "=", "self", ".", "_generate_topbar", "(", "\"summary\"", ")", "logs_topbar_html", "=", "self", ".", "_generate_topbar", "(", "\"logs\"", ")", "diff_topbar_html", "=", "self", ".", "_generate_topbar", "(", "\"diff\"", ")", "summary_body_html", "=", "self", ".", "_generate_summary_body", "(", ")", "diff_body_html", "=", "self", ".", "_generate_diff_body", "(", ")", "summary_html", "=", "header_html", "+", "summary_topbar_html", "+", "summary_body_html", "+", "footer_html", "diff_html", "=", "header_html", "+", "diff_topbar_html", "+", "diff_body_html", "+", "footer_html", "Reporter", ".", "_make_file", "(", "summary_html", ",", "self", ".", "report_info", ".", "home_page", ")", "Reporter", ".", "_make_file", "(", "diff_html", ",", "self", ".", "report_info", ".", "diff_page", ")", "log_body_html", "=", "self", ".", "_generate_log_body", "(", ")", "log_html", "=", "header_html", "+", "logs_topbar_html", "+", "log_body_html", "+", "footer_html", "Reporter", ".", "_make_file", "(", "log_html", ",", "self", ".", "report_info", ".", "log_page", ")", "for", "config_name", "in", "self", ".", "report_info", ".", "config_to_test_names_map", ".", "keys", "(", ")", ":", "config_dir", "=", "os", ".", "path", ".", "join", "(", "self", ".", "report_info", ".", "resource_dir", ",", "config_name", ")", "utils", ".", "makedirs", "(", "config_dir", ")", "config_body_html", "=", "self", ".", "_generate_config_body", "(", "config_name", ")", "config_html", "=", "header_html", "+", "results_topbar_html", "+", "config_body_html", "+", "footer_html", "config_file", "=", "os", ".", "path", ".", "join", "(", "config_dir", ",", "config_name", "+", "self", ".", "report_info", ".", 
"report_file_sfx", ")", "Reporter", ".", "_make_file", "(", "config_html", ",", "config_file", ")", "for", "test_name", "in", "self", ".", "data_source", ".", "get_test_names", "(", "config_name", ")", ":", "test_body_html", "=", "self", ".", "_generate_test_body", "(", "config_name", ",", "test_name", ")", "test_html", "=", "header_html", "+", "results_topbar_html", "+", "test_body_html", "+", "footer_html", "test_file", "=", "os", ".", "path", ".", "join", "(", "config_dir", ",", "test_name", "+", "self", ".", "report_info", ".", "report_file_sfx", ")", "Reporter", ".", "_make_file", "(", "test_html", ",", "test_file", ")" ]
Generates the report
[ "Generates", "the", "report" ]
a06e35a884cd26eedca0aac8ba6b9b40c417a01c
https://github.com/linkedin/Zopkio/blob/a06e35a884cd26eedca0aac8ba6b9b40c417a01c/zopkio/html_reporter.py#L85-L122
train
dmwm/DBS
Server/Python/src/dbs/dao/Oracle/Dataset/UpdateType.py
UpdateType.execute
def execute ( self, conn, dataset, dataset_access_type, transaction=False ): """ for a given file """ if not conn: dbsExceptionHandler("dbsException-failed-connect2host", "Oracle/Dataset/UpdateType. Expects db connection from upper layer.", self.logger.exception) binds = { "dataset" : dataset , "dataset_access_type" : dataset_access_type ,"myuser": dbsUtils().getCreateBy(), "mydate": dbsUtils().getTime() } result = self.dbi.processData(self.sql, binds, conn, transaction)
python
def execute ( self, conn, dataset, dataset_access_type, transaction=False ): """ for a given file """ if not conn: dbsExceptionHandler("dbsException-failed-connect2host", "Oracle/Dataset/UpdateType. Expects db connection from upper layer.", self.logger.exception) binds = { "dataset" : dataset , "dataset_access_type" : dataset_access_type ,"myuser": dbsUtils().getCreateBy(), "mydate": dbsUtils().getTime() } result = self.dbi.processData(self.sql, binds, conn, transaction)
[ "def", "execute", "(", "self", ",", "conn", ",", "dataset", ",", "dataset_access_type", ",", "transaction", "=", "False", ")", ":", "if", "not", "conn", ":", "dbsExceptionHandler", "(", "\"dbsException-failed-connect2host\"", ",", "\"Oracle/Dataset/UpdateType. Expects db connection from upper layer.\"", ",", "self", ".", "logger", ".", "exception", ")", "binds", "=", "{", "\"dataset\"", ":", "dataset", ",", "\"dataset_access_type\"", ":", "dataset_access_type", ",", "\"myuser\"", ":", "dbsUtils", "(", ")", ".", "getCreateBy", "(", ")", ",", "\"mydate\"", ":", "dbsUtils", "(", ")", ".", "getTime", "(", ")", "}", "result", "=", "self", ".", "dbi", ".", "processData", "(", "self", ".", "sql", ",", "binds", ",", "conn", ",", "transaction", ")" ]
for a given file
[ "for", "a", "given", "file" ]
9619bafce3783b3e77f0415f8f9a258e33dd1e6f
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/dao/Oracle/Dataset/UpdateType.py#L26-L33
train
dmwm/DBS
Server/Python/src/dbs/utils/DBSInputValidation.py
inputChecks
def inputChecks(**_params_): """ This is a function to check all the input for GET APIs. """ def checkTypes(_func_, _params_ = _params_): log = clog.error_log @wraps(_func_) def wrapped(*args, **kw): arg_names = _func_.__code__.co_varnames[:_func_.__code__.co_argcount] ka = {} ka.update(list(zip(arg_names, args))) ka.update(kw) #print ka for name, value in ka.iteritems(): #In fact the framework removes all the input variables that is not in the args list of _addMethod. #So DBS list API will never see these variables. For example, if one has #http://hostname/cms_dbs/DBS/datatiers?name=abc, the API will get a request to list all the datatiers because #"name=abc" is removed by the framework since name is not a key work for the api. if name !='self': types = _params_[name] #if name =='lumi_list': value = cjson.decode(value) if not isinstance(value, types): serverlog = "Expected '%s' to be %s; was %s." % (name, types, type(value)) #raise TypeError, "Expected '%s' to be %s; was %s." % (name, types, type(value)) dbsExceptionHandler("dbsException-invalid-input2", message="Invalid Input DataType %s for %s..." 
%(type(value), name[:10]),\ logger=log.error, serverError=serverlog) else: try: if isinstance(value, basestring): try: value = str(value) except: dbsExceptionHandler("dbsException-invalid-input", "invalid value for %s" %name) if name == 'dataset': if '*' in value: searchdataset(value) else: reading_dataset_check(value) elif name =='lumi_list': value = cjson.decode(value) elif name =='validFileOnly': try: int(value) except Exception as e: dbsExceptionHandler("dbsException-invalid-input2", message="invalid value for %s" %name, serverError="invalid value %s for %s" %(value, name), logger=log.error) elif name =='sumOverLumi': try: int(value) except Exception as e: dbsExceptionHandler("dbsException-invalid-input2", message="invalid value for %s" %name, serverError="invalid value %s for %s" %(value, name), logger=log.error) elif name =='block_name': if '*' in value: searchblock(value) else: reading_block_check(value) elif name =='primary_ds_name': if '*' in value: searchstr(value) else: primdataset(value) elif name =='processed_ds_name': if '*' in value: searchstr(value) else: reading_procds_check(value) elif name=='logical_file_name': if '*' in value: searchstr(value) else: reading_lfn_check(value) elif name=='processing_version': procversion(value) elif name=='global_tag': if '*' in value: searchstr(value) else: globalTag(value) elif name == 'create_by': DBSUser(value) elif name == 'last_modified_by': DBSUser(value) else: searchstr(value) elif type(value) == list: if name == 'logical_file_name': for f in value: if '*' in f: searchstr(f) else: reading_lfn_check(f) elif name == 'block_names': for block_name in value: reading_block_check(block_name) elif name == 'run_num': for run_num in value: try: int(run_num) except Exception: try: min_run, max_run = run_num.split('-', 1) int(min_run) int(max_run) except Exception as e: serverLog = str(e) + "\n run_num=%s is an invalid run number." 
%run_num dbsExceptionHandler("dbsException-invalid-input2", message="Invalid input data %s...: invalid run number." %run_num[:10],\ serverError=serverLog, logger=log.error) elif name == 'dataset_id': for id in value: try: int(id) except Exception : try: min_id, max_id = id.split('-', 1) int(min_id) int(max_id) except Exception as e : serverLog = str(e) + "\n dataset_id=%s is an invalid oracle id." %dataset_id dbsExceptionHandler("dbsException-invalid-input2", message="Invalid input data %s...: invalid dataset_id." %id[:10], \ serverError=serverLog, logger=log.error) except AssertionError as ae: serverLog = str(ae) + " key-value pair (%s, %s) cannot pass input checking" %(name, value) #print ae dbsExceptionHandler("dbsException-invalid-input2", message="Invalid Input Data %s...: Not Match Required Format" %value[:10],\ serverError=serverLog, logger=log.error) except Exception as e1: raise return _func_(*args, **kw) return wrapped return checkTypes
python
def inputChecks(**_params_): """ This is a function to check all the input for GET APIs. """ def checkTypes(_func_, _params_ = _params_): log = clog.error_log @wraps(_func_) def wrapped(*args, **kw): arg_names = _func_.__code__.co_varnames[:_func_.__code__.co_argcount] ka = {} ka.update(list(zip(arg_names, args))) ka.update(kw) #print ka for name, value in ka.iteritems(): #In fact the framework removes all the input variables that is not in the args list of _addMethod. #So DBS list API will never see these variables. For example, if one has #http://hostname/cms_dbs/DBS/datatiers?name=abc, the API will get a request to list all the datatiers because #"name=abc" is removed by the framework since name is not a key work for the api. if name !='self': types = _params_[name] #if name =='lumi_list': value = cjson.decode(value) if not isinstance(value, types): serverlog = "Expected '%s' to be %s; was %s." % (name, types, type(value)) #raise TypeError, "Expected '%s' to be %s; was %s." % (name, types, type(value)) dbsExceptionHandler("dbsException-invalid-input2", message="Invalid Input DataType %s for %s..." 
%(type(value), name[:10]),\ logger=log.error, serverError=serverlog) else: try: if isinstance(value, basestring): try: value = str(value) except: dbsExceptionHandler("dbsException-invalid-input", "invalid value for %s" %name) if name == 'dataset': if '*' in value: searchdataset(value) else: reading_dataset_check(value) elif name =='lumi_list': value = cjson.decode(value) elif name =='validFileOnly': try: int(value) except Exception as e: dbsExceptionHandler("dbsException-invalid-input2", message="invalid value for %s" %name, serverError="invalid value %s for %s" %(value, name), logger=log.error) elif name =='sumOverLumi': try: int(value) except Exception as e: dbsExceptionHandler("dbsException-invalid-input2", message="invalid value for %s" %name, serverError="invalid value %s for %s" %(value, name), logger=log.error) elif name =='block_name': if '*' in value: searchblock(value) else: reading_block_check(value) elif name =='primary_ds_name': if '*' in value: searchstr(value) else: primdataset(value) elif name =='processed_ds_name': if '*' in value: searchstr(value) else: reading_procds_check(value) elif name=='logical_file_name': if '*' in value: searchstr(value) else: reading_lfn_check(value) elif name=='processing_version': procversion(value) elif name=='global_tag': if '*' in value: searchstr(value) else: globalTag(value) elif name == 'create_by': DBSUser(value) elif name == 'last_modified_by': DBSUser(value) else: searchstr(value) elif type(value) == list: if name == 'logical_file_name': for f in value: if '*' in f: searchstr(f) else: reading_lfn_check(f) elif name == 'block_names': for block_name in value: reading_block_check(block_name) elif name == 'run_num': for run_num in value: try: int(run_num) except Exception: try: min_run, max_run = run_num.split('-', 1) int(min_run) int(max_run) except Exception as e: serverLog = str(e) + "\n run_num=%s is an invalid run number." 
%run_num dbsExceptionHandler("dbsException-invalid-input2", message="Invalid input data %s...: invalid run number." %run_num[:10],\ serverError=serverLog, logger=log.error) elif name == 'dataset_id': for id in value: try: int(id) except Exception : try: min_id, max_id = id.split('-', 1) int(min_id) int(max_id) except Exception as e : serverLog = str(e) + "\n dataset_id=%s is an invalid oracle id." %dataset_id dbsExceptionHandler("dbsException-invalid-input2", message="Invalid input data %s...: invalid dataset_id." %id[:10], \ serverError=serverLog, logger=log.error) except AssertionError as ae: serverLog = str(ae) + " key-value pair (%s, %s) cannot pass input checking" %(name, value) #print ae dbsExceptionHandler("dbsException-invalid-input2", message="Invalid Input Data %s...: Not Match Required Format" %value[:10],\ serverError=serverLog, logger=log.error) except Exception as e1: raise return _func_(*args, **kw) return wrapped return checkTypes
[ "def", "inputChecks", "(", "*", "*", "_params_", ")", ":", "def", "checkTypes", "(", "_func_", ",", "_params_", "=", "_params_", ")", ":", "log", "=", "clog", ".", "error_log", "@", "wraps", "(", "_func_", ")", "def", "wrapped", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "arg_names", "=", "_func_", ".", "__code__", ".", "co_varnames", "[", ":", "_func_", ".", "__code__", ".", "co_argcount", "]", "ka", "=", "{", "}", "ka", ".", "update", "(", "list", "(", "zip", "(", "arg_names", ",", "args", ")", ")", ")", "ka", ".", "update", "(", "kw", ")", "#print ka", "for", "name", ",", "value", "in", "ka", ".", "iteritems", "(", ")", ":", "#In fact the framework removes all the input variables that is not in the args list of _addMethod.", "#So DBS list API will never see these variables. For example, if one has", "#http://hostname/cms_dbs/DBS/datatiers?name=abc, the API will get a request to list all the datatiers because", "#\"name=abc\" is removed by the framework since name is not a key work for the api.", "if", "name", "!=", "'self'", ":", "types", "=", "_params_", "[", "name", "]", "#if name =='lumi_list': value = cjson.decode(value)", "if", "not", "isinstance", "(", "value", ",", "types", ")", ":", "serverlog", "=", "\"Expected '%s' to be %s; was %s.\"", "%", "(", "name", ",", "types", ",", "type", "(", "value", ")", ")", "#raise TypeError, \"Expected '%s' to be %s; was %s.\" % (name, types, type(value))", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "message", "=", "\"Invalid Input DataType %s for %s...\"", "%", "(", "type", "(", "value", ")", ",", "name", "[", ":", "10", "]", ")", ",", "logger", "=", "log", ".", "error", ",", "serverError", "=", "serverlog", ")", "else", ":", "try", ":", "if", "isinstance", "(", "value", ",", "basestring", ")", ":", "try", ":", "value", "=", "str", "(", "value", ")", "except", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input\"", ",", "\"invalid value for %s\"", "%", "name", ")", "if", 
"name", "==", "'dataset'", ":", "if", "'*'", "in", "value", ":", "searchdataset", "(", "value", ")", "else", ":", "reading_dataset_check", "(", "value", ")", "elif", "name", "==", "'lumi_list'", ":", "value", "=", "cjson", ".", "decode", "(", "value", ")", "elif", "name", "==", "'validFileOnly'", ":", "try", ":", "int", "(", "value", ")", "except", "Exception", "as", "e", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "message", "=", "\"invalid value for %s\"", "%", "name", ",", "serverError", "=", "\"invalid value %s for %s\"", "%", "(", "value", ",", "name", ")", ",", "logger", "=", "log", ".", "error", ")", "elif", "name", "==", "'sumOverLumi'", ":", "try", ":", "int", "(", "value", ")", "except", "Exception", "as", "e", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "message", "=", "\"invalid value for %s\"", "%", "name", ",", "serverError", "=", "\"invalid value %s for %s\"", "%", "(", "value", ",", "name", ")", ",", "logger", "=", "log", ".", "error", ")", "elif", "name", "==", "'block_name'", ":", "if", "'*'", "in", "value", ":", "searchblock", "(", "value", ")", "else", ":", "reading_block_check", "(", "value", ")", "elif", "name", "==", "'primary_ds_name'", ":", "if", "'*'", "in", "value", ":", "searchstr", "(", "value", ")", "else", ":", "primdataset", "(", "value", ")", "elif", "name", "==", "'processed_ds_name'", ":", "if", "'*'", "in", "value", ":", "searchstr", "(", "value", ")", "else", ":", "reading_procds_check", "(", "value", ")", "elif", "name", "==", "'logical_file_name'", ":", "if", "'*'", "in", "value", ":", "searchstr", "(", "value", ")", "else", ":", "reading_lfn_check", "(", "value", ")", "elif", "name", "==", "'processing_version'", ":", "procversion", "(", "value", ")", "elif", "name", "==", "'global_tag'", ":", "if", "'*'", "in", "value", ":", "searchstr", "(", "value", ")", "else", ":", "globalTag", "(", "value", ")", "elif", "name", "==", "'create_by'", ":", "DBSUser", "(", "value", 
")", "elif", "name", "==", "'last_modified_by'", ":", "DBSUser", "(", "value", ")", "else", ":", "searchstr", "(", "value", ")", "elif", "type", "(", "value", ")", "==", "list", ":", "if", "name", "==", "'logical_file_name'", ":", "for", "f", "in", "value", ":", "if", "'*'", "in", "f", ":", "searchstr", "(", "f", ")", "else", ":", "reading_lfn_check", "(", "f", ")", "elif", "name", "==", "'block_names'", ":", "for", "block_name", "in", "value", ":", "reading_block_check", "(", "block_name", ")", "elif", "name", "==", "'run_num'", ":", "for", "run_num", "in", "value", ":", "try", ":", "int", "(", "run_num", ")", "except", "Exception", ":", "try", ":", "min_run", ",", "max_run", "=", "run_num", ".", "split", "(", "'-'", ",", "1", ")", "int", "(", "min_run", ")", "int", "(", "max_run", ")", "except", "Exception", "as", "e", ":", "serverLog", "=", "str", "(", "e", ")", "+", "\"\\n run_num=%s is an invalid run number.\"", "%", "run_num", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "message", "=", "\"Invalid input data %s...: invalid run number.\"", "%", "run_num", "[", ":", "10", "]", ",", "serverError", "=", "serverLog", ",", "logger", "=", "log", ".", "error", ")", "elif", "name", "==", "'dataset_id'", ":", "for", "id", "in", "value", ":", "try", ":", "int", "(", "id", ")", "except", "Exception", ":", "try", ":", "min_id", ",", "max_id", "=", "id", ".", "split", "(", "'-'", ",", "1", ")", "int", "(", "min_id", ")", "int", "(", "max_id", ")", "except", "Exception", "as", "e", ":", "serverLog", "=", "str", "(", "e", ")", "+", "\"\\n dataset_id=%s is an invalid oracle id.\"", "%", "dataset_id", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "message", "=", "\"Invalid input data %s...: invalid dataset_id.\"", "%", "id", "[", ":", "10", "]", ",", "serverError", "=", "serverLog", ",", "logger", "=", "log", ".", "error", ")", "except", "AssertionError", "as", "ae", ":", "serverLog", "=", "str", "(", "ae", ")", "+", "\" key-value 
pair (%s, %s) cannot pass input checking\"", "%", "(", "name", ",", "value", ")", "#print ae", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "message", "=", "\"Invalid Input Data %s...: Not Match Required Format\"", "%", "value", "[", ":", "10", "]", ",", "serverError", "=", "serverLog", ",", "logger", "=", "log", ".", "error", ")", "except", "Exception", "as", "e1", ":", "raise", "return", "_func_", "(", "*", "args", ",", "*", "*", "kw", ")", "return", "wrapped", "return", "checkTypes" ]
This is a function to check all the input for GET APIs.
[ "This", "is", "a", "function", "to", "check", "all", "the", "input", "for", "GET", "APIs", "." ]
9619bafce3783b3e77f0415f8f9a258e33dd1e6f
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/utils/DBSInputValidation.py#L31-L155
train
dmwm/DBS
Server/Python/src/dbs/utils/DBSInputValidation.py
validateStringInput
def validateStringInput(input_key,input_data, read=False): """ To check if a string has the required format. This is only used for POST APIs. """ log = clog.error_log func = None if '*' in input_data or '%' in input_data: func = validationFunctionWildcard.get(input_key) if func is None: func = searchstr elif input_key == 'migration_input' : if input_data.find('#') != -1 : func = block else : func = dataset else: if not read: func = validationFunction.get(input_key) if func is None: func = namestr else: if input_key == 'dataset': func = reading_dataset_check elif input_key == 'block_name': func = reading_block_check elif input_key == 'logical_file_name': func = reading_lfn_check else: func = namestr try: func(input_data) except AssertionError as ae: serverLog = str(ae) + " key-value pair (%s, %s) cannot pass input checking" %(input_key, input_data) #print serverLog dbsExceptionHandler("dbsException-invalid-input2", message="Invalid Input Data %s...: Not Match Required Format" %input_data[:10], \ logger=log.error, serverError=serverLog) return input_data
python
def validateStringInput(input_key,input_data, read=False): """ To check if a string has the required format. This is only used for POST APIs. """ log = clog.error_log func = None if '*' in input_data or '%' in input_data: func = validationFunctionWildcard.get(input_key) if func is None: func = searchstr elif input_key == 'migration_input' : if input_data.find('#') != -1 : func = block else : func = dataset else: if not read: func = validationFunction.get(input_key) if func is None: func = namestr else: if input_key == 'dataset': func = reading_dataset_check elif input_key == 'block_name': func = reading_block_check elif input_key == 'logical_file_name': func = reading_lfn_check else: func = namestr try: func(input_data) except AssertionError as ae: serverLog = str(ae) + " key-value pair (%s, %s) cannot pass input checking" %(input_key, input_data) #print serverLog dbsExceptionHandler("dbsException-invalid-input2", message="Invalid Input Data %s...: Not Match Required Format" %input_data[:10], \ logger=log.error, serverError=serverLog) return input_data
[ "def", "validateStringInput", "(", "input_key", ",", "input_data", ",", "read", "=", "False", ")", ":", "log", "=", "clog", ".", "error_log", "func", "=", "None", "if", "'*'", "in", "input_data", "or", "'%'", "in", "input_data", ":", "func", "=", "validationFunctionWildcard", ".", "get", "(", "input_key", ")", "if", "func", "is", "None", ":", "func", "=", "searchstr", "elif", "input_key", "==", "'migration_input'", ":", "if", "input_data", ".", "find", "(", "'#'", ")", "!=", "-", "1", ":", "func", "=", "block", "else", ":", "func", "=", "dataset", "else", ":", "if", "not", "read", ":", "func", "=", "validationFunction", ".", "get", "(", "input_key", ")", "if", "func", "is", "None", ":", "func", "=", "namestr", "else", ":", "if", "input_key", "==", "'dataset'", ":", "func", "=", "reading_dataset_check", "elif", "input_key", "==", "'block_name'", ":", "func", "=", "reading_block_check", "elif", "input_key", "==", "'logical_file_name'", ":", "func", "=", "reading_lfn_check", "else", ":", "func", "=", "namestr", "try", ":", "func", "(", "input_data", ")", "except", "AssertionError", "as", "ae", ":", "serverLog", "=", "str", "(", "ae", ")", "+", "\" key-value pair (%s, %s) cannot pass input checking\"", "%", "(", "input_key", ",", "input_data", ")", "#print serverLog", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "message", "=", "\"Invalid Input Data %s...: Not Match Required Format\"", "%", "input_data", "[", ":", "10", "]", ",", "logger", "=", "log", ".", "error", ",", "serverError", "=", "serverLog", ")", "return", "input_data" ]
To check if a string has the required format. This is only used for POST APIs.
[ "To", "check", "if", "a", "string", "has", "the", "required", "format", ".", "This", "is", "only", "used", "for", "POST", "APIs", "." ]
9619bafce3783b3e77f0415f8f9a258e33dd1e6f
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/utils/DBSInputValidation.py#L305-L339
train
dmwm/DBS
Server/Python/src/dbs/dao/Oracle/Service/List.py
List.execute
def execute(self, conn, transaction=False): """ Lists all primary datasets if pattern is not provided. """ sql = self.sql binds = {} cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True) result = [] for c in cursors: result.extend(self.formatCursor(c, size=100)) return result
python
def execute(self, conn, transaction=False): """ Lists all primary datasets if pattern is not provided. """ sql = self.sql binds = {} cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True) result = [] for c in cursors: result.extend(self.formatCursor(c, size=100)) return result
[ "def", "execute", "(", "self", ",", "conn", ",", "transaction", "=", "False", ")", ":", "sql", "=", "self", ".", "sql", "binds", "=", "{", "}", "cursors", "=", "self", ".", "dbi", ".", "processData", "(", "sql", ",", "binds", ",", "conn", ",", "transaction", ",", "returnCursor", "=", "True", ")", "result", "=", "[", "]", "for", "c", "in", "cursors", ":", "result", ".", "extend", "(", "self", ".", "formatCursor", "(", "c", ",", "size", "=", "100", ")", ")", "return", "result" ]
Lists all primary datasets if pattern is not provided.
[ "Lists", "all", "primary", "datasets", "if", "pattern", "is", "not", "provided", "." ]
9619bafce3783b3e77f0415f8f9a258e33dd1e6f
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/dao/Oracle/Service/List.py#L26-L37
train
dmwm/DBS
Server/Python/src/dbs/dao/Oracle/MigrationRequests/Remove.py
Remove.execute
def execute(self, conn, daoinput, transaction = False): """ daoinput keys: migration_request_id """ if not conn: dbsExceptionHandler("dbsException-failed-connect2host", "Oracle/MigrationRequests/Remove. Expects db connection from upper layer.", self.logger.exception) daoinput['create_by'] = dbsUtils().getCreateBy() try: msg = "DBSMigration: Invalid request. Sucessfully processed or processing requests cannot be removed,\ or the requested migration did not exist, or the requestor for removing and creating has to be the same user. " checkit = self.dbi.processData(self.select, daoinput, conn, transaction) if self.formatDict(checkit)[0]["count"] >= 1: reqID = {'migration_rqst_id':daoinput['migration_rqst_id']} result = self.dbi.processData(self.sql, reqID, conn, transaction) else: dbsExceptionHandler('dbsException-invalid-input', msg, self.logger.exception) except: raise
python
def execute(self, conn, daoinput, transaction = False): """ daoinput keys: migration_request_id """ if not conn: dbsExceptionHandler("dbsException-failed-connect2host", "Oracle/MigrationRequests/Remove. Expects db connection from upper layer.", self.logger.exception) daoinput['create_by'] = dbsUtils().getCreateBy() try: msg = "DBSMigration: Invalid request. Sucessfully processed or processing requests cannot be removed,\ or the requested migration did not exist, or the requestor for removing and creating has to be the same user. " checkit = self.dbi.processData(self.select, daoinput, conn, transaction) if self.formatDict(checkit)[0]["count"] >= 1: reqID = {'migration_rqst_id':daoinput['migration_rqst_id']} result = self.dbi.processData(self.sql, reqID, conn, transaction) else: dbsExceptionHandler('dbsException-invalid-input', msg, self.logger.exception) except: raise
[ "def", "execute", "(", "self", ",", "conn", ",", "daoinput", ",", "transaction", "=", "False", ")", ":", "if", "not", "conn", ":", "dbsExceptionHandler", "(", "\"dbsException-failed-connect2host\"", ",", "\"Oracle/MigrationRequests/Remove. Expects db connection from upper layer.\"", ",", "self", ".", "logger", ".", "exception", ")", "daoinput", "[", "'create_by'", "]", "=", "dbsUtils", "(", ")", ".", "getCreateBy", "(", ")", "try", ":", "msg", "=", "\"DBSMigration: Invalid request. Sucessfully processed or processing requests cannot be removed,\\\n or the requested migration did not exist, or the requestor for removing and creating has to be the same user. \"", "checkit", "=", "self", ".", "dbi", ".", "processData", "(", "self", ".", "select", ",", "daoinput", ",", "conn", ",", "transaction", ")", "if", "self", ".", "formatDict", "(", "checkit", ")", "[", "0", "]", "[", "\"count\"", "]", ">=", "1", ":", "reqID", "=", "{", "'migration_rqst_id'", ":", "daoinput", "[", "'migration_rqst_id'", "]", "}", "result", "=", "self", ".", "dbi", ".", "processData", "(", "self", ".", "sql", ",", "reqID", ",", "conn", ",", "transaction", ")", "else", ":", "dbsExceptionHandler", "(", "'dbsException-invalid-input'", ",", "msg", ",", "self", ".", "logger", ".", "exception", ")", "except", ":", "raise" ]
daoinput keys: migration_request_id
[ "daoinput", "keys", ":", "migration_request_id" ]
9619bafce3783b3e77f0415f8f9a258e33dd1e6f
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/dao/Oracle/MigrationRequests/Remove.py#L34-L53
train
dmwm/DBS
Server/Python/src/dbs/utils/dbsUtils.py
jsonstreamer
def jsonstreamer(func): """JSON streamer decorator""" def wrapper (self, *args, **kwds): gen = func (self, *args, **kwds) yield "[" firstItem = True for item in gen: if not firstItem: yield "," else: firstItem = False yield cjson.encode(item) yield "]" return wrapper
python
def jsonstreamer(func): """JSON streamer decorator""" def wrapper (self, *args, **kwds): gen = func (self, *args, **kwds) yield "[" firstItem = True for item in gen: if not firstItem: yield "," else: firstItem = False yield cjson.encode(item) yield "]" return wrapper
[ "def", "jsonstreamer", "(", "func", ")", ":", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "gen", "=", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwds", ")", "yield", "\"[\"", "firstItem", "=", "True", "for", "item", "in", "gen", ":", "if", "not", "firstItem", ":", "yield", "\",\"", "else", ":", "firstItem", "=", "False", "yield", "cjson", ".", "encode", "(", "item", ")", "yield", "\"]\"", "return", "wrapper" ]
JSON streamer decorator
[ "JSON", "streamer", "decorator" ]
9619bafce3783b3e77f0415f8f9a258e33dd1e6f
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/utils/dbsUtils.py#L69-L82
train
dmwm/DBS
Server/Python/src/dbs/utils/dbsUtils.py
dbsUtils.decodeLumiIntervals
def decodeLumiIntervals(self, lumi_list): """lumi_list must be of one of the two following formats: '[[a,b], [c,d],' or [a1, a2, a3] """ errmessage = "lumi intervals must be of one of the two following formats: '[[a,b], [c,d], ...],' or [a1, a2, a3 ...] " if isinstance(lumi_list, basestring): try: lumi_list = cjson.decode(lumi_list) except: dbsExceptionHandler("dbsException-invalid-input2", "invalid lumi format", None, "Could not decode the input lumi_list: %s" % lumi_list) if not isinstance(lumi_list, list): dbsExceptionHandler("dbsException-invalid-input2", "invalid lumi input", None, errmessage) #check only the first element... in case [1, '2', '3'] is passed, exception will not be raised here. if len(lumi_list)==0 or isinstance(lumi_list[0], int): return lumi_list elif isinstance(lumi_list[0], list): result = [] resultext = result.extend for lumiinterval in lumi_list: if not isinstance(lumiinterval, list) or len(lumiinterval) != 2: dbsExceptionHandler("dbsException-invalid-input2", "invalid lumi input", None, errmessage) resultext(range(lumiinterval[0], lumiinterval[1]+1)) result = list(set(result)) #removes the dublicates, no need to sort return result else: dbsExceptionHandler("dbsException-invalid-input2", 'invalid lumi format', None, \ 'Unsupported lumi format: %s. %s' % (lumi_list, errmessage))
python
def decodeLumiIntervals(self, lumi_list): """lumi_list must be of one of the two following formats: '[[a,b], [c,d],' or [a1, a2, a3] """ errmessage = "lumi intervals must be of one of the two following formats: '[[a,b], [c,d], ...],' or [a1, a2, a3 ...] " if isinstance(lumi_list, basestring): try: lumi_list = cjson.decode(lumi_list) except: dbsExceptionHandler("dbsException-invalid-input2", "invalid lumi format", None, "Could not decode the input lumi_list: %s" % lumi_list) if not isinstance(lumi_list, list): dbsExceptionHandler("dbsException-invalid-input2", "invalid lumi input", None, errmessage) #check only the first element... in case [1, '2', '3'] is passed, exception will not be raised here. if len(lumi_list)==0 or isinstance(lumi_list[0], int): return lumi_list elif isinstance(lumi_list[0], list): result = [] resultext = result.extend for lumiinterval in lumi_list: if not isinstance(lumiinterval, list) or len(lumiinterval) != 2: dbsExceptionHandler("dbsException-invalid-input2", "invalid lumi input", None, errmessage) resultext(range(lumiinterval[0], lumiinterval[1]+1)) result = list(set(result)) #removes the dublicates, no need to sort return result else: dbsExceptionHandler("dbsException-invalid-input2", 'invalid lumi format', None, \ 'Unsupported lumi format: %s. %s' % (lumi_list, errmessage))
[ "def", "decodeLumiIntervals", "(", "self", ",", "lumi_list", ")", ":", "errmessage", "=", "\"lumi intervals must be of one of the two following formats: '[[a,b], [c,d], ...],' or [a1, a2, a3 ...] \"", "if", "isinstance", "(", "lumi_list", ",", "basestring", ")", ":", "try", ":", "lumi_list", "=", "cjson", ".", "decode", "(", "lumi_list", ")", "except", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "\"invalid lumi format\"", ",", "None", ",", "\"Could not decode the input lumi_list: %s\"", "%", "lumi_list", ")", "if", "not", "isinstance", "(", "lumi_list", ",", "list", ")", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "\"invalid lumi input\"", ",", "None", ",", "errmessage", ")", "#check only the first element... in case [1, '2', '3'] is passed, exception will not be raised here.", "if", "len", "(", "lumi_list", ")", "==", "0", "or", "isinstance", "(", "lumi_list", "[", "0", "]", ",", "int", ")", ":", "return", "lumi_list", "elif", "isinstance", "(", "lumi_list", "[", "0", "]", ",", "list", ")", ":", "result", "=", "[", "]", "resultext", "=", "result", ".", "extend", "for", "lumiinterval", "in", "lumi_list", ":", "if", "not", "isinstance", "(", "lumiinterval", ",", "list", ")", "or", "len", "(", "lumiinterval", ")", "!=", "2", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "\"invalid lumi input\"", ",", "None", ",", "errmessage", ")", "resultext", "(", "range", "(", "lumiinterval", "[", "0", "]", ",", "lumiinterval", "[", "1", "]", "+", "1", ")", ")", "result", "=", "list", "(", "set", "(", "result", ")", ")", "#removes the dublicates, no need to sort", "return", "result", "else", ":", "dbsExceptionHandler", "(", "\"dbsException-invalid-input2\"", ",", "'invalid lumi format'", ",", "None", ",", "'Unsupported lumi format: %s. %s'", "%", "(", "lumi_list", ",", "errmessage", ")", ")" ]
lumi_list must be of one of the two following formats: '[[a,b], [c,d],' or [a1, a2, a3]
[ "lumi_list", "must", "be", "of", "one", "of", "the", "two", "following", "formats", ":", "[[", "a", "b", "]", "[", "c", "d", "]", "or", "[", "a1", "a2", "a3", "]" ]
9619bafce3783b3e77f0415f8f9a258e33dd1e6f
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/utils/dbsUtils.py#L36-L67
train
dmwm/DBS
Server/Python/src/dbs/business/DBSDatasetAccessType.py
DBSDatasetAccessType.listDatasetAccessTypes
def listDatasetAccessTypes(self, dataset_access_type=""): """ List dataset access types """ if isinstance(dataset_access_type, basestring): try: dataset_access_type = str(dataset_access_type) except: dbsExceptionHandler('dbsException-invalid-input', 'dataset_access_type given is not valid : %s' %dataset_access_type) else: dbsExceptionHandler('dbsException-invalid-input', 'dataset_access_type given is not valid : %s' %dataset_access_type) conn = self.dbi.connection() try: plist = self.datasetAccessType.execute(conn, dataset_access_type.upper()) result = [{}] if plist: t = [] for i in plist: for k, v in i.iteritems(): t.append(v) result[0]['dataset_access_type'] = t return result finally: if conn: conn.close()
python
def listDatasetAccessTypes(self, dataset_access_type=""): """ List dataset access types """ if isinstance(dataset_access_type, basestring): try: dataset_access_type = str(dataset_access_type) except: dbsExceptionHandler('dbsException-invalid-input', 'dataset_access_type given is not valid : %s' %dataset_access_type) else: dbsExceptionHandler('dbsException-invalid-input', 'dataset_access_type given is not valid : %s' %dataset_access_type) conn = self.dbi.connection() try: plist = self.datasetAccessType.execute(conn, dataset_access_type.upper()) result = [{}] if plist: t = [] for i in plist: for k, v in i.iteritems(): t.append(v) result[0]['dataset_access_type'] = t return result finally: if conn: conn.close()
[ "def", "listDatasetAccessTypes", "(", "self", ",", "dataset_access_type", "=", "\"\"", ")", ":", "if", "isinstance", "(", "dataset_access_type", ",", "basestring", ")", ":", "try", ":", "dataset_access_type", "=", "str", "(", "dataset_access_type", ")", "except", ":", "dbsExceptionHandler", "(", "'dbsException-invalid-input'", ",", "'dataset_access_type given is not valid : %s'", "%", "dataset_access_type", ")", "else", ":", "dbsExceptionHandler", "(", "'dbsException-invalid-input'", ",", "'dataset_access_type given is not valid : %s'", "%", "dataset_access_type", ")", "conn", "=", "self", ".", "dbi", ".", "connection", "(", ")", "try", ":", "plist", "=", "self", ".", "datasetAccessType", ".", "execute", "(", "conn", ",", "dataset_access_type", ".", "upper", "(", ")", ")", "result", "=", "[", "{", "}", "]", "if", "plist", ":", "t", "=", "[", "]", "for", "i", "in", "plist", ":", "for", "k", ",", "v", "in", "i", ".", "iteritems", "(", ")", ":", "t", ".", "append", "(", "v", ")", "result", "[", "0", "]", "[", "'dataset_access_type'", "]", "=", "t", "return", "result", "finally", ":", "if", "conn", ":", "conn", ".", "close", "(", ")" ]
List dataset access types
[ "List", "dataset", "access", "types" ]
9619bafce3783b3e77f0415f8f9a258e33dd1e6f
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/business/DBSDatasetAccessType.py#L24-L48
train
closeio/flask-ipblock
flask_ipblock/__init__.py
IPBlock.block_before
def block_before(self): """ Check the current request and block it if the IP address it's coming from is blacklisted. """ # To avoid unnecessary database queries, ignore the IP check for # requests for static files if request.path.startswith(url_for('static', filename='')): return # Some static files might be served from the root path (e.g. # favicon.ico, robots.txt, etc.). Ignore the IP check for most # common extensions of those files. ignored_extensions = ('ico', 'png', 'txt', 'xml') if request.path.rsplit('.', 1)[-1] in ignored_extensions: return ips = request.headers.getlist('X-Forwarded-For') if not ips: return # If the X-Forwarded-For header contains multiple comma-separated # IP addresses, we're only interested in the last one. ip = ips[0].strip() if ip[-1] == ',': ip = ip[:-1] ip = ip.rsplit(',', 1)[-1].strip() if self.matches_ip(ip): if self.logger is not None: self.logger.info("IPBlock: matched {}, {}".format(ip, self.block_msg)) if self.blocking_enabled: return 'IP Blocked', 200
python
def block_before(self): """ Check the current request and block it if the IP address it's coming from is blacklisted. """ # To avoid unnecessary database queries, ignore the IP check for # requests for static files if request.path.startswith(url_for('static', filename='')): return # Some static files might be served from the root path (e.g. # favicon.ico, robots.txt, etc.). Ignore the IP check for most # common extensions of those files. ignored_extensions = ('ico', 'png', 'txt', 'xml') if request.path.rsplit('.', 1)[-1] in ignored_extensions: return ips = request.headers.getlist('X-Forwarded-For') if not ips: return # If the X-Forwarded-For header contains multiple comma-separated # IP addresses, we're only interested in the last one. ip = ips[0].strip() if ip[-1] == ',': ip = ip[:-1] ip = ip.rsplit(',', 1)[-1].strip() if self.matches_ip(ip): if self.logger is not None: self.logger.info("IPBlock: matched {}, {}".format(ip, self.block_msg)) if self.blocking_enabled: return 'IP Blocked', 200
[ "def", "block_before", "(", "self", ")", ":", "# To avoid unnecessary database queries, ignore the IP check for", "# requests for static files", "if", "request", ".", "path", ".", "startswith", "(", "url_for", "(", "'static'", ",", "filename", "=", "''", ")", ")", ":", "return", "# Some static files might be served from the root path (e.g.", "# favicon.ico, robots.txt, etc.). Ignore the IP check for most", "# common extensions of those files.", "ignored_extensions", "=", "(", "'ico'", ",", "'png'", ",", "'txt'", ",", "'xml'", ")", "if", "request", ".", "path", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "-", "1", "]", "in", "ignored_extensions", ":", "return", "ips", "=", "request", ".", "headers", ".", "getlist", "(", "'X-Forwarded-For'", ")", "if", "not", "ips", ":", "return", "# If the X-Forwarded-For header contains multiple comma-separated", "# IP addresses, we're only interested in the last one.", "ip", "=", "ips", "[", "0", "]", ".", "strip", "(", ")", "if", "ip", "[", "-", "1", "]", "==", "','", ":", "ip", "=", "ip", "[", ":", "-", "1", "]", "ip", "=", "ip", ".", "rsplit", "(", "','", ",", "1", ")", "[", "-", "1", "]", ".", "strip", "(", ")", "if", "self", ".", "matches_ip", "(", "ip", ")", ":", "if", "self", ".", "logger", "is", "not", "None", ":", "self", ".", "logger", ".", "info", "(", "\"IPBlock: matched {}, {}\"", ".", "format", "(", "ip", ",", "self", ".", "block_msg", ")", ")", "if", "self", ".", "blocking_enabled", ":", "return", "'IP Blocked'", ",", "200" ]
Check the current request and block it if the IP address it's coming from is blacklisted.
[ "Check", "the", "current", "request", "and", "block", "it", "if", "the", "IP", "address", "it", "s", "coming", "from", "is", "blacklisted", "." ]
d4d6d50e4a7d1ba7aa0f8d7298ee8d63d25b644f
https://github.com/closeio/flask-ipblock/blob/d4d6d50e4a7d1ba7aa0f8d7298ee8d63d25b644f/flask_ipblock/__init__.py#L43-L75
train
closeio/flask-ipblock
flask_ipblock/__init__.py
IPBlock.matches_ip
def matches_ip(self, ip): """Return True if the given IP is blacklisted, False otherwise.""" # Check the cache if caching is enabled if self.cache is not None: matches_ip = self.cache.get(ip) if matches_ip is not None: return matches_ip # Query MongoDB to see if the IP is blacklisted matches_ip = IPNetwork.matches_ip( ip, read_preference=self.read_preference) # Cache the result if caching is enabled if self.cache is not None: self.cache[ip] = matches_ip return matches_ip
python
def matches_ip(self, ip): """Return True if the given IP is blacklisted, False otherwise.""" # Check the cache if caching is enabled if self.cache is not None: matches_ip = self.cache.get(ip) if matches_ip is not None: return matches_ip # Query MongoDB to see if the IP is blacklisted matches_ip = IPNetwork.matches_ip( ip, read_preference=self.read_preference) # Cache the result if caching is enabled if self.cache is not None: self.cache[ip] = matches_ip return matches_ip
[ "def", "matches_ip", "(", "self", ",", "ip", ")", ":", "# Check the cache if caching is enabled", "if", "self", ".", "cache", "is", "not", "None", ":", "matches_ip", "=", "self", ".", "cache", ".", "get", "(", "ip", ")", "if", "matches_ip", "is", "not", "None", ":", "return", "matches_ip", "# Query MongoDB to see if the IP is blacklisted", "matches_ip", "=", "IPNetwork", ".", "matches_ip", "(", "ip", ",", "read_preference", "=", "self", ".", "read_preference", ")", "# Cache the result if caching is enabled", "if", "self", ".", "cache", "is", "not", "None", ":", "self", ".", "cache", "[", "ip", "]", "=", "matches_ip", "return", "matches_ip" ]
Return True if the given IP is blacklisted, False otherwise.
[ "Return", "True", "if", "the", "given", "IP", "is", "blacklisted", "False", "otherwise", "." ]
d4d6d50e4a7d1ba7aa0f8d7298ee8d63d25b644f
https://github.com/closeio/flask-ipblock/blob/d4d6d50e4a7d1ba7aa0f8d7298ee8d63d25b644f/flask_ipblock/__init__.py#L77-L94
train
dmwm/DBS
Server/Python/src/dbs/dao/Oracle/FileParent/ListChild.py
ListChild.execute
def execute(self, conn, logical_file_name, block_name, block_id, transaction=False): """ Lists all primary datasets if pattern is not provided. """ binds = {} sql = '' if logical_file_name: if isinstance(logical_file_name, basestring): wheresql = "WHERE F.LOGICAL_FILE_NAME = :logical_file_name" binds = {"logical_file_name": logical_file_name} sql = "{sql} {wheresql}".format(sql=self.sql, wheresql=wheresql) elif isinstance(logical_file_name, list): wheresql = "WHERE F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)" lfn_generator, binds = create_token_generator(logical_file_name) sql = "{lfn_generator} {sql} {wheresql}".format(lfn_generator=lfn_generator, sql=self.sql, wheresql=wheresql) elif block_name: joins = "JOIN {owner}BLOCKS B on B.BLOCK_ID = F.BLOCK_ID".format(owner=self.owner) wheresql = "WHERE B.BLOCK_NAME = :block_name" binds = {"block_name": block_name} sql = "{sql} {joins} {wheresql}".format(sql=self.sql, joins=joins, wheresql=wheresql) elif block_id: wheresql = "WHERE F.BLOCK_ID = :block_id" binds = {"block_id": block_id} sql = "{sql} {wheresql}".format(sql=self.sql, wheresql=wheresql) else: dbsExceptionHandler('dbsException-invalid-input', "Logical_file_names is required for listChild dao.", self.logger.exception) cursors = self.dbi.processData(sql, binds, conn, transaction=transaction, returnCursor=True) result = [] for c in cursors: result.extend(self.formatCursor(c, size=100)) return result
python
def execute(self, conn, logical_file_name, block_name, block_id, transaction=False): """ Lists all primary datasets if pattern is not provided. """ binds = {} sql = '' if logical_file_name: if isinstance(logical_file_name, basestring): wheresql = "WHERE F.LOGICAL_FILE_NAME = :logical_file_name" binds = {"logical_file_name": logical_file_name} sql = "{sql} {wheresql}".format(sql=self.sql, wheresql=wheresql) elif isinstance(logical_file_name, list): wheresql = "WHERE F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)" lfn_generator, binds = create_token_generator(logical_file_name) sql = "{lfn_generator} {sql} {wheresql}".format(lfn_generator=lfn_generator, sql=self.sql, wheresql=wheresql) elif block_name: joins = "JOIN {owner}BLOCKS B on B.BLOCK_ID = F.BLOCK_ID".format(owner=self.owner) wheresql = "WHERE B.BLOCK_NAME = :block_name" binds = {"block_name": block_name} sql = "{sql} {joins} {wheresql}".format(sql=self.sql, joins=joins, wheresql=wheresql) elif block_id: wheresql = "WHERE F.BLOCK_ID = :block_id" binds = {"block_id": block_id} sql = "{sql} {wheresql}".format(sql=self.sql, wheresql=wheresql) else: dbsExceptionHandler('dbsException-invalid-input', "Logical_file_names is required for listChild dao.", self.logger.exception) cursors = self.dbi.processData(sql, binds, conn, transaction=transaction, returnCursor=True) result = [] for c in cursors: result.extend(self.formatCursor(c, size=100)) return result
[ "def", "execute", "(", "self", ",", "conn", ",", "logical_file_name", ",", "block_name", ",", "block_id", ",", "transaction", "=", "False", ")", ":", "binds", "=", "{", "}", "sql", "=", "''", "if", "logical_file_name", ":", "if", "isinstance", "(", "logical_file_name", ",", "basestring", ")", ":", "wheresql", "=", "\"WHERE F.LOGICAL_FILE_NAME = :logical_file_name\"", "binds", "=", "{", "\"logical_file_name\"", ":", "logical_file_name", "}", "sql", "=", "\"{sql} {wheresql}\"", ".", "format", "(", "sql", "=", "self", ".", "sql", ",", "wheresql", "=", "wheresql", ")", "elif", "isinstance", "(", "logical_file_name", ",", "list", ")", ":", "wheresql", "=", "\"WHERE F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)\"", "lfn_generator", ",", "binds", "=", "create_token_generator", "(", "logical_file_name", ")", "sql", "=", "\"{lfn_generator} {sql} {wheresql}\"", ".", "format", "(", "lfn_generator", "=", "lfn_generator", ",", "sql", "=", "self", ".", "sql", ",", "wheresql", "=", "wheresql", ")", "elif", "block_name", ":", "joins", "=", "\"JOIN {owner}BLOCKS B on B.BLOCK_ID = F.BLOCK_ID\"", ".", "format", "(", "owner", "=", "self", ".", "owner", ")", "wheresql", "=", "\"WHERE B.BLOCK_NAME = :block_name\"", "binds", "=", "{", "\"block_name\"", ":", "block_name", "}", "sql", "=", "\"{sql} {joins} {wheresql}\"", ".", "format", "(", "sql", "=", "self", ".", "sql", ",", "joins", "=", "joins", ",", "wheresql", "=", "wheresql", ")", "elif", "block_id", ":", "wheresql", "=", "\"WHERE F.BLOCK_ID = :block_id\"", "binds", "=", "{", "\"block_id\"", ":", "block_id", "}", "sql", "=", "\"{sql} {wheresql}\"", ".", "format", "(", "sql", "=", "self", ".", "sql", ",", "wheresql", "=", "wheresql", ")", "else", ":", "dbsExceptionHandler", "(", "'dbsException-invalid-input'", ",", "\"Logical_file_names is required for listChild dao.\"", ",", "self", ".", "logger", ".", "exception", ")", "cursors", "=", "self", ".", "dbi", ".", "processData", "(", "sql", ",", "binds", ",", "conn", ",", 
"transaction", "=", "transaction", ",", "returnCursor", "=", "True", ")", "result", "=", "[", "]", "for", "c", "in", "cursors", ":", "result", ".", "extend", "(", "self", ".", "formatCursor", "(", "c", ",", "size", "=", "100", ")", ")", "return", "result" ]
Lists all primary datasets if pattern is not provided.
[ "Lists", "all", "primary", "datasets", "if", "pattern", "is", "not", "provided", "." ]
9619bafce3783b3e77f0415f8f9a258e33dd1e6f
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/dao/Oracle/FileParent/ListChild.py#L30-L63
train
dmwm/DBS
Server/Python/src/dbs/business/DBSAcquisitionEra.py
DBSAcquisitionEra.listAcquisitionEras
def listAcquisitionEras(self, acq=''): """ Returns all acquistion eras in dbs """ try: acq = str(acq) except: dbsExceptionHandler('dbsException-invalid-input', 'acquistion_era_name given is not valid : %s' %acq) conn = self.dbi.connection() try: result = self.acqlst.execute(conn, acq) return result finally: if conn:conn.close()
python
def listAcquisitionEras(self, acq=''): """ Returns all acquistion eras in dbs """ try: acq = str(acq) except: dbsExceptionHandler('dbsException-invalid-input', 'acquistion_era_name given is not valid : %s' %acq) conn = self.dbi.connection() try: result = self.acqlst.execute(conn, acq) return result finally: if conn:conn.close()
[ "def", "listAcquisitionEras", "(", "self", ",", "acq", "=", "''", ")", ":", "try", ":", "acq", "=", "str", "(", "acq", ")", "except", ":", "dbsExceptionHandler", "(", "'dbsException-invalid-input'", ",", "'acquistion_era_name given is not valid : %s'", "%", "acq", ")", "conn", "=", "self", ".", "dbi", ".", "connection", "(", ")", "try", ":", "result", "=", "self", ".", "acqlst", ".", "execute", "(", "conn", ",", "acq", ")", "return", "result", "finally", ":", "if", "conn", ":", "conn", ".", "close", "(", ")" ]
Returns all acquistion eras in dbs
[ "Returns", "all", "acquistion", "eras", "in", "dbs" ]
9619bafce3783b3e77f0415f8f9a258e33dd1e6f
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/business/DBSAcquisitionEra.py#L27-L40
train