id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
19,900
networks-lab/metaknowledge
metaknowledge/journalAbbreviations/backend.py
_j9SaveCurrent
def _j9SaveCurrent(sDir = '.'):
    """Downloads and saves all the webpages

    For Backend

    # Parameters

    _sDir_ : `optional [str]`

    > The directory in which the dated download folder is created, default `'.'`
    """
    dname = os.path.normpath(sDir + '/' + datetime.datetime.now().strftime("%Y-%m-%d_J9_AbbreviationDocs"))
    if not os.path.isdir(dname):
        os.mkdir(dname)
    # NOTE: changes the process working directory as a side effect, exactly as
    # the original did (it chdir'd in both branches of the if/else).
    os.chdir(dname)
    for urlID, urlString in j9urlGenerator(nameDict = True).items():
        fname = "{}_abrvjt.html".format(urlID)
        # BUG FIX: the original opened a new handle every iteration and never
        # closed any of them; the with-statement guarantees each file is
        # flushed and closed before the next download starts.
        with open(fname, 'wb') as f:
            f.write(urllib.request.urlopen(urlString).read())
python
def _j9SaveCurrent(sDir = '.'): dname = os.path.normpath(sDir + '/' + datetime.datetime.now().strftime("%Y-%m-%d_J9_AbbreviationDocs")) if not os.path.isdir(dname): os.mkdir(dname) os.chdir(dname) else: os.chdir(dname) for urlID, urlString in j9urlGenerator(nameDict = True).items(): fname = "{}_abrvjt.html".format(urlID) f = open(fname, 'wb') f.write(urllib.request.urlopen(urlString).read())
[ "def", "_j9SaveCurrent", "(", "sDir", "=", "'.'", ")", ":", "dname", "=", "os", ".", "path", ".", "normpath", "(", "sDir", "+", "'/'", "+", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%Y-%m-%d_J9_AbbreviationDocs\"", ")", ...
Downloads and saves all the webpages For Backend
[ "Downloads", "and", "saves", "all", "the", "webpages" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/journalAbbreviations/backend.py#L40-L54
19,901
networks-lab/metaknowledge
metaknowledge/journalAbbreviations/backend.py
_getDict
def _getDict(j9Page):
    """Parses a Journal Title Abbreviations page

    Note the pages are not well formatted html as the <DT> tags are not
    closed, so html parsers (Beautiful Soup) do not work. This is a simple
    parser that only works on the webpages and may fail if they are changed

    For Backend
    """
    lines = j9Page.read().decode('utf-8').split('\n')
    # Throw away everything up to and including the opening "<DL>" line.
    while lines.pop(0) != "<DL>":
        pass
    # The very first entry uses a different name delimiter than the rest.
    journalName = lines.pop(0).split('"></A><DT>')[1]
    abbrevTag = lines.pop(0).split("<B><DD>\t")[1]
    abbreviations = {}
    while True:
        # Record the pair, creating the list on first sight of the tag.
        abbreviations.setdefault(abbrevTag, []).append(journalName)
        try:
            journalName = lines.pop(0).split('</B><DT>')[1]
            abbrevTag = lines.pop(0).split("<B><DD>\t")[1]
        except IndexError:
            # Either the line list ran out or a line lacked the expected
            # delimiter — both mean we are past the entries, so stop.
            break
    return abbreviations
python
def _getDict(j9Page): slines = j9Page.read().decode('utf-8').split('\n') while slines.pop(0) != "<DL>": pass currentName = slines.pop(0).split('"></A><DT>')[1] currentTag = slines.pop(0).split("<B><DD>\t")[1] j9Dict = {} while True: try: j9Dict[currentTag].append(currentName) except KeyError: j9Dict[currentTag] = [currentName] try: currentName = slines.pop(0).split('</B><DT>')[1] currentTag = slines.pop(0).split("<B><DD>\t")[1] except IndexError: break return j9Dict
[ "def", "_getDict", "(", "j9Page", ")", ":", "slines", "=", "j9Page", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", ".", "split", "(", "'\\n'", ")", "while", "slines", ".", "pop", "(", "0", ")", "!=", "\"<DL>\"", ":", "pass", "currentNa...
Parses a Journal Title Abbreviations page Note the pages are not well formatted html as the <DT> tags are not closes so html parses (Beautiful Soup) do not work. This is a simple parser that only works on the webpages and may fail if they are changed For Backend
[ "Parses", "a", "Journal", "Title", "Abbreviations", "page" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/journalAbbreviations/backend.py#L56-L79
19,902
networks-lab/metaknowledge
metaknowledge/journalAbbreviations/backend.py
_getCurrentj9Dict
def _getCurrentj9Dict():
    """Downloads and parses all the webpages

    For Backend

    # Returns

    `dict[str : list[str]]`

    > Maps each abbreviation tag to the list of associated journal names

    # Raises

    `RuntimeError` when a downloaded page parses to an empty dict (likely a
    page-format change that broke `_getDict`).
    """
    urls = j9urlGenerator()
    j9Dict = {}
    for url in urls:
        d = _getDict(urllib.request.urlopen(url))
        if len(d) == 0:
            # FIX: the original message read "this is could require", a
            # garbled sentence shown to users on parser failure.
            raise RuntimeError("Parsing failed, this could require an update of the parser.")
        j9Dict.update(d)
    return j9Dict
python
def _getCurrentj9Dict(): urls = j9urlGenerator() j9Dict = {} for url in urls: d = _getDict(urllib.request.urlopen(url)) if len(d) == 0: raise RuntimeError("Parsing failed, this is could require an update of the parser.") j9Dict.update(d) return j9Dict
[ "def", "_getCurrentj9Dict", "(", ")", ":", "urls", "=", "j9urlGenerator", "(", ")", "j9Dict", "=", "{", "}", "for", "url", "in", "urls", ":", "d", "=", "_getDict", "(", "urllib", ".", "request", ".", "urlopen", "(", "url", ")", ")", "if", "len", "(...
Downloads and parses all the webpages For Backend
[ "Downloads", "and", "parses", "all", "the", "webpages" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/journalAbbreviations/backend.py#L81-L93
19,903
networks-lab/metaknowledge
metaknowledge/journalAbbreviations/backend.py
updatej9DB
def updatej9DB(dbname = abrevDBname, saveRawHTML = False):
    """Updates the database of Journal Title Abbreviations. Requires an internet connection. The data base is saved relative to the source file not the working directory.

    # Parameters

    _dbname_ : `optional [str]`

    > The name of the database file, default is "j9Abbreviations.db"

    _saveRawHTML_ : `optional [bool]`

    > Determines if the original HTML of the pages is stored, default `False`. If `True` they are saved in a directory inside j9Raws begining with todays date.
    """
    if saveRawHTML:
        rawDir = '{}/j9Raws'.format(os.path.dirname(__file__))
        if not os.path.isdir(rawDir):
            os.mkdir(rawDir)
        _j9SaveCurrent(sDir = rawDir)
    dbLoc = os.path.join(os.path.normpath(os.path.dirname(__file__)), dbname)
    try:
        with dbm.dumb.open(dbLoc, flag = 'c') as db:
            try:
                j9Dict = _getCurrentj9Dict()
            except urllib.error.URLError:
                raise urllib.error.URLError("Unable to access server, check your connection")
            for k, v in j9Dict.items():
                if k in db:
                    # BUG FIX: the original tested `jName not in j9Dict[k]`
                    # where jName iterates over v == j9Dict[k] itself, so the
                    # condition was always False and existing database entries
                    # were never updated.  Merge the freshly downloaded names
                    # into the stored '|'-joined entry instead.
                    existing = db[k].decode('utf-8').split('|')
                    for jName in v:
                        if jName not in existing:
                            existing.append(jName)
                    db[k] = '|'.join(existing)
                else:
                    db[k] = '|'.join(v)
    except dbm.dumb.error as e:
        raise JournalDataBaseError("Something happened with the database of WOS journal names. To fix this you should delete the 1 to 3 files whose names start with {}. If this doesn't work (sorry), deleteing everything in '{}' and reinstalling metaknowledge should.\nThe error was '{}'".format(dbLoc, os.path.dirname(__file__), e))
python
def updatej9DB(dbname = abrevDBname, saveRawHTML = False): if saveRawHTML: rawDir = '{}/j9Raws'.format(os.path.dirname(__file__)) if not os.path.isdir(rawDir): os.mkdir(rawDir) _j9SaveCurrent(sDir = rawDir) dbLoc = os.path.join(os.path.normpath(os.path.dirname(__file__)), dbname) try: with dbm.dumb.open(dbLoc, flag = 'c') as db: try: j9Dict = _getCurrentj9Dict() except urllib.error.URLError: raise urllib.error.URLError("Unable to access server, check your connection") for k, v in j9Dict.items(): if k in db: for jName in v: if jName not in j9Dict[k]: j9Dict[k] += '|' + jName else: db[k] = '|'.join(v) except dbm.dumb.error as e: raise JournalDataBaseError("Something happened with the database of WOS journal names. To fix this you should delete the 1 to 3 files whose names start with {}. If this doesn't work (sorry), deleteing everything in '{}' and reinstalling metaknowledge should.\nThe error was '{}'".format(dbLoc, os.path.dirname(__file__), e))
[ "def", "updatej9DB", "(", "dbname", "=", "abrevDBname", ",", "saveRawHTML", "=", "False", ")", ":", "if", "saveRawHTML", ":", "rawDir", "=", "'{}/j9Raws'", ".", "format", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "if", "not", "...
Updates the database of Journal Title Abbreviations. Requires an internet connection. The data base is saved relative to the source file not the working directory. # Parameters _dbname_ : `optional [str]` > The name of the database file, default is "j9Abbreviations.db" _saveRawHTML_ : `optional [bool]` > Determines if the original HTML of the pages is stored, default `False`. If `True` they are saved in a directory inside j9Raws begining with todays date.
[ "Updates", "the", "database", "of", "Journal", "Title", "Abbreviations", ".", "Requires", "an", "internet", "connection", ".", "The", "data", "base", "is", "saved", "relative", "to", "the", "source", "file", "not", "the", "working", "directory", "." ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/journalAbbreviations/backend.py#L95-L128
19,904
networks-lab/metaknowledge
metaknowledge/journalAbbreviations/backend.py
getj9dict
def getj9dict(dbname = abrevDBname, manualDB = manualDBname, returnDict ='both'):
    """Returns the dictionary of journal abbreviations mapping to a list of the associated journal names. By default the local database is used. The database is in the file _dbname_ in the same directory as this source file

    # Parameters

    _dbname_ : `optional [str]`

    > The name of the downloaded database file, the default is determined at run time. It is recommended that this remain untouched.

    _manualDB_ : `optional [str]`

    > The name of the manually created database file, the default is determined at run time. It is recommended that this remain untouched.

    _returnDict_ : `optional [str]`

    > default `'both'`, can be used to get both databases or only one with `'WOS'` or `'manual'`.
    """
    # Both databases live next to this source file, not the working directory.
    dbLoc = os.path.normpath(os.path.dirname(__file__))
    retDict = {}
    try:
        if returnDict == 'both' or returnDict == 'WOS':
            with dbm.dumb.open(dbLoc + '/{}'.format(dbname)) as db:
                if len(db) == 0:
                    raise JournalDataBaseError("J9 Database empty or missing, to regenerate it import and run metaknowledge.WOS.journalAbbreviations.updatej9DB().")
                for k, v in db.items():
                    # dbm stores bytes; decode and split the '|'-joined name list.
                    retDict[k.decode('utf-8')] = v.decode('utf-8').split('|')
    except JournalDataBaseError:
        # WOS DB missing or empty: rebuild it (requires network access) and
        # retry once via recursion.  NOTE(review): if the rebuild still leaves
        # the DB empty this recurses again — confirm updatej9DB() cannot
        # produce an empty database.
        updatej9DB()
        return getj9dict(dbname = dbname, manualDB = manualDB, returnDict = returnDict)
    try:
        if returnDict == 'both' or returnDict == 'manual':
            # dbm.dumb stores its data in a "<name>.dat" file; its presence is
            # used as the existence check for the manual database.
            if os.path.isfile(dbLoc + '/{}.dat'.format(manualDB)):
                with dbm.dumb.open(dbLoc + '/{}'.format(manualDB)) as db:
                    # Manual entries overwrite WOS entries for the same key.
                    for k, v in db.items():
                        retDict[k.decode('utf-8')] = v.decode('utf-8').split('|')
            else:
                # A missing manual DB is only an error when it was explicitly
                # requested; with 'both' it is silently skipped.
                if returnDict == 'manual':
                    raise JournalDataBaseError("Manual J9 Database ({0}) missing, to create it run addToDB(dbname = {0})".format(manualDB))
    except JournalDataBaseError:
        # NOTE(review): the recovery path fills the *manual* DB file via
        # updatej9DB(dbname = manualDB), i.e. with downloaded WOS names —
        # confirm this bootstrap is intentional.
        updatej9DB(dbname = manualDB)
        return getj9dict(dbname = dbname, manualDB = manualDB, returnDict = returnDict)
    return retDict
python
def getj9dict(dbname = abrevDBname, manualDB = manualDBname, returnDict ='both'): dbLoc = os.path.normpath(os.path.dirname(__file__)) retDict = {} try: if returnDict == 'both' or returnDict == 'WOS': with dbm.dumb.open(dbLoc + '/{}'.format(dbname)) as db: if len(db) == 0: raise JournalDataBaseError("J9 Database empty or missing, to regenerate it import and run metaknowledge.WOS.journalAbbreviations.updatej9DB().") for k, v in db.items(): retDict[k.decode('utf-8')] = v.decode('utf-8').split('|') except JournalDataBaseError: updatej9DB() return getj9dict(dbname = dbname, manualDB = manualDB, returnDict = returnDict) try: if returnDict == 'both' or returnDict == 'manual': if os.path.isfile(dbLoc + '/{}.dat'.format(manualDB)): with dbm.dumb.open(dbLoc + '/{}'.format(manualDB)) as db: for k, v in db.items(): retDict[k.decode('utf-8')] = v.decode('utf-8').split('|') else: if returnDict == 'manual': raise JournalDataBaseError("Manual J9 Database ({0}) missing, to create it run addToDB(dbname = {0})".format(manualDB)) except JournalDataBaseError: updatej9DB(dbname = manualDB) return getj9dict(dbname = dbname, manualDB = manualDB, returnDict = returnDict) return retDict
[ "def", "getj9dict", "(", "dbname", "=", "abrevDBname", ",", "manualDB", "=", "manualDBname", ",", "returnDict", "=", "'both'", ")", ":", "dbLoc", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")"...
Returns the dictionary of journal abbreviations mapping to a list of the associated journal names. By default the local database is used. The database is in the file _dbname_ in the same directory as this source file # Parameters _dbname_ : `optional [str]` > The name of the downloaded database file, the default is determined at run time. It is recommended that this remain untouched. _manualDB_ : `optional [str]` > The name of the manually created database file, the default is determined at run time. It is recommended that this remain untouched. _returnDict_ : `optional [str]` > default `'both'`, can be used to get both databases or only one with `'WOS'` or `'manual'`.
[ "Returns", "the", "dictionary", "of", "journal", "abbreviations", "mapping", "to", "a", "list", "of", "the", "associated", "journal", "names", ".", "By", "default", "the", "local", "database", "is", "used", ".", "The", "database", "is", "in", "the", "file", ...
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/journalAbbreviations/backend.py#L130-L172
19,905
networks-lab/metaknowledge
metaknowledge/WOS/tagProcessing/funcDicts.py
normalizeToTag
def normalizeToTag(val):
    """Converts tags or full names to 2 character tags, case insensitive

    # Parameters

    _val_: `str`

    > A two character string giving the tag or its full name

    # Returns

    `str`

    > The short name of _val_
    """
    # Non-strings have no .upper(); report them the same way as unknown names.
    try:
        upperVal = val.upper()
    except AttributeError:
        raise KeyError("{} is not a tag or name string".format(val))
    # Note: this message intentionally shows the uppercased value, matching
    # the original behaviour (val had already been reassigned there).
    if upperVal not in tagsAndNameSetUpper:
        raise KeyError("{} is not a tag or name string".format(upperVal))
    # Full names map to their tag; values already in tag form pass through.
    if upperVal in fullToTagDictUpper:
        return fullToTagDictUpper[upperVal]
    return upperVal
python
def normalizeToTag(val): try: val = val.upper() except AttributeError: raise KeyError("{} is not a tag or name string".format(val)) if val not in tagsAndNameSetUpper: raise KeyError("{} is not a tag or name string".format(val)) else: try: return fullToTagDictUpper[val] except KeyError: return val
[ "def", "normalizeToTag", "(", "val", ")", ":", "try", ":", "val", "=", "val", ".", "upper", "(", ")", "except", "AttributeError", ":", "raise", "KeyError", "(", "\"{} is not a tag or name string\"", ".", "format", "(", "val", ")", ")", "if", "val", "not", ...
Converts tags or full names to 2 character tags, case insensitive # Parameters _val_: `str` > A two character string giving the tag or its full name # Returns `str` > The short name of _val_
[ "Converts", "tags", "or", "full", "names", "to", "2", "character", "tags", "case", "insensitive" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/WOS/tagProcessing/funcDicts.py#L41-L66
19,906
networks-lab/metaknowledge
metaknowledge/WOS/tagProcessing/funcDicts.py
normalizeToName
def normalizeToName(val):
    """Converts tags or full names to full names, case sensitive

    # Parameters

    _val_: `str`

    > A two character string giving the tag or its full name

    # Returns

    `str`

    > The full name of _val_
    """
    # Reject anything that is neither a known tag nor a known full name.
    if val not in tagsAndNameSet:
        raise KeyError("{} is not a tag or name string".format(val))
    # Tags map to their full name; values already in full form pass through.
    if val in tagToFullDict:
        return tagToFullDict[val]
    return val
python
def normalizeToName(val): if val not in tagsAndNameSet: raise KeyError("{} is not a tag or name string".format(val)) else: try: return tagToFullDict[val] except KeyError: return val
[ "def", "normalizeToName", "(", "val", ")", ":", "if", "val", "not", "in", "tagsAndNameSet", ":", "raise", "KeyError", "(", "\"{} is not a tag or name string\"", ".", "format", "(", "val", ")", ")", "else", ":", "try", ":", "return", "tagToFullDict", "[", "va...
Converts tags or full names to full names, case sensitive # Parameters _val_: `str` > A two character string giving the tag or its full name # Returns `str` > The full name of _val_
[ "Converts", "tags", "or", "full", "names", "to", "full", "names", "case", "sensitive" ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/WOS/tagProcessing/funcDicts.py#L68-L89
19,907
networks-lab/metaknowledge
metaknowledge/grants/baseGrant.py
Grant.update
def update(self, other):
    """Adds all the tag-entry pairs from _other_ to the `Grant`. If there is a conflict _other_ takes precedence.

    # Parameters

    _other_ : `Grant`

    > Another `Grant` of the same type as _self_
    """
    # Refuse to merge across grant types, mirroring the rich-comparison
    # convention of signalling "not my problem" with NotImplemented.
    if type(self) != type(other):
        return NotImplemented
    # A bad grant stays visibly bad after merging: inherit the error state.
    if other.bad:
        self.bad = True
        self.error = other.error
    self._fieldDict.update(other._fieldDict)
python
def update(self, other): if type(self) != type(other): return NotImplemented else: if other.bad: self.error = other.error self.bad = True self._fieldDict.update(other._fieldDict)
[ "def", "update", "(", "self", ",", "other", ")", ":", "if", "type", "(", "self", ")", "!=", "type", "(", "other", ")", ":", "return", "NotImplemented", "else", ":", "if", "other", ".", "bad", ":", "self", ".", "error", "=", "other", ".", "error", ...
Adds all the tag-entry pairs from _other_ to the `Grant`. If there is a conflict _other_ takes precedence. # Parameters _other_ : `Grant` > Another `Grant` of the same type as _self_
[ "Adds", "all", "the", "tag", "-", "entry", "pairs", "from", "_other_", "to", "the", "Grant", ".", "If", "there", "is", "a", "conflict", "_other_", "takes", "precedence", "." ]
8162bf95e66bb6f9916081338e6e2a6132faff75
https://github.com/networks-lab/metaknowledge/blob/8162bf95e66bb6f9916081338e6e2a6132faff75/metaknowledge/grants/baseGrant.py#L99-L114
19,908
kxgames/glooey
glooey/widget.py
EventDispatcher.relay_events_from
def relay_events_from(self, originator, event_type, *more_event_types):
    """
    Configure this handler to re-dispatch events from another handler.

    This method configures this handler dispatch an event of type
    *event_type* whenever *originator* dispatches events of the same type or
    any of the types in *more_event_types*.  Any arguments passed to the
    original event are copied to the new event.

    This method is mean to be useful for creating composite widgets that
    want to present a simple API by making it seem like the events being
    generated by their children are actually coming from them.  See the
    `/composing_widgets` tutorial for an example.
    """
    # BUG FIX: the original created the relay lambdas inside the dict
    # comprehension, closing over its loop variable.  Closures bind late, so
    # once the comprehension finished every handler re-dispatched the *last*
    # event type.  A factory function gives each relay its own binding.
    def make_relay(relayed_type):
        # One relay per event type, with the type captured by value.
        return lambda *args, **kwargs: \
                self.dispatch_event(relayed_type, *args, **kwargs)

    handlers = {
            relayed_type: make_relay(relayed_type)
            for relayed_type in (event_type,) + more_event_types
    }
    originator.set_handlers(**handlers)
python
def relay_events_from(self, originator, event_type, *more_event_types): handlers = { event_type: lambda *args, **kwargs: \ self.dispatch_event(event_type, *args, **kwargs) for event_type in (event_type,) + more_event_types } originator.set_handlers(**handlers)
[ "def", "relay_events_from", "(", "self", ",", "originator", ",", "event_type", ",", "*", "more_event_types", ")", ":", "handlers", "=", "{", "event_type", ":", "lambda", "*", "args", ",", "*", "*", "kwargs", ":", "self", ".", "dispatch_event", "(", "event_...
Configure this handler to re-dispatch events from another handler. This method configures this handler dispatch an event of type *event_type* whenever *originator* dispatches events of the same type or any of the types in *more_event_types*. Any arguments passed to the original event are copied to the new event. This method is mean to be useful for creating composite widgets that want to present a simple API by making it seem like the events being generated by their children are actually coming from them. See the `/composing_widgets` tutorial for an example.
[ "Configure", "this", "handler", "to", "re", "-", "dispatch", "events", "from", "another", "handler", "." ]
f0125c1f218b05cfb2efb52a88d80f54eae007a0
https://github.com/kxgames/glooey/blob/f0125c1f218b05cfb2efb52a88d80f54eae007a0/glooey/widget.py#L25-L44
19,909
kxgames/glooey
glooey/widget.py
EventDispatcher.start_event
def start_event(self, event_type, *args, dt=1/60): """ Begin dispatching the given event at the given frequency. Calling this method will cause an event of type *event_type* with arguments *args* to be dispatched every *dt* seconds. This will continue until `stop_event()` is called for the same event. These continuously firing events are useful if, for example, you want to make a button that scrolls for as long as it's being held. """ # Don't bother scheduling a timer if nobody's listening. This isn't # great from a general-purpose perspective, because a long-lived event # could have listeners attach and detach in the middle. But I don't # like the idea of making a bunch of clocks to spit out a bunch of # events that are never used, although to be fair I don't actually know # how expensive that would be. If I want to make this implementation # more general purpose, I could start and stop timers as necessary in # the methods that add or remove handlers. if not any(self.__yield_handlers(event_type)): return def on_time_interval(dt): # self.dispatch_event(event_type, *args, dt) pyglet.clock.schedule_interval(on_time_interval, dt) self.__timers[event_type] = on_time_interval
python
def start_event(self, event_type, *args, dt=1/60): # Don't bother scheduling a timer if nobody's listening. This isn't # great from a general-purpose perspective, because a long-lived event # could have listeners attach and detach in the middle. But I don't # like the idea of making a bunch of clocks to spit out a bunch of # events that are never used, although to be fair I don't actually know # how expensive that would be. If I want to make this implementation # more general purpose, I could start and stop timers as necessary in # the methods that add or remove handlers. if not any(self.__yield_handlers(event_type)): return def on_time_interval(dt): # self.dispatch_event(event_type, *args, dt) pyglet.clock.schedule_interval(on_time_interval, dt) self.__timers[event_type] = on_time_interval
[ "def", "start_event", "(", "self", ",", "event_type", ",", "*", "args", ",", "dt", "=", "1", "/", "60", ")", ":", "# Don't bother scheduling a timer if nobody's listening. This isn't ", "# great from a general-purpose perspective, because a long-lived event ", "# could have li...
Begin dispatching the given event at the given frequency. Calling this method will cause an event of type *event_type* with arguments *args* to be dispatched every *dt* seconds. This will continue until `stop_event()` is called for the same event. These continuously firing events are useful if, for example, you want to make a button that scrolls for as long as it's being held.
[ "Begin", "dispatching", "the", "given", "event", "at", "the", "given", "frequency", "." ]
f0125c1f218b05cfb2efb52a88d80f54eae007a0
https://github.com/kxgames/glooey/blob/f0125c1f218b05cfb2efb52a88d80f54eae007a0/glooey/widget.py#L46-L72
19,910
kxgames/glooey
glooey/widget.py
EventDispatcher.stop_event
def stop_event(self, event_type):
    """
    Stop dispatching the given event.

    It is not an error to attempt to stop an event that was never started, 
    the request will just be silently ignored.
    """
    # Timers are registered by start_event(); an unknown type is a no-op.
    timer = self.__timers.get(event_type)
    if timer is not None:
        pyglet.clock.unschedule(timer)
python
def stop_event(self, event_type): if event_type in self.__timers: pyglet.clock.unschedule(self.__timers[event_type])
[ "def", "stop_event", "(", "self", ",", "event_type", ")", ":", "if", "event_type", "in", "self", ".", "__timers", ":", "pyglet", ".", "clock", ".", "unschedule", "(", "self", ".", "__timers", "[", "event_type", "]", ")" ]
Stop dispatching the given event. It is not an error to attempt to stop an event that was never started, the request will just be silently ignored.
[ "Stop", "dispatching", "the", "given", "event", "." ]
f0125c1f218b05cfb2efb52a88d80f54eae007a0
https://github.com/kxgames/glooey/blob/f0125c1f218b05cfb2efb52a88d80f54eae007a0/glooey/widget.py#L74-L82
19,911
kxgames/glooey
glooey/widget.py
EventDispatcher.__yield_handlers
def __yield_handlers(self, event_type):
    """
    Yield all the handlers registered for the given event type.
    """
    if event_type not in self.event_types:
        raise ValueError("%r not found in %r.event_types == %r" % (event_type, self, self.event_types))

    # Handlers pushed onto the stack come first, in stack order; a snapshot
    # copy guards against the stack changing while we iterate.
    for frame in list(self._event_stack):
        try:
            yield frame[event_type]
        except KeyError:
            pass

    # Finally, fall back to a handler method defined on the instance itself.
    if hasattr(self, event_type):
        yield getattr(self, event_type)
python
def __yield_handlers(self, event_type): if event_type not in self.event_types: raise ValueError("%r not found in %r.event_types == %r" % (event_type, self, self.event_types)) # Search handler stack for matching event handlers for frame in list(self._event_stack): if event_type in frame: yield frame[event_type] # Check instance for an event handler if hasattr(self, event_type): yield getattr(self, event_type)
[ "def", "__yield_handlers", "(", "self", ",", "event_type", ")", ":", "if", "event_type", "not", "in", "self", ".", "event_types", ":", "raise", "ValueError", "(", "\"%r not found in %r.event_types == %r\"", "%", "(", "event_type", ",", "self", ",", "self", ".", ...
Yield all the handlers registered for the given event type.
[ "Yield", "all", "the", "handlers", "registered", "for", "the", "given", "event", "type", "." ]
f0125c1f218b05cfb2efb52a88d80f54eae007a0
https://github.com/kxgames/glooey/blob/f0125c1f218b05cfb2efb52a88d80f54eae007a0/glooey/widget.py#L84-L98
19,912
kxgames/glooey
glooey/helpers.py
HoldUpdatesMixin._filter_pending_updates
def _filter_pending_updates(self):
    """
    Return all the updates that need to be applied, from a list of all the
    updates that were called while the hold was active.

    This method is meant to be overridden by subclasses that want to
    customize how held updates are applied.  The `self._pending_updates`
    member variable is a list of (method, args, kwargs) tuples, in call
    order, with repeated calls appearing repeatedly.  This method should
    yield or return the tuples that should actually be applied, in the
    order they should be applied.

    The default implementation drops duplicate updates while keeping their
    relative order; where it matters, the *last* call to each update
    determines its position.
    """
    from more_itertools import unique_everseen

    # Walk newest-first so deduplication keeps the most recent call to each
    # update, then flip back to restore chronological order.
    newest_first = reversed(self._pending_updates)
    deduplicated = list(unique_everseen(newest_first))
    yield from reversed(deduplicated)
python
def _filter_pending_updates(self): from more_itertools import unique_everseen as unique yield from reversed(list(unique(reversed(self._pending_updates))))
[ "def", "_filter_pending_updates", "(", "self", ")", ":", "from", "more_itertools", "import", "unique_everseen", "as", "unique", "yield", "from", "reversed", "(", "list", "(", "unique", "(", "reversed", "(", "self", ".", "_pending_updates", ")", ")", ")", ")" ]
Return all the updates that need to be applied, from a list of all the updates that were called while the hold was active. This method is meant to be overridden by subclasses that want to customize how held updates are applied. The `self._pending_updates` member variable is a list containing a (method, args, kwargs) tuple for each update that was called while updates were being held. This list is in the order that the updates were actually called, and any updates that were called more than once will appear in this list more than once. This method should yield or return an list of the tuples in the same format representing the updates that should be applied, in the order they should be applied. The default implementation filters out duplicate updates without changing their order. In cases where it matters, the last call to each update is used to determine the order.
[ "Return", "all", "the", "updates", "that", "need", "to", "be", "applied", "from", "a", "list", "of", "all", "the", "updates", "that", "were", "called", "while", "the", "hold", "was", "active", ".", "This", "method", "is", "meant", "to", "be", "overridden...
f0125c1f218b05cfb2efb52a88d80f54eae007a0
https://github.com/kxgames/glooey/blob/f0125c1f218b05cfb2efb52a88d80f54eae007a0/glooey/helpers.py#L59-L79
19,913
csurfer/gitsuggest
gitsuggest/utilities.py
ReposToHTML.get_html
def get_html(self):
    """Method to convert the repository list to a search results page."""
    # Templates and static assets live in res/ next to this source file.
    base_dir = path.abspath(path.dirname(__file__))
    template_env = Environment(loader=FileSystemLoader(path.join(base_dir, "res/")))
    template = template_env.get_template("suggest.htm.j2")
    context = {
        "logo": path.join(base_dir, "res/logo.png"),
        "user_login": self.user,
        "repos": self.repos,
    }
    return template.render(**context)
python
def get_html(self): here = path.abspath(path.dirname(__file__)) env = Environment(loader=FileSystemLoader(path.join(here, "res/"))) suggest = env.get_template("suggest.htm.j2") return suggest.render( logo=path.join(here, "res/logo.png"), user_login=self.user, repos=self.repos, )
[ "def", "get_html", "(", "self", ")", ":", "here", "=", "path", ".", "abspath", "(", "path", ".", "dirname", "(", "__file__", ")", ")", "env", "=", "Environment", "(", "loader", "=", "FileSystemLoader", "(", "path", ".", "join", "(", "here", ",", "\"r...
Method to convert the repository list to a search results page.
[ "Method", "to", "convert", "the", "repository", "list", "to", "a", "search", "results", "page", "." ]
02efdbf50acb094e502aef9c139dde62676455ee
https://github.com/csurfer/gitsuggest/blob/02efdbf50acb094e502aef9c139dde62676455ee/gitsuggest/utilities.py#L26-L37
19,914
csurfer/gitsuggest
gitsuggest/utilities.py
ReposToHTML.to_html
def to_html(self, write_to):
    """Method to convert the repository list to a search results page and
    write it to a HTML file.

    :param write_to: File/Path to write the html file to.
    """
    rendered = self.get_html()
    # Write bytes explicitly so the output is always UTF-8 regardless of the
    # platform's default encoding.
    with open(write_to, "wb") as outfile:
        outfile.write(rendered.encode("utf-8"))
python
def to_html(self, write_to): page_html = self.get_html() with open(write_to, "wb") as writefile: writefile.write(page_html.encode("utf-8"))
[ "def", "to_html", "(", "self", ",", "write_to", ")", ":", "page_html", "=", "self", ".", "get_html", "(", ")", "with", "open", "(", "write_to", ",", "\"wb\"", ")", "as", "writefile", ":", "writefile", ".", "write", "(", "page_html", ".", "encode", "(",...
Method to convert the repository list to a search results page and write it to a HTML file. :param write_to: File/Path to write the html file to.
[ "Method", "to", "convert", "the", "repository", "list", "to", "a", "search", "results", "page", "and", "write", "it", "to", "a", "HTML", "file", "." ]
02efdbf50acb094e502aef9c139dde62676455ee
https://github.com/csurfer/gitsuggest/blob/02efdbf50acb094e502aef9c139dde62676455ee/gitsuggest/utilities.py#L39-L48
19,915
csurfer/gitsuggest
gitsuggest/suggest.py
GitSuggest.get_unique_repositories
def get_unique_repositories(repo_list):
    """Method to create unique list of repositories from the list of
    repositories given.

    :param repo_list: List of repositories which might contain duplicates.
    :return: List of repositories with no duplicate in them.
    """
    # Deduplicate by full_name, keeping the first occurrence of each repo.
    seen_names = set()
    unique_repos = []
    for repository in repo_list:
        if repository.full_name not in seen_names:
            seen_names.add(repository.full_name)
            unique_repos.append(repository)
    return unique_repos
python
def get_unique_repositories(repo_list): unique_list = list() included = defaultdict(lambda: False) for repo in repo_list: if not included[repo.full_name]: unique_list.append(repo) included[repo.full_name] = True return unique_list
[ "def", "get_unique_repositories", "(", "repo_list", ")", ":", "unique_list", "=", "list", "(", ")", "included", "=", "defaultdict", "(", "lambda", ":", "False", ")", "for", "repo", "in", "repo_list", ":", "if", "not", "included", "[", "repo", ".", "full_na...
Method to create unique list of repositories from the list of repositories given. :param repo_list: List of repositories which might contain duplicates. :return: List of repositories with no duplicate in them.
[ "Method", "to", "create", "unique", "list", "of", "repositories", "from", "the", "list", "of", "repositories", "given", "." ]
02efdbf50acb094e502aef9c139dde62676455ee
https://github.com/csurfer/gitsuggest/blob/02efdbf50acb094e502aef9c139dde62676455ee/gitsuggest/suggest.py#L74-L87
19,916
csurfer/gitsuggest
gitsuggest/suggest.py
GitSuggest.minus
def minus(repo_list_a, repo_list_b): """Method to create a list of repositories such that the repository belongs to repo list a but not repo list b. In an ideal scenario we should be able to do this by set(a) - set(b) but as GithubRepositories have shown that set() on them is not reliable resort to this until it is all sorted out. :param repo_list_a: List of repositories. :param repo_list_b: List of repositories. """ included = defaultdict(lambda: False) for repo in repo_list_b: included[repo.full_name] = True a_minus_b = list() for repo in repo_list_a: if not included[repo.full_name]: included[repo.full_name] = True a_minus_b.append(repo) return a_minus_b
python
def minus(repo_list_a, repo_list_b): included = defaultdict(lambda: False) for repo in repo_list_b: included[repo.full_name] = True a_minus_b = list() for repo in repo_list_a: if not included[repo.full_name]: included[repo.full_name] = True a_minus_b.append(repo) return a_minus_b
[ "def", "minus", "(", "repo_list_a", ",", "repo_list_b", ")", ":", "included", "=", "defaultdict", "(", "lambda", ":", "False", ")", "for", "repo", "in", "repo_list_b", ":", "included", "[", "repo", ".", "full_name", "]", "=", "True", "a_minus_b", "=", "l...
Method to create a list of repositories such that the repository belongs to repo list a but not repo list b. In an ideal scenario we should be able to do this by set(a) - set(b) but as GithubRepositories have shown that set() on them is not reliable resort to this until it is all sorted out. :param repo_list_a: List of repositories. :param repo_list_b: List of repositories.
[ "Method", "to", "create", "a", "list", "of", "repositories", "such", "that", "the", "repository", "belongs", "to", "repo", "list", "a", "but", "not", "repo", "list", "b", "." ]
02efdbf50acb094e502aef9c139dde62676455ee
https://github.com/csurfer/gitsuggest/blob/02efdbf50acb094e502aef9c139dde62676455ee/gitsuggest/suggest.py#L90-L112
19,917
csurfer/gitsuggest
gitsuggest/suggest.py
GitSuggest.__populate_repositories_of_interest
def __populate_repositories_of_interest(self, username): """Method to populate repositories which will be used to suggest repositories for the user. For this purpose we use two kinds of repositories. 1. Repositories starred by user him/herself. 2. Repositories starred by the users followed by the user. :param username: Username for the user for whom repositories are being suggested for. """ # Handle to the user to whom repositories need to be suggested. user = self.github.get_user(username) # Procure repositories starred by the user. self.user_starred_repositories.extend(user.get_starred()) # Repositories starred by users followed by the user. if self.deep_dive: for following_user in user.get_following(): self.user_following_starred_repositories.extend( following_user.get_starred() )
python
def __populate_repositories_of_interest(self, username): # Handle to the user to whom repositories need to be suggested. user = self.github.get_user(username) # Procure repositories starred by the user. self.user_starred_repositories.extend(user.get_starred()) # Repositories starred by users followed by the user. if self.deep_dive: for following_user in user.get_following(): self.user_following_starred_repositories.extend( following_user.get_starred() )
[ "def", "__populate_repositories_of_interest", "(", "self", ",", "username", ")", ":", "# Handle to the user to whom repositories need to be suggested.", "user", "=", "self", ".", "github", ".", "get_user", "(", "username", ")", "# Procure repositories starred by the user.", "...
Method to populate repositories which will be used to suggest repositories for the user. For this purpose we use two kinds of repositories. 1. Repositories starred by user him/herself. 2. Repositories starred by the users followed by the user. :param username: Username for the user for whom repositories are being suggested for.
[ "Method", "to", "populate", "repositories", "which", "will", "be", "used", "to", "suggest", "repositories", "for", "the", "user", ".", "For", "this", "purpose", "we", "use", "two", "kinds", "of", "repositories", "." ]
02efdbf50acb094e502aef9c139dde62676455ee
https://github.com/csurfer/gitsuggest/blob/02efdbf50acb094e502aef9c139dde62676455ee/gitsuggest/suggest.py#L114-L136
19,918
csurfer/gitsuggest
gitsuggest/suggest.py
GitSuggest.__get_interests
def __get_interests(self): """Method to procure description of repositories the authenticated user is interested in. We currently attribute interest to: 1. The repositories the authenticated user has starred. 2. The repositories the users the authenticated user follows have starred. :return: List of repository descriptions. """ # All repositories of interest. repos_of_interest = itertools.chain( self.user_starred_repositories, self.user_following_starred_repositories, ) # Extract descriptions out of repositories of interest. repo_descriptions = [repo.description for repo in repos_of_interest] return list(set(repo_descriptions))
python
def __get_interests(self): # All repositories of interest. repos_of_interest = itertools.chain( self.user_starred_repositories, self.user_following_starred_repositories, ) # Extract descriptions out of repositories of interest. repo_descriptions = [repo.description for repo in repos_of_interest] return list(set(repo_descriptions))
[ "def", "__get_interests", "(", "self", ")", ":", "# All repositories of interest.", "repos_of_interest", "=", "itertools", ".", "chain", "(", "self", ".", "user_starred_repositories", ",", "self", ".", "user_following_starred_repositories", ",", ")", "# Extract descriptio...
Method to procure description of repositories the authenticated user is interested in. We currently attribute interest to: 1. The repositories the authenticated user has starred. 2. The repositories the users the authenticated user follows have starred. :return: List of repository descriptions.
[ "Method", "to", "procure", "description", "of", "repositories", "the", "authenticated", "user", "is", "interested", "in", "." ]
02efdbf50acb094e502aef9c139dde62676455ee
https://github.com/csurfer/gitsuggest/blob/02efdbf50acb094e502aef9c139dde62676455ee/gitsuggest/suggest.py#L138-L157
19,919
csurfer/gitsuggest
gitsuggest/suggest.py
GitSuggest.__get_words_to_ignore
def __get_words_to_ignore(self): """Compiles list of all words to ignore. :return: List of words to ignore. """ # Stop words in English. english_stopwords = stopwords.words("english") here = path.abspath(path.dirname(__file__)) # Languages in git repositories. git_languages = [] with open(path.join(here, "gitlang/languages.txt"), "r") as langauges: git_languages = [line.strip() for line in langauges] # Other words to avoid in git repositories. words_to_avoid = [] with open(path.join(here, "gitlang/others.txt"), "r") as languages: words_to_avoid = [line.strip() for line in languages] return set( itertools.chain(english_stopwords, git_languages, words_to_avoid) )
python
def __get_words_to_ignore(self): # Stop words in English. english_stopwords = stopwords.words("english") here = path.abspath(path.dirname(__file__)) # Languages in git repositories. git_languages = [] with open(path.join(here, "gitlang/languages.txt"), "r") as langauges: git_languages = [line.strip() for line in langauges] # Other words to avoid in git repositories. words_to_avoid = [] with open(path.join(here, "gitlang/others.txt"), "r") as languages: words_to_avoid = [line.strip() for line in languages] return set( itertools.chain(english_stopwords, git_languages, words_to_avoid) )
[ "def", "__get_words_to_ignore", "(", "self", ")", ":", "# Stop words in English.", "english_stopwords", "=", "stopwords", ".", "words", "(", "\"english\"", ")", "here", "=", "path", ".", "abspath", "(", "path", ".", "dirname", "(", "__file__", ")", ")", "# Lan...
Compiles list of all words to ignore. :return: List of words to ignore.
[ "Compiles", "list", "of", "all", "words", "to", "ignore", "." ]
02efdbf50acb094e502aef9c139dde62676455ee
https://github.com/csurfer/gitsuggest/blob/02efdbf50acb094e502aef9c139dde62676455ee/gitsuggest/suggest.py#L159-L181
19,920
csurfer/gitsuggest
gitsuggest/suggest.py
GitSuggest.__clean_and_tokenize
def __clean_and_tokenize(self, doc_list): """Method to clean and tokenize the document list. :param doc_list: Document list to clean and tokenize. :return: Cleaned and tokenized document list. """ # Some repositories fill entire documentation in description. We ignore # such repositories for cleaner tokens. doc_list = filter( lambda x: x is not None and len(x) <= GitSuggest.MAX_DESC_LEN, doc_list, ) cleaned_doc_list = list() # Regular expression to remove out all punctuations, numbers and other # un-necessary text substrings like emojis etc. tokenizer = RegexpTokenizer(r"[a-zA-Z]+") # Get stop words. stopwords = self.__get_words_to_ignore() # Get english words. dict_words = self.__get_words_to_consider() for doc in doc_list: # Lowercase doc. lower = doc.lower() # Tokenize removing numbers and punctuation. tokens = tokenizer.tokenize(lower) # Include meaningful words. tokens = [tok for tok in tokens if tok in dict_words] # Remove stopwords. tokens = [tok for tok in tokens if tok not in stopwords] # Filter Nones if any are introduced. tokens = [tok for tok in tokens if tok is not None] cleaned_doc_list.append(tokens) return cleaned_doc_list
python
def __clean_and_tokenize(self, doc_list): # Some repositories fill entire documentation in description. We ignore # such repositories for cleaner tokens. doc_list = filter( lambda x: x is not None and len(x) <= GitSuggest.MAX_DESC_LEN, doc_list, ) cleaned_doc_list = list() # Regular expression to remove out all punctuations, numbers and other # un-necessary text substrings like emojis etc. tokenizer = RegexpTokenizer(r"[a-zA-Z]+") # Get stop words. stopwords = self.__get_words_to_ignore() # Get english words. dict_words = self.__get_words_to_consider() for doc in doc_list: # Lowercase doc. lower = doc.lower() # Tokenize removing numbers and punctuation. tokens = tokenizer.tokenize(lower) # Include meaningful words. tokens = [tok for tok in tokens if tok in dict_words] # Remove stopwords. tokens = [tok for tok in tokens if tok not in stopwords] # Filter Nones if any are introduced. tokens = [tok for tok in tokens if tok is not None] cleaned_doc_list.append(tokens) return cleaned_doc_list
[ "def", "__clean_and_tokenize", "(", "self", ",", "doc_list", ")", ":", "# Some repositories fill entire documentation in description. We ignore", "# such repositories for cleaner tokens.", "doc_list", "=", "filter", "(", "lambda", "x", ":", "x", "is", "not", "None", "and", ...
Method to clean and tokenize the document list. :param doc_list: Document list to clean and tokenize. :return: Cleaned and tokenized document list.
[ "Method", "to", "clean", "and", "tokenize", "the", "document", "list", "." ]
02efdbf50acb094e502aef9c139dde62676455ee
https://github.com/csurfer/gitsuggest/blob/02efdbf50acb094e502aef9c139dde62676455ee/gitsuggest/suggest.py#L190-L233
19,921
csurfer/gitsuggest
gitsuggest/suggest.py
GitSuggest.__construct_lda_model
def __construct_lda_model(self): """Method to create LDA model to procure list of topics from. We do that by first fetching the descriptions of repositories user has shown interest in. We tokenize the hence fetched descriptions to procure list of cleaned tokens by dropping all the stop words and language names from it. We use the cleaned and sanitized token list to train LDA model from which we hope to procure topics of interests to the authenticated user. """ # Fetch descriptions of repos of interest to authenticated user. repos_of_interest = self.__get_interests() # Procure clean tokens from the descriptions. cleaned_tokens = self.__clean_and_tokenize(repos_of_interest) # If cleaned tokens are empty, it can cause an exception while # generating LDA. But tokens shouldn't be something meaningful as that # would mean we are suggesting repos without reason. Hence the random # string to ensure that LDA doesn't cause exception but the token # doesn't generate any suggestions either. if not cleaned_tokens: cleaned_tokens = [["zkfgzkfgzkfgzkfgzkfgzkfg"]] # Setup LDA requisites. dictionary = corpora.Dictionary(cleaned_tokens) corpus = [dictionary.doc2bow(text) for text in cleaned_tokens] # Generate LDA model self.lda_model = models.ldamodel.LdaModel( corpus, num_topics=1, id2word=dictionary, passes=10 )
python
def __construct_lda_model(self): # Fetch descriptions of repos of interest to authenticated user. repos_of_interest = self.__get_interests() # Procure clean tokens from the descriptions. cleaned_tokens = self.__clean_and_tokenize(repos_of_interest) # If cleaned tokens are empty, it can cause an exception while # generating LDA. But tokens shouldn't be something meaningful as that # would mean we are suggesting repos without reason. Hence the random # string to ensure that LDA doesn't cause exception but the token # doesn't generate any suggestions either. if not cleaned_tokens: cleaned_tokens = [["zkfgzkfgzkfgzkfgzkfgzkfg"]] # Setup LDA requisites. dictionary = corpora.Dictionary(cleaned_tokens) corpus = [dictionary.doc2bow(text) for text in cleaned_tokens] # Generate LDA model self.lda_model = models.ldamodel.LdaModel( corpus, num_topics=1, id2word=dictionary, passes=10 )
[ "def", "__construct_lda_model", "(", "self", ")", ":", "# Fetch descriptions of repos of interest to authenticated user.", "repos_of_interest", "=", "self", ".", "__get_interests", "(", ")", "# Procure clean tokens from the descriptions.", "cleaned_tokens", "=", "self", ".", "_...
Method to create LDA model to procure list of topics from. We do that by first fetching the descriptions of repositories user has shown interest in. We tokenize the hence fetched descriptions to procure list of cleaned tokens by dropping all the stop words and language names from it. We use the cleaned and sanitized token list to train LDA model from which we hope to procure topics of interests to the authenticated user.
[ "Method", "to", "create", "LDA", "model", "to", "procure", "list", "of", "topics", "from", "." ]
02efdbf50acb094e502aef9c139dde62676455ee
https://github.com/csurfer/gitsuggest/blob/02efdbf50acb094e502aef9c139dde62676455ee/gitsuggest/suggest.py#L235-L267
19,922
csurfer/gitsuggest
gitsuggest/suggest.py
GitSuggest.__get_query_for_repos
def __get_query_for_repos(self, term_count=5): """Method to procure query based on topics authenticated user is interested in. :param term_count: Count of terms in query. :return: Query string. """ repo_query_terms = list() for term in self.lda_model.get_topic_terms(0, topn=term_count): repo_query_terms.append(self.lda_model.id2word[term[0]]) return " ".join(repo_query_terms)
python
def __get_query_for_repos(self, term_count=5): repo_query_terms = list() for term in self.lda_model.get_topic_terms(0, topn=term_count): repo_query_terms.append(self.lda_model.id2word[term[0]]) return " ".join(repo_query_terms)
[ "def", "__get_query_for_repos", "(", "self", ",", "term_count", "=", "5", ")", ":", "repo_query_terms", "=", "list", "(", ")", "for", "term", "in", "self", ".", "lda_model", ".", "get_topic_terms", "(", "0", ",", "topn", "=", "term_count", ")", ":", "rep...
Method to procure query based on topics authenticated user is interested in. :param term_count: Count of terms in query. :return: Query string.
[ "Method", "to", "procure", "query", "based", "on", "topics", "authenticated", "user", "is", "interested", "in", "." ]
02efdbf50acb094e502aef9c139dde62676455ee
https://github.com/csurfer/gitsuggest/blob/02efdbf50acb094e502aef9c139dde62676455ee/gitsuggest/suggest.py#L269-L279
19,923
csurfer/gitsuggest
gitsuggest/suggest.py
GitSuggest.get_suggested_repositories
def get_suggested_repositories(self): """Method to procure suggested repositories for the user. :return: Iterator to procure suggested repositories for the user. """ if self.suggested_repositories is None: # Procure repositories to suggest to user. repository_set = list() for term_count in range(5, 2, -1): query = self.__get_query_for_repos(term_count=term_count) repository_set.extend(self.__get_repos_for_query(query)) # Remove repositories authenticated user is already interested in. catchy_repos = GitSuggest.minus( repository_set, self.user_starred_repositories ) # Filter out repositories with too long descriptions. This is a # measure to weed out spammy repositories. filtered_repos = [] if len(catchy_repos) > 0: for repo in catchy_repos: if ( repo is not None and repo.description is not None and len(repo.description) <= GitSuggest.MAX_DESC_LEN ): filtered_repos.append(repo) # Present the repositories, highly starred to not starred. filtered_repos = sorted( filtered_repos, key=attrgetter("stargazers_count"), reverse=True, ) self.suggested_repositories = GitSuggest.get_unique_repositories( filtered_repos ) # Return an iterator to help user fetch the repository listing. for repository in self.suggested_repositories: yield repository
python
def get_suggested_repositories(self): if self.suggested_repositories is None: # Procure repositories to suggest to user. repository_set = list() for term_count in range(5, 2, -1): query = self.__get_query_for_repos(term_count=term_count) repository_set.extend(self.__get_repos_for_query(query)) # Remove repositories authenticated user is already interested in. catchy_repos = GitSuggest.minus( repository_set, self.user_starred_repositories ) # Filter out repositories with too long descriptions. This is a # measure to weed out spammy repositories. filtered_repos = [] if len(catchy_repos) > 0: for repo in catchy_repos: if ( repo is not None and repo.description is not None and len(repo.description) <= GitSuggest.MAX_DESC_LEN ): filtered_repos.append(repo) # Present the repositories, highly starred to not starred. filtered_repos = sorted( filtered_repos, key=attrgetter("stargazers_count"), reverse=True, ) self.suggested_repositories = GitSuggest.get_unique_repositories( filtered_repos ) # Return an iterator to help user fetch the repository listing. for repository in self.suggested_repositories: yield repository
[ "def", "get_suggested_repositories", "(", "self", ")", ":", "if", "self", ".", "suggested_repositories", "is", "None", ":", "# Procure repositories to suggest to user.", "repository_set", "=", "list", "(", ")", "for", "term_count", "in", "range", "(", "5", ",", "2...
Method to procure suggested repositories for the user. :return: Iterator to procure suggested repositories for the user.
[ "Method", "to", "procure", "suggested", "repositories", "for", "the", "user", "." ]
02efdbf50acb094e502aef9c139dde62676455ee
https://github.com/csurfer/gitsuggest/blob/02efdbf50acb094e502aef9c139dde62676455ee/gitsuggest/suggest.py#L296-L339
19,924
bcicen/wikitables
wikitables/util.py
guess_type
def guess_type(s): """ attempt to convert string value into numeric type """ sc = s.replace(',', '') # remove comma from potential numbers try: return int(sc) except ValueError: pass try: return float(sc) except ValueError: pass return s
python
def guess_type(s): sc = s.replace(',', '') # remove comma from potential numbers try: return int(sc) except ValueError: pass try: return float(sc) except ValueError: pass return s
[ "def", "guess_type", "(", "s", ")", ":", "sc", "=", "s", ".", "replace", "(", "','", ",", "''", ")", "# remove comma from potential numbers", "try", ":", "return", "int", "(", "sc", ")", "except", "ValueError", ":", "pass", "try", ":", "return", "float",...
attempt to convert string value into numeric type
[ "attempt", "to", "convert", "string", "value", "into", "numeric", "type" ]
055cbabaa60762edbab78bf6a76ba19875f328f7
https://github.com/bcicen/wikitables/blob/055cbabaa60762edbab78bf6a76ba19875f328f7/wikitables/util.py#L15-L29
19,925
bcicen/wikitables
wikitables/readers.py
FieldReader.parse
def parse(self, node): """ Return generator yielding Field objects for a given node """ self._attrs = {} vals = [] yielded = False for x in self._read_parts(node): if isinstance(x, Field): yielded = True x.attrs = self._attrs yield x else: vals.append(ustr(x).strip(' \n\t')) joined = ' '.join([ x for x in vals if x ]) if joined: yielded = True yield Field(node, guess_type(joined), self._attrs) if not yielded: yield Field(node, "", self._attrs)
python
def parse(self, node): self._attrs = {} vals = [] yielded = False for x in self._read_parts(node): if isinstance(x, Field): yielded = True x.attrs = self._attrs yield x else: vals.append(ustr(x).strip(' \n\t')) joined = ' '.join([ x for x in vals if x ]) if joined: yielded = True yield Field(node, guess_type(joined), self._attrs) if not yielded: yield Field(node, "", self._attrs)
[ "def", "parse", "(", "self", ",", "node", ")", ":", "self", ".", "_attrs", "=", "{", "}", "vals", "=", "[", "]", "yielded", "=", "False", "for", "x", "in", "self", ".", "_read_parts", "(", "node", ")", ":", "if", "isinstance", "(", "x", ",", "F...
Return generator yielding Field objects for a given node
[ "Return", "generator", "yielding", "Field", "objects", "for", "a", "given", "node" ]
055cbabaa60762edbab78bf6a76ba19875f328f7
https://github.com/bcicen/wikitables/blob/055cbabaa60762edbab78bf6a76ba19875f328f7/wikitables/readers.py#L21-L43
19,926
bcicen/wikitables
wikitables/readers.py
RowReader.parse
def parse(self, *nodes): """ Parse one or more `tr` nodes, yielding wikitables.Row objects """ for n in nodes: if not n.contents: continue row = self._parse(n) if not row.is_null: yield row
python
def parse(self, *nodes): for n in nodes: if not n.contents: continue row = self._parse(n) if not row.is_null: yield row
[ "def", "parse", "(", "self", ",", "*", "nodes", ")", ":", "for", "n", "in", "nodes", ":", "if", "not", "n", ".", "contents", ":", "continue", "row", "=", "self", ".", "_parse", "(", "n", ")", "if", "not", "row", ".", "is_null", ":", "yield", "r...
Parse one or more `tr` nodes, yielding wikitables.Row objects
[ "Parse", "one", "or", "more", "tr", "nodes", "yielding", "wikitables", ".", "Row", "objects" ]
055cbabaa60762edbab78bf6a76ba19875f328f7
https://github.com/bcicen/wikitables/blob/055cbabaa60762edbab78bf6a76ba19875f328f7/wikitables/readers.py#L102-L111
19,927
bcicen/wikitables
wikitables/__init__.py
WikiTable._find_header_row
def _find_header_row(self): """ Evaluate all rows and determine header position, based on greatest number of 'th' tagged elements """ th_max = 0 header_idx = 0 for idx, tr in enumerate(self._tr_nodes): th_count = len(tr.contents.filter_tags(matches=ftag('th'))) if th_count > th_max: th_max = th_count header_idx = idx if not th_max: return self._log('found header at row %d (%d <th> elements)' % \ (header_idx, th_max)) header_row = self._tr_nodes.pop(header_idx) return header_row.contents.filter_tags(matches=ftag('th'))
python
def _find_header_row(self): th_max = 0 header_idx = 0 for idx, tr in enumerate(self._tr_nodes): th_count = len(tr.contents.filter_tags(matches=ftag('th'))) if th_count > th_max: th_max = th_count header_idx = idx if not th_max: return self._log('found header at row %d (%d <th> elements)' % \ (header_idx, th_max)) header_row = self._tr_nodes.pop(header_idx) return header_row.contents.filter_tags(matches=ftag('th'))
[ "def", "_find_header_row", "(", "self", ")", ":", "th_max", "=", "0", "header_idx", "=", "0", "for", "idx", ",", "tr", "in", "enumerate", "(", "self", ".", "_tr_nodes", ")", ":", "th_count", "=", "len", "(", "tr", ".", "contents", ".", "filter_tags", ...
Evaluate all rows and determine header position, based on greatest number of 'th' tagged elements
[ "Evaluate", "all", "rows", "and", "determine", "header", "position", "based", "on", "greatest", "number", "of", "th", "tagged", "elements" ]
055cbabaa60762edbab78bf6a76ba19875f328f7
https://github.com/bcicen/wikitables/blob/055cbabaa60762edbab78bf6a76ba19875f328f7/wikitables/__init__.py#L92-L112
19,928
bcicen/wikitables
wikitables/__init__.py
WikiTable._make_default_header
def _make_default_header(self): """ Return a generic placeholder header based on the tables column count """ td_max = 0 for idx, tr in enumerate(self._tr_nodes): td_count = len(tr.contents.filter_tags(matches=ftag('td'))) if td_count > td_max: td_max = td_count self._log('creating default header (%d columns)' % td_max) return [ 'column%d' % n for n in range(0,td_max) ]
python
def _make_default_header(self): td_max = 0 for idx, tr in enumerate(self._tr_nodes): td_count = len(tr.contents.filter_tags(matches=ftag('td'))) if td_count > td_max: td_max = td_count self._log('creating default header (%d columns)' % td_max) return [ 'column%d' % n for n in range(0,td_max) ]
[ "def", "_make_default_header", "(", "self", ")", ":", "td_max", "=", "0", "for", "idx", ",", "tr", "in", "enumerate", "(", "self", ".", "_tr_nodes", ")", ":", "td_count", "=", "len", "(", "tr", ".", "contents", ".", "filter_tags", "(", "matches", "=", ...
Return a generic placeholder header based on the tables column count
[ "Return", "a", "generic", "placeholder", "header", "based", "on", "the", "tables", "column", "count" ]
055cbabaa60762edbab78bf6a76ba19875f328f7
https://github.com/bcicen/wikitables/blob/055cbabaa60762edbab78bf6a76ba19875f328f7/wikitables/__init__.py#L114-L126
19,929
bcicen/wikitables
wikitables/client.py
Client.fetch_page
def fetch_page(self, title, method='GET'): """ Query for page by title """ params = { 'prop': 'revisions', 'format': 'json', 'action': 'query', 'explaintext': '', 'titles': title, 'rvprop': 'content' } r = self.request(method, self.base_url, params=params) r.raise_for_status() pages = r.json()["query"]["pages"] # use key from first result in 'pages' array pageid = list(pages.keys())[0] if pageid == '-1': raise ArticleNotFound('no matching articles returned') return pages[pageid]
python
def fetch_page(self, title, method='GET'): params = { 'prop': 'revisions', 'format': 'json', 'action': 'query', 'explaintext': '', 'titles': title, 'rvprop': 'content' } r = self.request(method, self.base_url, params=params) r.raise_for_status() pages = r.json()["query"]["pages"] # use key from first result in 'pages' array pageid = list(pages.keys())[0] if pageid == '-1': raise ArticleNotFound('no matching articles returned') return pages[pageid]
[ "def", "fetch_page", "(", "self", ",", "title", ",", "method", "=", "'GET'", ")", ":", "params", "=", "{", "'prop'", ":", "'revisions'", ",", "'format'", ":", "'json'", ",", "'action'", ":", "'query'", ",", "'explaintext'", ":", "''", ",", "'titles'", ...
Query for page by title
[ "Query", "for", "page", "by", "title" ]
055cbabaa60762edbab78bf6a76ba19875f328f7
https://github.com/bcicen/wikitables/blob/055cbabaa60762edbab78bf6a76ba19875f328f7/wikitables/client.py#L16-L32
19,930
wooparadog/pystack
pystack.py
print_stack
def print_stack(pid, include_greenlet=False, debugger=None, verbose=False): """Executes a file in a running Python process.""" # TextIOWrapper of Python 3 is so strange. sys_stdout = getattr(sys.stdout, 'buffer', sys.stdout) sys_stderr = getattr(sys.stderr, 'buffer', sys.stderr) make_args = make_gdb_args environ = dict(os.environ) if ( debugger == 'lldb' or (debugger is None and platform.system().lower() == 'darwin') ): make_args = make_lldb_args # fix the PATH environment variable for using built-in Python with lldb environ['PATH'] = '/usr/bin:%s' % environ.get('PATH', '') tmp_fd, tmp_path = tempfile.mkstemp() os.chmod(tmp_path, 0o777) commands = [] commands.append(FILE_OPEN_COMMAND) commands.extend(UTILITY_COMMANDS) commands.extend(THREAD_STACK_COMMANDS) if include_greenlet: commands.extend(GREENLET_STACK_COMMANDS) commands.append(FILE_CLOSE_COMMAND) command = r';'.join(commands) args = make_args(pid, command % tmp_path) process = subprocess.Popen( args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = process.communicate() if verbose: sys_stderr.write(b'Standard Output:\n%s\n' % out) sys_stderr.write(b'Standard Error:\n%s\n' % err) sys_stderr.flush() for chunk in iter(functools.partial(os.read, tmp_fd, 1024), b''): sys_stdout.write(chunk) sys_stdout.write(b'\n') sys_stdout.flush()
python
def print_stack(pid, include_greenlet=False, debugger=None, verbose=False): # TextIOWrapper of Python 3 is so strange. sys_stdout = getattr(sys.stdout, 'buffer', sys.stdout) sys_stderr = getattr(sys.stderr, 'buffer', sys.stderr) make_args = make_gdb_args environ = dict(os.environ) if ( debugger == 'lldb' or (debugger is None and platform.system().lower() == 'darwin') ): make_args = make_lldb_args # fix the PATH environment variable for using built-in Python with lldb environ['PATH'] = '/usr/bin:%s' % environ.get('PATH', '') tmp_fd, tmp_path = tempfile.mkstemp() os.chmod(tmp_path, 0o777) commands = [] commands.append(FILE_OPEN_COMMAND) commands.extend(UTILITY_COMMANDS) commands.extend(THREAD_STACK_COMMANDS) if include_greenlet: commands.extend(GREENLET_STACK_COMMANDS) commands.append(FILE_CLOSE_COMMAND) command = r';'.join(commands) args = make_args(pid, command % tmp_path) process = subprocess.Popen( args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = process.communicate() if verbose: sys_stderr.write(b'Standard Output:\n%s\n' % out) sys_stderr.write(b'Standard Error:\n%s\n' % err) sys_stderr.flush() for chunk in iter(functools.partial(os.read, tmp_fd, 1024), b''): sys_stdout.write(chunk) sys_stdout.write(b'\n') sys_stdout.flush()
[ "def", "print_stack", "(", "pid", ",", "include_greenlet", "=", "False", ",", "debugger", "=", "None", ",", "verbose", "=", "False", ")", ":", "# TextIOWrapper of Python 3 is so strange.", "sys_stdout", "=", "getattr", "(", "sys", ".", "stdout", ",", "'buffer'",...
Executes a file in a running Python process.
[ "Executes", "a", "file", "in", "a", "running", "Python", "process", "." ]
1ee5bb0ab516f60dd407d7b18d2faa752a8e289c
https://github.com/wooparadog/pystack/blob/1ee5bb0ab516f60dd407d7b18d2faa752a8e289c/pystack.py#L77-L116
19,931
wooparadog/pystack
pystack.py
cli_main
def cli_main(pid, include_greenlet, debugger, verbose): '''Print stack of python process. $ pystack <pid> ''' try: print_stack(pid, include_greenlet, debugger, verbose) except DebuggerNotFound as e: click.echo('DebuggerNotFound: %s' % e.args[0], err=True) click.get_current_context().exit(1)
python
def cli_main(pid, include_greenlet, debugger, verbose): '''Print stack of python process. $ pystack <pid> ''' try: print_stack(pid, include_greenlet, debugger, verbose) except DebuggerNotFound as e: click.echo('DebuggerNotFound: %s' % e.args[0], err=True) click.get_current_context().exit(1)
[ "def", "cli_main", "(", "pid", ",", "include_greenlet", ",", "debugger", ",", "verbose", ")", ":", "try", ":", "print_stack", "(", "pid", ",", "include_greenlet", ",", "debugger", ",", "verbose", ")", "except", "DebuggerNotFound", "as", "e", ":", "click", ...
Print stack of python process. $ pystack <pid>
[ "Print", "stack", "of", "python", "process", "." ]
1ee5bb0ab516f60dd407d7b18d2faa752a8e289c
https://github.com/wooparadog/pystack/blob/1ee5bb0ab516f60dd407d7b18d2faa752a8e289c/pystack.py#L131-L140
19,932
rahul13ramesh/hidden_markov
hidden_markov/hmm_class.py
hmm.forward_algo
def forward_algo(self,observations): """ Finds the probability of an observation sequence for given model parameters **Arguments**: :param observations: The observation sequence, where each element belongs to 'observations' variable declared with __init__ object. :type observations: A list or tuple :return: The probability of occurence of the observation sequence :rtype: float **Example**: >>> states = ('s', 't') >>> possible_observation = ('A','B' ) >>> # Numpy arrays of the data >>> start_probability = np.matrix( '0.5 0.5 ') >>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ') >>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' ) >>> # Initialize class object >>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability) >>> observations = ('A', 'B','B','A') >>> print(test.forward_algo(observations)) .. note:: No scaling applied here and hence this routine is susceptible to underflow errors. Use :func:`hmm.log_prob` instead. """ # Store total number of observations total_stages = len(observations) total_stages = len(observations) # Alpha[i] stores the probability of reaching state 'i' in stage 'j' where 'j' is the iteration number # Inittialize Alpha ob_ind = self.obs_map[ observations[0] ] alpha = np.multiply ( np.transpose(self.em_prob[:,ob_ind]) , self.start_prob ) # Iteratively find alpha(using knowledge of alpha in the previous stage) for curr_t in range(1,total_stages): ob_ind = self.obs_map[observations[curr_t]] alpha = np.dot( alpha , self.trans_prob) alpha = np.multiply( alpha , np.transpose( self.em_prob[:,ob_ind] )) # Sum the alpha's over the last stage total_prob = alpha.sum() return ( total_prob )
python
def forward_algo(self,observations): # Store total number of observations total_stages = len(observations) total_stages = len(observations) # Alpha[i] stores the probability of reaching state 'i' in stage 'j' where 'j' is the iteration number # Inittialize Alpha ob_ind = self.obs_map[ observations[0] ] alpha = np.multiply ( np.transpose(self.em_prob[:,ob_ind]) , self.start_prob ) # Iteratively find alpha(using knowledge of alpha in the previous stage) for curr_t in range(1,total_stages): ob_ind = self.obs_map[observations[curr_t]] alpha = np.dot( alpha , self.trans_prob) alpha = np.multiply( alpha , np.transpose( self.em_prob[:,ob_ind] )) # Sum the alpha's over the last stage total_prob = alpha.sum() return ( total_prob )
[ "def", "forward_algo", "(", "self", ",", "observations", ")", ":", "# Store total number of observations total_stages = len(observations) ", "total_stages", "=", "len", "(", "observations", ")", "# Alpha[i] stores the probability of reaching state 'i' in stage 'j' where 'j' is the iter...
Finds the probability of an observation sequence for given model parameters **Arguments**: :param observations: The observation sequence, where each element belongs to 'observations' variable declared with __init__ object. :type observations: A list or tuple :return: The probability of occurence of the observation sequence :rtype: float **Example**: >>> states = ('s', 't') >>> possible_observation = ('A','B' ) >>> # Numpy arrays of the data >>> start_probability = np.matrix( '0.5 0.5 ') >>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ') >>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' ) >>> # Initialize class object >>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability) >>> observations = ('A', 'B','B','A') >>> print(test.forward_algo(observations)) .. note:: No scaling applied here and hence this routine is susceptible to underflow errors. Use :func:`hmm.log_prob` instead.
[ "Finds", "the", "probability", "of", "an", "observation", "sequence", "for", "given", "model", "parameters" ]
6ba6012665f9e09c980ff70901604d051ba57dcc
https://github.com/rahul13ramesh/hidden_markov/blob/6ba6012665f9e09c980ff70901604d051ba57dcc/hidden_markov/hmm_class.py#L144-L190
19,933
rahul13ramesh/hidden_markov
hidden_markov/hmm_class.py
hmm.viterbi
def viterbi(self,observations): """ The probability of occurence of the observation sequence **Arguments**: :param observations: The observation sequence, where each element belongs to 'observations' variable declared with __init__ object. :type observations: A list or tuple :return: Returns a list of hidden states. :rtype: list of states **Features**: Scaling applied here. This ensures that no underflow error occurs. **Example**: >>> states = ('s', 't') >>> possible_observation = ('A','B' ) >>> # Numpy arrays of the data >>> start_probability = np.matrix( '0.5 0.5 ') >>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ') >>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' ) >>> # Initialize class object >>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability) >>> observations = ('A', 'B','B','A') >>> print(test.viterbi(observations)) """ # Find total states,observations total_stages = len(observations) num_states = len(self.states) # initialize data # Path stores the state sequence giving maximum probability old_path = np.zeros( (total_stages, num_states) ) new_path = np.zeros( (total_stages, num_states) ) # Find initial delta # Map observation to an index # delta[s] stores the probability of most probable path ending in state 's' ob_ind = self.obs_map[ observations[0] ] delta = np.multiply ( np.transpose(self.em_prob[:,ob_ind]) , self.start_prob ) # Scale delta delta = delta /np.sum(delta) # initialize path old_path[0,:] = [i for i in range(num_states) ] # Find delta[t][x] for each state 'x' at the iteration 't' # delta[t][x] can be found using delta[t-1][x] and taking the maximum possible path for curr_t in range(1,total_stages): # Map observation to an index ob_ind = self.obs_map[ observations[curr_t] ] # Find temp and take max along each row to get delta temp = np.multiply (np.multiply(delta , self.trans_prob.transpose()) , self.em_prob[:, ob_ind] ) # Update delta and scale it delta = temp.max(axis 
= 1).transpose() delta = delta /np.sum(delta) # Find state which is most probable using argax # Convert to a list for easier processing max_temp = temp.argmax(axis=1).transpose() max_temp = np.ravel(max_temp).tolist() # Update path for s in range(num_states): new_path[:curr_t,s] = old_path[0:curr_t, max_temp[s] ] new_path[curr_t,:] = [i for i in range(num_states) ] old_path = new_path.copy() # Find the state in last stage, giving maximum probability final_max = np.argmax(np.ravel(delta)) best_path = old_path[:,final_max].tolist() best_path_map = [ self.state_map[i] for i in best_path] return best_path_map
python
def viterbi(self,observations): # Find total states,observations total_stages = len(observations) num_states = len(self.states) # initialize data # Path stores the state sequence giving maximum probability old_path = np.zeros( (total_stages, num_states) ) new_path = np.zeros( (total_stages, num_states) ) # Find initial delta # Map observation to an index # delta[s] stores the probability of most probable path ending in state 's' ob_ind = self.obs_map[ observations[0] ] delta = np.multiply ( np.transpose(self.em_prob[:,ob_ind]) , self.start_prob ) # Scale delta delta = delta /np.sum(delta) # initialize path old_path[0,:] = [i for i in range(num_states) ] # Find delta[t][x] for each state 'x' at the iteration 't' # delta[t][x] can be found using delta[t-1][x] and taking the maximum possible path for curr_t in range(1,total_stages): # Map observation to an index ob_ind = self.obs_map[ observations[curr_t] ] # Find temp and take max along each row to get delta temp = np.multiply (np.multiply(delta , self.trans_prob.transpose()) , self.em_prob[:, ob_ind] ) # Update delta and scale it delta = temp.max(axis = 1).transpose() delta = delta /np.sum(delta) # Find state which is most probable using argax # Convert to a list for easier processing max_temp = temp.argmax(axis=1).transpose() max_temp = np.ravel(max_temp).tolist() # Update path for s in range(num_states): new_path[:curr_t,s] = old_path[0:curr_t, max_temp[s] ] new_path[curr_t,:] = [i for i in range(num_states) ] old_path = new_path.copy() # Find the state in last stage, giving maximum probability final_max = np.argmax(np.ravel(delta)) best_path = old_path[:,final_max].tolist() best_path_map = [ self.state_map[i] for i in best_path] return best_path_map
[ "def", "viterbi", "(", "self", ",", "observations", ")", ":", "# Find total states,observations", "total_stages", "=", "len", "(", "observations", ")", "num_states", "=", "len", "(", "self", ".", "states", ")", "# initialize data", "# Path stores the state sequence gi...
The probability of occurence of the observation sequence **Arguments**: :param observations: The observation sequence, where each element belongs to 'observations' variable declared with __init__ object. :type observations: A list or tuple :return: Returns a list of hidden states. :rtype: list of states **Features**: Scaling applied here. This ensures that no underflow error occurs. **Example**: >>> states = ('s', 't') >>> possible_observation = ('A','B' ) >>> # Numpy arrays of the data >>> start_probability = np.matrix( '0.5 0.5 ') >>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ') >>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' ) >>> # Initialize class object >>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability) >>> observations = ('A', 'B','B','A') >>> print(test.viterbi(observations))
[ "The", "probability", "of", "occurence", "of", "the", "observation", "sequence" ]
6ba6012665f9e09c980ff70901604d051ba57dcc
https://github.com/rahul13ramesh/hidden_markov/blob/6ba6012665f9e09c980ff70901604d051ba57dcc/hidden_markov/hmm_class.py#L194-L277
19,934
rahul13ramesh/hidden_markov
hidden_markov/hmm_class.py
hmm.train_hmm
def train_hmm(self,observation_list, iterations, quantities): """ Runs the Baum Welch Algorithm and finds the new model parameters **Arguments**: :param observation_list: A nested list, or a list of lists :type observation_list: Contains a list multiple observation sequences. :param iterations: Maximum number of iterations for the algorithm :type iterations: An integer :param quantities: Number of times, each corresponding item in 'observation_list' occurs. :type quantities: A list of integers :return: Returns the emission, transition and start probabilites as numpy matrices :rtype: Three numpy matices **Features**: Scaling applied here. This ensures that no underflow error occurs. **Example**: >>> states = ('s', 't') >>> possible_observation = ('A','B' ) >>> # Numpy arrays of the data >>> start_probability = np.matrix( '0.5 0.5 ') >>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ') >>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' ) >>> # Initialize class object >>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability) >>> >>> observations = ('A', 'B','B','A') >>> obs4 = ('B', 'A','B') >>> observation_tuple = [] >>> observation_tuple.extend( [observations,obs4] ) >>> quantities_observations = [10, 20] >>> num_iter=1000 >>> e,t,s = test.train_hmm(observation_tuple,num_iter,quantities_observations) >>> # e,t,s contain new emission transition and start probabilities """ obs_size = len(observation_list) prob = float('inf') q = quantities # Train the model 'iteration' number of times # store em_prob and trans_prob copies since you should use same values for one loop for i in range(iterations): emProbNew = np.asmatrix(np.zeros((self.em_prob.shape))) transProbNew = np.asmatrix(np.zeros((self.trans_prob.shape))) startProbNew = np.asmatrix(np.zeros((self.start_prob.shape))) for j in range(obs_size): # re-assing values based on weight emProbNew= emProbNew + q[j] * self._train_emission(observation_list[j]) 
transProbNew = transProbNew + q[j] * self._train_transition(observation_list[j]) startProbNew = startProbNew + q[j] * self._train_start_prob(observation_list[j]) # Normalizing em_norm = emProbNew.sum(axis = 1) trans_norm = transProbNew.sum(axis = 1) start_norm = startProbNew.sum(axis = 1) emProbNew = emProbNew/ em_norm.transpose() startProbNew = startProbNew/ start_norm.transpose() transProbNew = transProbNew/ trans_norm.transpose() self.em_prob,self.trans_prob = emProbNew,transProbNew self.start_prob = startProbNew if prob - self.log_prob(observation_list,quantities)>0.0000001: prob = self.log_prob(observation_list,quantities) else: return self.em_prob, self.trans_prob , self.start_prob return self.em_prob, self.trans_prob , self.start_prob
python
def train_hmm(self,observation_list, iterations, quantities): obs_size = len(observation_list) prob = float('inf') q = quantities # Train the model 'iteration' number of times # store em_prob and trans_prob copies since you should use same values for one loop for i in range(iterations): emProbNew = np.asmatrix(np.zeros((self.em_prob.shape))) transProbNew = np.asmatrix(np.zeros((self.trans_prob.shape))) startProbNew = np.asmatrix(np.zeros((self.start_prob.shape))) for j in range(obs_size): # re-assing values based on weight emProbNew= emProbNew + q[j] * self._train_emission(observation_list[j]) transProbNew = transProbNew + q[j] * self._train_transition(observation_list[j]) startProbNew = startProbNew + q[j] * self._train_start_prob(observation_list[j]) # Normalizing em_norm = emProbNew.sum(axis = 1) trans_norm = transProbNew.sum(axis = 1) start_norm = startProbNew.sum(axis = 1) emProbNew = emProbNew/ em_norm.transpose() startProbNew = startProbNew/ start_norm.transpose() transProbNew = transProbNew/ trans_norm.transpose() self.em_prob,self.trans_prob = emProbNew,transProbNew self.start_prob = startProbNew if prob - self.log_prob(observation_list,quantities)>0.0000001: prob = self.log_prob(observation_list,quantities) else: return self.em_prob, self.trans_prob , self.start_prob return self.em_prob, self.trans_prob , self.start_prob
[ "def", "train_hmm", "(", "self", ",", "observation_list", ",", "iterations", ",", "quantities", ")", ":", "obs_size", "=", "len", "(", "observation_list", ")", "prob", "=", "float", "(", "'inf'", ")", "q", "=", "quantities", "# Train the model 'iteration' number...
Runs the Baum Welch Algorithm and finds the new model parameters **Arguments**: :param observation_list: A nested list, or a list of lists :type observation_list: Contains a list multiple observation sequences. :param iterations: Maximum number of iterations for the algorithm :type iterations: An integer :param quantities: Number of times, each corresponding item in 'observation_list' occurs. :type quantities: A list of integers :return: Returns the emission, transition and start probabilites as numpy matrices :rtype: Three numpy matices **Features**: Scaling applied here. This ensures that no underflow error occurs. **Example**: >>> states = ('s', 't') >>> possible_observation = ('A','B' ) >>> # Numpy arrays of the data >>> start_probability = np.matrix( '0.5 0.5 ') >>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ') >>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' ) >>> # Initialize class object >>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability) >>> >>> observations = ('A', 'B','B','A') >>> obs4 = ('B', 'A','B') >>> observation_tuple = [] >>> observation_tuple.extend( [observations,obs4] ) >>> quantities_observations = [10, 20] >>> num_iter=1000 >>> e,t,s = test.train_hmm(observation_tuple,num_iter,quantities_observations) >>> # e,t,s contain new emission transition and start probabilities
[ "Runs", "the", "Baum", "Welch", "Algorithm", "and", "finds", "the", "new", "model", "parameters" ]
6ba6012665f9e09c980ff70901604d051ba57dcc
https://github.com/rahul13ramesh/hidden_markov/blob/6ba6012665f9e09c980ff70901604d051ba57dcc/hidden_markov/hmm_class.py#L281-L363
19,935
rahul13ramesh/hidden_markov
hidden_markov/hmm_class.py
hmm.log_prob
def log_prob(self,observations_list, quantities): """ Finds Weighted log probability of a list of observation sequences **Arguments**: :param observation_list: A nested list, or a list of lists :type observation_list: Contains a list multiple observation sequences. :param quantities: Number of times, each corresponding item in 'observation_list' occurs. :type quantities: A list of integers :return: Weighted log probability of multiple observations. :rtype: float **Features**: Scaling applied here. This ensures that no underflow error occurs. **Example**: >>> states = ('s', 't') >>> possible_observation = ('A','B' ) >>> # Numpy arrays of the data >>> start_probability = np.matrix( '0.5 0.5 ') >>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ') >>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' ) >>> # Initialize class object >>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability) >>> observations = ('A', 'B','B','A') >>> obs4 = ('B', 'A','B') >>> observation_tuple = [] >>> observation_tuple.extend( [observations,obs4] ) >>> quantities_observations = [10, 20] >>> >>> prob = test.log_prob(observation_tuple, quantities_observations) """ prob = 0 for q,obs in enumerate(observations_list): temp,c_scale = self._alpha_cal(obs) prob = prob + -1 * quantities[q] * np.sum(np.log(c_scale)) return prob
python
def log_prob(self,observations_list, quantities): prob = 0 for q,obs in enumerate(observations_list): temp,c_scale = self._alpha_cal(obs) prob = prob + -1 * quantities[q] * np.sum(np.log(c_scale)) return prob
[ "def", "log_prob", "(", "self", ",", "observations_list", ",", "quantities", ")", ":", "prob", "=", "0", "for", "q", ",", "obs", "in", "enumerate", "(", "observations_list", ")", ":", "temp", ",", "c_scale", "=", "self", ".", "_alpha_cal", "(", "obs", ...
Finds Weighted log probability of a list of observation sequences **Arguments**: :param observation_list: A nested list, or a list of lists :type observation_list: Contains a list multiple observation sequences. :param quantities: Number of times, each corresponding item in 'observation_list' occurs. :type quantities: A list of integers :return: Weighted log probability of multiple observations. :rtype: float **Features**: Scaling applied here. This ensures that no underflow error occurs. **Example**: >>> states = ('s', 't') >>> possible_observation = ('A','B' ) >>> # Numpy arrays of the data >>> start_probability = np.matrix( '0.5 0.5 ') >>> transition_probability = np.matrix('0.6 0.4 ; 0.3 0.7 ') >>> emission_probability = np.matrix( '0.3 0.7 ; 0.4 0.6 ' ) >>> # Initialize class object >>> test = hmm(states,possible_observation,start_probability,transition_probability,emission_probability) >>> observations = ('A', 'B','B','A') >>> obs4 = ('B', 'A','B') >>> observation_tuple = [] >>> observation_tuple.extend( [observations,obs4] ) >>> quantities_observations = [10, 20] >>> >>> prob = test.log_prob(observation_tuple, quantities_observations)
[ "Finds", "Weighted", "log", "probability", "of", "a", "list", "of", "observation", "sequences" ]
6ba6012665f9e09c980ff70901604d051ba57dcc
https://github.com/rahul13ramesh/hidden_markov/blob/6ba6012665f9e09c980ff70901604d051ba57dcc/hidden_markov/hmm_class.py#L513-L555
19,936
mortada/fredapi
fredapi/fred.py
Fred.__fetch_data
def __fetch_data(self, url): """ helper function for fetching data given a request URL """ url += '&api_key=' + self.api_key try: response = urlopen(url) root = ET.fromstring(response.read()) except HTTPError as exc: root = ET.fromstring(exc.read()) raise ValueError(root.get('message')) return root
python
def __fetch_data(self, url): url += '&api_key=' + self.api_key try: response = urlopen(url) root = ET.fromstring(response.read()) except HTTPError as exc: root = ET.fromstring(exc.read()) raise ValueError(root.get('message')) return root
[ "def", "__fetch_data", "(", "self", ",", "url", ")", ":", "url", "+=", "'&api_key='", "+", "self", ".", "api_key", "try", ":", "response", "=", "urlopen", "(", "url", ")", "root", "=", "ET", ".", "fromstring", "(", "response", ".", "read", "(", ")", ...
helper function for fetching data given a request URL
[ "helper", "function", "for", "fetching", "data", "given", "a", "request", "URL" ]
d3ca79efccb9525f2752a0d6da90e793e87c3fd8
https://github.com/mortada/fredapi/blob/d3ca79efccb9525f2752a0d6da90e793e87c3fd8/fredapi/fred.py#L58-L69
19,937
mortada/fredapi
fredapi/fred.py
Fred._parse
def _parse(self, date_str, format='%Y-%m-%d'): """ helper function for parsing FRED date string into datetime """ rv = pd.to_datetime(date_str, format=format) if hasattr(rv, 'to_pydatetime'): rv = rv.to_pydatetime() return rv
python
def _parse(self, date_str, format='%Y-%m-%d'): rv = pd.to_datetime(date_str, format=format) if hasattr(rv, 'to_pydatetime'): rv = rv.to_pydatetime() return rv
[ "def", "_parse", "(", "self", ",", "date_str", ",", "format", "=", "'%Y-%m-%d'", ")", ":", "rv", "=", "pd", ".", "to_datetime", "(", "date_str", ",", "format", "=", "format", ")", "if", "hasattr", "(", "rv", ",", "'to_pydatetime'", ")", ":", "rv", "=...
helper function for parsing FRED date string into datetime
[ "helper", "function", "for", "parsing", "FRED", "date", "string", "into", "datetime" ]
d3ca79efccb9525f2752a0d6da90e793e87c3fd8
https://github.com/mortada/fredapi/blob/d3ca79efccb9525f2752a0d6da90e793e87c3fd8/fredapi/fred.py#L71-L78
19,938
mortada/fredapi
fredapi/fred.py
Fred.get_series_first_release
def get_series_first_release(self, series_id): """ Get first-release data for a Fred series id. This ignores any revision to the data series. For instance, The US GDP for Q1 2014 was first released to be 17149.6, and then later revised to 17101.3, and 17016.0. This will ignore revisions after the first release. Parameters ---------- series_id : str Fred series id such as 'GDP' Returns ------- data : Series a Series where each index is the observation date and the value is the data for the Fred series """ df = self.get_series_all_releases(series_id) first_release = df.groupby('date').head(1) data = first_release.set_index('date')['value'] return data
python
def get_series_first_release(self, series_id): df = self.get_series_all_releases(series_id) first_release = df.groupby('date').head(1) data = first_release.set_index('date')['value'] return data
[ "def", "get_series_first_release", "(", "self", ",", "series_id", ")", ":", "df", "=", "self", ".", "get_series_all_releases", "(", "series_id", ")", "first_release", "=", "df", ".", "groupby", "(", "'date'", ")", ".", "head", "(", "1", ")", "data", "=", ...
Get first-release data for a Fred series id. This ignores any revision to the data series. For instance, The US GDP for Q1 2014 was first released to be 17149.6, and then later revised to 17101.3, and 17016.0. This will ignore revisions after the first release. Parameters ---------- series_id : str Fred series id such as 'GDP' Returns ------- data : Series a Series where each index is the observation date and the value is the data for the Fred series
[ "Get", "first", "-", "release", "data", "for", "a", "Fred", "series", "id", ".", "This", "ignores", "any", "revision", "to", "the", "data", "series", ".", "For", "instance", "The", "US", "GDP", "for", "Q1", "2014", "was", "first", "released", "to", "be...
d3ca79efccb9525f2752a0d6da90e793e87c3fd8
https://github.com/mortada/fredapi/blob/d3ca79efccb9525f2752a0d6da90e793e87c3fd8/fredapi/fred.py#L160-L179
19,939
mortada/fredapi
fredapi/fred.py
Fred.get_series_as_of_date
def get_series_as_of_date(self, series_id, as_of_date): """ Get latest data for a Fred series id as known on a particular date. This includes any revision to the data series before or on as_of_date, but ignores any revision on dates after as_of_date. Parameters ---------- series_id : str Fred series id such as 'GDP' as_of_date : datetime, or datetime-like str such as '10/25/2014' Include data revisions on or before this date, and ignore revisions afterwards Returns ------- data : Series a Series where each index is the observation date and the value is the data for the Fred series """ as_of_date = pd.to_datetime(as_of_date) df = self.get_series_all_releases(series_id) data = df[df['realtime_start'] <= as_of_date] return data
python
def get_series_as_of_date(self, series_id, as_of_date): as_of_date = pd.to_datetime(as_of_date) df = self.get_series_all_releases(series_id) data = df[df['realtime_start'] <= as_of_date] return data
[ "def", "get_series_as_of_date", "(", "self", ",", "series_id", ",", "as_of_date", ")", ":", "as_of_date", "=", "pd", ".", "to_datetime", "(", "as_of_date", ")", "df", "=", "self", ".", "get_series_all_releases", "(", "series_id", ")", "data", "=", "df", "[",...
Get latest data for a Fred series id as known on a particular date. This includes any revision to the data series before or on as_of_date, but ignores any revision on dates after as_of_date. Parameters ---------- series_id : str Fred series id such as 'GDP' as_of_date : datetime, or datetime-like str such as '10/25/2014' Include data revisions on or before this date, and ignore revisions afterwards Returns ------- data : Series a Series where each index is the observation date and the value is the data for the Fred series
[ "Get", "latest", "data", "for", "a", "Fred", "series", "id", "as", "known", "on", "a", "particular", "date", ".", "This", "includes", "any", "revision", "to", "the", "data", "series", "before", "or", "on", "as_of_date", "but", "ignores", "any", "revision",...
d3ca79efccb9525f2752a0d6da90e793e87c3fd8
https://github.com/mortada/fredapi/blob/d3ca79efccb9525f2752a0d6da90e793e87c3fd8/fredapi/fred.py#L181-L201
19,940
mortada/fredapi
fredapi/fred.py
Fred.get_series_vintage_dates
def get_series_vintage_dates(self, series_id): """ Get a list of vintage dates for a series. Vintage dates are the dates in history when a series' data values were revised or new data values were released. Parameters ---------- series_id : str Fred series id such as 'CPIAUCSL' Returns ------- dates : list list of vintage dates """ url = "%s/series/vintagedates?series_id=%s" % (self.root_url, series_id) root = self.__fetch_data(url) if root is None: raise ValueError('No vintage date exists for series id: ' + series_id) dates = [] for child in root.getchildren(): dates.append(self._parse(child.text)) return dates
python
def get_series_vintage_dates(self, series_id): url = "%s/series/vintagedates?series_id=%s" % (self.root_url, series_id) root = self.__fetch_data(url) if root is None: raise ValueError('No vintage date exists for series id: ' + series_id) dates = [] for child in root.getchildren(): dates.append(self._parse(child.text)) return dates
[ "def", "get_series_vintage_dates", "(", "self", ",", "series_id", ")", ":", "url", "=", "\"%s/series/vintagedates?series_id=%s\"", "%", "(", "self", ".", "root_url", ",", "series_id", ")", "root", "=", "self", ".", "__fetch_data", "(", "url", ")", "if", "root"...
Get a list of vintage dates for a series. Vintage dates are the dates in history when a series' data values were revised or new data values were released. Parameters ---------- series_id : str Fred series id such as 'CPIAUCSL' Returns ------- dates : list list of vintage dates
[ "Get", "a", "list", "of", "vintage", "dates", "for", "a", "series", ".", "Vintage", "dates", "are", "the", "dates", "in", "history", "when", "a", "series", "data", "values", "were", "revised", "or", "new", "data", "values", "were", "released", "." ]
d3ca79efccb9525f2752a0d6da90e793e87c3fd8
https://github.com/mortada/fredapi/blob/d3ca79efccb9525f2752a0d6da90e793e87c3fd8/fredapi/fred.py#L250-L272
19,941
mortada/fredapi
fredapi/fred.py
Fred.__do_series_search
def __do_series_search(self, url): """ helper function for making one HTTP request for data, and parsing the returned results into a DataFrame """ root = self.__fetch_data(url) series_ids = [] data = {} num_results_returned = 0 # number of results returned in this HTTP request num_results_total = int(root.get('count')) # total number of results, this can be larger than number of results returned for child in root.getchildren(): num_results_returned += 1 series_id = child.get('id') series_ids.append(series_id) data[series_id] = {"id": series_id} fields = ["realtime_start", "realtime_end", "title", "observation_start", "observation_end", "frequency", "frequency_short", "units", "units_short", "seasonal_adjustment", "seasonal_adjustment_short", "last_updated", "popularity", "notes"] for field in fields: data[series_id][field] = child.get(field) if num_results_returned > 0: data = pd.DataFrame(data, columns=series_ids).T # parse datetime columns for field in ["realtime_start", "realtime_end", "observation_start", "observation_end", "last_updated"]: data[field] = data[field].apply(self._parse, format=None) # set index name data.index.name = 'series id' else: data = None return data, num_results_total
python
def __do_series_search(self, url): root = self.__fetch_data(url) series_ids = [] data = {} num_results_returned = 0 # number of results returned in this HTTP request num_results_total = int(root.get('count')) # total number of results, this can be larger than number of results returned for child in root.getchildren(): num_results_returned += 1 series_id = child.get('id') series_ids.append(series_id) data[series_id] = {"id": series_id} fields = ["realtime_start", "realtime_end", "title", "observation_start", "observation_end", "frequency", "frequency_short", "units", "units_short", "seasonal_adjustment", "seasonal_adjustment_short", "last_updated", "popularity", "notes"] for field in fields: data[series_id][field] = child.get(field) if num_results_returned > 0: data = pd.DataFrame(data, columns=series_ids).T # parse datetime columns for field in ["realtime_start", "realtime_end", "observation_start", "observation_end", "last_updated"]: data[field] = data[field].apply(self._parse, format=None) # set index name data.index.name = 'series id' else: data = None return data, num_results_total
[ "def", "__do_series_search", "(", "self", ",", "url", ")", ":", "root", "=", "self", ".", "__fetch_data", "(", "url", ")", "series_ids", "=", "[", "]", "data", "=", "{", "}", "num_results_returned", "=", "0", "# number of results returned in this HTTP request", ...
helper function for making one HTTP request for data, and parsing the returned results into a DataFrame
[ "helper", "function", "for", "making", "one", "HTTP", "request", "for", "data", "and", "parsing", "the", "returned", "results", "into", "a", "DataFrame" ]
d3ca79efccb9525f2752a0d6da90e793e87c3fd8
https://github.com/mortada/fredapi/blob/d3ca79efccb9525f2752a0d6da90e793e87c3fd8/fredapi/fred.py#L274-L305
19,942
mortada/fredapi
fredapi/fred.py
Fred.__get_search_results
def __get_search_results(self, url, limit, order_by, sort_order, filter): """ helper function for getting search results up to specified limit on the number of results. The Fred HTTP API truncates to 1000 results per request, so this may issue multiple HTTP requests to obtain more available data. """ order_by_options = ['search_rank', 'series_id', 'title', 'units', 'frequency', 'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end', 'popularity'] if order_by is not None: if order_by in order_by_options: url = url + '&order_by=' + order_by else: raise ValueError('%s is not in the valid list of order_by options: %s' % (order_by, str(order_by_options))) if filter is not None: if len(filter) == 2: url = url + '&filter_variable=%s&filter_value=%s' % (filter[0], filter[1]) else: raise ValueError('Filter should be a 2 item tuple like (filter_variable, filter_value)') sort_order_options = ['asc', 'desc'] if sort_order is not None: if sort_order in sort_order_options: url = url + '&sort_order=' + sort_order else: raise ValueError('%s is not in the valid list of sort_order options: %s' % (sort_order, str(sort_order_options))) data, num_results_total = self.__do_series_search(url) if data is None: return data if limit == 0: max_results_needed = num_results_total else: max_results_needed = limit if max_results_needed > self.max_results_per_request: for i in range(1, max_results_needed // self.max_results_per_request + 1): offset = i * self.max_results_per_request next_data, _ = self.__do_series_search(url + '&offset=' + str(offset)) data = data.append(next_data) return data.head(max_results_needed)
python
def __get_search_results(self, url, limit, order_by, sort_order, filter): order_by_options = ['search_rank', 'series_id', 'title', 'units', 'frequency', 'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end', 'popularity'] if order_by is not None: if order_by in order_by_options: url = url + '&order_by=' + order_by else: raise ValueError('%s is not in the valid list of order_by options: %s' % (order_by, str(order_by_options))) if filter is not None: if len(filter) == 2: url = url + '&filter_variable=%s&filter_value=%s' % (filter[0], filter[1]) else: raise ValueError('Filter should be a 2 item tuple like (filter_variable, filter_value)') sort_order_options = ['asc', 'desc'] if sort_order is not None: if sort_order in sort_order_options: url = url + '&sort_order=' + sort_order else: raise ValueError('%s is not in the valid list of sort_order options: %s' % (sort_order, str(sort_order_options))) data, num_results_total = self.__do_series_search(url) if data is None: return data if limit == 0: max_results_needed = num_results_total else: max_results_needed = limit if max_results_needed > self.max_results_per_request: for i in range(1, max_results_needed // self.max_results_per_request + 1): offset = i * self.max_results_per_request next_data, _ = self.__do_series_search(url + '&offset=' + str(offset)) data = data.append(next_data) return data.head(max_results_needed)
[ "def", "__get_search_results", "(", "self", ",", "url", ",", "limit", ",", "order_by", ",", "sort_order", ",", "filter", ")", ":", "order_by_options", "=", "[", "'search_rank'", ",", "'series_id'", ",", "'title'", ",", "'units'", ",", "'frequency'", ",", "'s...
helper function for getting search results up to specified limit on the number of results. The Fred HTTP API truncates to 1000 results per request, so this may issue multiple HTTP requests to obtain more available data.
[ "helper", "function", "for", "getting", "search", "results", "up", "to", "specified", "limit", "on", "the", "number", "of", "results", ".", "The", "Fred", "HTTP", "API", "truncates", "to", "1000", "results", "per", "request", "so", "this", "may", "issue", ...
d3ca79efccb9525f2752a0d6da90e793e87c3fd8
https://github.com/mortada/fredapi/blob/d3ca79efccb9525f2752a0d6da90e793e87c3fd8/fredapi/fred.py#L307-L349
19,943
mortada/fredapi
fredapi/fred.py
Fred.search
def search(self, text, limit=1000, order_by=None, sort_order=None, filter=None): """ Do a fulltext search for series in the Fred dataset. Returns information about matching series in a DataFrame. Parameters ---------- text : str text to do fulltext search on, e.g., 'Real GDP' limit : int, optional limit the number of results to this value. If limit is 0, it means fetching all results without limit. order_by : str, optional order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency', 'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end', 'popularity' sort_order : str, optional sort the results by ascending or descending order. Valid options are 'asc' or 'desc' filter : tuple, optional filters the results. Expects a tuple like (filter_variable, filter_value). Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment' Returns ------- info : DataFrame a DataFrame containing information about the matching Fred series """ url = "%s/series/search?search_text=%s&" % (self.root_url, quote_plus(text)) info = self.__get_search_results(url, limit, order_by, sort_order, filter) return info
python
def search(self, text, limit=1000, order_by=None, sort_order=None, filter=None): url = "%s/series/search?search_text=%s&" % (self.root_url, quote_plus(text)) info = self.__get_search_results(url, limit, order_by, sort_order, filter) return info
[ "def", "search", "(", "self", ",", "text", ",", "limit", "=", "1000", ",", "order_by", "=", "None", ",", "sort_order", "=", "None", ",", "filter", "=", "None", ")", ":", "url", "=", "\"%s/series/search?search_text=%s&\"", "%", "(", "self", ".", "root_url...
Do a fulltext search for series in the Fred dataset. Returns information about matching series in a DataFrame. Parameters ---------- text : str text to do fulltext search on, e.g., 'Real GDP' limit : int, optional limit the number of results to this value. If limit is 0, it means fetching all results without limit. order_by : str, optional order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency', 'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end', 'popularity' sort_order : str, optional sort the results by ascending or descending order. Valid options are 'asc' or 'desc' filter : tuple, optional filters the results. Expects a tuple like (filter_variable, filter_value). Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment' Returns ------- info : DataFrame a DataFrame containing information about the matching Fred series
[ "Do", "a", "fulltext", "search", "for", "series", "in", "the", "Fred", "dataset", ".", "Returns", "information", "about", "matching", "series", "in", "a", "DataFrame", "." ]
d3ca79efccb9525f2752a0d6da90e793e87c3fd8
https://github.com/mortada/fredapi/blob/d3ca79efccb9525f2752a0d6da90e793e87c3fd8/fredapi/fred.py#L351-L379
19,944
mortada/fredapi
fredapi/fred.py
Fred.search_by_release
def search_by_release(self, release_id, limit=0, order_by=None, sort_order=None, filter=None): """ Search for series that belongs to a release id. Returns information about matching series in a DataFrame. Parameters ---------- release_id : int release id, e.g., 151 limit : int, optional limit the number of results to this value. If limit is 0, it means fetching all results without limit. order_by : str, optional order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency', 'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end', 'popularity' sort_order : str, optional sort the results by ascending or descending order. Valid options are 'asc' or 'desc' filter : tuple, optional filters the results. Expects a tuple like (filter_variable, filter_value). Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment' Returns ------- info : DataFrame a DataFrame containing information about the matching Fred series """ url = "%s/release/series?release_id=%d" % (self.root_url, release_id) info = self.__get_search_results(url, limit, order_by, sort_order, filter) if info is None: raise ValueError('No series exists for release id: ' + str(release_id)) return info
python
def search_by_release(self, release_id, limit=0, order_by=None, sort_order=None, filter=None): url = "%s/release/series?release_id=%d" % (self.root_url, release_id) info = self.__get_search_results(url, limit, order_by, sort_order, filter) if info is None: raise ValueError('No series exists for release id: ' + str(release_id)) return info
[ "def", "search_by_release", "(", "self", ",", "release_id", ",", "limit", "=", "0", ",", "order_by", "=", "None", ",", "sort_order", "=", "None", ",", "filter", "=", "None", ")", ":", "url", "=", "\"%s/release/series?release_id=%d\"", "%", "(", "self", "."...
Search for series that belongs to a release id. Returns information about matching series in a DataFrame. Parameters ---------- release_id : int release id, e.g., 151 limit : int, optional limit the number of results to this value. If limit is 0, it means fetching all results without limit. order_by : str, optional order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency', 'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end', 'popularity' sort_order : str, optional sort the results by ascending or descending order. Valid options are 'asc' or 'desc' filter : tuple, optional filters the results. Expects a tuple like (filter_variable, filter_value). Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment' Returns ------- info : DataFrame a DataFrame containing information about the matching Fred series
[ "Search", "for", "series", "that", "belongs", "to", "a", "release", "id", ".", "Returns", "information", "about", "matching", "series", "in", "a", "DataFrame", "." ]
d3ca79efccb9525f2752a0d6da90e793e87c3fd8
https://github.com/mortada/fredapi/blob/d3ca79efccb9525f2752a0d6da90e793e87c3fd8/fredapi/fred.py#L381-L410
19,945
mortada/fredapi
fredapi/fred.py
Fred.search_by_category
def search_by_category(self, category_id, limit=0, order_by=None, sort_order=None, filter=None): """ Search for series that belongs to a category id. Returns information about matching series in a DataFrame. Parameters ---------- category_id : int category id, e.g., 32145 limit : int, optional limit the number of results to this value. If limit is 0, it means fetching all results without limit. order_by : str, optional order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency', 'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end', 'popularity' sort_order : str, optional sort the results by ascending or descending order. Valid options are 'asc' or 'desc' filter : tuple, optional filters the results. Expects a tuple like (filter_variable, filter_value). Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment' Returns ------- info : DataFrame a DataFrame containing information about the matching Fred series """ url = "%s/category/series?category_id=%d&" % (self.root_url, category_id) info = self.__get_search_results(url, limit, order_by, sort_order, filter) if info is None: raise ValueError('No series exists for category id: ' + str(category_id)) return info
python
def search_by_category(self, category_id, limit=0, order_by=None, sort_order=None, filter=None): url = "%s/category/series?category_id=%d&" % (self.root_url, category_id) info = self.__get_search_results(url, limit, order_by, sort_order, filter) if info is None: raise ValueError('No series exists for category id: ' + str(category_id)) return info
[ "def", "search_by_category", "(", "self", ",", "category_id", ",", "limit", "=", "0", ",", "order_by", "=", "None", ",", "sort_order", "=", "None", ",", "filter", "=", "None", ")", ":", "url", "=", "\"%s/category/series?category_id=%d&\"", "%", "(", "self", ...
Search for series that belongs to a category id. Returns information about matching series in a DataFrame. Parameters ---------- category_id : int category id, e.g., 32145 limit : int, optional limit the number of results to this value. If limit is 0, it means fetching all results without limit. order_by : str, optional order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency', 'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end', 'popularity' sort_order : str, optional sort the results by ascending or descending order. Valid options are 'asc' or 'desc' filter : tuple, optional filters the results. Expects a tuple like (filter_variable, filter_value). Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment' Returns ------- info : DataFrame a DataFrame containing information about the matching Fred series
[ "Search", "for", "series", "that", "belongs", "to", "a", "category", "id", ".", "Returns", "information", "about", "matching", "series", "in", "a", "DataFrame", "." ]
d3ca79efccb9525f2752a0d6da90e793e87c3fd8
https://github.com/mortada/fredapi/blob/d3ca79efccb9525f2752a0d6da90e793e87c3fd8/fredapi/fred.py#L412-L442
19,946
mathiasertl/django-ca
ca/django_ca/managers.py
CertificateManager.init
def init(self, ca, csr, **kwargs): """Create a signed certificate from a CSR and store it to the database. All parameters are passed on to :py:func:`Certificate.objects.sign_cert() <django_ca.managers.CertificateManager.sign_cert>`. """ c = self.model(ca=ca) c.x509, csr = self.sign_cert(ca, csr, **kwargs) c.csr = csr.public_bytes(Encoding.PEM).decode('utf-8') c.save() post_issue_cert.send(sender=self.model, cert=c) return c
python
def init(self, ca, csr, **kwargs): c = self.model(ca=ca) c.x509, csr = self.sign_cert(ca, csr, **kwargs) c.csr = csr.public_bytes(Encoding.PEM).decode('utf-8') c.save() post_issue_cert.send(sender=self.model, cert=c) return c
[ "def", "init", "(", "self", ",", "ca", ",", "csr", ",", "*", "*", "kwargs", ")", ":", "c", "=", "self", ".", "model", "(", "ca", "=", "ca", ")", "c", ".", "x509", ",", "csr", "=", "self", ".", "sign_cert", "(", "ca", ",", "csr", ",", "*", ...
Create a signed certificate from a CSR and store it to the database. All parameters are passed on to :py:func:`Certificate.objects.sign_cert() <django_ca.managers.CertificateManager.sign_cert>`.
[ "Create", "a", "signed", "certificate", "from", "a", "CSR", "and", "store", "it", "to", "the", "database", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/managers.py#L442-L455
19,947
mathiasertl/django-ca
ca/django_ca/admin.py
CertificateMixin.download_bundle_view
def download_bundle_view(self, request, pk): """A view that allows the user to download a certificate bundle in PEM format.""" return self._download_response(request, pk, bundle=True)
python
def download_bundle_view(self, request, pk): return self._download_response(request, pk, bundle=True)
[ "def", "download_bundle_view", "(", "self", ",", "request", ",", "pk", ")", ":", "return", "self", ".", "_download_response", "(", "request", ",", "pk", ",", "bundle", "=", "True", ")" ]
A view that allows the user to download a certificate bundle in PEM format.
[ "A", "view", "that", "allows", "the", "user", "to", "download", "a", "certificate", "bundle", "in", "PEM", "format", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/admin.py#L118-L121
19,948
mathiasertl/django-ca
ca/django_ca/admin.py
CertificateMixin.get_actions
def get_actions(self, request): """Disable the "delete selected" admin action. Otherwise the action is present even though has_delete_permission is False, it just doesn't work. """ actions = super(CertificateMixin, self).get_actions(request) actions.pop('delete_selected', '') return actions
python
def get_actions(self, request): actions = super(CertificateMixin, self).get_actions(request) actions.pop('delete_selected', '') return actions
[ "def", "get_actions", "(", "self", ",", "request", ")", ":", "actions", "=", "super", "(", "CertificateMixin", ",", "self", ")", ".", "get_actions", "(", "request", ")", "actions", ".", "pop", "(", "'delete_selected'", ",", "''", ")", "return", "actions" ]
Disable the "delete selected" admin action. Otherwise the action is present even though has_delete_permission is False, it just doesn't work.
[ "Disable", "the", "delete", "selected", "admin", "action", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/admin.py#L126-L134
19,949
mathiasertl/django-ca
ca/django_ca/profiles.py
get_cert_profile_kwargs
def get_cert_profile_kwargs(name=None): """Get kwargs suitable for get_cert X509 keyword arguments from the given profile.""" if name is None: name = ca_settings.CA_DEFAULT_PROFILE profile = deepcopy(ca_settings.CA_PROFILES[name]) kwargs = { 'cn_in_san': profile['cn_in_san'], 'subject': get_default_subject(name=name), } key_usage = profile.get('keyUsage') if key_usage and key_usage.get('value'): kwargs['key_usage'] = KeyUsage(key_usage) ext_key_usage = profile.get('extendedKeyUsage') if ext_key_usage and ext_key_usage.get('value'): kwargs['extended_key_usage'] = ExtendedKeyUsage(ext_key_usage) tls_feature = profile.get('TLSFeature') if tls_feature and tls_feature.get('value'): kwargs['tls_feature'] = TLSFeature(tls_feature) if profile.get('ocsp_no_check'): kwargs['ocsp_no_check'] = profile['ocsp_no_check'] return kwargs
python
def get_cert_profile_kwargs(name=None): if name is None: name = ca_settings.CA_DEFAULT_PROFILE profile = deepcopy(ca_settings.CA_PROFILES[name]) kwargs = { 'cn_in_san': profile['cn_in_san'], 'subject': get_default_subject(name=name), } key_usage = profile.get('keyUsage') if key_usage and key_usage.get('value'): kwargs['key_usage'] = KeyUsage(key_usage) ext_key_usage = profile.get('extendedKeyUsage') if ext_key_usage and ext_key_usage.get('value'): kwargs['extended_key_usage'] = ExtendedKeyUsage(ext_key_usage) tls_feature = profile.get('TLSFeature') if tls_feature and tls_feature.get('value'): kwargs['tls_feature'] = TLSFeature(tls_feature) if profile.get('ocsp_no_check'): kwargs['ocsp_no_check'] = profile['ocsp_no_check'] return kwargs
[ "def", "get_cert_profile_kwargs", "(", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "name", "=", "ca_settings", ".", "CA_DEFAULT_PROFILE", "profile", "=", "deepcopy", "(", "ca_settings", ".", "CA_PROFILES", "[", "name", "]", ")", "kwargs",...
Get kwargs suitable for get_cert X509 keyword arguments from the given profile.
[ "Get", "kwargs", "suitable", "for", "get_cert", "X509", "keyword", "arguments", "from", "the", "given", "profile", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/profiles.py#L25-L49
19,950
mathiasertl/django-ca
ca/django_ca/utils.py
format_name
def format_name(subject): """Convert a subject into the canonical form for distinguished names. This function does not take care of sorting the subject in any meaningful order. Examples:: >>> format_name([('CN', 'example.com'), ]) '/CN=example.com' >>> format_name([('CN', 'example.com'), ('O', "My Organization"), ]) '/CN=example.com/O=My Organization' """ if isinstance(subject, x509.Name): subject = [(OID_NAME_MAPPINGS[s.oid], s.value) for s in subject] return '/%s' % ('/'.join(['%s=%s' % (force_text(k), force_text(v)) for k, v in subject]))
python
def format_name(subject): if isinstance(subject, x509.Name): subject = [(OID_NAME_MAPPINGS[s.oid], s.value) for s in subject] return '/%s' % ('/'.join(['%s=%s' % (force_text(k), force_text(v)) for k, v in subject]))
[ "def", "format_name", "(", "subject", ")", ":", "if", "isinstance", "(", "subject", ",", "x509", ".", "Name", ")", ":", "subject", "=", "[", "(", "OID_NAME_MAPPINGS", "[", "s", ".", "oid", "]", ",", "s", ".", "value", ")", "for", "s", "in", "subjec...
Convert a subject into the canonical form for distinguished names. This function does not take care of sorting the subject in any meaningful order. Examples:: >>> format_name([('CN', 'example.com'), ]) '/CN=example.com' >>> format_name([('CN', 'example.com'), ('O', "My Organization"), ]) '/CN=example.com/O=My Organization'
[ "Convert", "a", "subject", "into", "the", "canonical", "form", "for", "distinguished", "names", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L125-L140
19,951
mathiasertl/django-ca
ca/django_ca/utils.py
format_general_name
def format_general_name(name): """Format a single general name. >>> import ipaddress >>> format_general_name(x509.DNSName('example.com')) 'DNS:example.com' >>> format_general_name(x509.IPAddress(ipaddress.IPv4Address('127.0.0.1'))) 'IP:127.0.0.1' """ if isinstance(name, x509.DirectoryName): value = format_name(name.value) else: value = name.value return '%s:%s' % (SAN_NAME_MAPPINGS[type(name)], value)
python
def format_general_name(name): if isinstance(name, x509.DirectoryName): value = format_name(name.value) else: value = name.value return '%s:%s' % (SAN_NAME_MAPPINGS[type(name)], value)
[ "def", "format_general_name", "(", "name", ")", ":", "if", "isinstance", "(", "name", ",", "x509", ".", "DirectoryName", ")", ":", "value", "=", "format_name", "(", "name", ".", "value", ")", "else", ":", "value", "=", "name", ".", "value", "return", "...
Format a single general name. >>> import ipaddress >>> format_general_name(x509.DNSName('example.com')) 'DNS:example.com' >>> format_general_name(x509.IPAddress(ipaddress.IPv4Address('127.0.0.1'))) 'IP:127.0.0.1'
[ "Format", "a", "single", "general", "name", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L143-L157
19,952
mathiasertl/django-ca
ca/django_ca/utils.py
add_colons
def add_colons(s): """Add colons after every second digit. This function is used in functions to prettify serials. >>> add_colons('teststring') 'te:st:st:ri:ng' """ return ':'.join([s[i:i + 2] for i in range(0, len(s), 2)])
python
def add_colons(s): return ':'.join([s[i:i + 2] for i in range(0, len(s), 2)])
[ "def", "add_colons", "(", "s", ")", ":", "return", "':'", ".", "join", "(", "[", "s", "[", "i", ":", "i", "+", "2", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "s", ")", ",", "2", ")", "]", ")" ]
Add colons after every second digit. This function is used in functions to prettify serials. >>> add_colons('teststring') 'te:st:st:ri:ng'
[ "Add", "colons", "after", "every", "second", "digit", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L200-L208
19,953
mathiasertl/django-ca
ca/django_ca/utils.py
int_to_hex
def int_to_hex(i): """Create a hex-representation of the given serial. >>> int_to_hex(12345678) 'BC:61:4E' """ s = hex(i)[2:].upper() if six.PY2 is True and isinstance(i, long): # pragma: only py2 # NOQA # Strip the "L" suffix, since hex(1L) -> 0x1L. # NOTE: Do not convert to int earlier. int(<very-large-long>) is still long s = s[:-1] return add_colons(s)
python
def int_to_hex(i): s = hex(i)[2:].upper() if six.PY2 is True and isinstance(i, long): # pragma: only py2 # NOQA # Strip the "L" suffix, since hex(1L) -> 0x1L. # NOTE: Do not convert to int earlier. int(<very-large-long>) is still long s = s[:-1] return add_colons(s)
[ "def", "int_to_hex", "(", "i", ")", ":", "s", "=", "hex", "(", "i", ")", "[", "2", ":", "]", ".", "upper", "(", ")", "if", "six", ".", "PY2", "is", "True", "and", "isinstance", "(", "i", ",", "long", ")", ":", "# pragma: only py2 # NOQA", "# Str...
Create a hex-representation of the given serial. >>> int_to_hex(12345678) 'BC:61:4E'
[ "Create", "a", "hex", "-", "representation", "of", "the", "given", "serial", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L211-L222
19,954
mathiasertl/django-ca
ca/django_ca/utils.py
parse_name
def parse_name(name): """Parses a subject string as used in OpenSSLs command line utilities. The ``name`` is expected to be close to the subject format commonly used by OpenSSL, for example ``/C=AT/L=Vienna/CN=example.com/emailAddress=user@example.com``. The function does its best to be lenient on deviations from the format, object identifiers are case-insensitive (e.g. ``cn`` is the same as ``CN``, whitespace at the start and end is stripped and the subject does not have to start with a slash (``/``). >>> parse_name('/CN=example.com') [('CN', 'example.com')] >>> parse_name('c=AT/l= Vienna/o="ex org"/CN=example.com') [('C', 'AT'), ('L', 'Vienna'), ('O', 'ex org'), ('CN', 'example.com')] Dictionary keys are normalized to the values of :py:const:`OID_NAME_MAPPINGS` and keys will be sorted based on x509 name specifications regardless of the given order: >>> parse_name('L="Vienna / District"/EMAILaddress=user@example.com') [('L', 'Vienna / District'), ('emailAddress', 'user@example.com')] >>> parse_name('/C=AT/CN=example.com') == parse_name('/CN=example.com/C=AT') True Due to the magic of :py:const:`NAME_RE`, the function even supports quoting strings and including slashes, so strings like ``/OU="Org / Org Unit"/CN=example.com`` will work as expected. >>> parse_name('L="Vienna / District"/CN=example.com') [('L', 'Vienna / District'), ('CN', 'example.com')] But note that it's still easy to trick this function, if you really want to. The following example is *not* a valid subject, the location is just bogus, and whatever you were expecting as output, it's certainly different: >>> parse_name('L="Vienna " District"/CN=example.com') [('L', 'Vienna'), ('CN', 'example.com')] Examples of where this string is used are: .. 
code-block:: console # openssl req -new -key priv.key -out csr -utf8 -batch -sha256 -subj '/C=AT/CN=example.com' # openssl x509 -in cert.pem -noout -subject -nameopt compat /C=AT/L=Vienna/CN=example.com """ name = name.strip() if not name: # empty subjects are ok return [] try: items = [(NAME_CASE_MAPPINGS[t[0].upper()], force_text(t[2])) for t in NAME_RE.findall(name)] except KeyError as e: raise ValueError('Unknown x509 name field: %s' % e.args[0]) # Check that no OIDs not in MULTIPLE_OIDS occur more then once for key, oid in NAME_OID_MAPPINGS.items(): if sum(1 for t in items if t[0] == key) > 1 and oid not in MULTIPLE_OIDS: raise ValueError('Subject contains multiple "%s" fields' % key) return sort_name(items)
python
def parse_name(name): name = name.strip() if not name: # empty subjects are ok return [] try: items = [(NAME_CASE_MAPPINGS[t[0].upper()], force_text(t[2])) for t in NAME_RE.findall(name)] except KeyError as e: raise ValueError('Unknown x509 name field: %s' % e.args[0]) # Check that no OIDs not in MULTIPLE_OIDS occur more then once for key, oid in NAME_OID_MAPPINGS.items(): if sum(1 for t in items if t[0] == key) > 1 and oid not in MULTIPLE_OIDS: raise ValueError('Subject contains multiple "%s" fields' % key) return sort_name(items)
[ "def", "parse_name", "(", "name", ")", ":", "name", "=", "name", ".", "strip", "(", ")", "if", "not", "name", ":", "# empty subjects are ok", "return", "[", "]", "try", ":", "items", "=", "[", "(", "NAME_CASE_MAPPINGS", "[", "t", "[", "0", "]", ".", ...
Parses a subject string as used in OpenSSLs command line utilities. The ``name`` is expected to be close to the subject format commonly used by OpenSSL, for example ``/C=AT/L=Vienna/CN=example.com/emailAddress=user@example.com``. The function does its best to be lenient on deviations from the format, object identifiers are case-insensitive (e.g. ``cn`` is the same as ``CN``, whitespace at the start and end is stripped and the subject does not have to start with a slash (``/``). >>> parse_name('/CN=example.com') [('CN', 'example.com')] >>> parse_name('c=AT/l= Vienna/o="ex org"/CN=example.com') [('C', 'AT'), ('L', 'Vienna'), ('O', 'ex org'), ('CN', 'example.com')] Dictionary keys are normalized to the values of :py:const:`OID_NAME_MAPPINGS` and keys will be sorted based on x509 name specifications regardless of the given order: >>> parse_name('L="Vienna / District"/EMAILaddress=user@example.com') [('L', 'Vienna / District'), ('emailAddress', 'user@example.com')] >>> parse_name('/C=AT/CN=example.com') == parse_name('/CN=example.com/C=AT') True Due to the magic of :py:const:`NAME_RE`, the function even supports quoting strings and including slashes, so strings like ``/OU="Org / Org Unit"/CN=example.com`` will work as expected. >>> parse_name('L="Vienna / District"/CN=example.com') [('L', 'Vienna / District'), ('CN', 'example.com')] But note that it's still easy to trick this function, if you really want to. The following example is *not* a valid subject, the location is just bogus, and whatever you were expecting as output, it's certainly different: >>> parse_name('L="Vienna " District"/CN=example.com') [('L', 'Vienna'), ('CN', 'example.com')] Examples of where this string is used are: .. code-block:: console # openssl req -new -key priv.key -out csr -utf8 -batch -sha256 -subj '/C=AT/CN=example.com' # openssl x509 -in cert.pem -noout -subject -nameopt compat /C=AT/L=Vienna/CN=example.com
[ "Parses", "a", "subject", "string", "as", "used", "in", "OpenSSLs", "command", "line", "utilities", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L245-L301
19,955
mathiasertl/django-ca
ca/django_ca/utils.py
parse_general_name
def parse_general_name(name): """Parse a general name from user input. This function will do its best to detect the intended type of any value passed to it: >>> parse_general_name('example.com') <DNSName(value='example.com')> >>> parse_general_name('*.example.com') <DNSName(value='*.example.com')> >>> parse_general_name('.example.com') # Syntax used e.g. for NameConstraints: All levels of subdomains <DNSName(value='.example.com')> >>> parse_general_name('user@example.com') <RFC822Name(value='user@example.com')> >>> parse_general_name('https://example.com') <UniformResourceIdentifier(value='https://example.com')> >>> parse_general_name('1.2.3.4') <IPAddress(value=1.2.3.4)> >>> parse_general_name('fd00::1') <IPAddress(value=fd00::1)> >>> parse_general_name('/CN=example.com') <DirectoryName(value=<Name(CN=example.com)>)> The default fallback is to assume a :py:class:`~cg:cryptography.x509.DNSName`. If this doesn't work, an exception will be raised: >>> parse_general_name('foo..bar`*123') # doctest: +ELLIPSIS Traceback (most recent call last): ... idna.core.IDNAError: ... If you want to override detection, you can prefix the name to match :py:const:`GENERAL_NAME_RE`: >>> parse_general_name('email:user@example.com') <RFC822Name(value='user@example.com')> >>> parse_general_name('URI:https://example.com') <UniformResourceIdentifier(value='https://example.com')> >>> parse_general_name('dirname:/CN=example.com') <DirectoryName(value=<Name(CN=example.com)>)> Some more exotic values can only be generated by using this prefix: >>> parse_general_name('rid:2.5.4.3') <RegisteredID(value=<ObjectIdentifier(oid=2.5.4.3, name=commonName)>)> >>> parse_general_name('otherName:2.5.4.3;UTF8:example.com') <OtherName(type_id=<ObjectIdentifier(oid=2.5.4.3, name=commonName)>, value=b'example.com')> If you give a prefixed value, this function is less forgiving of any typos and does not catch any exceptions: >>> parse_general_name('email:foo@bar com') Traceback (most recent call last): ... 
ValueError: Invalid domain: bar com """ name = force_text(name) typ = None match = GENERAL_NAME_RE.match(name) if match is not None: typ, name = match.groups() typ = typ.lower() if typ is None: if re.match('[a-z0-9]{2,}://', name): # Looks like a URI try: return x509.UniformResourceIdentifier(name) except Exception: # pragma: no cover - this really accepts anything pass if '@' in name: # Looks like an Email address try: return x509.RFC822Name(validate_email(name)) except Exception: pass if name.strip().startswith('/'): # maybe it's a dirname? return x509.DirectoryName(x509_name(name)) # Try to parse this as IPAddress/Network try: return x509.IPAddress(ip_address(name)) except ValueError: pass try: return x509.IPAddress(ip_network(name)) except ValueError: pass # Try to encode as domain name. DNSName() does not validate the domain name, but this check will fail. if name.startswith('*.'): idna.encode(name[2:]) elif name.startswith('.'): idna.encode(name[1:]) else: idna.encode(name) # Almost anything passes as DNS name, so this is our default fallback return x509.DNSName(name) if typ == 'uri': return x509.UniformResourceIdentifier(name) elif typ == 'email': return x509.RFC822Name(validate_email(name)) elif typ == 'ip': try: return x509.IPAddress(ip_address(name)) except ValueError: pass try: return x509.IPAddress(ip_network(name)) except ValueError: pass raise ValueError('Could not parse IP address.') elif typ == 'rid': return x509.RegisteredID(x509.ObjectIdentifier(name)) elif typ == 'othername': regex = "(.*);(.*):(.*)" if re.match(regex, name) is not None: oid, asn_typ, val = re.match(regex, name).groups() oid = x509.ObjectIdentifier(oid) if asn_typ == 'UTF8': val = val.encode('utf-8') elif asn_typ == 'OctetString': val = bytes(bytearray.fromhex(val)) val = OctetString(val).dump() else: raise ValueError('Unsupported ASN type in otherName: %s' % asn_typ) val = force_bytes(val) return x509.OtherName(oid, val) else: raise ValueError('Incorrect otherName format: %s' % 
name) elif typ == 'dirname': return x509.DirectoryName(x509_name(name)) else: # Try to encode the domain name. DNSName() does not validate the domain name, but this # check will fail. if name.startswith('*.'): idna.encode(name[2:]) elif name.startswith('.'): idna.encode(name[1:]) else: idna.encode(name) return x509.DNSName(name)
python
def parse_general_name(name): name = force_text(name) typ = None match = GENERAL_NAME_RE.match(name) if match is not None: typ, name = match.groups() typ = typ.lower() if typ is None: if re.match('[a-z0-9]{2,}://', name): # Looks like a URI try: return x509.UniformResourceIdentifier(name) except Exception: # pragma: no cover - this really accepts anything pass if '@' in name: # Looks like an Email address try: return x509.RFC822Name(validate_email(name)) except Exception: pass if name.strip().startswith('/'): # maybe it's a dirname? return x509.DirectoryName(x509_name(name)) # Try to parse this as IPAddress/Network try: return x509.IPAddress(ip_address(name)) except ValueError: pass try: return x509.IPAddress(ip_network(name)) except ValueError: pass # Try to encode as domain name. DNSName() does not validate the domain name, but this check will fail. if name.startswith('*.'): idna.encode(name[2:]) elif name.startswith('.'): idna.encode(name[1:]) else: idna.encode(name) # Almost anything passes as DNS name, so this is our default fallback return x509.DNSName(name) if typ == 'uri': return x509.UniformResourceIdentifier(name) elif typ == 'email': return x509.RFC822Name(validate_email(name)) elif typ == 'ip': try: return x509.IPAddress(ip_address(name)) except ValueError: pass try: return x509.IPAddress(ip_network(name)) except ValueError: pass raise ValueError('Could not parse IP address.') elif typ == 'rid': return x509.RegisteredID(x509.ObjectIdentifier(name)) elif typ == 'othername': regex = "(.*);(.*):(.*)" if re.match(regex, name) is not None: oid, asn_typ, val = re.match(regex, name).groups() oid = x509.ObjectIdentifier(oid) if asn_typ == 'UTF8': val = val.encode('utf-8') elif asn_typ == 'OctetString': val = bytes(bytearray.fromhex(val)) val = OctetString(val).dump() else: raise ValueError('Unsupported ASN type in otherName: %s' % asn_typ) val = force_bytes(val) return x509.OtherName(oid, val) else: raise ValueError('Incorrect otherName format: %s' % name) elif 
typ == 'dirname': return x509.DirectoryName(x509_name(name)) else: # Try to encode the domain name. DNSName() does not validate the domain name, but this # check will fail. if name.startswith('*.'): idna.encode(name[2:]) elif name.startswith('.'): idna.encode(name[1:]) else: idna.encode(name) return x509.DNSName(name)
[ "def", "parse_general_name", "(", "name", ")", ":", "name", "=", "force_text", "(", "name", ")", "typ", "=", "None", "match", "=", "GENERAL_NAME_RE", ".", "match", "(", "name", ")", "if", "match", "is", "not", "None", ":", "typ", ",", "name", "=", "m...
Parse a general name from user input. This function will do its best to detect the intended type of any value passed to it: >>> parse_general_name('example.com') <DNSName(value='example.com')> >>> parse_general_name('*.example.com') <DNSName(value='*.example.com')> >>> parse_general_name('.example.com') # Syntax used e.g. for NameConstraints: All levels of subdomains <DNSName(value='.example.com')> >>> parse_general_name('user@example.com') <RFC822Name(value='user@example.com')> >>> parse_general_name('https://example.com') <UniformResourceIdentifier(value='https://example.com')> >>> parse_general_name('1.2.3.4') <IPAddress(value=1.2.3.4)> >>> parse_general_name('fd00::1') <IPAddress(value=fd00::1)> >>> parse_general_name('/CN=example.com') <DirectoryName(value=<Name(CN=example.com)>)> The default fallback is to assume a :py:class:`~cg:cryptography.x509.DNSName`. If this doesn't work, an exception will be raised: >>> parse_general_name('foo..bar`*123') # doctest: +ELLIPSIS Traceback (most recent call last): ... idna.core.IDNAError: ... If you want to override detection, you can prefix the name to match :py:const:`GENERAL_NAME_RE`: >>> parse_general_name('email:user@example.com') <RFC822Name(value='user@example.com')> >>> parse_general_name('URI:https://example.com') <UniformResourceIdentifier(value='https://example.com')> >>> parse_general_name('dirname:/CN=example.com') <DirectoryName(value=<Name(CN=example.com)>)> Some more exotic values can only be generated by using this prefix: >>> parse_general_name('rid:2.5.4.3') <RegisteredID(value=<ObjectIdentifier(oid=2.5.4.3, name=commonName)>)> >>> parse_general_name('otherName:2.5.4.3;UTF8:example.com') <OtherName(type_id=<ObjectIdentifier(oid=2.5.4.3, name=commonName)>, value=b'example.com')> If you give a prefixed value, this function is less forgiving of any typos and does not catch any exceptions: >>> parse_general_name('email:foo@bar com') Traceback (most recent call last): ... ValueError: Invalid domain: bar com
[ "Parse", "a", "general", "name", "from", "user", "input", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L345-L490
19,956
mathiasertl/django-ca
ca/django_ca/utils.py
parse_hash_algorithm
def parse_hash_algorithm(value=None): """Parse a hash algorithm value. The most common use case is to pass a str naming a class in :py:mod:`~cg:cryptography.hazmat.primitives.hashes`. For convenience, passing ``None`` will return the value of :ref:`CA_DIGEST_ALGORITHM <settings-ca-digest-algorithm>`, and passing an :py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm` will return that instance unchanged. Example usage:: >>> parse_hash_algorithm() # doctest: +ELLIPSIS <cryptography.hazmat.primitives.hashes.SHA512 object at ...> >>> parse_hash_algorithm('SHA512') # doctest: +ELLIPSIS <cryptography.hazmat.primitives.hashes.SHA512 object at ...> >>> parse_hash_algorithm(' SHA512 ') # doctest: +ELLIPSIS <cryptography.hazmat.primitives.hashes.SHA512 object at ...> >>> parse_hash_algorithm(hashes.SHA512) # doctest: +ELLIPSIS <cryptography.hazmat.primitives.hashes.SHA512 object at ...> >>> parse_hash_algorithm(hashes.SHA512()) # doctest: +ELLIPSIS <cryptography.hazmat.primitives.hashes.SHA512 object at ...> >>> parse_hash_algorithm('Wrong') # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Unknown hash algorithm: Wrong >>> parse_hash_algorithm(object()) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Unknown type passed: object Parameters ---------- value : str or :py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm`, optional The value to parse, the function description on how possible values are used. Returns ------- algorithm A :py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm` instance. Raises ------ ValueError If an unknown object is passed or if ``value`` does not name a known algorithm. 
""" if value is None: return ca_settings.CA_DIGEST_ALGORITHM elif isinstance(value, type) and issubclass(value, hashes.HashAlgorithm): return value() elif isinstance(value, hashes.HashAlgorithm): return value elif isinstance(value, six.string_types): try: return getattr(hashes, value.strip())() except AttributeError: raise ValueError('Unknown hash algorithm: %s' % value) else: raise ValueError('Unknown type passed: %s' % type(value).__name__)
python
def parse_hash_algorithm(value=None): if value is None: return ca_settings.CA_DIGEST_ALGORITHM elif isinstance(value, type) and issubclass(value, hashes.HashAlgorithm): return value() elif isinstance(value, hashes.HashAlgorithm): return value elif isinstance(value, six.string_types): try: return getattr(hashes, value.strip())() except AttributeError: raise ValueError('Unknown hash algorithm: %s' % value) else: raise ValueError('Unknown type passed: %s' % type(value).__name__)
[ "def", "parse_hash_algorithm", "(", "value", "=", "None", ")", ":", "if", "value", "is", "None", ":", "return", "ca_settings", ".", "CA_DIGEST_ALGORITHM", "elif", "isinstance", "(", "value", ",", "type", ")", "and", "issubclass", "(", "value", ",", "hashes",...
Parse a hash algorithm value. The most common use case is to pass a str naming a class in :py:mod:`~cg:cryptography.hazmat.primitives.hashes`. For convenience, passing ``None`` will return the value of :ref:`CA_DIGEST_ALGORITHM <settings-ca-digest-algorithm>`, and passing an :py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm` will return that instance unchanged. Example usage:: >>> parse_hash_algorithm() # doctest: +ELLIPSIS <cryptography.hazmat.primitives.hashes.SHA512 object at ...> >>> parse_hash_algorithm('SHA512') # doctest: +ELLIPSIS <cryptography.hazmat.primitives.hashes.SHA512 object at ...> >>> parse_hash_algorithm(' SHA512 ') # doctest: +ELLIPSIS <cryptography.hazmat.primitives.hashes.SHA512 object at ...> >>> parse_hash_algorithm(hashes.SHA512) # doctest: +ELLIPSIS <cryptography.hazmat.primitives.hashes.SHA512 object at ...> >>> parse_hash_algorithm(hashes.SHA512()) # doctest: +ELLIPSIS <cryptography.hazmat.primitives.hashes.SHA512 object at ...> >>> parse_hash_algorithm('Wrong') # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Unknown hash algorithm: Wrong >>> parse_hash_algorithm(object()) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Unknown type passed: object Parameters ---------- value : str or :py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm`, optional The value to parse, the function description on how possible values are used. Returns ------- algorithm A :py:class:`~cg:cryptography.hazmat.primitives.hashes.HashAlgorithm` instance. Raises ------ ValueError If an unknown object is passed or if ``value`` does not name a known algorithm.
[ "Parse", "a", "hash", "algorithm", "value", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L493-L555
19,957
mathiasertl/django-ca
ca/django_ca/utils.py
parse_encoding
def parse_encoding(value=None): """Parse a value to a valid encoding. This function accepts either a member of :py:class:`~cg:cryptography.hazmat.primitives.serialization.Encoding` or a string describing a member. If no value is passed, it will assume ``PEM`` as a default value. Note that ``"ASN1"`` is treated as an alias for ``"DER"``. >>> parse_encoding() <Encoding.PEM: 'PEM'> >>> parse_encoding('DER') <Encoding.DER: 'DER'> >>> parse_encoding(Encoding.PEM) <Encoding.PEM: 'PEM'> """ if value is None: return ca_settings.CA_DEFAULT_ENCODING elif isinstance(value, Encoding): return value elif isinstance(value, six.string_types): if value == 'ASN1': value = 'DER' try: return getattr(Encoding, value) except AttributeError: raise ValueError('Unknown encoding: %s' % value) else: raise ValueError('Unknown type passed: %s' % type(value).__name__)
python
def parse_encoding(value=None): if value is None: return ca_settings.CA_DEFAULT_ENCODING elif isinstance(value, Encoding): return value elif isinstance(value, six.string_types): if value == 'ASN1': value = 'DER' try: return getattr(Encoding, value) except AttributeError: raise ValueError('Unknown encoding: %s' % value) else: raise ValueError('Unknown type passed: %s' % type(value).__name__)
[ "def", "parse_encoding", "(", "value", "=", "None", ")", ":", "if", "value", "is", "None", ":", "return", "ca_settings", ".", "CA_DEFAULT_ENCODING", "elif", "isinstance", "(", "value", ",", "Encoding", ")", ":", "return", "value", "elif", "isinstance", "(", ...
Parse a value to a valid encoding. This function accepts either a member of :py:class:`~cg:cryptography.hazmat.primitives.serialization.Encoding` or a string describing a member. If no value is passed, it will assume ``PEM`` as a default value. Note that ``"ASN1"`` is treated as an alias for ``"DER"``. >>> parse_encoding() <Encoding.PEM: 'PEM'> >>> parse_encoding('DER') <Encoding.DER: 'DER'> >>> parse_encoding(Encoding.PEM) <Encoding.PEM: 'PEM'>
[ "Parse", "a", "value", "to", "a", "valid", "encoding", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L558-L586
19,958
mathiasertl/django-ca
ca/django_ca/utils.py
parse_key_curve
def parse_key_curve(value=None): """Parse an elliptic curve value. This function uses a value identifying an elliptic curve to return an :py:class:`~cg:cryptography.hazmat.primitives.asymmetric.ec.EllipticCurve` instance. The name must match a class name of one of the classes named under "Elliptic Curves" in :any:`cg:hazmat/primitives/asymmetric/ec`. For convenience, passing ``None`` will return the value of :ref:`CA_DEFAULT_ECC_CURVE <settings-ca-default-ecc-curve>`, and passing an :py:class:`~cg:cryptography.hazmat.primitives.asymmetric.ec.EllipticCurve` will return that instance unchanged. Example usage:: >>> parse_key_curve('SECP256R1') # doctest: +ELLIPSIS <cryptography.hazmat.primitives.asymmetric.ec.SECP256R1 object at ...> >>> parse_key_curve('SECP384R1') # doctest: +ELLIPSIS <cryptography.hazmat.primitives.asymmetric.ec.SECP384R1 object at ...> >>> parse_key_curve(ec.SECP256R1()) # doctest: +ELLIPSIS <cryptography.hazmat.primitives.asymmetric.ec.SECP256R1 object at ...> >>> parse_key_curve() # doctest: +ELLIPSIS <cryptography.hazmat.primitives.asymmetric.ec.SECP256R1 object at ...> Parameters ---------- value : str, otional The name of the curve or ``None`` to return the default curve. Returns ------- curve An :py:class:`~cg:cryptography.hazmat.primitives.asymmetric.ec.EllipticCurve` instance. Raises ------ ValueError If the named curve is not supported. """ if isinstance(value, ec.EllipticCurve): return value # name was already parsed if value is None: return ca_settings.CA_DEFAULT_ECC_CURVE curve = getattr(ec, value.strip(), type) if not issubclass(curve, ec.EllipticCurve): raise ValueError('%s: Not a known Eliptic Curve' % value) return curve()
python
def parse_key_curve(value=None): if isinstance(value, ec.EllipticCurve): return value # name was already parsed if value is None: return ca_settings.CA_DEFAULT_ECC_CURVE curve = getattr(ec, value.strip(), type) if not issubclass(curve, ec.EllipticCurve): raise ValueError('%s: Not a known Eliptic Curve' % value) return curve()
[ "def", "parse_key_curve", "(", "value", "=", "None", ")", ":", "if", "isinstance", "(", "value", ",", "ec", ".", "EllipticCurve", ")", ":", "return", "value", "# name was already parsed", "if", "value", "is", "None", ":", "return", "ca_settings", ".", "CA_DE...
Parse an elliptic curve value. This function uses a value identifying an elliptic curve to return an :py:class:`~cg:cryptography.hazmat.primitives.asymmetric.ec.EllipticCurve` instance. The name must match a class name of one of the classes named under "Elliptic Curves" in :any:`cg:hazmat/primitives/asymmetric/ec`. For convenience, passing ``None`` will return the value of :ref:`CA_DEFAULT_ECC_CURVE <settings-ca-default-ecc-curve>`, and passing an :py:class:`~cg:cryptography.hazmat.primitives.asymmetric.ec.EllipticCurve` will return that instance unchanged. Example usage:: >>> parse_key_curve('SECP256R1') # doctest: +ELLIPSIS <cryptography.hazmat.primitives.asymmetric.ec.SECP256R1 object at ...> >>> parse_key_curve('SECP384R1') # doctest: +ELLIPSIS <cryptography.hazmat.primitives.asymmetric.ec.SECP384R1 object at ...> >>> parse_key_curve(ec.SECP256R1()) # doctest: +ELLIPSIS <cryptography.hazmat.primitives.asymmetric.ec.SECP256R1 object at ...> >>> parse_key_curve() # doctest: +ELLIPSIS <cryptography.hazmat.primitives.asymmetric.ec.SECP256R1 object at ...> Parameters ---------- value : str, otional The name of the curve or ``None`` to return the default curve. Returns ------- curve An :py:class:`~cg:cryptography.hazmat.primitives.asymmetric.ec.EllipticCurve` instance. Raises ------ ValueError If the named curve is not supported.
[ "Parse", "an", "elliptic", "curve", "value", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L589-L639
19,959
mathiasertl/django-ca
ca/django_ca/utils.py
get_cert_builder
def get_cert_builder(expires): """Get a basic X509 cert builder object. Parameters ---------- expires : datetime When this certificate will expire. """ now = datetime.utcnow().replace(second=0, microsecond=0) if expires is None: expires = get_expires(expires, now=now) expires = expires.replace(second=0, microsecond=0) builder = x509.CertificateBuilder() builder = builder.not_valid_before(now) builder = builder.not_valid_after(expires) builder = builder.serial_number(x509.random_serial_number()) return builder
python
def get_cert_builder(expires): now = datetime.utcnow().replace(second=0, microsecond=0) if expires is None: expires = get_expires(expires, now=now) expires = expires.replace(second=0, microsecond=0) builder = x509.CertificateBuilder() builder = builder.not_valid_before(now) builder = builder.not_valid_after(expires) builder = builder.serial_number(x509.random_serial_number()) return builder
[ "def", "get_cert_builder", "(", "expires", ")", ":", "now", "=", "datetime", ".", "utcnow", "(", ")", ".", "replace", "(", "second", "=", "0", ",", "microsecond", "=", "0", ")", "if", "expires", "is", "None", ":", "expires", "=", "get_expires", "(", ...
Get a basic X509 cert builder object. Parameters ---------- expires : datetime When this certificate will expire.
[ "Get", "a", "basic", "X509", "cert", "builder", "object", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L656-L676
19,960
mathiasertl/django-ca
ca/django_ca/utils.py
wrap_file_exceptions
def wrap_file_exceptions(): """Contextmanager to wrap file exceptions into identicaly exceptions in py2 and py3. This should be removed once py2 support is dropped. """ try: yield except (PermissionError, FileNotFoundError): # pragma: only py3 # In py3, we want to raise Exception unchanged, so there would be no need for this block. # BUT (IOError, OSError) - see below - also matches, so we capture it here raise except (IOError, OSError) as e: # pragma: only py2 if e.errno == errno.EACCES: raise PermissionError(str(e)) elif e.errno == errno.ENOENT: raise FileNotFoundError(str(e)) raise
python
def wrap_file_exceptions(): try: yield except (PermissionError, FileNotFoundError): # pragma: only py3 # In py3, we want to raise Exception unchanged, so there would be no need for this block. # BUT (IOError, OSError) - see below - also matches, so we capture it here raise except (IOError, OSError) as e: # pragma: only py2 if e.errno == errno.EACCES: raise PermissionError(str(e)) elif e.errno == errno.ENOENT: raise FileNotFoundError(str(e)) raise
[ "def", "wrap_file_exceptions", "(", ")", ":", "try", ":", "yield", "except", "(", "PermissionError", ",", "FileNotFoundError", ")", ":", "# pragma: only py3", "# In py3, we want to raise Exception unchanged, so there would be no need for this block.", "# BUT (IOError, OSError) - se...
Contextmanager to wrap file exceptions into identicaly exceptions in py2 and py3. This should be removed once py2 support is dropped.
[ "Contextmanager", "to", "wrap", "file", "exceptions", "into", "identicaly", "exceptions", "in", "py2", "and", "py3", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L695-L711
19,961
mathiasertl/django-ca
ca/django_ca/utils.py
read_file
def read_file(path): """Read the file from the given path. If ``path`` is an absolute path, reads a file from the local filesystem. For relative paths, read the file using the storage backend configured using :ref:`CA_FILE_STORAGE <settings-ca-file-storage>`. """ if os.path.isabs(path): with wrap_file_exceptions(): with open(path, 'rb') as stream: return stream.read() with wrap_file_exceptions(): stream = ca_storage.open(path) try: return stream.read() finally: stream.close()
python
def read_file(path): if os.path.isabs(path): with wrap_file_exceptions(): with open(path, 'rb') as stream: return stream.read() with wrap_file_exceptions(): stream = ca_storage.open(path) try: return stream.read() finally: stream.close()
[ "def", "read_file", "(", "path", ")", ":", "if", "os", ".", "path", ".", "isabs", "(", "path", ")", ":", "with", "wrap_file_exceptions", "(", ")", ":", "with", "open", "(", "path", ",", "'rb'", ")", "as", "stream", ":", "return", "stream", ".", "re...
Read the file from the given path. If ``path`` is an absolute path, reads a file from the local filesystem. For relative paths, read the file using the storage backend configured using :ref:`CA_FILE_STORAGE <settings-ca-file-storage>`.
[ "Read", "the", "file", "from", "the", "given", "path", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L714-L731
19,962
mathiasertl/django-ca
ca/django_ca/utils.py
get_extension_name
def get_extension_name(ext): """Function to get the name of an extension.""" # In cryptography 2.2, SCTs return "Unknown OID" if ext.oid == ExtensionOID.PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS: return 'SignedCertificateTimestampList' # Until at least cryptography 2.6.1, PrecertPoison has no name # https://github.com/pyca/cryptography/issues/4817 elif ca_settings.CRYPTOGRAPHY_HAS_PRECERT_POISON: # pragma: no branch, pragma: only cryptography>=2.4 if ext.oid == ExtensionOID.PRECERT_POISON: return 'PrecertPoison' # uppercase the FIRST letter only ("keyUsage" -> "KeyUsage") return re.sub('^([a-z])', lambda x: x.groups()[0].upper(), ext.oid._name)
python
def get_extension_name(ext): # In cryptography 2.2, SCTs return "Unknown OID" if ext.oid == ExtensionOID.PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS: return 'SignedCertificateTimestampList' # Until at least cryptography 2.6.1, PrecertPoison has no name # https://github.com/pyca/cryptography/issues/4817 elif ca_settings.CRYPTOGRAPHY_HAS_PRECERT_POISON: # pragma: no branch, pragma: only cryptography>=2.4 if ext.oid == ExtensionOID.PRECERT_POISON: return 'PrecertPoison' # uppercase the FIRST letter only ("keyUsage" -> "KeyUsage") return re.sub('^([a-z])', lambda x: x.groups()[0].upper(), ext.oid._name)
[ "def", "get_extension_name", "(", "ext", ")", ":", "# In cryptography 2.2, SCTs return \"Unknown OID\"", "if", "ext", ".", "oid", "==", "ExtensionOID", ".", "PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS", ":", "return", "'SignedCertificateTimestampList'", "# Until at least cryptography ...
Function to get the name of an extension.
[ "Function", "to", "get", "the", "name", "of", "an", "extension", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L734-L748
19,963
mathiasertl/django-ca
ca/django_ca/utils.py
shlex_split
def shlex_split(s, sep): """Split a character on the given set of characters. Example:: >>> shlex_split('foo,bar', ', ') ['foo', 'bar'] >>> shlex_split('foo\\\\,bar1', ',') # escape a separator ['foo,bar1'] >>> shlex_split('"foo,bar", bla', ', ') ['foo,bar', 'bla'] >>> shlex_split('foo,"bar,bla"', ',') ['foo', 'bar,bla'] """ lex = shlex.shlex(s, posix=True) lex.whitespace = sep lex.whitespace_split = True return [l for l in lex]
python
def shlex_split(s, sep): lex = shlex.shlex(s, posix=True) lex.whitespace = sep lex.whitespace_split = True return [l for l in lex]
[ "def", "shlex_split", "(", "s", ",", "sep", ")", ":", "lex", "=", "shlex", ".", "shlex", "(", "s", ",", "posix", "=", "True", ")", "lex", ".", "whitespace", "=", "sep", "lex", ".", "whitespace_split", "=", "True", "return", "[", "l", "for", "l", ...
Split a character on the given set of characters. Example:: >>> shlex_split('foo,bar', ', ') ['foo', 'bar'] >>> shlex_split('foo\\\\,bar1', ',') # escape a separator ['foo,bar1'] >>> shlex_split('"foo,bar", bla', ', ') ['foo,bar', 'bla'] >>> shlex_split('foo,"bar,bla"', ',') ['foo', 'bar,bla']
[ "Split", "a", "character", "on", "the", "given", "set", "of", "characters", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/utils.py#L766-L783
19,964
mathiasertl/django-ca
ca/django_ca/models.py
X509CertMixin.get_revocation_reason
def get_revocation_reason(self): """Get the revocation reason of this certificate.""" if self.revoked is False: return if self.revoked_reason == '' or self.revoked_reason is None: return x509.ReasonFlags.unspecified else: return getattr(x509.ReasonFlags, self.revoked_reason)
python
def get_revocation_reason(self): if self.revoked is False: return if self.revoked_reason == '' or self.revoked_reason is None: return x509.ReasonFlags.unspecified else: return getattr(x509.ReasonFlags, self.revoked_reason)
[ "def", "get_revocation_reason", "(", "self", ")", ":", "if", "self", ".", "revoked", "is", "False", ":", "return", "if", "self", ".", "revoked_reason", "==", "''", "or", "self", ".", "revoked_reason", "is", "None", ":", "return", "x509", ".", "ReasonFlags"...
Get the revocation reason of this certificate.
[ "Get", "the", "revocation", "reason", "of", "this", "certificate", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/models.py#L159-L167
19,965
mathiasertl/django-ca
ca/django_ca/models.py
X509CertMixin.get_revocation_time
def get_revocation_time(self): """Get the revocation time as naive datetime. Note that this method is only used by cryptography>=2.4. """ if self.revoked is False: return if timezone.is_aware(self.revoked_date): # convert datetime object to UTC and make it naive return timezone.make_naive(self.revoked_date, pytz.utc) return self.revoked_date
python
def get_revocation_time(self): if self.revoked is False: return if timezone.is_aware(self.revoked_date): # convert datetime object to UTC and make it naive return timezone.make_naive(self.revoked_date, pytz.utc) return self.revoked_date
[ "def", "get_revocation_time", "(", "self", ")", ":", "if", "self", ".", "revoked", "is", "False", ":", "return", "if", "timezone", ".", "is_aware", "(", "self", ".", "revoked_date", ")", ":", "# convert datetime object to UTC and make it naive", "return", "timezon...
Get the revocation time as naive datetime. Note that this method is only used by cryptography>=2.4.
[ "Get", "the", "revocation", "time", "as", "naive", "datetime", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/models.py#L179-L191
19,966
mathiasertl/django-ca
ca/django_ca/models.py
CertificateAuthority.get_authority_key_identifier
def get_authority_key_identifier(self): """Return the AuthorityKeyIdentifier extension used in certificates signed by this CA.""" try: ski = self.x509.extensions.get_extension_for_class(x509.SubjectKeyIdentifier) except x509.ExtensionNotFound: return x509.AuthorityKeyIdentifier.from_issuer_public_key(self.x509.public_key()) else: return x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ski)
python
def get_authority_key_identifier(self): try: ski = self.x509.extensions.get_extension_for_class(x509.SubjectKeyIdentifier) except x509.ExtensionNotFound: return x509.AuthorityKeyIdentifier.from_issuer_public_key(self.x509.public_key()) else: return x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ski)
[ "def", "get_authority_key_identifier", "(", "self", ")", ":", "try", ":", "ski", "=", "self", ".", "x509", ".", "extensions", ".", "get_extension_for_class", "(", "x509", ".", "SubjectKeyIdentifier", ")", "except", "x509", ".", "ExtensionNotFound", ":", "return"...
Return the AuthorityKeyIdentifier extension used in certificates signed by this CA.
[ "Return", "the", "AuthorityKeyIdentifier", "extension", "used", "in", "certificates", "signed", "by", "this", "CA", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/models.py#L598-L606
19,967
mathiasertl/django-ca
ca/django_ca/models.py
CertificateAuthority.max_pathlen
def max_pathlen(self): """The maximum pathlen for any intermediate CAs signed by this CA. This value is either ``None``, if this and all parent CAs don't have a ``pathlen`` attribute, or an ``int`` if any parent CA has the attribute. """ pathlen = self.pathlen if self.parent is None: return pathlen max_parent = self.parent.max_pathlen if max_parent is None: return pathlen elif pathlen is None: return max_parent - 1 else: return min(self.pathlen, max_parent - 1)
python
def max_pathlen(self): pathlen = self.pathlen if self.parent is None: return pathlen max_parent = self.parent.max_pathlen if max_parent is None: return pathlen elif pathlen is None: return max_parent - 1 else: return min(self.pathlen, max_parent - 1)
[ "def", "max_pathlen", "(", "self", ")", ":", "pathlen", "=", "self", ".", "pathlen", "if", "self", ".", "parent", "is", "None", ":", "return", "pathlen", "max_parent", "=", "self", ".", "parent", ".", "max_pathlen", "if", "max_parent", "is", "None", ":",...
The maximum pathlen for any intermediate CAs signed by this CA. This value is either ``None``, if this and all parent CAs don't have a ``pathlen`` attribute, or an ``int`` if any parent CA has the attribute.
[ "The", "maximum", "pathlen", "for", "any", "intermediate", "CAs", "signed", "by", "this", "CA", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/models.py#L721-L739
19,968
mathiasertl/django-ca
ca/django_ca/models.py
CertificateAuthority.bundle
def bundle(self): """A list of any parent CAs, including this CA. The list is ordered so the Root CA will be the first. """ ca = self bundle = [ca] while ca.parent is not None: bundle.append(ca.parent) ca = ca.parent return bundle
python
def bundle(self): ca = self bundle = [ca] while ca.parent is not None: bundle.append(ca.parent) ca = ca.parent return bundle
[ "def", "bundle", "(", "self", ")", ":", "ca", "=", "self", "bundle", "=", "[", "ca", "]", "while", "ca", ".", "parent", "is", "not", "None", ":", "bundle", ".", "append", "(", "ca", ".", "parent", ")", "ca", "=", "ca", ".", "parent", "return", ...
A list of any parent CAs, including this CA. The list is ordered so the Root CA will be the first.
[ "A", "list", "of", "any", "parent", "CAs", "including", "this", "CA", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/models.py#L749-L760
19,969
mathiasertl/django-ca
ca/django_ca/querysets.py
CertificateQuerySet.valid
def valid(self): """Return valid certificates.""" now = timezone.now() return self.filter(revoked=False, expires__gt=now, valid_from__lt=now)
python
def valid(self): now = timezone.now() return self.filter(revoked=False, expires__gt=now, valid_from__lt=now)
[ "def", "valid", "(", "self", ")", ":", "now", "=", "timezone", ".", "now", "(", ")", "return", "self", ".", "filter", "(", "revoked", "=", "False", ",", "expires__gt", "=", "now", ",", "valid_from__lt", "=", "now", ")" ]
Return valid certificates.
[ "Return", "valid", "certificates", "." ]
976d7ea05276320f20daed2a6d59c8f5660fe976
https://github.com/mathiasertl/django-ca/blob/976d7ea05276320f20daed2a6d59c8f5660fe976/ca/django_ca/querysets.py#L45-L49
19,970
saltstack/salt-pylint
setup.py
_release_version
def _release_version(): ''' Returns release version ''' with io.open(os.path.join(SETUP_DIRNAME, 'saltpylint', 'version.py'), encoding='utf-8') as fh_: exec_locals = {} exec_globals = {} contents = fh_.read() if not isinstance(contents, str): contents = contents.encode('utf-8') exec(contents, exec_globals, exec_locals) # pylint: disable=exec-used return exec_locals['__version__']
python
def _release_version(): ''' Returns release version ''' with io.open(os.path.join(SETUP_DIRNAME, 'saltpylint', 'version.py'), encoding='utf-8') as fh_: exec_locals = {} exec_globals = {} contents = fh_.read() if not isinstance(contents, str): contents = contents.encode('utf-8') exec(contents, exec_globals, exec_locals) # pylint: disable=exec-used return exec_locals['__version__']
[ "def", "_release_version", "(", ")", ":", "with", "io", ".", "open", "(", "os", ".", "path", ".", "join", "(", "SETUP_DIRNAME", ",", "'saltpylint'", ",", "'version.py'", ")", ",", "encoding", "=", "'utf-8'", ")", "as", "fh_", ":", "exec_locals", "=", "...
Returns release version
[ "Returns", "release", "version" ]
524a419d3bfc7dbd91c9c85040bc64935a275b24
https://github.com/saltstack/salt-pylint/blob/524a419d3bfc7dbd91c9c85040bc64935a275b24/setup.py#L45-L56
19,971
saltstack/salt-pylint
saltpylint/ext/pyqver2.py
get_versions
def get_versions(source): """Return information about the Python versions required for specific features. The return value is a dictionary with keys as a version number as a tuple (for example Python 2.6 is (2,6)) and the value are a list of features that require the indicated Python version. """ tree = compiler.parse(source) checker = compiler.walk(tree, NodeChecker()) return checker.vers
python
def get_versions(source): tree = compiler.parse(source) checker = compiler.walk(tree, NodeChecker()) return checker.vers
[ "def", "get_versions", "(", "source", ")", ":", "tree", "=", "compiler", ".", "parse", "(", "source", ")", "checker", "=", "compiler", ".", "walk", "(", "tree", ",", "NodeChecker", "(", ")", ")", "return", "checker", ".", "vers" ]
Return information about the Python versions required for specific features. The return value is a dictionary with keys as a version number as a tuple (for example Python 2.6 is (2,6)) and the value are a list of features that require the indicated Python version.
[ "Return", "information", "about", "the", "Python", "versions", "required", "for", "specific", "features", "." ]
524a419d3bfc7dbd91c9c85040bc64935a275b24
https://github.com/saltstack/salt-pylint/blob/524a419d3bfc7dbd91c9c85040bc64935a275b24/saltpylint/ext/pyqver2.py#L252-L261
19,972
saltstack/salt-pylint
saltpylint/strings.py
StringLiteralChecker.process_non_raw_string_token
def process_non_raw_string_token(self, prefix, string_body, start_row): ''' check for bad escapes in a non-raw string. prefix: lowercase string of eg 'ur' string prefix markers. string_body: the un-parsed body of the string, not including the quote marks. start_row: integer line number in the source. ''' if 'u' in prefix: if string_body.find('\\0') != -1: self.add_message('null-byte-unicode-literal', line=start_row)
python
def process_non_raw_string_token(self, prefix, string_body, start_row): ''' check for bad escapes in a non-raw string. prefix: lowercase string of eg 'ur' string prefix markers. string_body: the un-parsed body of the string, not including the quote marks. start_row: integer line number in the source. ''' if 'u' in prefix: if string_body.find('\\0') != -1: self.add_message('null-byte-unicode-literal', line=start_row)
[ "def", "process_non_raw_string_token", "(", "self", ",", "prefix", ",", "string_body", ",", "start_row", ")", ":", "if", "'u'", "in", "prefix", ":", "if", "string_body", ".", "find", "(", "'\\\\0'", ")", "!=", "-", "1", ":", "self", ".", "add_message", "...
check for bad escapes in a non-raw string. prefix: lowercase string of eg 'ur' string prefix markers. string_body: the un-parsed body of the string, not including the quote marks. start_row: integer line number in the source.
[ "check", "for", "bad", "escapes", "in", "a", "non", "-", "raw", "string", "." ]
524a419d3bfc7dbd91c9c85040bc64935a275b24
https://github.com/saltstack/salt-pylint/blob/524a419d3bfc7dbd91c9c85040bc64935a275b24/saltpylint/strings.py#L247-L258
19,973
saltstack/salt-pylint
saltpylint/blacklist.py
register
def register(linter): ''' Required method to auto register this checker ''' linter.register_checker(ResourceLeakageChecker(linter)) linter.register_checker(BlacklistedImportsChecker(linter)) linter.register_checker(MovedTestCaseClassChecker(linter)) linter.register_checker(BlacklistedLoaderModulesUsageChecker(linter)) linter.register_checker(BlacklistedFunctionsChecker(linter))
python
def register(linter): ''' Required method to auto register this checker ''' linter.register_checker(ResourceLeakageChecker(linter)) linter.register_checker(BlacklistedImportsChecker(linter)) linter.register_checker(MovedTestCaseClassChecker(linter)) linter.register_checker(BlacklistedLoaderModulesUsageChecker(linter)) linter.register_checker(BlacklistedFunctionsChecker(linter))
[ "def", "register", "(", "linter", ")", ":", "linter", ".", "register_checker", "(", "ResourceLeakageChecker", "(", "linter", ")", ")", "linter", ".", "register_checker", "(", "BlacklistedImportsChecker", "(", "linter", ")", ")", "linter", ".", "register_checker", ...
Required method to auto register this checker
[ "Required", "method", "to", "auto", "register", "this", "checker" ]
524a419d3bfc7dbd91c9c85040bc64935a275b24
https://github.com/saltstack/salt-pylint/blob/524a419d3bfc7dbd91c9c85040bc64935a275b24/saltpylint/blacklist.py#L560-L568
19,974
saltstack/salt-pylint
saltpylint/smartup.py
register
def register(linter): ''' Register the transformation functions. ''' try: MANAGER.register_transform(nodes.Class, rootlogger_transform) except AttributeError: MANAGER.register_transform(nodes.ClassDef, rootlogger_transform)
python
def register(linter): ''' Register the transformation functions. ''' try: MANAGER.register_transform(nodes.Class, rootlogger_transform) except AttributeError: MANAGER.register_transform(nodes.ClassDef, rootlogger_transform)
[ "def", "register", "(", "linter", ")", ":", "try", ":", "MANAGER", ".", "register_transform", "(", "nodes", ".", "Class", ",", "rootlogger_transform", ")", "except", "AttributeError", ":", "MANAGER", ".", "register_transform", "(", "nodes", ".", "ClassDef", ",...
Register the transformation functions.
[ "Register", "the", "transformation", "functions", "." ]
524a419d3bfc7dbd91c9c85040bc64935a275b24
https://github.com/saltstack/salt-pylint/blob/524a419d3bfc7dbd91c9c85040bc64935a275b24/saltpylint/smartup.py#L39-L46
19,975
edx/xblock-utils
xblockutils/settings.py
XBlockWithSettingsMixin.get_xblock_settings
def get_xblock_settings(self, default=None): """ Gets XBlock-specific settigns for current XBlock Returns default if settings service is not available. Parameters: default - default value to be used in two cases: * No settings service is available * As a `default` parameter to `SettingsService.get_settings_bucket` """ settings_service = self.runtime.service(self, "settings") if settings_service: return settings_service.get_settings_bucket(self, default=default) return default
python
def get_xblock_settings(self, default=None): settings_service = self.runtime.service(self, "settings") if settings_service: return settings_service.get_settings_bucket(self, default=default) return default
[ "def", "get_xblock_settings", "(", "self", ",", "default", "=", "None", ")", ":", "settings_service", "=", "self", ".", "runtime", ".", "service", "(", "self", ",", "\"settings\"", ")", "if", "settings_service", ":", "return", "settings_service", ".", "get_set...
Gets XBlock-specific settigns for current XBlock Returns default if settings service is not available. Parameters: default - default value to be used in two cases: * No settings service is available * As a `default` parameter to `SettingsService.get_settings_bucket`
[ "Gets", "XBlock", "-", "specific", "settigns", "for", "current", "XBlock" ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/settings.py#L25-L39
19,976
edx/xblock-utils
xblockutils/settings.py
ThemableXBlockMixin.include_theme_files
def include_theme_files(self, fragment): """ Gets theme configuration and renders theme css into fragment """ theme = self.get_theme() if not theme or 'package' not in theme: return theme_package, theme_files = theme.get('package', None), theme.get('locations', []) resource_loader = ResourceLoader(theme_package) for theme_file in theme_files: fragment.add_css(resource_loader.load_unicode(theme_file))
python
def include_theme_files(self, fragment): theme = self.get_theme() if not theme or 'package' not in theme: return theme_package, theme_files = theme.get('package', None), theme.get('locations', []) resource_loader = ResourceLoader(theme_package) for theme_file in theme_files: fragment.add_css(resource_loader.load_unicode(theme_file))
[ "def", "include_theme_files", "(", "self", ",", "fragment", ")", ":", "theme", "=", "self", ".", "get_theme", "(", ")", "if", "not", "theme", "or", "'package'", "not", "in", "theme", ":", "return", "theme_package", ",", "theme_files", "=", "theme", ".", ...
Gets theme configuration and renders theme css into fragment
[ "Gets", "theme", "configuration", "and", "renders", "theme", "css", "into", "fragment" ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/settings.py#L81-L92
19,977
edx/xblock-utils
xblockutils/resources.py
ResourceLoader.load_unicode
def load_unicode(self, resource_path): """ Gets the content of a resource """ resource_content = pkg_resources.resource_string(self.module_name, resource_path) return resource_content.decode('utf-8')
python
def load_unicode(self, resource_path): resource_content = pkg_resources.resource_string(self.module_name, resource_path) return resource_content.decode('utf-8')
[ "def", "load_unicode", "(", "self", ",", "resource_path", ")", ":", "resource_content", "=", "pkg_resources", ".", "resource_string", "(", "self", ".", "module_name", ",", "resource_path", ")", "return", "resource_content", ".", "decode", "(", "'utf-8'", ")" ]
Gets the content of a resource
[ "Gets", "the", "content", "of", "a", "resource" ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/resources.py#L46-L51
19,978
edx/xblock-utils
xblockutils/resources.py
ResourceLoader.render_django_template
def render_django_template(self, template_path, context=None, i18n_service=None): """ Evaluate a django template by resource path, applying the provided context. """ context = context or {} context['_i18n_service'] = i18n_service libraries = { 'i18n': 'xblockutils.templatetags.i18n', } # For django 1.8, we have to load the libraries manually, and restore them once the template is rendered. _libraries = None if django.VERSION[0] == 1 and django.VERSION[1] == 8: _libraries = TemplateBase.libraries.copy() for library_name in libraries: library = TemplateBase.import_library(libraries[library_name]) if library: TemplateBase.libraries[library_name] = library engine = Engine() else: # Django>1.8 Engine can load the extra templatetag libraries itself # but we have to override the default installed libraries. from django.template.backends.django import get_installed_libraries installed_libraries = get_installed_libraries() installed_libraries.update(libraries) engine = Engine(libraries=installed_libraries) template_str = self.load_unicode(template_path) template = Template(template_str, engine=engine) rendered = template.render(Context(context)) # Restore the original TemplateBase.libraries if _libraries is not None: TemplateBase.libraries = _libraries return rendered
python
def render_django_template(self, template_path, context=None, i18n_service=None): context = context or {} context['_i18n_service'] = i18n_service libraries = { 'i18n': 'xblockutils.templatetags.i18n', } # For django 1.8, we have to load the libraries manually, and restore them once the template is rendered. _libraries = None if django.VERSION[0] == 1 and django.VERSION[1] == 8: _libraries = TemplateBase.libraries.copy() for library_name in libraries: library = TemplateBase.import_library(libraries[library_name]) if library: TemplateBase.libraries[library_name] = library engine = Engine() else: # Django>1.8 Engine can load the extra templatetag libraries itself # but we have to override the default installed libraries. from django.template.backends.django import get_installed_libraries installed_libraries = get_installed_libraries() installed_libraries.update(libraries) engine = Engine(libraries=installed_libraries) template_str = self.load_unicode(template_path) template = Template(template_str, engine=engine) rendered = template.render(Context(context)) # Restore the original TemplateBase.libraries if _libraries is not None: TemplateBase.libraries = _libraries return rendered
[ "def", "render_django_template", "(", "self", ",", "template_path", ",", "context", "=", "None", ",", "i18n_service", "=", "None", ")", ":", "context", "=", "context", "or", "{", "}", "context", "[", "'_i18n_service'", "]", "=", "i18n_service", "libraries", ...
Evaluate a django template by resource path, applying the provided context.
[ "Evaluate", "a", "django", "template", "by", "resource", "path", "applying", "the", "provided", "context", "." ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/resources.py#L53-L88
19,979
edx/xblock-utils
xblockutils/resources.py
ResourceLoader.render_mako_template
def render_mako_template(self, template_path, context=None): """ Evaluate a mako template by resource path, applying the provided context """ context = context or {} template_str = self.load_unicode(template_path) lookup = MakoTemplateLookup(directories=[pkg_resources.resource_filename(self.module_name, '')]) template = MakoTemplate(template_str, lookup=lookup) return template.render(**context)
python
def render_mako_template(self, template_path, context=None): context = context or {} template_str = self.load_unicode(template_path) lookup = MakoTemplateLookup(directories=[pkg_resources.resource_filename(self.module_name, '')]) template = MakoTemplate(template_str, lookup=lookup) return template.render(**context)
[ "def", "render_mako_template", "(", "self", ",", "template_path", ",", "context", "=", "None", ")", ":", "context", "=", "context", "or", "{", "}", "template_str", "=", "self", ".", "load_unicode", "(", "template_path", ")", "lookup", "=", "MakoTemplateLookup"...
Evaluate a mako template by resource path, applying the provided context
[ "Evaluate", "a", "mako", "template", "by", "resource", "path", "applying", "the", "provided", "context" ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/resources.py#L90-L98
19,980
edx/xblock-utils
xblockutils/resources.py
ResourceLoader.render_template
def render_template(self, template_path, context=None): """ This function has been deprecated. It calls render_django_template to support backwards compatibility. """ warnings.warn( "ResourceLoader.render_template has been deprecated in favor of ResourceLoader.render_django_template" ) return self.render_django_template(template_path, context)
python
def render_template(self, template_path, context=None): warnings.warn( "ResourceLoader.render_template has been deprecated in favor of ResourceLoader.render_django_template" ) return self.render_django_template(template_path, context)
[ "def", "render_template", "(", "self", ",", "template_path", ",", "context", "=", "None", ")", ":", "warnings", ".", "warn", "(", "\"ResourceLoader.render_template has been deprecated in favor of ResourceLoader.render_django_template\"", ")", "return", "self", ".", "render_...
This function has been deprecated. It calls render_django_template to support backwards compatibility.
[ "This", "function", "has", "been", "deprecated", ".", "It", "calls", "render_django_template", "to", "support", "backwards", "compatibility", "." ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/resources.py#L100-L107
19,981
edx/xblock-utils
xblockutils/resources.py
ResourceLoader.render_js_template
def render_js_template(self, template_path, element_id, context=None): """ Render a js template. """ context = context or {} return u"<script type='text/template' id='{}'>\n{}\n</script>".format( element_id, self.render_template(template_path, context) )
python
def render_js_template(self, template_path, element_id, context=None): context = context or {} return u"<script type='text/template' id='{}'>\n{}\n</script>".format( element_id, self.render_template(template_path, context) )
[ "def", "render_js_template", "(", "self", ",", "template_path", ",", "element_id", ",", "context", "=", "None", ")", ":", "context", "=", "context", "or", "{", "}", "return", "u\"<script type='text/template' id='{}'>\\n{}\\n</script>\"", ".", "format", "(", "element...
Render a js template.
[ "Render", "a", "js", "template", "." ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/resources.py#L109-L117
19,982
edx/xblock-utils
xblockutils/templatetags/i18n.py
ProxyTransNode.merge_translation
def merge_translation(self, context): """ Context wrapper which modifies the given language's translation catalog using the i18n service, if found. """ language = get_language() i18n_service = context.get('_i18n_service', None) if i18n_service: # Cache the original translation object to reduce overhead if language not in self._translations: self._translations[language] = trans_real.DjangoTranslation(language) translation = trans_real.translation(language) translation.merge(i18n_service) yield # Revert to original translation object if language in self._translations: trans_real._translations[language] = self._translations[language] # Re-activate the current language to reset translation caches trans_real.activate(language)
python
def merge_translation(self, context): language = get_language() i18n_service = context.get('_i18n_service', None) if i18n_service: # Cache the original translation object to reduce overhead if language not in self._translations: self._translations[language] = trans_real.DjangoTranslation(language) translation = trans_real.translation(language) translation.merge(i18n_service) yield # Revert to original translation object if language in self._translations: trans_real._translations[language] = self._translations[language] # Re-activate the current language to reset translation caches trans_real.activate(language)
[ "def", "merge_translation", "(", "self", ",", "context", ")", ":", "language", "=", "get_language", "(", ")", "i18n_service", "=", "context", ".", "get", "(", "'_i18n_service'", ",", "None", ")", "if", "i18n_service", ":", "# Cache the original translation object ...
Context wrapper which modifies the given language's translation catalog using the i18n service, if found.
[ "Context", "wrapper", "which", "modifies", "the", "given", "language", "s", "translation", "catalog", "using", "the", "i18n", "service", "if", "found", "." ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/templatetags/i18n.py#L29-L49
19,983
edx/xblock-utils
xblockutils/templatetags/i18n.py
ProxyTransNode.render
def render(self, context): """ Renders the translated text using the XBlock i18n service, if available. """ with self.merge_translation(context): django_translated = self.do_translate.render(context) return django_translated
python
def render(self, context): with self.merge_translation(context): django_translated = self.do_translate.render(context) return django_translated
[ "def", "render", "(", "self", ",", "context", ")", ":", "with", "self", ".", "merge_translation", "(", "context", ")", ":", "django_translated", "=", "self", ".", "do_translate", ".", "render", "(", "context", ")", "return", "django_translated" ]
Renders the translated text using the XBlock i18n service, if available.
[ "Renders", "the", "translated", "text", "using", "the", "XBlock", "i18n", "service", "if", "available", "." ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/templatetags/i18n.py#L51-L58
19,984
edx/xblock-utils
xblockutils/studio_editable.py
StudioEditableXBlockMixin.studio_view
def studio_view(self, context): """ Render a form for editing this XBlock """ fragment = Fragment() context = {'fields': []} # Build a list of all the fields that can be edited: for field_name in self.editable_fields: field = self.fields[field_name] assert field.scope in (Scope.content, Scope.settings), ( "Only Scope.content or Scope.settings fields can be used with " "StudioEditableXBlockMixin. Other scopes are for user-specific data and are " "not generally created/configured by content authors in Studio." ) field_info = self._make_field_info(field_name, field) if field_info is not None: context["fields"].append(field_info) fragment.content = loader.render_template('templates/studio_edit.html', context) fragment.add_javascript(loader.load_unicode('public/studio_edit.js')) fragment.initialize_js('StudioEditableXBlockMixin') return fragment
python
def studio_view(self, context): fragment = Fragment() context = {'fields': []} # Build a list of all the fields that can be edited: for field_name in self.editable_fields: field = self.fields[field_name] assert field.scope in (Scope.content, Scope.settings), ( "Only Scope.content or Scope.settings fields can be used with " "StudioEditableXBlockMixin. Other scopes are for user-specific data and are " "not generally created/configured by content authors in Studio." ) field_info = self._make_field_info(field_name, field) if field_info is not None: context["fields"].append(field_info) fragment.content = loader.render_template('templates/studio_edit.html', context) fragment.add_javascript(loader.load_unicode('public/studio_edit.js')) fragment.initialize_js('StudioEditableXBlockMixin') return fragment
[ "def", "studio_view", "(", "self", ",", "context", ")", ":", "fragment", "=", "Fragment", "(", ")", "context", "=", "{", "'fields'", ":", "[", "]", "}", "# Build a list of all the fields that can be edited:", "for", "field_name", "in", "self", ".", "editable_fie...
Render a form for editing this XBlock
[ "Render", "a", "form", "for", "editing", "this", "XBlock" ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/studio_editable.py#L78-L98
19,985
edx/xblock-utils
xblockutils/studio_editable.py
StudioEditableXBlockMixin.validate
def validate(self): """ Validates the state of this XBlock. Subclasses should override validate_field_data() to validate fields and override this only for validation not related to this block's field values. """ validation = super(StudioEditableXBlockMixin, self).validate() self.validate_field_data(validation, self) return validation
python
def validate(self): validation = super(StudioEditableXBlockMixin, self).validate() self.validate_field_data(validation, self) return validation
[ "def", "validate", "(", "self", ")", ":", "validation", "=", "super", "(", "StudioEditableXBlockMixin", ",", "self", ")", ".", "validate", "(", ")", "self", ".", "validate_field_data", "(", "validation", ",", "self", ")", "return", "validation" ]
Validates the state of this XBlock. Subclasses should override validate_field_data() to validate fields and override this only for validation not related to this block's field values.
[ "Validates", "the", "state", "of", "this", "XBlock", "." ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/studio_editable.py#L264-L273
19,986
edx/xblock-utils
xblockutils/studio_editable.py
StudioContainerXBlockMixin.render_children
def render_children(self, context, fragment, can_reorder=True, can_add=False): """ Renders the children of the module with HTML appropriate for Studio. If can_reorder is True, then the children will be rendered to support drag and drop. """ contents = [] child_context = {'reorderable_items': set()} if context: child_context.update(context) for child_id in self.children: child = self.runtime.get_block(child_id) if can_reorder: child_context['reorderable_items'].add(child.scope_ids.usage_id) view_to_render = 'author_view' if hasattr(child, 'author_view') else 'student_view' rendered_child = child.render(view_to_render, child_context) fragment.add_frag_resources(rendered_child) contents.append({ 'id': text_type(child.scope_ids.usage_id), 'content': rendered_child.content }) fragment.add_content(self.runtime.render_template("studio_render_children_view.html", { 'items': contents, 'xblock_context': context, 'can_add': can_add, 'can_reorder': can_reorder, }))
python
def render_children(self, context, fragment, can_reorder=True, can_add=False): contents = [] child_context = {'reorderable_items': set()} if context: child_context.update(context) for child_id in self.children: child = self.runtime.get_block(child_id) if can_reorder: child_context['reorderable_items'].add(child.scope_ids.usage_id) view_to_render = 'author_view' if hasattr(child, 'author_view') else 'student_view' rendered_child = child.render(view_to_render, child_context) fragment.add_frag_resources(rendered_child) contents.append({ 'id': text_type(child.scope_ids.usage_id), 'content': rendered_child.content }) fragment.add_content(self.runtime.render_template("studio_render_children_view.html", { 'items': contents, 'xblock_context': context, 'can_add': can_add, 'can_reorder': can_reorder, }))
[ "def", "render_children", "(", "self", ",", "context", ",", "fragment", ",", "can_reorder", "=", "True", ",", "can_add", "=", "False", ")", ":", "contents", "=", "[", "]", "child_context", "=", "{", "'reorderable_items'", ":", "set", "(", ")", "}", "if",...
Renders the children of the module with HTML appropriate for Studio. If can_reorder is True, then the children will be rendered to support drag and drop.
[ "Renders", "the", "children", "of", "the", "module", "with", "HTML", "appropriate", "for", "Studio", ".", "If", "can_reorder", "is", "True", "then", "the", "children", "will", "be", "rendered", "to", "support", "drag", "and", "drop", "." ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/studio_editable.py#L283-L312
19,987
edx/xblock-utils
xblockutils/studio_editable.py
StudioContainerXBlockMixin.author_view
def author_view(self, context): """ Display a the studio editor when the user has clicked "View" to see the container view, otherwise just show the normal 'author_preview_view' or 'student_view' preview. """ root_xblock = context.get('root_xblock') if root_xblock and root_xblock.location == self.location: # User has clicked the "View" link. Show an editable preview of this block's children return self.author_edit_view(context) return self.author_preview_view(context)
python
def author_view(self, context): root_xblock = context.get('root_xblock') if root_xblock and root_xblock.location == self.location: # User has clicked the "View" link. Show an editable preview of this block's children return self.author_edit_view(context) return self.author_preview_view(context)
[ "def", "author_view", "(", "self", ",", "context", ")", ":", "root_xblock", "=", "context", ".", "get", "(", "'root_xblock'", ")", "if", "root_xblock", "and", "root_xblock", ".", "location", "==", "self", ".", "location", ":", "# User has clicked the \"View\" li...
Display a the studio editor when the user has clicked "View" to see the container view, otherwise just show the normal 'author_preview_view' or 'student_view' preview.
[ "Display", "a", "the", "studio", "editor", "when", "the", "user", "has", "clicked", "View", "to", "see", "the", "container", "view", "otherwise", "just", "show", "the", "normal", "author_preview_view", "or", "student_view", "preview", "." ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/studio_editable.py#L314-L324
19,988
edx/xblock-utils
xblockutils/studio_editable.py
StudioContainerXBlockMixin.author_edit_view
def author_edit_view(self, context): """ Child blocks can override this to control the view shown to authors in Studio when editing this block's children. """ fragment = Fragment() self.render_children(context, fragment, can_reorder=True, can_add=False) return fragment
python
def author_edit_view(self, context): fragment = Fragment() self.render_children(context, fragment, can_reorder=True, can_add=False) return fragment
[ "def", "author_edit_view", "(", "self", ",", "context", ")", ":", "fragment", "=", "Fragment", "(", ")", "self", ".", "render_children", "(", "context", ",", "fragment", ",", "can_reorder", "=", "True", ",", "can_add", "=", "False", ")", "return", "fragmen...
Child blocks can override this to control the view shown to authors in Studio when editing this block's children.
[ "Child", "blocks", "can", "override", "this", "to", "control", "the", "view", "shown", "to", "authors", "in", "Studio", "when", "editing", "this", "block", "s", "children", "." ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/studio_editable.py#L326-L333
19,989
edx/xblock-utils
xblockutils/studio_editable.py
XBlockWithPreviewMixin.preview_view
def preview_view(self, context): """ Preview view - used by StudioContainerWithNestedXBlocksMixin to render nested xblocks in preview context. Default implementation uses author_view if available, otherwise falls back to student_view Child classes can override this method to control their presentation in preview context """ view_to_render = 'author_view' if hasattr(self, 'author_view') else 'student_view' renderer = getattr(self, view_to_render) return renderer(context)
python
def preview_view(self, context): view_to_render = 'author_view' if hasattr(self, 'author_view') else 'student_view' renderer = getattr(self, view_to_render) return renderer(context)
[ "def", "preview_view", "(", "self", ",", "context", ")", ":", "view_to_render", "=", "'author_view'", "if", "hasattr", "(", "self", ",", "'author_view'", ")", "else", "'student_view'", "renderer", "=", "getattr", "(", "self", ",", "view_to_render", ")", "retur...
Preview view - used by StudioContainerWithNestedXBlocksMixin to render nested xblocks in preview context. Default implementation uses author_view if available, otherwise falls back to student_view Child classes can override this method to control their presentation in preview context
[ "Preview", "view", "-", "used", "by", "StudioContainerWithNestedXBlocksMixin", "to", "render", "nested", "xblocks", "in", "preview", "context", ".", "Default", "implementation", "uses", "author_view", "if", "available", "otherwise", "falls", "back", "to", "student_vie...
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/studio_editable.py#L405-L413
19,990
edx/xblock-utils
xblockutils/studio_editable.py
StudioContainerWithNestedXBlocksMixin.get_nested_blocks_spec
def get_nested_blocks_spec(self): """ Converts allowed_nested_blocks items to NestedXBlockSpec to provide common interface """ return [ block_spec if isinstance(block_spec, NestedXBlockSpec) else NestedXBlockSpec(block_spec) for block_spec in self.allowed_nested_blocks ]
python
def get_nested_blocks_spec(self): return [ block_spec if isinstance(block_spec, NestedXBlockSpec) else NestedXBlockSpec(block_spec) for block_spec in self.allowed_nested_blocks ]
[ "def", "get_nested_blocks_spec", "(", "self", ")", ":", "return", "[", "block_spec", "if", "isinstance", "(", "block_spec", ",", "NestedXBlockSpec", ")", "else", "NestedXBlockSpec", "(", "block_spec", ")", "for", "block_spec", "in", "self", ".", "allowed_nested_bl...
Converts allowed_nested_blocks items to NestedXBlockSpec to provide common interface
[ "Converts", "allowed_nested_blocks", "items", "to", "NestedXBlockSpec", "to", "provide", "common", "interface" ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/studio_editable.py#L443-L450
19,991
edx/xblock-utils
xblockutils/studio_editable.py
StudioContainerWithNestedXBlocksMixin.author_preview_view
def author_preview_view(self, context): """ View for previewing contents in studio. """ children_contents = [] fragment = Fragment() for child_id in self.children: child = self.runtime.get_block(child_id) child_fragment = self._render_child_fragment(child, context, 'preview_view') fragment.add_frag_resources(child_fragment) children_contents.append(child_fragment.content) render_context = { 'block': self, 'children_contents': children_contents } render_context.update(context) fragment.add_content(self.loader.render_template(self.CHILD_PREVIEW_TEMPLATE, render_context)) return fragment
python
def author_preview_view(self, context): children_contents = [] fragment = Fragment() for child_id in self.children: child = self.runtime.get_block(child_id) child_fragment = self._render_child_fragment(child, context, 'preview_view') fragment.add_frag_resources(child_fragment) children_contents.append(child_fragment.content) render_context = { 'block': self, 'children_contents': children_contents } render_context.update(context) fragment.add_content(self.loader.render_template(self.CHILD_PREVIEW_TEMPLATE, render_context)) return fragment
[ "def", "author_preview_view", "(", "self", ",", "context", ")", ":", "children_contents", "=", "[", "]", "fragment", "=", "Fragment", "(", ")", "for", "child_id", "in", "self", ".", "children", ":", "child", "=", "self", ".", "runtime", ".", "get_block", ...
View for previewing contents in studio.
[ "View", "for", "previewing", "contents", "in", "studio", "." ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/studio_editable.py#L472-L491
19,992
edx/xblock-utils
xblockutils/studio_editable.py
StudioContainerWithNestedXBlocksMixin._render_child_fragment
def _render_child_fragment(self, child, context, view='student_view'): """ Helper method to overcome html block rendering quirks """ try: child_fragment = child.render(view, context) except NoSuchViewError: if child.scope_ids.block_type == 'html' and getattr(self.runtime, 'is_author_mode', False): # html block doesn't support preview_view, and if we use student_view Studio will wrap # it in HTML that we don't want in the preview. So just render its HTML directly: child_fragment = Fragment(child.data) else: child_fragment = child.render('student_view', context) return child_fragment
python
def _render_child_fragment(self, child, context, view='student_view'): try: child_fragment = child.render(view, context) except NoSuchViewError: if child.scope_ids.block_type == 'html' and getattr(self.runtime, 'is_author_mode', False): # html block doesn't support preview_view, and if we use student_view Studio will wrap # it in HTML that we don't want in the preview. So just render its HTML directly: child_fragment = Fragment(child.data) else: child_fragment = child.render('student_view', context) return child_fragment
[ "def", "_render_child_fragment", "(", "self", ",", "child", ",", "context", ",", "view", "=", "'student_view'", ")", ":", "try", ":", "child_fragment", "=", "child", ".", "render", "(", "view", ",", "context", ")", "except", "NoSuchViewError", ":", "if", "...
Helper method to overcome html block rendering quirks
[ "Helper", "method", "to", "overcome", "html", "block", "rendering", "quirks" ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/studio_editable.py#L493-L507
19,993
edx/xblock-utils
setup.py
package_data
def package_data(pkg, root_list): """Generic function to find package_data for `pkg` under `root`.""" data = [] for root in root_list: for dirname, _, files in os.walk(os.path.join(pkg, root)): for fname in files: data.append(os.path.relpath(os.path.join(dirname, fname), pkg)) return {pkg: data}
python
def package_data(pkg, root_list): data = [] for root in root_list: for dirname, _, files in os.walk(os.path.join(pkg, root)): for fname in files: data.append(os.path.relpath(os.path.join(dirname, fname), pkg)) return {pkg: data}
[ "def", "package_data", "(", "pkg", ",", "root_list", ")", ":", "data", "=", "[", "]", "for", "root", "in", "root_list", ":", "for", "dirname", ",", "_", ",", "files", "in", "os", ".", "walk", "(", "os", ".", "path", ".", "join", "(", "pkg", ",", ...
Generic function to find package_data for `pkg` under `root`.
[ "Generic", "function", "to", "find", "package_data", "for", "pkg", "under", "root", "." ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/setup.py#L29-L37
19,994
edx/xblock-utils
setup.py
load_requirements
def load_requirements(*requirements_paths): """ Load all requirements from the specified requirements files. Returns a list of requirement strings. """ requirements = set() for path in requirements_paths: requirements.update( line.split('#')[0].strip() for line in open(path).readlines() if is_requirement(line.strip()) ) return list(requirements)
python
def load_requirements(*requirements_paths): requirements = set() for path in requirements_paths: requirements.update( line.split('#')[0].strip() for line in open(path).readlines() if is_requirement(line.strip()) ) return list(requirements)
[ "def", "load_requirements", "(", "*", "requirements_paths", ")", ":", "requirements", "=", "set", "(", ")", "for", "path", "in", "requirements_paths", ":", "requirements", ".", "update", "(", "line", ".", "split", "(", "'#'", ")", "[", "0", "]", ".", "st...
Load all requirements from the specified requirements files. Returns a list of requirement strings.
[ "Load", "all", "requirements", "from", "the", "specified", "requirements", "files", ".", "Returns", "a", "list", "of", "requirement", "strings", "." ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/setup.py#L40-L51
19,995
edx/xblock-utils
setup.py
is_requirement
def is_requirement(line): """ Return True if the requirement line is a package requirement; that is, it is not blank, a comment, a URL, or an included file. """ return not ( line == '' or line.startswith('-r') or line.startswith('#') or line.startswith('-e') or line.startswith('git+') )
python
def is_requirement(line): return not ( line == '' or line.startswith('-r') or line.startswith('#') or line.startswith('-e') or line.startswith('git+') )
[ "def", "is_requirement", "(", "line", ")", ":", "return", "not", "(", "line", "==", "''", "or", "line", ".", "startswith", "(", "'-r'", ")", "or", "line", ".", "startswith", "(", "'#'", ")", "or", "line", ".", "startswith", "(", "'-e'", ")", "or", ...
Return True if the requirement line is a package requirement; that is, it is not blank, a comment, a URL, or an included file.
[ "Return", "True", "if", "the", "requirement", "line", "is", "a", "package", "requirement", ";", "that", "is", "it", "is", "not", "blank", "a", "comment", "a", "URL", "or", "an", "included", "file", "." ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/setup.py#L54-L65
19,996
edx/xblock-utils
xblockutils/publish_event.py
PublishEventMixin.publish_event
def publish_event(self, data, suffix=''): """ AJAX handler to allow client-side code to publish a server-side event """ try: event_type = data.pop('event_type') except KeyError: return {'result': 'error', 'message': 'Missing event_type in JSON data'} return self.publish_event_from_dict(event_type, data)
python
def publish_event(self, data, suffix=''): try: event_type = data.pop('event_type') except KeyError: return {'result': 'error', 'message': 'Missing event_type in JSON data'} return self.publish_event_from_dict(event_type, data)
[ "def", "publish_event", "(", "self", ",", "data", ",", "suffix", "=", "''", ")", ":", "try", ":", "event_type", "=", "data", ".", "pop", "(", "'event_type'", ")", "except", "KeyError", ":", "return", "{", "'result'", ":", "'error'", ",", "'message'", "...
AJAX handler to allow client-side code to publish a server-side event
[ "AJAX", "handler", "to", "allow", "client", "-", "side", "code", "to", "publish", "a", "server", "-", "side", "event" ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/publish_event.py#L37-L46
19,997
edx/xblock-utils
xblockutils/publish_event.py
PublishEventMixin.publish_event_from_dict
def publish_event_from_dict(self, event_type, data): """ Combine 'data' with self.additional_publish_event_data and publish an event """ for key, value in self.additional_publish_event_data.items(): if key in data: return {'result': 'error', 'message': 'Key should not be in publish_event data: {}'.format(key)} data[key] = value self.runtime.publish(self, event_type, data) return {'result': 'success'}
python
def publish_event_from_dict(self, event_type, data): for key, value in self.additional_publish_event_data.items(): if key in data: return {'result': 'error', 'message': 'Key should not be in publish_event data: {}'.format(key)} data[key] = value self.runtime.publish(self, event_type, data) return {'result': 'success'}
[ "def", "publish_event_from_dict", "(", "self", ",", "event_type", ",", "data", ")", ":", "for", "key", ",", "value", "in", "self", ".", "additional_publish_event_data", ".", "items", "(", ")", ":", "if", "key", "in", "data", ":", "return", "{", "'result'",...
Combine 'data' with self.additional_publish_event_data and publish an event
[ "Combine", "data", "with", "self", ".", "additional_publish_event_data", "and", "publish", "an", "event" ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/publish_event.py#L48-L58
19,998
edx/xblock-utils
xblockutils/helpers.py
child_isinstance
def child_isinstance(block, child_id, block_class_or_mixin): """ Efficiently check if a child of an XBlock is an instance of the given class. Arguments: block -- the parent (or ancestor) of the child block in question child_id -- the usage key of the child block we are wondering about block_class_or_mixin -- We return true if block's child indentified by child_id is an instance of this. This method is equivalent to isinstance(block.runtime.get_block(child_id), block_class_or_mixin) but is far more efficient, as it avoids the need to instantiate the child. """ def_id = block.runtime.id_reader.get_definition_id(child_id) type_name = block.runtime.id_reader.get_block_type(def_id) child_class = block.runtime.load_block_type(type_name) return issubclass(child_class, block_class_or_mixin)
python
def child_isinstance(block, child_id, block_class_or_mixin): def_id = block.runtime.id_reader.get_definition_id(child_id) type_name = block.runtime.id_reader.get_block_type(def_id) child_class = block.runtime.load_block_type(type_name) return issubclass(child_class, block_class_or_mixin)
[ "def", "child_isinstance", "(", "block", ",", "child_id", ",", "block_class_or_mixin", ")", ":", "def_id", "=", "block", ".", "runtime", ".", "id_reader", ".", "get_definition_id", "(", "child_id", ")", "type_name", "=", "block", ".", "runtime", ".", "id_reade...
Efficiently check if a child of an XBlock is an instance of the given class. Arguments: block -- the parent (or ancestor) of the child block in question child_id -- the usage key of the child block we are wondering about block_class_or_mixin -- We return true if block's child indentified by child_id is an instance of this. This method is equivalent to isinstance(block.runtime.get_block(child_id), block_class_or_mixin) but is far more efficient, as it avoids the need to instantiate the child.
[ "Efficiently", "check", "if", "a", "child", "of", "an", "XBlock", "is", "an", "instance", "of", "the", "given", "class", "." ]
2960666907d3eea1ed312fa87d811e78cd043702
https://github.com/edx/xblock-utils/blob/2960666907d3eea1ed312fa87d811e78cd043702/xblockutils/helpers.py#L6-L25
19,999
Hrabal/TemPy
tempy/elements.py
Tag.attr
def attr(self, *args, **kwargs): """Add an attribute to the element""" kwargs.update({k: bool for k in args}) for key, value in kwargs.items(): if key == "klass": self.attrs["klass"].update(value.split()) elif key == "style": if isinstance(value, str): splitted = iter(re.split(";|:", value)) value = dict(zip(splitted, splitted)) self.attrs["style"].update(value) else: self.attrs[key] = value self._stable = False return self
python
def attr(self, *args, **kwargs): kwargs.update({k: bool for k in args}) for key, value in kwargs.items(): if key == "klass": self.attrs["klass"].update(value.split()) elif key == "style": if isinstance(value, str): splitted = iter(re.split(";|:", value)) value = dict(zip(splitted, splitted)) self.attrs["style"].update(value) else: self.attrs[key] = value self._stable = False return self
[ "def", "attr", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "{", "k", ":", "bool", "for", "k", "in", "args", "}", ")", "for", "key", ",", "value", "in", "kwargs", ".", "items", "(", ")", ":",...
Add an attribute to the element
[ "Add", "an", "attribute", "to", "the", "element" ]
7d229b73e2ce3ccbb8254deae05c1f758f626ed6
https://github.com/Hrabal/TemPy/blob/7d229b73e2ce3ccbb8254deae05c1f758f626ed6/tempy/elements.py#L72-L86