repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
loli/medpy
medpy/features/texture.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/features/texture.py#L268-L280
def local_maxima(vector,min_distance = 4, brd_mode = "wrap"): """ Internal finder for local maxima . Returns UNSORTED indices of maxima in input vector. """ fits = gaussian_filter(numpy.asarray(vector,dtype=numpy.float32),1., mode=brd_mode) for ii in range(len(fits)): if fits[ii] == fits[ii-1]: fits[ii-1] = 0.0 maxfits = maximum_filter(fits, size=min_distance, mode=brd_mode) maxima_mask = fits == maxfits maximum = numpy.transpose(maxima_mask.nonzero()) return numpy.asarray(maximum)
[ "def", "local_maxima", "(", "vector", ",", "min_distance", "=", "4", ",", "brd_mode", "=", "\"wrap\"", ")", ":", "fits", "=", "gaussian_filter", "(", "numpy", ".", "asarray", "(", "vector", ",", "dtype", "=", "numpy", ".", "float32", ")", ",", "1.", ",...
Internal finder for local maxima . Returns UNSORTED indices of maxima in input vector.
[ "Internal", "finder", "for", "local", "maxima", ".", "Returns", "UNSORTED", "indices", "of", "maxima", "in", "input", "vector", "." ]
python
train
genialis/resolwe
resolwe/flow/managers/listener.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/listener.py#L513-L538
def handle_abort(self, obj): """Handle an incoming ``Data`` abort processing request. .. IMPORTANT:: This only makes manager's state consistent and doesn't affect Data object in any way. Any changes to the Data must be applied over ``handle_update`` method. :param obj: The Channels message object. Command object format: .. code-block:: none { 'command': 'abort', 'data_id': [id of the :class:`~resolwe.flow.models.Data` object this command was triggered by], } """ async_to_sync(consumer.send_event)({ WorkerProtocol.COMMAND: WorkerProtocol.ABORT, WorkerProtocol.DATA_ID: obj[ExecutorProtocol.DATA_ID], WorkerProtocol.FINISH_COMMUNICATE_EXTRA: { 'executor': getattr(settings, 'FLOW_EXECUTOR', {}).get('NAME', 'resolwe.flow.executors.local'), }, })
[ "def", "handle_abort", "(", "self", ",", "obj", ")", ":", "async_to_sync", "(", "consumer", ".", "send_event", ")", "(", "{", "WorkerProtocol", ".", "COMMAND", ":", "WorkerProtocol", ".", "ABORT", ",", "WorkerProtocol", ".", "DATA_ID", ":", "obj", "[", "Ex...
Handle an incoming ``Data`` abort processing request. .. IMPORTANT:: This only makes manager's state consistent and doesn't affect Data object in any way. Any changes to the Data must be applied over ``handle_update`` method. :param obj: The Channels message object. Command object format: .. code-block:: none { 'command': 'abort', 'data_id': [id of the :class:`~resolwe.flow.models.Data` object this command was triggered by], }
[ "Handle", "an", "incoming", "Data", "abort", "processing", "request", "." ]
python
train
gwastro/pycbc
pycbc/workflow/pegasus_workflow.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/pegasus_workflow.py#L483-L496
def from_path(cls, path): """Takes a path and returns a File object with the path as the PFN.""" urlparts = urlparse.urlsplit(path) site = 'nonlocal' if (urlparts.scheme == '' or urlparts.scheme == 'file'): if os.path.isfile(urlparts.path): path = os.path.abspath(urlparts.path) path = urlparse.urljoin('file:', urllib.pathname2url(path)) site = 'local' fil = File(os.path.basename(path)) fil.PFN(path, site) return fil
[ "def", "from_path", "(", "cls", ",", "path", ")", ":", "urlparts", "=", "urlparse", ".", "urlsplit", "(", "path", ")", "site", "=", "'nonlocal'", "if", "(", "urlparts", ".", "scheme", "==", "''", "or", "urlparts", ".", "scheme", "==", "'file'", ")", ...
Takes a path and returns a File object with the path as the PFN.
[ "Takes", "a", "path", "and", "returns", "a", "File", "object", "with", "the", "path", "as", "the", "PFN", "." ]
python
train
recruit-tech/summpy
summpy/tools.py
https://github.com/recruit-tech/summpy/blob/b246bd111aa10a8ea11a0aff8c9fce891f52cc58/summpy/tools.py#L25-L57
def sent_splitter_ja(text, delimiters=set(u'。.?!\n\r'), parenthesis=u'()「」『』“”'): ''' Args: text: unicode string that contains multiple Japanese sentences. delimiters: set() of sentence delimiter characters. parenthesis: to be checked its correspondence. Returns: generator that yields sentences. ''' paren_chars = set(parenthesis) close2open = dict(zip(parenthesis[1::2], parenthesis[0::2])) pstack = [] buff = [] for i, c in enumerate(text): c_next = text[i+1] if i+1 < len(text) else None # check correspondence of parenthesis if c in paren_chars: if c in close2open: # close if len(pstack) > 0 and pstack[-1] == close2open[c]: pstack.pop() else: # open pstack.append(c) buff.append(c) if c in delimiters: if len(pstack) == 0 and c_next not in delimiters: yield ''.join(buff) buff = [] if len(buff) > 0: yield ''.join(buff)
[ "def", "sent_splitter_ja", "(", "text", ",", "delimiters", "=", "set", "(", "u'。.?!\\n\\r'),", "", "", "parenthesis", "=", "u'()「」『』“”'):", "", "", "paren_chars", "=", "set", "(", "parenthesis", ")", "close2open", "=", "dict", "(", "zip", "(", "parenthesis",...
Args: text: unicode string that contains multiple Japanese sentences. delimiters: set() of sentence delimiter characters. parenthesis: to be checked its correspondence. Returns: generator that yields sentences.
[ "Args", ":", "text", ":", "unicode", "string", "that", "contains", "multiple", "Japanese", "sentences", ".", "delimiters", ":", "set", "()", "of", "sentence", "delimiter", "characters", ".", "parenthesis", ":", "to", "be", "checked", "its", "correspondence", "...
python
valid
Vital-Fernandez/dazer
bin/lib/Astro_Libraries/f2n.py
https://github.com/Vital-Fernandez/dazer/blob/3c9ae8ae6d40ea33f22cc20dc11365d6d6e65244/bin/lib/Astro_Libraries/f2n.py#L633-L654
def defaultcolour(self, colour): """ Auxiliary method to choose a default colour. Give me a user provided colour : if it is None, I change it to the default colour, respecting negative. Plus, if the image is in RGB mode and you give me 128 for a gray, I translate this to the expected (128, 128, 128) ... """ if colour == None: if self.negative == True: if self.pilimage.mode == "L" : return 0 else : return (0, 0, 0) else : if self.pilimage.mode == "L" : return 255 else : return (255, 255, 255) else : if self.pilimage.mode == "RGB" and type(colour) == type(0): return (colour, colour, colour) else : return colour
[ "def", "defaultcolour", "(", "self", ",", "colour", ")", ":", "if", "colour", "==", "None", ":", "if", "self", ".", "negative", "==", "True", ":", "if", "self", ".", "pilimage", ".", "mode", "==", "\"L\"", ":", "return", "0", "else", ":", "return", ...
Auxiliary method to choose a default colour. Give me a user provided colour : if it is None, I change it to the default colour, respecting negative. Plus, if the image is in RGB mode and you give me 128 for a gray, I translate this to the expected (128, 128, 128) ...
[ "Auxiliary", "method", "to", "choose", "a", "default", "colour", ".", "Give", "me", "a", "user", "provided", "colour", ":", "if", "it", "is", "None", "I", "change", "it", "to", "the", "default", "colour", "respecting", "negative", ".", "Plus", "if", "the...
python
train
taizilongxu/douban.fm
doubanfm/views/history_view.py
https://github.com/taizilongxu/douban.fm/blob/d65126d3bd3e12d8a7109137caff8da0efc22b2f/doubanfm/views/history_view.py#L89-L117
def run(self): '''界面执行程序''' while True: self.display() c = getch.getch() if c == self.KEYS['UP'] or c == 'A' and self.markline != 1: self.updown(-1) elif c == self.KEYS['DOWN'] or c == 'B': self.updown(1) elif c == self.KEYS['QUIT']: self.win.state = 0 break elif c == ' ': self.playsong() elif c == self.KEYS['TOP']: # g键返回顶部 self.markline = 1 self.topline = 0 elif c == self.KEYS['BOTTOM']: # G键返回底部 if len(self.lines) < self.screen_height: self.markline = len(self.lines) - 1 else: self.markline = self.screen_height self.topline = len(self.lines) - self.screen_height - 1 elif c == 'h' or c == 'D': self.state -= 1 if self.state != 0 else -2 self.get_lines() elif c == 'l' or c == 'C': self.state += 1 if self.state != 2 else -2 self.get_lines()
[ "def", "run", "(", "self", ")", ":", "while", "True", ":", "self", ".", "display", "(", ")", "c", "=", "getch", ".", "getch", "(", ")", "if", "c", "==", "self", ".", "KEYS", "[", "'UP'", "]", "or", "c", "==", "'A'", "and", "self", ".", "markl...
界面执行程序
[ "界面执行程序" ]
python
train
StackStorm/pybind
pybind/nos/v6_0_2f/rbridge_id/ip/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/rbridge_id/ip/__init__.py#L310-L331
def _set_rtm_config(self, v, load=False): """ Setter method for rtm_config, mapped from YANG variable /rbridge_id/ip/rtm_config (container) If this variable is read-only (config: false) in the source YANG file, then _set_rtm_config is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_rtm_config() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=rtm_config.rtm_config, is_container='container', presence=False, yang_name="rtm-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'rtm-config'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """rtm_config must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=rtm_config.rtm_config, is_container='container', presence=False, yang_name="rtm-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'rtm-config'}}, namespace='urn:brocade.com:mgmt:brocade-rtm', defining_module='brocade-rtm', yang_type='container', is_config=True)""", }) self.__rtm_config = t if hasattr(self, '_set'): self._set()
[ "def", "_set_rtm_config", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "bas...
Setter method for rtm_config, mapped from YANG variable /rbridge_id/ip/rtm_config (container) If this variable is read-only (config: false) in the source YANG file, then _set_rtm_config is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_rtm_config() directly.
[ "Setter", "method", "for", "rtm_config", "mapped", "from", "YANG", "variable", "/", "rbridge_id", "/", "ip", "/", "rtm_config", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "so...
python
train
Microsoft/nni
tools/nni_cmd/command_utils.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/tools/nni_cmd/command_utils.py#L38-L45
def install_package_command(package_name): '''install python package from pip''' #TODO refactor python logic if sys.platform == "win32": cmds = 'python -m pip install --user {0}'.format(package_name) else: cmds = 'python3 -m pip install --user {0}'.format(package_name) call(cmds, shell=True)
[ "def", "install_package_command", "(", "package_name", ")", ":", "#TODO refactor python logic", "if", "sys", ".", "platform", "==", "\"win32\"", ":", "cmds", "=", "'python -m pip install --user {0}'", ".", "format", "(", "package_name", ")", "else", ":", "cmds", "="...
install python package from pip
[ "install", "python", "package", "from", "pip" ]
python
train
mila-iqia/picklable-itertools
picklable_itertools/range.py
https://github.com/mila-iqia/picklable-itertools/blob/e00238867875df0258cf4f83f528d846e7c1afc4/picklable_itertools/range.py#L53-L64
def count(self, i): """rangeobject.count(value) -> integer -- return number of occurrences of value """ if self._stop > self._start and self._step > 0: return int(self._start <= i < self._stop and (i - self._start) % self._step == 0) elif self._stop < self._start and self._step < 0: return int(self._start >= i > self._stop and (i - self._start) % self._step == 0) else: return False
[ "def", "count", "(", "self", ",", "i", ")", ":", "if", "self", ".", "_stop", ">", "self", ".", "_start", "and", "self", ".", "_step", ">", "0", ":", "return", "int", "(", "self", ".", "_start", "<=", "i", "<", "self", ".", "_stop", "and", "(", ...
rangeobject.count(value) -> integer -- return number of occurrences of value
[ "rangeobject", ".", "count", "(", "value", ")", "-", ">", "integer", "--", "return", "number", "of", "occurrences", "of", "value" ]
python
train
BerkeleyAutomation/autolab_core
autolab_core/csv_model.py
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/csv_model.py#L379-L438
def load(full_filename): """Load a .csv file into a CSVModel. Parameters ---------- full_filename : :obj:`str` The file path to a .csv file. Returns ------- :obj:`CSVModel` The CSVModel initialized with the data in the given file. Raises ------ Excetpion If the CSV file does not exist or is malformed. """ with open(full_filename, 'r') as file: reader = csv.DictReader(file) headers = reader.fieldnames if '_uid' not in headers or '_default' not in headers: raise Exception("Malformed CSVModel file!") all_rows = [row for row in reader] types = all_rows[0] table = [types] default_entry = table[0]['_default'] for i in range(1, len(all_rows)): raw_row = all_rows[i] row = {} for column_name in headers: if raw_row[column_name] != default_entry and column_name != '': if types[column_name] == 'bool': row[column_name] = CSVModel._str_to_bool(raw_row[column_name]) else: try: row[column_name] = CSVModel._KNOWN_TYPES_MAP[types[column_name]](raw_row[column_name]) except: logging.error('{}, {}, {}'.format(column_name, types[column_name], raw_row[column_name])) row[column_name] = CSVModel._KNOWN_TYPES_MAP[types[column_name]](bool(raw_row[column_name])) else: row[column_name] = default_entry table.append(row) if len(table) == 1: next_valid_uid = 0 else: next_valid_uid = int(table[-1]['_uid']) + 1 headers_init = headers[1:-1] types_init = [types[column_name] for column_name in headers_init] headers_types_list = zip(headers_init, types_init) csv_model = CSVModel(full_filename, headers_types_list, default_entry=default_entry) csv_model._uid = next_valid_uid csv_model._table = table csv_model._save() return csv_model
[ "def", "load", "(", "full_filename", ")", ":", "with", "open", "(", "full_filename", ",", "'r'", ")", "as", "file", ":", "reader", "=", "csv", ".", "DictReader", "(", "file", ")", "headers", "=", "reader", ".", "fieldnames", "if", "'_uid'", "not", "in"...
Load a .csv file into a CSVModel. Parameters ---------- full_filename : :obj:`str` The file path to a .csv file. Returns ------- :obj:`CSVModel` The CSVModel initialized with the data in the given file. Raises ------ Excetpion If the CSV file does not exist or is malformed.
[ "Load", "a", ".", "csv", "file", "into", "a", "CSVModel", "." ]
python
train
odlgroup/odl
odl/trafos/backends/pywt_bindings.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/trafos/backends/pywt_bindings.py#L46-L51
def pywt_wavelet(wavelet): """Convert ``wavelet`` to a `pywt.Wavelet` instance.""" if isinstance(wavelet, pywt.Wavelet): return wavelet else: return pywt.Wavelet(wavelet)
[ "def", "pywt_wavelet", "(", "wavelet", ")", ":", "if", "isinstance", "(", "wavelet", ",", "pywt", ".", "Wavelet", ")", ":", "return", "wavelet", "else", ":", "return", "pywt", ".", "Wavelet", "(", "wavelet", ")" ]
Convert ``wavelet`` to a `pywt.Wavelet` instance.
[ "Convert", "wavelet", "to", "a", "pywt", ".", "Wavelet", "instance", "." ]
python
train
tadashi-aikawa/owlmixin
owlmixin/__init__.py
https://github.com/tadashi-aikawa/owlmixin/blob/7c4a042c3008abddc56a8e8e55ae930d276071f5/owlmixin/__init__.py#L500-L532
def from_yaml(cls, data: str, force_snake_case=True, force_cast: bool=False, restrict: bool=True) -> T: """From yaml string to instance :param data: Yaml string :param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True :param force_cast: Cast forcibly if True :param restrict: Prohibit extra parameters if True :return: Instance Usage: >>> from owlmixin.samples import Human >>> human: Human = Human.from_yaml(''' ... id: 1 ... name: Tom ... favorites: ... - name: Apple ... names_by_lang: ... en: Apple ... de: Apfel ... - name: Orange ... ''') >>> human.id 1 >>> human.name 'Tom' >>> human.favorites[0].names_by_lang.get()["de"] 'Apfel' """ return cls.from_dict(util.load_yaml(data), force_snake_case=force_snake_case, force_cast=force_cast, restrict=restrict)
[ "def", "from_yaml", "(", "cls", ",", "data", ":", "str", ",", "force_snake_case", "=", "True", ",", "force_cast", ":", "bool", "=", "False", ",", "restrict", ":", "bool", "=", "True", ")", "->", "T", ":", "return", "cls", ".", "from_dict", "(", "util...
From yaml string to instance :param data: Yaml string :param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True :param force_cast: Cast forcibly if True :param restrict: Prohibit extra parameters if True :return: Instance Usage: >>> from owlmixin.samples import Human >>> human: Human = Human.from_yaml(''' ... id: 1 ... name: Tom ... favorites: ... - name: Apple ... names_by_lang: ... en: Apple ... de: Apfel ... - name: Orange ... ''') >>> human.id 1 >>> human.name 'Tom' >>> human.favorites[0].names_by_lang.get()["de"] 'Apfel'
[ "From", "yaml", "string", "to", "instance" ]
python
train
Ezhil-Language-Foundation/open-tamil
tamil/numeral.py
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/tamil/numeral.py#L14-L214
def num2tamilstr( *args ): """ work till one lakh crore - i.e 1e5*1e7 = 1e12. turn number into a numeral, Indian style. Fractions upto 1e-30""" number = args[0] if len(args) < 2: filenames = [] else: filenames = args[1] if len(args) ==3: tensSpecial = args[2] else: tensSpecial='BASIC' if not any( filter( lambda T: isinstance( number, T), [str,unicode,int, long, float]) ) or isinstance(number,complex): raise Exception('num2tamilstr input has to be a long or integer or float') if float(number) > long(1e12): raise Exception('num2tamilstr input is too large') if float(number) < 0: return u"- "+num2tamilstr( -number ) units = (u'பூஜ்ஜியம்', u'ஒன்று', u'இரண்டு', u'மூன்று', u'நான்கு', u'ஐந்து', u'ஆறு', u'ஏழு', u'எட்டு', u'ஒன்பது', u'பத்து') # 0-10 units_suffix = (u'பூஜ்ஜியம்', u'தொன்று', u'திரண்டு', u'மூன்று', u'நான்கு', u'தைந்து', u'தாறு', u'தேழு', u'தெட்டு', u'தொன்பது', u'பத்து') # 0-10 units_suffix_nine = (u'பூஜ்ஜியம்', u'றொன்று', u'றிரண்டு', u'மூன்று', u'நான்கு', u'றைந்து', u'றாறு', u'றேழு', u'றெட்டு', u'றொன்பது', u'பத்து') # 0-10 tween = [1.0,2.0,5.0,6.0,7.0,8.0,9.0] teens = (u'பதினொன்று', u'பனிரண்டு', u'பதிமூன்று', u'பதினான்கு', u'பதினைந்து',u'பதினாறு', u'பதினேழு', u'பதினெட்டு', u'பத்தொன்பது') # 11-19 tens = (u'பத்து', u'இருபது', u'முப்பது', u'நாற்பது', u'ஐம்பது',u'அறுபது', u'எழுபது', u'எண்பது', u'தொன்னூறு') # 10-90 tens_full_prefix = (u'இருபத்து', u'முப்பத்து', u'நாற்பத்து', u'ஐம்பத்து', u'அறுபத்து', u'எழுபத்து', u'எண்பத்து', u'தொன்னூற்று') # 10+-90+ tens_prefix = (u'இருபத்', u'முப்பத்', u'நாற்பத்', u'ஐம்பத்', u'அறுபத்', u'எழுபத்', u'எண்பத்', u'தொன்னூற்') # 10+-90+ hundreds = ( u'நூறு', u'இருநூறு', u'முன்னூறு', u'நாநூறு',u'ஐநூறு', u'அறுநூறு', u'எழுநூறு', u'எண்ணூறு', u'தொள்ளாயிரம்') #100 - 900 hundreds_suffix = (u'நூற்றி', u'இருநூற்று', u'முன்னூற்று', u'நாநூற்று', u'ஐநூற்று', u'அறுநூற்று', u'எழுநூற்று', u'எண்ணூற்று',u'தொள்ளாயிரத்து') #100+ - 900+ one_thousand_prefix = u'ஓர்' thousands = (u'ஆயிரம்',u'ஆயிரத்து') one_prefix = u'ஒரு' lakh = (u'இலட்சம்',u'இலட்சத்து') 
crore = (u'கோடி',u'கோடியே') pulli = u'புள்ளி' n_one = 1.0 n_ten = 10.0 n_hundred = 100.0 n_thousand = 1000.0 n_lakh = 100.0*n_thousand n_crore = (100.0*n_lakh) # handle fractional parts if float(number) > 0.0 and float(number) < 1.0: rval = [] rval.append(pulli) filenames.append( 'pulli' ) number_str = str(number).replace('0.','') for digit in number_str: filenames.append( "units_%d"%int(digit)) rval.append( units[int(digit)] ) return u' '.join(rval) if isinstance(number,str) or isinstance(number,unicode): result = u"" number = number.strip() assert(len(args) == 1) assert(len(number) > 0) is_negative = number[0] == "-" if is_negative: number = number[1:] frac_part = u"" if number.find(".") >= 0: rat_part,frac_part = number.split(".") frac_part = num2tamilstr(u"0."+frac_part) else: rat_part = number if len(rat_part) > 0: result = num2tamilstr(float(rat_part)) result = result +u" "+ frac_part return is_negative and "-" + result.strip() or result.strip() suffix_base = { n_crore: crore, n_lakh : lakh, n_thousand : thousands} suffix_file_map = {n_crore: "crore", n_lakh : "lakh", n_thousand : "thousands"} file_map = {n_crore :["one_prefix","crore_0"], n_lakh : ["one_prefix","lakh_0"], n_thousand : ["one_thousand_prefix", "thousands_0"], n_hundred : ["hundreds_0"], #special n_ten : ["units_10"], n_one : ["units_1"]} num_map = {n_crore : [one_prefix,crore[0]], n_lakh : [one_prefix,lakh[0]], n_thousand : [one_thousand_prefix, thousands[0]], n_hundred : [hundreds[0]], #special n_ten : [units[10]], n_one : [units[1]]} all_bases = [n_crore, n_lakh, n_thousand, n_hundred, n_ten,n_one] allowed_bases = list(filter( lambda base: number >= base, all_bases )) if len(allowed_bases) >= 1: n_base = allowed_bases[0] if number == n_base: if tensSpecial=='BASIC': filenames.extend(file_map[n_base]) return u" ".join(num_map[n_base]) elif tensSpecial=='NINES': filenames.extend(file_map[n_base]) return units_suffix_nine[long(number%10)] else: filenames.extend(file_map[n_base]) return 
units_suffix[long(number%10)] quotient_number = long( number/n_base ) residue_number = number - n_base*quotient_number #print number, n_base, quotient_number, residue_number, tensSpecial if n_base == n_one: if isinstance(number,float): int_part = long(number%10) frac = number - float(int_part) filenames.append("units_%d"%int_part) if abs(frac) > 1e-30: if tensSpecial=='BASIC': return units[int_part]+u' ' + num2tamilstr(frac,filenames) elif tensSpecial=='NINES': return units_suffix_nine[int_part]+u' ' + num2tamilstr(frac,filenames) else: return units_suffix[int_part]+u' ' + num2tamilstr(frac,filenames) else: if tensSpecial=='BASIC': return units[int_part] elif tensSpecial=='NINES': return units_suffix_nine[int_part] else: return units_suffix[int_part] else: if tensSpecial=='BASIC': filenames.append("units_%d"%number) return units[number] elif tensSpecial=='NINES': filenames.append("units_%d"%number) return units_suffix_nine[number] else: filenames.append("units_%d"%number) return units_suffix[number] elif n_base == n_ten: if residue_number < 1.0: filenames.append("tens_%d"%(quotient_number-1)) if residue_number == 0.0: return tens[quotient_number-1] #else: //seems not reachable. 
# numeral = tens[quotient_number-1] elif number < 20: filenames.append("teens_%d"%(number-10)) residue_number = math.fmod(number,1) teen_number = int(math.floor(number - 10)) if residue_number > 1e-30: return teens[teen_number-1] +u' ' + num2tamilstr(residue_number,filenames) else: return teens[teen_number-1]+u' ' if residue_number < 1.0: filenames.append( "tens_%d"%(quotient_number-1) ) numeral = tens[quotient_number-1]+u' ' else: if residue_number in tween: filenames.append( "tens_prefix_%d"%(quotient_number-2) ) numeral = tens_prefix[quotient_number-2] tensSpecial='SPECIAL' if (quotient_number==9): tensSpecial = 'NINES' else: filenames.append( "tens_prefix_%d"%(quotient_number-2) ) numeral = tens_full_prefix[quotient_number-2]+u' ' elif n_base == n_hundred: if residue_number == 0: filenames.append("hundreds_%d"%(quotient_number-1)) return hundreds[quotient_number-1]+u' ' if residue_number < 1.0: filenames.append( "hundreds_%d"%(quotient_number-1) ) numeral = hundreds[quotient_number-1]+u' ' else: filenames.append("hundreds_suffix_%d"%(quotient_number-1)) numeral = hundreds_suffix[quotient_number-1]+u' ' else: if ( quotient_number == 1 ): if n_base == n_thousand: filenames.append("one_thousand_prefix") numeral = one_thousand_prefix else: filenames.append("one_prefix") numeral = one_prefix else: numeral = num2tamilstr( quotient_number, filenames ) if n_base >= n_thousand: suffix = suffix_base[n_base][long(residue_number >= 1)] suffix_filename = "%s_%d"%(suffix_file_map[n_base],long(residue_number >= 1)) if residue_number == 0: filenames.append(suffix_filename) return numeral + u' ' + suffix+u' ' filenames.append(suffix_filename) numeral = numeral + u' ' + suffix+u' ' residue_numeral = num2tamilstr( residue_number, filenames, tensSpecial) #return numeral+u' '+residue_numeral return numeral+residue_numeral # number has to be zero filenames.append("units_0") return units[0]
[ "def", "num2tamilstr", "(", "*", "args", ")", ":", "number", "=", "args", "[", "0", "]", "if", "len", "(", "args", ")", "<", "2", ":", "filenames", "=", "[", "]", "else", ":", "filenames", "=", "args", "[", "1", "]", "if", "len", "(", "args", ...
work till one lakh crore - i.e 1e5*1e7 = 1e12. turn number into a numeral, Indian style. Fractions upto 1e-30
[ "work", "till", "one", "lakh", "crore", "-", "i", ".", "e", "1e5", "*", "1e7", "=", "1e12", ".", "turn", "number", "into", "a", "numeral", "Indian", "style", ".", "Fractions", "upto", "1e", "-", "30" ]
python
train
mozilla/crontabber
crontabber/base.py
https://github.com/mozilla/crontabber/blob/b510be349e71f165c1a9506db95bda0b88728f8b/crontabber/base.py#L139-L157
def convert_frequency(frequency): """return the number of seconds that a certain frequency string represents. For example: `1d` means 1 day which means 60 * 60 * 24 seconds. The recognized formats are: 10d : 10 days 3m : 3 minutes 12h : 12 hours """ number = int(re.findall('\d+', frequency)[0]) unit = re.findall('[^\d]+', frequency)[0] if unit == 'h': number *= 60 * 60 elif unit == 'm': number *= 60 elif unit == 'd': number *= 60 * 60 * 24 elif unit: raise FrequencyDefinitionError(unit) return number
[ "def", "convert_frequency", "(", "frequency", ")", ":", "number", "=", "int", "(", "re", ".", "findall", "(", "'\\d+'", ",", "frequency", ")", "[", "0", "]", ")", "unit", "=", "re", ".", "findall", "(", "'[^\\d]+'", ",", "frequency", ")", "[", "0", ...
return the number of seconds that a certain frequency string represents. For example: `1d` means 1 day which means 60 * 60 * 24 seconds. The recognized formats are: 10d : 10 days 3m : 3 minutes 12h : 12 hours
[ "return", "the", "number", "of", "seconds", "that", "a", "certain", "frequency", "string", "represents", ".", "For", "example", ":", "1d", "means", "1", "day", "which", "means", "60", "*", "60", "*", "24", "seconds", ".", "The", "recognized", "formats", ...
python
train
petl-developers/petl
petl/transform/selects.py
https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/transform/selects.py#L139-L143
def rowlenselect(table, n, complement=False): """Select rows of length `n`.""" where = lambda row: len(row) == n return select(table, where, complement=complement)
[ "def", "rowlenselect", "(", "table", ",", "n", ",", "complement", "=", "False", ")", ":", "where", "=", "lambda", "row", ":", "len", "(", "row", ")", "==", "n", "return", "select", "(", "table", ",", "where", ",", "complement", "=", "complement", ")"...
Select rows of length `n`.
[ "Select", "rows", "of", "length", "n", "." ]
python
train
uw-it-aca/uw-restclients-canvas
uw_canvas/courses.py
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/courses.py#L46-L51
def get_courses_in_account_by_sis_id(self, sis_account_id, params={}): """ Return a list of courses for the passed account SIS ID. """ return self.get_courses_in_account( self._sis_id(sis_account_id, sis_field="account"), params)
[ "def", "get_courses_in_account_by_sis_id", "(", "self", ",", "sis_account_id", ",", "params", "=", "{", "}", ")", ":", "return", "self", ".", "get_courses_in_account", "(", "self", ".", "_sis_id", "(", "sis_account_id", ",", "sis_field", "=", "\"account\"", ")",...
Return a list of courses for the passed account SIS ID.
[ "Return", "a", "list", "of", "courses", "for", "the", "passed", "account", "SIS", "ID", "." ]
python
test
xenadevel/PyXenaManager
xenamanager/api/xena_cli.py
https://github.com/xenadevel/PyXenaManager/blob/384ca265f73044b8a8b471f5dd7a6103fc54f4df/xenamanager/api/xena_cli.py#L58-L61
def send_command_return(self, obj, command, *arguments): """ Send command and wait for single line output. """ index_command = obj._build_index_command(command, *arguments) return obj._extract_return(command, self.chassis_list[obj.chassis].sendQuery(index_command))
[ "def", "send_command_return", "(", "self", ",", "obj", ",", "command", ",", "*", "arguments", ")", ":", "index_command", "=", "obj", ".", "_build_index_command", "(", "command", ",", "*", "arguments", ")", "return", "obj", ".", "_extract_return", "(", "comma...
Send command and wait for single line output.
[ "Send", "command", "and", "wait", "for", "single", "line", "output", "." ]
python
train
seperman/deepdiff
deepdiff/diff.py
https://github.com/seperman/deepdiff/blob/a66879190fadc671632f154c1fcb82f5c3cef800/deepdiff/diff.py#L491-L562
def __diff_iterable_with_deephash(self, level): """Diff of unhashable iterables. Only used when ignoring the order.""" t1_hashtable = self.__create_hashtable(level.t1, level) t2_hashtable = self.__create_hashtable(level.t2, level) t1_hashes = set(t1_hashtable.keys()) t2_hashes = set(t2_hashtable.keys()) hashes_added = t2_hashes - t1_hashes hashes_removed = t1_hashes - t2_hashes if self.report_repetition: for hash_value in hashes_added: for i in t2_hashtable[hash_value].indexes: change_level = level.branch_deeper( notpresent, t2_hashtable[hash_value].item, child_relationship_class=SubscriptableIterableRelationship, # TODO: that might be a lie! child_relationship_param=i ) # TODO: what is this value exactly? self.__report_result('iterable_item_added', change_level) for hash_value in hashes_removed: for i in t1_hashtable[hash_value].indexes: change_level = level.branch_deeper( t1_hashtable[hash_value].item, notpresent, child_relationship_class=SubscriptableIterableRelationship, # TODO: that might be a lie! child_relationship_param=i) self.__report_result('iterable_item_removed', change_level) items_intersect = t2_hashes.intersection(t1_hashes) for hash_value in items_intersect: t1_indexes = t1_hashtable[hash_value].indexes t2_indexes = t2_hashtable[hash_value].indexes t1_indexes_len = len(t1_indexes) t2_indexes_len = len(t2_indexes) if t1_indexes_len != t2_indexes_len: # this is a repetition change! # create "change" entry, keep current level untouched to handle further changes repetition_change_level = level.branch_deeper( t1_hashtable[hash_value].item, t2_hashtable[hash_value].item, # nb: those are equal! child_relationship_class=SubscriptableIterableRelationship, # TODO: that might be a lie! 
child_relationship_param=t1_hashtable[hash_value] .indexes[0]) repetition_change_level.additional['repetition'] = RemapDict( old_repeat=t1_indexes_len, new_repeat=t2_indexes_len, old_indexes=t1_indexes, new_indexes=t2_indexes) self.__report_result('repetition_change', repetition_change_level) else: for hash_value in hashes_added: change_level = level.branch_deeper( notpresent, t2_hashtable[hash_value].item, child_relationship_class=SubscriptableIterableRelationship, # TODO: that might be a lie! child_relationship_param=t2_hashtable[hash_value].indexes[ 0]) # TODO: what is this value exactly? self.__report_result('iterable_item_added', change_level) for hash_value in hashes_removed: change_level = level.branch_deeper( t1_hashtable[hash_value].item, notpresent, child_relationship_class=SubscriptableIterableRelationship, # TODO: that might be a lie! child_relationship_param=t1_hashtable[hash_value].indexes[ 0]) self.__report_result('iterable_item_removed', change_level)
[ "def", "__diff_iterable_with_deephash", "(", "self", ",", "level", ")", ":", "t1_hashtable", "=", "self", ".", "__create_hashtable", "(", "level", ".", "t1", ",", "level", ")", "t2_hashtable", "=", "self", ".", "__create_hashtable", "(", "level", ".", "t2", ...
Diff of unhashable iterables. Only used when ignoring the order.
[ "Diff", "of", "unhashable", "iterables", ".", "Only", "used", "when", "ignoring", "the", "order", "." ]
python
train
DataONEorg/d1_python
lib_client/src/d1_client/iter/objectlist.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_client/src/d1_client/iter/objectlist.py#L188-L213
def _loadMore(self, start=0, trys=0, validation=True): """Retrieves the next page of results.""" self._log.debug("Loading page starting from %d" % start) self._czero = start self._pageoffs = 0 try: pyxb.RequireValidWhenParsing(validation) self._object_list = self._client.listObjects( start=start, count=self._pagesize, fromDate=self._fromDate, nodeId=self._nodeId, ) except http.client.BadStatusLine as e: self._log.warning("Server responded with Bad Status Line. Retrying in 5sec") self._client.connection.close() if trys > 3: raise e trys += 1 self._loadMore(start, trys) except d1_common.types.exceptions.ServiceFailure as e: self._log.error(e) if trys > 3: raise e trys += 1 self._loadMore(start, trys, validation=False)
[ "def", "_loadMore", "(", "self", ",", "start", "=", "0", ",", "trys", "=", "0", ",", "validation", "=", "True", ")", ":", "self", ".", "_log", ".", "debug", "(", "\"Loading page starting from %d\"", "%", "start", ")", "self", ".", "_czero", "=", "start...
Retrieves the next page of results.
[ "Retrieves", "the", "next", "page", "of", "results", "." ]
python
train
ANTsX/ANTsPy
ants/viz/plot.py
https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/viz/plot.py#L91-L124
def plot_hist(image, threshold=0., fit_line=False, normfreq=True, ## plot label arguments title=None, grid=True, xlabel=None, ylabel=None, ## other plot arguments facecolor='green', alpha=0.75): """ Plot a histogram from an ANTsImage Arguments --------- image : ANTsImage image from which histogram will be created """ img_arr = image.numpy().flatten() img_arr = img_arr[np.abs(img_arr) > threshold] if normfreq != False: normfreq = 1. if normfreq == True else normfreq n, bins, patches = plt.hist(img_arr, 50, normed=normfreq, facecolor=facecolor, alpha=alpha) if fit_line: # add a 'best fit' line y = mlab.normpdf( bins, img_arr.mean(), img_arr.std()) l = plt.plot(bins, y, 'r--', linewidth=1) if xlabel is not None: plt.xlabel(xlabel) if ylabel is not None: plt.ylabel(ylabel) if title is not None: plt.title(title) plt.grid(grid) plt.show()
[ "def", "plot_hist", "(", "image", ",", "threshold", "=", "0.", ",", "fit_line", "=", "False", ",", "normfreq", "=", "True", ",", "## plot label arguments", "title", "=", "None", ",", "grid", "=", "True", ",", "xlabel", "=", "None", ",", "ylabel", "=", ...
Plot a histogram from an ANTsImage Arguments --------- image : ANTsImage image from which histogram will be created
[ "Plot", "a", "histogram", "from", "an", "ANTsImage" ]
python
train
stuaxo/vext
setup.py
https://github.com/stuaxo/vext/blob/fa98a21ecfbbc1c3d1b84085d69ec42defdd2f69/setup.py#L244-L273
def run(self): """ Need to find any pre-existing vext contained in dependent packages and install them example: you create a setup.py with install_requires["vext.gi"]: - vext.gi gets installed using bdist_egg - vext itself is now called with bdist_egg and we end up here Vext now needs to find and install .vext files in vext.gi [or any other files that depend on vext] :return: """ logger.debug("vext InstallLib [started]") # Find packages that depend on vext and check for .vext files... logger.debug("find_vext_files") vext_files = self.find_vext_files() logger.debug("manually_install_vext: ", vext_files) self.manually_install_vext(vext_files) logger.debug("enable vext") self.enable_vext() logger.debug("install_lib.run") install_lib.run(self) logger.debug("vext InstallLib [finished]")
[ "def", "run", "(", "self", ")", ":", "logger", ".", "debug", "(", "\"vext InstallLib [started]\"", ")", "# Find packages that depend on vext and check for .vext files...", "logger", ".", "debug", "(", "\"find_vext_files\"", ")", "vext_files", "=", "self", ".", "find_vex...
Need to find any pre-existing vext contained in dependent packages and install them example: you create a setup.py with install_requires["vext.gi"]: - vext.gi gets installed using bdist_egg - vext itself is now called with bdist_egg and we end up here Vext now needs to find and install .vext files in vext.gi [or any other files that depend on vext] :return:
[ "Need", "to", "find", "any", "pre", "-", "existing", "vext", "contained", "in", "dependent", "packages", "and", "install", "them" ]
python
train
Apitax/Apitax
apitax/api/controllers/migrations/scriptax_controller.py
https://github.com/Apitax/Apitax/blob/3883e45f17e01eba4edac9d1bba42f0e7a748682/apitax/api/controllers/migrations/scriptax_controller.py#L76-L102
def get_driver_script(name, name2=None): # noqa: E501 """Retrieve the contents of a script Retrieve the contents of a script # noqa: E501 :param name2: Get status of a driver with this name :type name2: str :param name: The script name. :type name: str :rtype: Response """ response = errorIfUnauthorized(role='user') if response: return response else: response = ApitaxResponse() print(name) print(name2) driver: Driver = LoadedDrivers.getDriver(name2) response.body.add({'content': driver.getDriverScript(name)}) return Response(status=200, body=response.getResponseBody())
[ "def", "get_driver_script", "(", "name", ",", "name2", "=", "None", ")", ":", "# noqa: E501", "response", "=", "errorIfUnauthorized", "(", "role", "=", "'user'", ")", "if", "response", ":", "return", "response", "else", ":", "response", "=", "ApitaxResponse", ...
Retrieve the contents of a script Retrieve the contents of a script # noqa: E501 :param name2: Get status of a driver with this name :type name2: str :param name: The script name. :type name: str :rtype: Response
[ "Retrieve", "the", "contents", "of", "a", "script" ]
python
train
saltstack/salt
salt/states/alternatives.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/alternatives.py#L162-L190
def auto(name): ''' .. versionadded:: 0.17.0 Instruct alternatives to use the highest priority path for <name> name is the master name for this link group (e.g. pager) ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} display = __salt__['alternatives.display'](name) line = display.splitlines()[0] if line.endswith(' auto mode'): ret['comment'] = '{0} already in auto mode'.format(name) return ret if __opts__['test']: ret['comment'] = '{0} will be put in auto mode'.format(name) ret['result'] = None return ret ret['changes']['result'] = __salt__['alternatives.auto'](name) return ret
[ "def", "auto", "(", "name", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "display", "=", "__salt__", "[", "'alternatives.display'", "]", "(", "name"...
.. versionadded:: 0.17.0 Instruct alternatives to use the highest priority path for <name> name is the master name for this link group (e.g. pager)
[ "..", "versionadded", "::", "0", ".", "17", ".", "0" ]
python
train
ralphje/imagemounter
imagemounter/volume_system.py
https://github.com/ralphje/imagemounter/blob/86213781c366cad65096447d91f522f0a3fb4b93/imagemounter/volume_system.py#L411-L482
def detect(self, volume_system, vstype='detect'): """Finds and mounts all volumes based on mmls.""" try: cmd = ['mmls'] if volume_system.parent.offset: cmd.extend(['-o', str(volume_system.parent.offset // volume_system.disk.block_size)]) if vstype in ('dos', 'mac', 'bsd', 'sun', 'gpt'): cmd.extend(['-t', vstype]) cmd.append(volume_system.parent.get_raw_path()) output = _util.check_output_(cmd, stderr=subprocess.STDOUT) volume_system.volume_source = 'multi' except Exception as e: # some bug in sleuthkit makes detection sometimes difficult, so we hack around it: if hasattr(e, 'output') and "(GPT or DOS at 0)" in e.output.decode() and vstype != 'gpt': volume_system.vstype = 'gpt' # noinspection PyBroadException try: logger.warning("Error in retrieving volume info: mmls couldn't decide between GPT and DOS, " "choosing GPT for you. Use --vstype=dos to force DOS.", exc_info=True) cmd = ['mmls', '-t', 'gpt', self.parent.get_raw_path()] output = _util.check_output_(cmd, stderr=subprocess.STDOUT) volume_system.volume_source = 'multi' except Exception as e: logger.exception("Failed executing mmls command") raise SubsystemError(e) else: logger.exception("Failed executing mmls command") raise SubsystemError(e) output = output.split("Description", 1)[-1] for line in output.splitlines(): if not line: continue # noinspection PyBroadException try: values = line.split(None, 5) # sometimes there are only 5 elements available description = '' index, slot, start, end, length = values[0:5] if len(values) > 5: description = values[5] volume = volume_system._make_subvolume( index=self._format_index(volume_system, int(index[:-1])), offset=int(start) * volume_system.disk.block_size, size=int(length) * volume_system.disk.block_size ) volume.info['fsdescription'] = description except Exception: logger.exception("Error while parsing mmls output") continue if slot.lower() == 'meta': volume.flag = 'meta' logger.info("Found meta volume: block offset: {0}, length: {1}".format(start, length)) elif 
slot.lower().startswith('-----'): volume.flag = 'unalloc' logger.info("Found unallocated space: block offset: {0}, length: {1}".format(start, length)) else: volume.flag = 'alloc' if ":" in slot: volume.slot = _util.determine_slot(*slot.split(':')) else: volume.slot = _util.determine_slot(-1, slot) volume_system._assign_disktype_data(volume) logger.info("Found allocated {2}: block offset: {0}, length: {1} ".format(start, length, volume.info['fsdescription'])) yield volume
[ "def", "detect", "(", "self", ",", "volume_system", ",", "vstype", "=", "'detect'", ")", ":", "try", ":", "cmd", "=", "[", "'mmls'", "]", "if", "volume_system", ".", "parent", ".", "offset", ":", "cmd", ".", "extend", "(", "[", "'-o'", ",", "str", ...
Finds and mounts all volumes based on mmls.
[ "Finds", "and", "mounts", "all", "volumes", "based", "on", "mmls", "." ]
python
train
aquatix/python-utilkit
utilkit/stringutil.py
https://github.com/aquatix/python-utilkit/blob/1b4a4175381d2175592208619315f399610f915c/utilkit/stringutil.py#L23-L33
def safe_str(obj): """ return the byte string representation of obj """ try: return str(obj) except UnicodeEncodeError: # obj is unicode try: return unicode(obj).encode('unicode_escape') # noqa for undefined-variable except NameError: # This is Python 3, just return the obj as it's already unicode return obj
[ "def", "safe_str", "(", "obj", ")", ":", "try", ":", "return", "str", "(", "obj", ")", "except", "UnicodeEncodeError", ":", "# obj is unicode", "try", ":", "return", "unicode", "(", "obj", ")", ".", "encode", "(", "'unicode_escape'", ")", "# noqa for undefin...
return the byte string representation of obj
[ "return", "the", "byte", "string", "representation", "of", "obj" ]
python
train
Ceasar/twosheds
twosheds/completer.py
https://github.com/Ceasar/twosheds/blob/55b0a207e3a06b85e9a9567069b3822a651501a7/twosheds/completer.py#L142-L153
def gen_filename_completions(self, word, filenames): """Generate a sequence of filenames that match ``word``. :param word: the word to complete """ if not word: return filenames else: trie = pygtrie.CharTrie() for filename in filenames: trie[filename] = filename return trie.iterkeys(prefix=word)
[ "def", "gen_filename_completions", "(", "self", ",", "word", ",", "filenames", ")", ":", "if", "not", "word", ":", "return", "filenames", "else", ":", "trie", "=", "pygtrie", ".", "CharTrie", "(", ")", "for", "filename", "in", "filenames", ":", "trie", "...
Generate a sequence of filenames that match ``word``. :param word: the word to complete
[ "Generate", "a", "sequence", "of", "filenames", "that", "match", "word", "." ]
python
train
tanghaibao/goatools
goatools/obo_parser.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/obo_parser.py#L213-L219
def get_all_upper(self): """Return all parent GO IDs through both 'is_a' and all relationships.""" all_upper = set() for upper in self.get_goterms_upper(): all_upper.add(upper.item_id) all_upper |= upper.get_all_upper() return all_upper
[ "def", "get_all_upper", "(", "self", ")", ":", "all_upper", "=", "set", "(", ")", "for", "upper", "in", "self", ".", "get_goterms_upper", "(", ")", ":", "all_upper", ".", "add", "(", "upper", ".", "item_id", ")", "all_upper", "|=", "upper", ".", "get_a...
Return all parent GO IDs through both 'is_a' and all relationships.
[ "Return", "all", "parent", "GO", "IDs", "through", "both", "is_a", "and", "all", "relationships", "." ]
python
train
facelessuser/backrefs
backrefs/_bre_parse.py
https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/backrefs/_bre_parse.py#L1276-L1282
def get_single_stack(self): """Get the correct single stack item to use.""" single = None while self.single_stack: single = self.single_stack.pop() return single
[ "def", "get_single_stack", "(", "self", ")", ":", "single", "=", "None", "while", "self", ".", "single_stack", ":", "single", "=", "self", ".", "single_stack", ".", "pop", "(", ")", "return", "single" ]
Get the correct single stack item to use.
[ "Get", "the", "correct", "single", "stack", "item", "to", "use", "." ]
python
train
J535D165/recordlinkage
recordlinkage/classifiers.py
https://github.com/J535D165/recordlinkage/blob/87a5f4af904e0834047cd07ff1c70146b1e6d693/recordlinkage/classifiers.py#L281-L287
def _initialise_classifier(self, comparison_vectors): """Set the centers of the clusters.""" # Set the start point of the classifier. self.kernel.init = numpy.array( [[0.05] * len(list(comparison_vectors)), [0.95] * len(list(comparison_vectors))])
[ "def", "_initialise_classifier", "(", "self", ",", "comparison_vectors", ")", ":", "# Set the start point of the classifier.", "self", ".", "kernel", ".", "init", "=", "numpy", ".", "array", "(", "[", "[", "0.05", "]", "*", "len", "(", "list", "(", "comparison...
Set the centers of the clusters.
[ "Set", "the", "centers", "of", "the", "clusters", "." ]
python
train
nugget/python-insteonplm
insteonplm/states/onOff.py
https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/states/onOff.py#L241-L245
def off(self): """Send OFF command to device.""" self._send_method(StandardSend(self._address, COMMAND_LIGHT_OFF_0X13_0X00), self._off_message_received)
[ "def", "off", "(", "self", ")", ":", "self", ".", "_send_method", "(", "StandardSend", "(", "self", ".", "_address", ",", "COMMAND_LIGHT_OFF_0X13_0X00", ")", ",", "self", ".", "_off_message_received", ")" ]
Send OFF command to device.
[ "Send", "OFF", "command", "to", "device", "." ]
python
train
dcrosta/sendlib
sendlib.py
https://github.com/dcrosta/sendlib/blob/51ea5412a70cf83a62d51d5c515c0eeac725aea0/sendlib.py#L628-L680
def parse(schema): """ Parse `schema`, either a string or a file-like object, and return a :class:`MessageRegistry` with the loaded messages. """ if not isinstance(schema, basestring): # assume it is file-like schema = schema.read() message = re.compile(r'^\(([^,]+),\s*(\d+)\):\s*$') field = re.compile(r'^-\s*([^:]+):\s+(.+?)\s*$') registry = MessageRegistry({}) messages = registry.messages curr = None names = None for lineno, line in enumerate(schema.split('\n')): line = line.strip() if '#' in line: line = line[:line.index('#')] if line == '': continue f = field.match(line) if f: if curr is None: raise ParseError( 'field definition outside of message at line %d' % lineno) name = f.group(1) type = f.group(2) if name not in names: f = Field(curr, name, type) curr.fields.append(f) names.add(name) continue else: raise ParseError( 'duplicate field name "%s" at line %d' % (name, lineno)) m = message.match(line) if m: # new message definition name, vers = m.group(1), int(m.group(2)) if (name, vers) in messages: raise ParseError('Duplicate message (%s, %d)' % (name, vers)) curr = messages[(name, vers)] = Message(registry, name, vers, []) names = set() continue for message in registry.messages.values(): message.fields = tuple(message.fields) return registry
[ "def", "parse", "(", "schema", ")", ":", "if", "not", "isinstance", "(", "schema", ",", "basestring", ")", ":", "# assume it is file-like", "schema", "=", "schema", ".", "read", "(", ")", "message", "=", "re", ".", "compile", "(", "r'^\\(([^,]+),\\s*(\\d+)\\...
Parse `schema`, either a string or a file-like object, and return a :class:`MessageRegistry` with the loaded messages.
[ "Parse", "schema", "either", "a", "string", "or", "a", "file", "-", "like", "object", "and", "return", "a", ":", "class", ":", "MessageRegistry", "with", "the", "loaded", "messages", "." ]
python
train
Iotic-Labs/py-IoticAgent
src/IoticAgent/IOT/utils.py
https://github.com/Iotic-Labs/py-IoticAgent/blob/893e8582ad1dacfe32dfc0ee89452bbd6f57d28d/src/IoticAgent/IOT/utils.py#L73-L84
def private_name_for(var_name, cls): """Returns mangled variable name (if applicable) for the given variable and class instance. See https://docs.python.org/3/tutorial/classes.html#private-variables""" if not (isinstance(var_name, string_types) and var_name): raise TypeError('var_name must be non-empty string') if not (isinstance(cls, type) or isinstance(cls, string_types)): # pylint: disable=consider-merging-isinstance raise TypeError('cls not a class or string') if __PRIVATE_NAME_PATTERN.match(var_name): class_name = cls.__name__ if isinstance(cls, type) else cls return '_%s%s' % (class_name.lstrip('_'), var_name) else: return var_name
[ "def", "private_name_for", "(", "var_name", ",", "cls", ")", ":", "if", "not", "(", "isinstance", "(", "var_name", ",", "string_types", ")", "and", "var_name", ")", ":", "raise", "TypeError", "(", "'var_name must be non-empty string'", ")", "if", "not", "(", ...
Returns mangled variable name (if applicable) for the given variable and class instance. See https://docs.python.org/3/tutorial/classes.html#private-variables
[ "Returns", "mangled", "variable", "name", "(", "if", "applicable", ")", "for", "the", "given", "variable", "and", "class", "instance", ".", "See", "https", ":", "//", "docs", ".", "python", ".", "org", "/", "3", "/", "tutorial", "/", "classes", ".", "h...
python
train
DEIB-GECO/PyGMQL
gmql/ml/algorithms/clustering.py
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/ml/algorithms/clustering.py#L248-L263
def retrieve_cluster(self, df, cluster_no): """ Extracts the cluster at the given index from the input dataframe :param df: the dataframe that contains the clusters :param cluster_no: the cluster number :return: returns the extracted cluster """ if self.is_pyclustering_instance(self.model): clusters = self.model.get_clusters() mask = [] for i in range(0, df.shape[0]): mask.append(i in clusters[cluster_no]) else: mask = self.model.labels_ == cluster_no # a boolean mask return df[mask]
[ "def", "retrieve_cluster", "(", "self", ",", "df", ",", "cluster_no", ")", ":", "if", "self", ".", "is_pyclustering_instance", "(", "self", ".", "model", ")", ":", "clusters", "=", "self", ".", "model", ".", "get_clusters", "(", ")", "mask", "=", "[", ...
Extracts the cluster at the given index from the input dataframe :param df: the dataframe that contains the clusters :param cluster_no: the cluster number :return: returns the extracted cluster
[ "Extracts", "the", "cluster", "at", "the", "given", "index", "from", "the", "input", "dataframe" ]
python
train
juju/charm-helpers
charmhelpers/core/unitdata.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/core/unitdata.py#L326-L353
def delta(self, mapping, prefix): """ return a delta containing values that have changed. """ previous = self.getrange(prefix, strip=True) if not previous: pk = set() else: pk = set(previous.keys()) ck = set(mapping.keys()) delta = DeltaSet() # added for k in ck.difference(pk): delta[k] = Delta(None, mapping[k]) # removed for k in pk.difference(ck): delta[k] = Delta(previous[k], None) # changed for k in pk.intersection(ck): c = mapping[k] p = previous[k] if c != p: delta[k] = Delta(p, c) return delta
[ "def", "delta", "(", "self", ",", "mapping", ",", "prefix", ")", ":", "previous", "=", "self", ".", "getrange", "(", "prefix", ",", "strip", "=", "True", ")", "if", "not", "previous", ":", "pk", "=", "set", "(", ")", "else", ":", "pk", "=", "set"...
return a delta containing values that have changed.
[ "return", "a", "delta", "containing", "values", "that", "have", "changed", "." ]
python
train
dailymuse/oz
oz/aws_cdn/__init__.py
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/aws_cdn/__init__.py#L33-L35
def set_cache_buster(redis, path, hash): """Sets the cache buster value for a given file path""" redis.hset("cache-buster:{}:v3".format(oz.settings["s3_bucket"]), path, hash)
[ "def", "set_cache_buster", "(", "redis", ",", "path", ",", "hash", ")", ":", "redis", ".", "hset", "(", "\"cache-buster:{}:v3\"", ".", "format", "(", "oz", ".", "settings", "[", "\"s3_bucket\"", "]", ")", ",", "path", ",", "hash", ")" ]
Sets the cache buster value for a given file path
[ "Sets", "the", "cache", "buster", "value", "for", "a", "given", "file", "path" ]
python
train
xsleonard/pystmark
pystmark.py
https://github.com/xsleonard/pystmark/blob/329ccae1a7c8d57f28fa72cd8dbbee3e39413ed6/pystmark.py#L205-L217
def get_bounce_tags(api_key=None, secure=None, test=None, **request_args): '''Get a list of tags for bounces associated with your Postmark server. :param api_key: Your Postmark API key. Required, if `test` is not `True`. :param secure: Use the https scheme for the Postmark API. Defaults to `True` :param test: Use the Postmark Test API. Defaults to `False`. :param \*\*request_args: Keyword arguments to pass to :func:`requests.request`. :rtype: :class:`BounceTagsResponse` ''' return _default_bounce_tags.get(api_key=api_key, secure=secure, test=test, **request_args)
[ "def", "get_bounce_tags", "(", "api_key", "=", "None", ",", "secure", "=", "None", ",", "test", "=", "None", ",", "*", "*", "request_args", ")", ":", "return", "_default_bounce_tags", ".", "get", "(", "api_key", "=", "api_key", ",", "secure", "=", "secur...
Get a list of tags for bounces associated with your Postmark server. :param api_key: Your Postmark API key. Required, if `test` is not `True`. :param secure: Use the https scheme for the Postmark API. Defaults to `True` :param test: Use the Postmark Test API. Defaults to `False`. :param \*\*request_args: Keyword arguments to pass to :func:`requests.request`. :rtype: :class:`BounceTagsResponse`
[ "Get", "a", "list", "of", "tags", "for", "bounces", "associated", "with", "your", "Postmark", "server", "." ]
python
train
acroz/pylivy
livy/client.py
https://github.com/acroz/pylivy/blob/14fc65e19434c51ec959c92acb0925b87a6e3569/livy/client.py#L229-L249
def create_statement( self, session_id: int, code: str, kind: StatementKind = None ) -> Statement: """Run a statement in a session. :param session_id: The ID of the session. :param code: The code to execute. :param kind: The kind of code to execute. """ data = {"code": code} if kind is not None: if self.legacy_server(): LOGGER.warning("statement kind ignored on Livy<0.5.0") data["kind"] = kind.value response = self._client.post( f"/sessions/{session_id}/statements", data=data ) return Statement.from_json(session_id, response)
[ "def", "create_statement", "(", "self", ",", "session_id", ":", "int", ",", "code", ":", "str", ",", "kind", ":", "StatementKind", "=", "None", ")", "->", "Statement", ":", "data", "=", "{", "\"code\"", ":", "code", "}", "if", "kind", "is", "not", "N...
Run a statement in a session. :param session_id: The ID of the session. :param code: The code to execute. :param kind: The kind of code to execute.
[ "Run", "a", "statement", "in", "a", "session", "." ]
python
train
astropy/photutils
photutils/aperture/ellipse.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/aperture/ellipse.py#L465-L487
def to_pixel(self, wcs, mode='all'): """ Convert the aperture to an `EllipticalAnnulus` object defined in pixel coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `EllipticalAnnulus` object An `EllipticalAnnulus` object. """ pixel_params = self._to_pixel_params(wcs, mode=mode) return EllipticalAnnulus(**pixel_params)
[ "def", "to_pixel", "(", "self", ",", "wcs", ",", "mode", "=", "'all'", ")", ":", "pixel_params", "=", "self", ".", "_to_pixel_params", "(", "wcs", ",", "mode", "=", "mode", ")", "return", "EllipticalAnnulus", "(", "*", "*", "pixel_params", ")" ]
Convert the aperture to an `EllipticalAnnulus` object defined in pixel coordinates. Parameters ---------- wcs : `~astropy.wcs.WCS` The world coordinate system (WCS) transformation to use. mode : {'all', 'wcs'}, optional Whether to do the transformation including distortions (``'all'``; default) or only including only the core WCS transformation (``'wcs'``). Returns ------- aperture : `EllipticalAnnulus` object An `EllipticalAnnulus` object.
[ "Convert", "the", "aperture", "to", "an", "EllipticalAnnulus", "object", "defined", "in", "pixel", "coordinates", "." ]
python
train
nats-io/asyncio-nats-streaming
stan/aio/client.py
https://github.com/nats-io/asyncio-nats-streaming/blob/344d28f645e6dd0e0b7938516b83e2e6f3c3c9f5/stan/aio/client.py#L241-L300
async def publish(self, subject, payload, ack_handler=None, ack_wait=DEFAULT_ACK_WAIT, ): """ Publishes a payload onto a subject. By default, it will block until the message which has been published has been acked back. An optional async handler can be publi :param subject: Subject of the message. :param payload: Payload of the message which wil be published. :param ack_handler: Optional handler for async publishing. :param ack_wait: How long in seconds to wait for an ack to be received. """ stan_subject = ''.join([self._pub_prefix, '.', subject]) guid = new_guid() pe = protocol.PubMsg() pe.clientID = self._client_id pe.guid = guid pe.subject = subject pe.data = payload # Control max inflight pubs for the client with a buffered queue. await self._pending_pub_acks_queue.put(None) # Process asynchronously if a handler is given. if ack_handler is not None: self._pub_ack_map[guid] = ack_handler try: await self._nc.publish_request( stan_subject, self._ack_subject, pe.SerializeToString(), ) return except Exception as e: del self._pub_ack_map[guid] raise e else: # Synchronous wait for ack handling. future = asyncio.Future(loop=self._loop) async def cb(pub_ack): nonlocal future future.set_result(pub_ack) self._pub_ack_map[guid] = cb try: await self._nc.publish_request( stan_subject, self._ack_subject, pe.SerializeToString(), ) await asyncio.wait_for(future, ack_wait, loop=self._loop) return future.result() except Exception as e: # Remove pending future before raising error. future.cancel() del self._pub_ack_map[guid] raise e
[ "async", "def", "publish", "(", "self", ",", "subject", ",", "payload", ",", "ack_handler", "=", "None", ",", "ack_wait", "=", "DEFAULT_ACK_WAIT", ",", ")", ":", "stan_subject", "=", "''", ".", "join", "(", "[", "self", ".", "_pub_prefix", ",", "'.'", ...
Publishes a payload onto a subject. By default, it will block until the message which has been published has been acked back. An optional async handler can be publi :param subject: Subject of the message. :param payload: Payload of the message which wil be published. :param ack_handler: Optional handler for async publishing. :param ack_wait: How long in seconds to wait for an ack to be received.
[ "Publishes", "a", "payload", "onto", "a", "subject", ".", "By", "default", "it", "will", "block", "until", "the", "message", "which", "has", "been", "published", "has", "been", "acked", "back", ".", "An", "optional", "async", "handler", "can", "be", "publi...
python
train
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/core_v1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/core_v1_api.py#L6863-L6887
def create_namespaced_secret(self, namespace, body, **kwargs): # noqa: E501 """create_namespaced_secret # noqa: E501 create a Secret # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_secret(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Secret body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Secret If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_namespaced_secret_with_http_info(namespace, body, **kwargs) # noqa: E501 else: (data) = self.create_namespaced_secret_with_http_info(namespace, body, **kwargs) # noqa: E501 return data
[ "def", "create_namespaced_secret", "(", "self", ",", "namespace", ",", "body", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return",...
create_namespaced_secret # noqa: E501 create a Secret # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_namespaced_secret(namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Secret body: (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V1Secret If the method is called asynchronously, returns the request thread.
[ "create_namespaced_secret", "#", "noqa", ":", "E501" ]
python
train
openstack/networking-cisco
networking_cisco/apps/saf/agent/vdp/lldpad.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/agent/vdp/lldpad.py#L536-L545
def get_vdp_failure_reason(self, reply): """Parse the failure reason from VDP. """ try: fail_reason = reply.partition( "filter")[0].replace('\t', '').split('\n')[-2] if len(fail_reason) == 0: fail_reason = vdp_const.retrieve_failure_reason % (reply) except Exception: fail_reason = vdp_const.retrieve_failure_reason % (reply) return fail_reason
[ "def", "get_vdp_failure_reason", "(", "self", ",", "reply", ")", ":", "try", ":", "fail_reason", "=", "reply", ".", "partition", "(", "\"filter\"", ")", "[", "0", "]", ".", "replace", "(", "'\\t'", ",", "''", ")", ".", "split", "(", "'\\n'", ")", "["...
Parse the failure reason from VDP.
[ "Parse", "the", "failure", "reason", "from", "VDP", "." ]
python
train
citronneur/rdpy
rdpy/security/pyDes.py
https://github.com/citronneur/rdpy/blob/4109b7a6fe2abf3ddbaed54e29d2f31e63ed97f6/rdpy/security/pyDes.py#L485-L560
def __des_crypt(self, block, crypt_type): """Crypt the block of data through DES bit-manipulation""" block = self.__permutate(des.__ip, block) self.L = block[:32] self.R = block[32:] # Encryption starts from Kn[1] through to Kn[16] if crypt_type == des.ENCRYPT: iteration = 0 iteration_adjustment = 1 # Decryption starts from Kn[16] down to Kn[1] else: iteration = 15 iteration_adjustment = -1 i = 0 while i < 16: # Make a copy of R[i-1], this will later become L[i] tempR = self.R[:] # Permutate R[i - 1] to start creating R[i] self.R = self.__permutate(des.__expansion_table, self.R) # Exclusive or R[i - 1] with K[i], create B[1] to B[8] whilst here self.R = list(map(lambda x, y: x ^ y, self.R, self.Kn[iteration])) B = [self.R[:6], self.R[6:12], self.R[12:18], self.R[18:24], self.R[24:30], self.R[30:36], self.R[36:42], self.R[42:]] # Optimization: Replaced below commented code with above #j = 0 #B = [] #while j < len(self.R): # self.R[j] = self.R[j] ^ self.Kn[iteration][j] # j += 1 # if j % 6 == 0: # B.append(self.R[j-6:j]) # Permutate B[1] to B[8] using the S-Boxes j = 0 Bn = [0] * 32 pos = 0 while j < 8: # Work out the offsets m = (B[j][0] << 1) + B[j][5] n = (B[j][1] << 3) + (B[j][2] << 2) + (B[j][3] << 1) + B[j][4] # Find the permutation value v = des.__sbox[j][(m << 4) + n] # Turn value into bits, add it to result: Bn Bn[pos] = (v & 8) >> 3 Bn[pos + 1] = (v & 4) >> 2 Bn[pos + 2] = (v & 2) >> 1 Bn[pos + 3] = v & 1 pos += 4 j += 1 # Permutate the concatination of B[1] to B[8] (Bn) self.R = self.__permutate(des.__p, Bn) # Xor with L[i - 1] self.R = list(map(lambda x, y: x ^ y, self.R, self.L)) # Optimization: This now replaces the below commented code #j = 0 #while j < len(self.R): # self.R[j] = self.R[j] ^ self.L[j] # j += 1 # L[i] becomes R[i - 1] self.L = tempR i += 1 iteration += iteration_adjustment # Final permutation of R[16]L[16] self.final = self.__permutate(des.__fp, self.R + self.L) return self.final
[ "def", "__des_crypt", "(", "self", ",", "block", ",", "crypt_type", ")", ":", "block", "=", "self", ".", "__permutate", "(", "des", ".", "__ip", ",", "block", ")", "self", ".", "L", "=", "block", "[", ":", "32", "]", "self", ".", "R", "=", "block...
Crypt the block of data through DES bit-manipulation
[ "Crypt", "the", "block", "of", "data", "through", "DES", "bit", "-", "manipulation" ]
python
train
tensorflow/cleverhans
cleverhans/plot/pyplot_image.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/plot/pyplot_image.py#L139-L182
def linear_extrapolation_plot(log_prob_adv_array, y, file_name, min_epsilon=-10, max_epsilon=10, num_points=21): """Generate linear extrapolation plot. Args: log_prob_adv_array: Numpy array containing log probabilities y: Tf placeholder for the labels file_name: Plot filename min_epsilon: Minimum value of epsilon over the interval max_epsilon: Maximum value of epsilon over the interval num_points: Number of points used to interpolate """ import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt figure = plt.figure() figure.canvas.set_window_title('Cleverhans: Linear Extrapolation Plot') correct_idx = np.argmax(y, axis=0) fig = plt.figure() plt.xlabel('Epsilon') plt.ylabel('Logits') x_axis = np.linspace(min_epsilon, max_epsilon, num_points) plt.xlim(min_epsilon - 1, max_epsilon + 1) for i in range(y.shape[0]): if i == correct_idx: ls = '-' linewidth = 5 else: ls = '--' linewidth = 2 plt.plot( x_axis, log_prob_adv_array[:, i], ls=ls, linewidth=linewidth, label='{}'.format(i)) plt.legend(loc='best', fontsize=14) plt.show() fig.savefig(file_name) plt.clf() return figure
[ "def", "linear_extrapolation_plot", "(", "log_prob_adv_array", ",", "y", ",", "file_name", ",", "min_epsilon", "=", "-", "10", ",", "max_epsilon", "=", "10", ",", "num_points", "=", "21", ")", ":", "import", "matplotlib", "matplotlib", ".", "use", "(", "'Agg...
Generate linear extrapolation plot. Args: log_prob_adv_array: Numpy array containing log probabilities y: Tf placeholder for the labels file_name: Plot filename min_epsilon: Minimum value of epsilon over the interval max_epsilon: Maximum value of epsilon over the interval num_points: Number of points used to interpolate
[ "Generate", "linear", "extrapolation", "plot", "." ]
python
train
orb-framework/orb
orb/core/connection_types/sql/sqlconnection.py
https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/connection_types/sql/sqlconnection.py#L443-L448
def rollback(self): """ Rolls back changes to this database. """ with self.native(writeAccess=True) as conn: return self._rollback(conn)
[ "def", "rollback", "(", "self", ")", ":", "with", "self", ".", "native", "(", "writeAccess", "=", "True", ")", "as", "conn", ":", "return", "self", ".", "_rollback", "(", "conn", ")" ]
Rolls back changes to this database.
[ "Rolls", "back", "changes", "to", "this", "database", "." ]
python
train
esheldon/fitsio
fitsio/hdu/image.py
https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/image.py#L206-L295
def _read_image_slice(self, arg): """ workhorse to read a slice """ if 'ndims' not in self._info: raise ValueError("Attempt to slice empty extension") if isinstance(arg, slice): # one-dimensional, e.g. 2:20 return self._read_image_slice((arg,)) if not isinstance(arg, tuple): raise ValueError("arguments must be slices, one for each " "dimension, e.g. [2:5] or [2:5,8:25] etc.") # should be a tuple of slices, one for each dimension # e.g. [2:3, 8:100] nd = len(arg) if nd != self._info['ndims']: raise ValueError("Got slice dimensions %d, " "expected %d" % (nd, self._info['ndims'])) targ = arg arg = [] for a in targ: if isinstance(a, slice): arg.append(a) elif isinstance(a, int): arg.append(slice(a, a+1, 1)) else: raise ValueError("arguments must be slices, e.g. 2:12") dims = self._info['dims'] arrdims = [] first = [] last = [] steps = [] # check the args and reverse dimensions since # fits is backwards from numpy dim = 0 for slc in arg: start = slc.start stop = slc.stop step = slc.step if start is None: start = 0 if stop is None: stop = dims[dim] if step is None: step = 1 if step < 1: raise ValueError("slice steps must be >= 1") if start < 0: start = dims[dim] + start if start < 0: raise IndexError("Index out of bounds") if stop < 0: stop = dims[dim] + start + 1 # move to 1-offset start = start + 1 if stop < start: raise ValueError("python slices but include at least one " "element, got %s" % slc) if stop > dims[dim]: stop = dims[dim] first.append(start) last.append(stop) steps.append(step) arrdims.append(stop-start+1) dim += 1 first.reverse() last.reverse() steps.reverse() first = numpy.array(first, dtype='i8') last = numpy.array(last, dtype='i8') steps = numpy.array(steps, dtype='i8') npy_dtype = self._get_image_numpy_dtype() array = numpy.zeros(arrdims, dtype=npy_dtype) self._FITS.read_image_slice(self._ext+1, first, last, steps, array) return array
[ "def", "_read_image_slice", "(", "self", ",", "arg", ")", ":", "if", "'ndims'", "not", "in", "self", ".", "_info", ":", "raise", "ValueError", "(", "\"Attempt to slice empty extension\"", ")", "if", "isinstance", "(", "arg", ",", "slice", ")", ":", "# one-di...
workhorse to read a slice
[ "workhorse", "to", "read", "a", "slice" ]
python
train
frostming/marko
marko/__init__.py
https://github.com/frostming/marko/blob/1cd030b665fa37bad1f8b3a25a89ce1a7c491dde/marko/__init__.py#L48-L55
def render(self, parsed): """Call ``self.renderer.render(text)``. Override this to handle parsed result. """ self.renderer.root_node = parsed with self.renderer as r: return r.render(parsed)
[ "def", "render", "(", "self", ",", "parsed", ")", ":", "self", ".", "renderer", ".", "root_node", "=", "parsed", "with", "self", ".", "renderer", "as", "r", ":", "return", "r", ".", "render", "(", "parsed", ")" ]
Call ``self.renderer.render(text)``. Override this to handle parsed result.
[ "Call", "self", ".", "renderer", ".", "render", "(", "text", ")", "." ]
python
train
bkjones/pyrabbit
pyrabbit/api.py
https://github.com/bkjones/pyrabbit/blob/e8a9f74ed5c6bba958994fb9a72c396e6a99ea0f/pyrabbit/api.py#L519-L542
def get_queue_depths(self, vhost, names=None): """ Get the number of messages currently sitting in either the queue names listed in 'names', or all queues in 'vhost' if no 'names' are given. :param str vhost: Vhost where queues in 'names' live. :param list names: OPTIONAL - Specific queues to show depths for. If None, show depths for all queues in 'vhost'. """ vhost = quote(vhost, '') if not names: # get all queues in vhost path = Client.urls['queues_by_vhost'] % vhost queues = self._call(path, 'GET') for queue in queues: depth = queue['messages'] print("\t%s: %s" % (queue, depth)) else: # get the named queues only. for name in names: depth = self.get_queue_depth(vhost, name) print("\t%s: %s" % (name, depth))
[ "def", "get_queue_depths", "(", "self", ",", "vhost", ",", "names", "=", "None", ")", ":", "vhost", "=", "quote", "(", "vhost", ",", "''", ")", "if", "not", "names", ":", "# get all queues in vhost", "path", "=", "Client", ".", "urls", "[", "'queues_by_v...
Get the number of messages currently sitting in either the queue names listed in 'names', or all queues in 'vhost' if no 'names' are given. :param str vhost: Vhost where queues in 'names' live. :param list names: OPTIONAL - Specific queues to show depths for. If None, show depths for all queues in 'vhost'.
[ "Get", "the", "number", "of", "messages", "currently", "sitting", "in", "either", "the", "queue", "names", "listed", "in", "names", "or", "all", "queues", "in", "vhost", "if", "no", "names", "are", "given", "." ]
python
train
franciscogarate/pyliferisk
pyliferisk/__init__.py
https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L464-L470
def qaaxn(mt, x, n, q, m = 1): """ geometrica """ #i = float(nt[1]) q = float(q) j = (mt.i - q) / (1 + q) mtj = Actuarial(nt=mt.nt, i=j) return aaxn(mtj, x, n, m)
[ "def", "qaaxn", "(", "mt", ",", "x", ",", "n", ",", "q", ",", "m", "=", "1", ")", ":", "#i = float(nt[1])", "q", "=", "float", "(", "q", ")", "j", "=", "(", "mt", ".", "i", "-", "q", ")", "/", "(", "1", "+", "q", ")", "mtj", "=", "Actua...
geometrica
[ "geometrica" ]
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/__init__.py#L11067-L11088
def _set_dpod(self, v, load=False): """ Setter method for dpod, mapped from YANG variable /dpod (container) If this variable is read-only (config: false) in the source YANG file, then _set_dpod is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_dpod() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=dpod.dpod, is_container='container', presence=False, yang_name="dpod", rest_name="dpod", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Manage and display DPOD license assignments.\nUsage: dpod [slot/port] [reserve|release]', u'display-when': u'(/c:capabilities/c:license/c:dpod_display = "true")'}}, namespace='urn:brocade.com:mgmt:brocade-license', defining_module='brocade-license', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """dpod must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=dpod.dpod, is_container='container', presence=False, yang_name="dpod", rest_name="dpod", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Manage and display DPOD license assignments.\nUsage: dpod [slot/port] [reserve|release]', u'display-when': u'(/c:capabilities/c:license/c:dpod_display = "true")'}}, namespace='urn:brocade.com:mgmt:brocade-license', defining_module='brocade-license', yang_type='container', is_config=True)""", }) self.__dpod = t if hasattr(self, '_set'): self._set()
[ "def", "_set_dpod", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", ...
Setter method for dpod, mapped from YANG variable /dpod (container) If this variable is read-only (config: false) in the source YANG file, then _set_dpod is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_dpod() directly.
[ "Setter", "method", "for", "dpod", "mapped", "from", "YANG", "variable", "/", "dpod", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_dpod"...
python
train
swharden/SWHLab
swhlab/analysis/glance.py
https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/swhlab/analysis/glance.py#L26-L35
def processFolder(abfFolder): """call processAbf() for every ABF in a folder.""" if not type(abfFolder) is str or not len(abfFolder)>3: return files=sorted(glob.glob(abfFolder+"/*.abf")) for i,fname in enumerate(files): print("\n\n\n### PROCESSING {} of {}:".format(i,len(files)),os.path.basename(fname)) processAbf(fname,show=False) plt.show() return
[ "def", "processFolder", "(", "abfFolder", ")", ":", "if", "not", "type", "(", "abfFolder", ")", "is", "str", "or", "not", "len", "(", "abfFolder", ")", ">", "3", ":", "return", "files", "=", "sorted", "(", "glob", ".", "glob", "(", "abfFolder", "+", ...
call processAbf() for every ABF in a folder.
[ "call", "processAbf", "()", "for", "every", "ABF", "in", "a", "folder", "." ]
python
valid
tensorpack/tensorpack
tensorpack/train/tower.py
https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/train/tower.py#L89-L147
def get_predictor(self, input_names, output_names, device=0): """ This method will build the trainer's tower function under ``TowerContext(is_training=False)``, and returns a callable predictor with input placeholders & output tensors in this tower. This method handles the common case of inference with the same tower function. If you want to do inference with a different tower function, you can always build the tower by yourself, under a "reuse" variable scope and a `TowerContext(is_training=False)`. Args: input_names (list): list of input names, matching the inputs declared for the trainer. output_names(list): list of tensor names without the tower prefix. device (int): build the predictor on device '/gpu:{device}' or use -1 for '/cpu:0'. Returns: an :class:`OnlinePredictor`. Example: .. code-block:: none # in the graph: interesting_tensor = tf.identity(x, name='fun') # in _setup_graph callback method: self._predictor = self.trainer.get_predictor(['input1', 'input2'], ['fun']) # After session is initialized (see Tutorials - Write a Callback), can use it by: outputs = self._predictor(input1, input2) The CycleGAN example and DQN example have more concrete use of this method. """ assert self.tower_func is not None, "Must set tower_func on the trainer to use get_predictor()!" tower_name = 'tower-pred-{}'.format(device) if device >= 0 else 'tower-pred-cpu' device_id = device device = '/gpu:{}'.format(device_id) if device_id >= 0 else '/cpu:0' try: tower = self.tower_func.towers[tower_name] assert tower is not None, "This is a bug!" 
except KeyError: tower = None if tower is None: input = PlaceholderInput() input.setup(self.input_signature) vs_name = self._vs_name_for_predictor(device_id) with tfv1.variable_scope(tfv1.get_variable_scope(), reuse=True), \ tf.device(device), PredictTowerContext( tower_name, vs_name=vs_name): logger.info("Building graph for predict tower '{}' on device {} {}...".format( tower_name, device, "with variable scope '{}'".format(vs_name) if vs_name else '')) self.tower_func(*input.get_input_tensors()) tower = self.tower_func.towers[tower_name] input_tensors = tower.get_tensors(input_names) output_tensors = tower.get_tensors(output_names) predictor = OnlinePredictor(input_tensors, output_tensors) self._predictors.append(predictor) return predictor
[ "def", "get_predictor", "(", "self", ",", "input_names", ",", "output_names", ",", "device", "=", "0", ")", ":", "assert", "self", ".", "tower_func", "is", "not", "None", ",", "\"Must set tower_func on the trainer to use get_predictor()!\"", "tower_name", "=", "'tow...
This method will build the trainer's tower function under ``TowerContext(is_training=False)``, and returns a callable predictor with input placeholders & output tensors in this tower. This method handles the common case of inference with the same tower function. If you want to do inference with a different tower function, you can always build the tower by yourself, under a "reuse" variable scope and a `TowerContext(is_training=False)`. Args: input_names (list): list of input names, matching the inputs declared for the trainer. output_names(list): list of tensor names without the tower prefix. device (int): build the predictor on device '/gpu:{device}' or use -1 for '/cpu:0'. Returns: an :class:`OnlinePredictor`. Example: .. code-block:: none # in the graph: interesting_tensor = tf.identity(x, name='fun') # in _setup_graph callback method: self._predictor = self.trainer.get_predictor(['input1', 'input2'], ['fun']) # After session is initialized (see Tutorials - Write a Callback), can use it by: outputs = self._predictor(input1, input2) The CycleGAN example and DQN example have more concrete use of this method.
[ "This", "method", "will", "build", "the", "trainer", "s", "tower", "function", "under", "TowerContext", "(", "is_training", "=", "False", ")", "and", "returns", "a", "callable", "predictor", "with", "input", "placeholders", "&", "output", "tensors", "in", "thi...
python
train
lk-geimfari/mimesis
mimesis/providers/transport.py
https://github.com/lk-geimfari/mimesis/blob/4b16ee7a8dba6281a904654a88dbb4b052869fc5/mimesis/providers/transport.py#L29-L42
def truck(self, model_mask: str = '#### @@') -> str: """Generate a truck model. :param model_mask: Mask of truck model. Here '@' is a placeholder of characters and '#' is a placeholder of digits. :return: Dummy truck model. :Example: Caledon-966O. """ return '{}-{}'.format( self.random.choice(TRUCKS), self.random.custom_code(model_mask), )
[ "def", "truck", "(", "self", ",", "model_mask", ":", "str", "=", "'#### @@'", ")", "->", "str", ":", "return", "'{}-{}'", ".", "format", "(", "self", ".", "random", ".", "choice", "(", "TRUCKS", ")", ",", "self", ".", "random", ".", "custom_code", "(...
Generate a truck model. :param model_mask: Mask of truck model. Here '@' is a placeholder of characters and '#' is a placeholder of digits. :return: Dummy truck model. :Example: Caledon-966O.
[ "Generate", "a", "truck", "model", "." ]
python
train
polyaxon/polyaxon
polyaxon/db/models/pipelines.py
https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/db/models/pipelines.py#L549-L570
def check_upstream_trigger(self) -> bool: """Checks the upstream and the trigger rule.""" if self.operation.trigger_policy == TriggerPolicy.ONE_DONE: return self.upstream_runs.filter( status__status__in=self.STATUSES.DONE_STATUS).exists() if self.operation.trigger_policy == TriggerPolicy.ONE_SUCCEEDED: return self.upstream_runs.filter( status__status=self.STATUSES.SUCCEEDED).exists() if self.operation.trigger_policy == TriggerPolicy.ONE_FAILED: return self.upstream_runs.filter( status__status=self.STATUSES.FAILED).exists() statuses = self.upstream_runs.values_list('status__status', flat=True) if self.operation.trigger_policy == TriggerPolicy.ALL_DONE: return not bool([True for status in statuses if status not in self.STATUSES.DONE_STATUS]) if self.operation.trigger_policy == TriggerPolicy.ALL_SUCCEEDED: return not bool([True for status in statuses if status != self.STATUSES.SUCCEEDED]) if self.operation.trigger_policy == TriggerPolicy.ALL_FAILED: return not bool([True for status in statuses if status not in self.STATUSES.FAILED_STATUS])
[ "def", "check_upstream_trigger", "(", "self", ")", "->", "bool", ":", "if", "self", ".", "operation", ".", "trigger_policy", "==", "TriggerPolicy", ".", "ONE_DONE", ":", "return", "self", ".", "upstream_runs", ".", "filter", "(", "status__status__in", "=", "se...
Checks the upstream and the trigger rule.
[ "Checks", "the", "upstream", "and", "the", "trigger", "rule", "." ]
python
train
droope/droopescan
dscan/common/update_api.py
https://github.com/droope/droopescan/blob/424c48a0f9d12b4536dbef5a786f0fbd4ce9519a/dscan/common/update_api.py#L307-L315
def init(self): """ Performs a clone or a fetch, depending on whether the repository has been previously cloned or not. """ if os.path.isdir(self.path): self.fetch() else: self.clone()
[ "def", "init", "(", "self", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "self", ".", "path", ")", ":", "self", ".", "fetch", "(", ")", "else", ":", "self", ".", "clone", "(", ")" ]
Performs a clone or a fetch, depending on whether the repository has been previously cloned or not.
[ "Performs", "a", "clone", "or", "a", "fetch", "depending", "on", "whether", "the", "repository", "has", "been", "previously", "cloned", "or", "not", "." ]
python
train
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L1556-L1566
def rate_sp(self): """ Sets the rate_sp at which the servo travels from 0 to 100.0% (half of the full range of the servo). Units are in milliseconds. Example: Setting the rate_sp to 1000 means that it will take a 180 degree servo 2 second to move from 0 to 180 degrees. Note: Some servo controllers may not support this in which case reading and writing will fail with `-EOPNOTSUPP`. In continuous rotation servos, this value will affect the rate_sp at which the speed ramps up or down. """ self._rate_sp, value = self.get_attr_int(self._rate_sp, 'rate_sp') return value
[ "def", "rate_sp", "(", "self", ")", ":", "self", ".", "_rate_sp", ",", "value", "=", "self", ".", "get_attr_int", "(", "self", ".", "_rate_sp", ",", "'rate_sp'", ")", "return", "value" ]
Sets the rate_sp at which the servo travels from 0 to 100.0% (half of the full range of the servo). Units are in milliseconds. Example: Setting the rate_sp to 1000 means that it will take a 180 degree servo 2 second to move from 0 to 180 degrees. Note: Some servo controllers may not support this in which case reading and writing will fail with `-EOPNOTSUPP`. In continuous rotation servos, this value will affect the rate_sp at which the speed ramps up or down.
[ "Sets", "the", "rate_sp", "at", "which", "the", "servo", "travels", "from", "0", "to", "100", ".", "0%", "(", "half", "of", "the", "full", "range", "of", "the", "servo", ")", ".", "Units", "are", "in", "milliseconds", ".", "Example", ":", "Setting", ...
python
train
tdryer/hangups
hangups/auth.py
https://github.com/tdryer/hangups/blob/85c0bf0a57698d077461283895707260f9dbf931/hangups/auth.py#L160-L171
def set(self, refresh_token): """Cache a refresh token, ignoring any failure. Args: refresh_token (str): Refresh token to cache. """ logger.info('Saving refresh_token to %s', repr(self._filename)) try: with open(self._filename, 'w') as f: f.write(refresh_token) except IOError as e: logger.warning('Failed to save refresh_token: %s', e)
[ "def", "set", "(", "self", ",", "refresh_token", ")", ":", "logger", ".", "info", "(", "'Saving refresh_token to %s'", ",", "repr", "(", "self", ".", "_filename", ")", ")", "try", ":", "with", "open", "(", "self", ".", "_filename", ",", "'w'", ")", "as...
Cache a refresh token, ignoring any failure. Args: refresh_token (str): Refresh token to cache.
[ "Cache", "a", "refresh", "token", "ignoring", "any", "failure", "." ]
python
valid
CI-WATER/gsshapy
gsshapy/grid/grid_to_gssha.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/grid/grid_to_gssha.py#L681-L806
def _load_converted_gssha_data_from_lsm(self, gssha_var, lsm_var, load_type, time_step=None): """ This function loads data from LSM and converts to GSSHA format """ if 'radiation' in gssha_var: conversion_factor = self.netcdf_attributes[gssha_var]['conversion_factor'][load_type] if gssha_var.startswith('direct_radiation') and not isinstance(lsm_var, basestring): # direct_radiation = (1-DIFFUSIVE_FRACION)*global_radiation global_radiation_var, diffusive_fraction_var = lsm_var global_radiation = self._load_lsm_data(global_radiation_var, conversion_factor) diffusive_fraction = self._load_lsm_data(diffusive_fraction_var) if gssha_var.endswith("cc"): diffusive_fraction /= 100.0 self.data = ((1-diffusive_fraction)*global_radiation) elif gssha_var.startswith('diffusive_radiation') and not isinstance(lsm_var, basestring): # diffusive_radiation = DIFFUSIVE_FRACION*global_radiation global_radiation_var, diffusive_fraction_var = lsm_var global_radiation = self._load_lsm_data(global_radiation_var, conversion_factor) diffusive_fraction = self._load_lsm_data(diffusive_fraction_var) if gssha_var.endswith("cc"): diffusive_fraction /= 100 self.data = (diffusive_fraction*global_radiation) elif isinstance(lsm_var, basestring): self.data = self._load_lsm_data(lsm_var, self.netcdf_attributes[gssha_var]['conversion_factor'][load_type]) else: raise ValueError("Invalid LSM variable ({0}) for GSSHA variable {1}".format(lsm_var, gssha_var)) elif gssha_var == 'relative_humidity' and not isinstance(lsm_var, str): ##CONVERSION ASSUMPTIONS: ##1) These equations are for liquid water and are less accurate below 0 deg C ##2) Not adjusting the pressure for the fact that the temperature ## and moisture measurements are given at 2 m AGL. ##Neither of these should have a significant impact on RH values ##given the uncertainty in the model values themselves. 
specific_humidity_var, pressure_var, temperature_var = lsm_var specific_humidity = self._load_lsm_data(specific_humidity_var) pressure = self._load_lsm_data(pressure_var) temperature = self._load_lsm_data(temperature_var) ##To compute the relative humidity at 2m, ##given T, Q (water vapor mixing ratio) at 2 m and PSFC (surface pressure): ##Es(saturation vapor pressure in Pa) ##Qs(saturation mixing ratio)=(0.622*es)/(PSFC-es) ##RH = 100*Q/Qs es = esat(temperature) self.data = 100 * specific_humidity/((0.622*es)/(pressure-es)) elif gssha_var == 'relative_humidity_dew': # https://software.ecmwf.int/wiki/display/CKB/Do+ERA+datasets+contain+parameters+for+near-surface+humidity # temperature in Kelvin # RH = 100 * es(Td)/es(T) dew_point_temp_var, temperature_var = lsm_var dew_point_temp = self._load_lsm_data(dew_point_temp_var) temperature = self._load_lsm_data(temperature_var) self.data = 100 * esat(dew_point_temp)/esat(temperature) elif gssha_var == 'wind_speed' and not isinstance(lsm_var, str): # WRF: http://www.meteo.unican.es/wiki/cordexwrf/OutputVariables u_vector_var, v_vector_var = lsm_var conversion_factor = self.netcdf_attributes[gssha_var]['conversion_factor'][load_type] u_vector = self._load_lsm_data(u_vector_var, conversion_factor) v_vector = self._load_lsm_data(v_vector_var, conversion_factor) self.data = (xu.sqrt(u_vector**2 + v_vector**2)) elif 'precipitation' in gssha_var and not isinstance(lsm_var, str): # WRF: http://www.meteo.unican.es/wiki/cordexwrf/OutputVariables rain_c_var, rain_nc_var = lsm_var conversion_factor = self.netcdf_attributes[gssha_var]['conversion_factor'][load_type] rain_c = self._load_lsm_data(rain_c_var, conversion_factor) rain_nc = self._load_lsm_data(rain_nc_var, conversion_factor) self.data = rain_c + rain_nc else: self.data = self._load_lsm_data(lsm_var, self.netcdf_attributes[gssha_var]['conversion_factor'][load_type], self.netcdf_attributes[gssha_var].get('calc_4d_method'), 
self.netcdf_attributes[gssha_var].get('calc_4d_dim'), time_step=time_step) conversion_function = self.netcdf_attributes[gssha_var].get('conversion_function') if conversion_function: self.data.values = self.netcdf_attributes[gssha_var]['conversion_function'][load_type](self.data.values) if 'precipitation' in gssha_var: # NOTE: Precipitation is converted from mm/s to mm/hr # with the conversion factor when it is a rate. if 'units' in self.data.attrs: if self.data.attrs['units'] == 'm': # convert from m to mm self.data.values *= 1000 if load_type == 'ascii' or load_type == 'netcdf': # CONVERT TO INCREMENTAL if gssha_var == 'precipitation_acc': self.data.values = np.lib.pad(self.data.diff(self.lsm_time_dim).values, ((1, 0), (0, 0), (0, 0)), 'constant', constant_values=0) # CONVERT PRECIP TO RADAR (mm/hr) IN FILE if gssha_var == 'precipitation_inc' or gssha_var == 'precipitation_acc': # convert from mm to mm/hr time_step_hours = np.diff(self.xd[self.lsm_time_var].values)[0]/np.timedelta64(1, 'h') self.data.values /= time_step_hours # convert to dataset gssha_data_var_name = self.netcdf_attributes[gssha_var]['gssha_name'] self.data = self.data.to_dataset(name=gssha_data_var_name) self.data.rename( { self.lsm_lon_dim: 'x', self.lsm_lat_dim: 'y', self.lsm_lon_var: 'lon', self.lsm_lat_var: 'lat' }, inplace=True ) self.data.attrs = {'proj4': self.xd.lsm.projection.ExportToProj4()} self.data[gssha_data_var_name].attrs = { 'standard_name': self.netcdf_attributes[gssha_var]['standard_name'], 'long_name': self.netcdf_attributes[gssha_var]['long_name'], 'units': self.netcdf_attributes[gssha_var]['units'][load_type], }
[ "def", "_load_converted_gssha_data_from_lsm", "(", "self", ",", "gssha_var", ",", "lsm_var", ",", "load_type", ",", "time_step", "=", "None", ")", ":", "if", "'radiation'", "in", "gssha_var", ":", "conversion_factor", "=", "self", ".", "netcdf_attributes", "[", ...
This function loads data from LSM and converts to GSSHA format
[ "This", "function", "loads", "data", "from", "LSM", "and", "converts", "to", "GSSHA", "format" ]
python
train
bcbio/bcbio-nextgen
bcbio/cwl/workflow.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/cwl/workflow.py#L150-L160
def is_cwl_record(d): """Check if an input is a CWL record, from any level of nesting. """ if isinstance(d, dict): if d.get("type") == "record": return d else: recs = list(filter(lambda x: x is not None, [is_cwl_record(v) for v in d.values()])) return recs[0] if recs else None else: return None
[ "def", "is_cwl_record", "(", "d", ")", ":", "if", "isinstance", "(", "d", ",", "dict", ")", ":", "if", "d", ".", "get", "(", "\"type\"", ")", "==", "\"record\"", ":", "return", "d", "else", ":", "recs", "=", "list", "(", "filter", "(", "lambda", ...
Check if an input is a CWL record, from any level of nesting.
[ "Check", "if", "an", "input", "is", "a", "CWL", "record", "from", "any", "level", "of", "nesting", "." ]
python
train
jssimporter/python-jss
jss/jamf_software_server.py
https://github.com/jssimporter/python-jss/blob/b95185d74e0c0531b0b563f280d4129e21d5fe5d/jss/jamf_software_server.py#L459-L484
def load_from_xml(self, path): """Load all objects from XML file and return as dict. The dict returned will have keys named the same as the JSSObject classes contained, and the values will be JSSObjectLists of all full objects of that class (for example, the equivalent of my_jss.Computer().retrieve_all()). This method can potentially take a very long time! Args: path: String file path to the file you wish to load from. Path will have ~ expanded prior to opening. """ with open(os.path.expanduser(path), "r") as ifile: et = ElementTree.parse(ifile) root = et.getroot() all_objects = {} for child in root: obj_type = self.__getattribute__(child.tag) objects = [obj_type(obj) for obj in child] all_objects[child.tag] = JSSObjectList(self.factory, None, objects) return all_objects
[ "def", "load_from_xml", "(", "self", ",", "path", ")", ":", "with", "open", "(", "os", ".", "path", ".", "expanduser", "(", "path", ")", ",", "\"r\"", ")", "as", "ifile", ":", "et", "=", "ElementTree", ".", "parse", "(", "ifile", ")", "root", "=", ...
Load all objects from XML file and return as dict. The dict returned will have keys named the same as the JSSObject classes contained, and the values will be JSSObjectLists of all full objects of that class (for example, the equivalent of my_jss.Computer().retrieve_all()). This method can potentially take a very long time! Args: path: String file path to the file you wish to load from. Path will have ~ expanded prior to opening.
[ "Load", "all", "objects", "from", "XML", "file", "and", "return", "as", "dict", "." ]
python
train
fmfn/BayesianOptimization
bayes_opt/util.py
https://github.com/fmfn/BayesianOptimization/blob/8ce2292895137477963cf1bafa4e71fa20b2ce49/bayes_opt/util.py#L159-L171
def ensure_rng(random_state=None): """ Creates a random number generator based on an optional seed. This can be an integer or another random state for a seeded rng, or None for an unseeded rng. """ if random_state is None: random_state = np.random.RandomState() elif isinstance(random_state, int): random_state = np.random.RandomState(random_state) else: assert isinstance(random_state, np.random.RandomState) return random_state
[ "def", "ensure_rng", "(", "random_state", "=", "None", ")", ":", "if", "random_state", "is", "None", ":", "random_state", "=", "np", ".", "random", ".", "RandomState", "(", ")", "elif", "isinstance", "(", "random_state", ",", "int", ")", ":", "random_state...
Creates a random number generator based on an optional seed. This can be an integer or another random state for a seeded rng, or None for an unseeded rng.
[ "Creates", "a", "random", "number", "generator", "based", "on", "an", "optional", "seed", ".", "This", "can", "be", "an", "integer", "or", "another", "random", "state", "for", "a", "seeded", "rng", "or", "None", "for", "an", "unseeded", "rng", "." ]
python
train
pazz/urwidtrees
urwidtrees/decoration.py
https://github.com/pazz/urwidtrees/blob/d1fa38ce4f37db00bdfc574b856023b5db4c7ead/urwidtrees/decoration.py#L329-L353
def _construct_spacer(self, pos, acc): """ build a spacer that occupies the horizontally indented space between pos's parent and the root node. It will return a list of tuples to be fed into a Columns widget. """ parent = self._tree.parent_position(pos) if parent is not None: grandparent = self._tree.parent_position(parent) if self._indent > 0 and grandparent is not None: parent_sib = self._tree.next_sibling_position(parent) draw_vbar = parent_sib is not None and \ self._arrow_vbar_char is not None space_width = self._indent - 1 * (draw_vbar) - self._childbar_offset if space_width > 0: void = urwid.AttrMap(urwid.SolidFill(' '), self._arrow_att) acc.insert(0, ((space_width, void))) if draw_vbar: barw = urwid.SolidFill(self._arrow_vbar_char) bar = urwid.AttrMap(barw, self._arrow_vbar_att or self._arrow_att) acc.insert(0, ((1, bar))) return self._construct_spacer(parent, acc) else: return acc
[ "def", "_construct_spacer", "(", "self", ",", "pos", ",", "acc", ")", ":", "parent", "=", "self", ".", "_tree", ".", "parent_position", "(", "pos", ")", "if", "parent", "is", "not", "None", ":", "grandparent", "=", "self", ".", "_tree", ".", "parent_po...
build a spacer that occupies the horizontally indented space between pos's parent and the root node. It will return a list of tuples to be fed into a Columns widget.
[ "build", "a", "spacer", "that", "occupies", "the", "horizontally", "indented", "space", "between", "pos", "s", "parent", "and", "the", "root", "node", ".", "It", "will", "return", "a", "list", "of", "tuples", "to", "be", "fed", "into", "a", "Columns", "w...
python
train
deginner/mq-client
mq_client.py
https://github.com/deginner/mq-client/blob/a20ab50ea18870c01e8d142b049233c355858872/mq_client.py#L10-L33
def _on_message(channel, method, header, body): """ Invoked by pika when a message is delivered from RabbitMQ. The channel is passed for your convenience. The basic_deliver object that is passed in carries the exchange, routing key, delivery tag and a redelivered flag for the message. The properties passed in is an instance of BasicProperties with the message properties and the body is the message that was sent. :param pika.channel.Channel channel: The channel object :param pika.Spec.Basic.Deliver method: The Deliver method :param pika.Spec.BasicProperties properties: The client properties :param str|unicode body: The message body """ print "Message:" print "\t%r" % method print "\t%r" % header print "\t%r" % body # Acknowledge message receipt channel.basic_ack(method.delivery_tag) # when ready, stop consuming channel.stop_consuming()
[ "def", "_on_message", "(", "channel", ",", "method", ",", "header", ",", "body", ")", ":", "print", "\"Message:\"", "print", "\"\\t%r\"", "%", "method", "print", "\"\\t%r\"", "%", "header", "print", "\"\\t%r\"", "%", "body", "# Acknowledge message receipt", "cha...
Invoked by pika when a message is delivered from RabbitMQ. The channel is passed for your convenience. The basic_deliver object that is passed in carries the exchange, routing key, delivery tag and a redelivered flag for the message. The properties passed in is an instance of BasicProperties with the message properties and the body is the message that was sent. :param pika.channel.Channel channel: The channel object :param pika.Spec.Basic.Deliver method: The Deliver method :param pika.Spec.BasicProperties properties: The client properties :param str|unicode body: The message body
[ "Invoked", "by", "pika", "when", "a", "message", "is", "delivered", "from", "RabbitMQ", ".", "The", "channel", "is", "passed", "for", "your", "convenience", ".", "The", "basic_deliver", "object", "that", "is", "passed", "in", "carries", "the", "exchange", "r...
python
train
OSSOS/MOP
src/ossos/core/ossos/downloads/cutouts/focus.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/downloads/cutouts/focus.py#L24-L35
def calculate_focus(self, reading): """ Determines what the focal point of the downloaded image should be. Returns: focal_point: (x, y) The location of the source in the middle observation, in the coordinate system of the current source reading. """ middle_index = len(self.source.get_readings()) // 2 middle_reading = self.source.get_reading(middle_index) return self.convert_source_location(middle_reading, reading)
[ "def", "calculate_focus", "(", "self", ",", "reading", ")", ":", "middle_index", "=", "len", "(", "self", ".", "source", ".", "get_readings", "(", ")", ")", "//", "2", "middle_reading", "=", "self", ".", "source", ".", "get_reading", "(", "middle_index", ...
Determines what the focal point of the downloaded image should be. Returns: focal_point: (x, y) The location of the source in the middle observation, in the coordinate system of the current source reading.
[ "Determines", "what", "the", "focal", "point", "of", "the", "downloaded", "image", "should", "be", "." ]
python
train
chrisjrn/registrasion
registrasion/views.py
https://github.com/chrisjrn/registrasion/blob/461d5846c6f9f3b7099322a94f5d9911564448e4/registrasion/views.py#L690-L730
def invoice_access(request, access_code): ''' Redirects to an invoice for the attendee that matches the given access code, if any. If the attendee has multiple invoices, we use the following tie-break: - If there's an unpaid invoice, show that, otherwise - If there's a paid invoice, show the most recent one, otherwise - Show the most recent invoid of all Arguments: access_code (castable to int): The access code for the user whose invoice you want to see. Returns: redirect: Redirect to the selected invoice for that user. Raises: Http404: If the user has no invoices. ''' invoices = commerce.Invoice.objects.filter( user__attendee__access_code=access_code, ).order_by("-issue_time") if not invoices: raise Http404() unpaid = invoices.filter(status=commerce.Invoice.STATUS_UNPAID) paid = invoices.filter(status=commerce.Invoice.STATUS_PAID) if unpaid: invoice = unpaid[0] # (should only be 1 unpaid invoice?) elif paid: invoice = paid[0] # Most recent paid invoice else: invoice = invoices[0] # Most recent of any invoices return redirect("invoice", invoice.id, access_code)
[ "def", "invoice_access", "(", "request", ",", "access_code", ")", ":", "invoices", "=", "commerce", ".", "Invoice", ".", "objects", ".", "filter", "(", "user__attendee__access_code", "=", "access_code", ",", ")", ".", "order_by", "(", "\"-issue_time\"", ")", "...
Redirects to an invoice for the attendee that matches the given access code, if any. If the attendee has multiple invoices, we use the following tie-break: - If there's an unpaid invoice, show that, otherwise - If there's a paid invoice, show the most recent one, otherwise - Show the most recent invoid of all Arguments: access_code (castable to int): The access code for the user whose invoice you want to see. Returns: redirect: Redirect to the selected invoice for that user. Raises: Http404: If the user has no invoices.
[ "Redirects", "to", "an", "invoice", "for", "the", "attendee", "that", "matches", "the", "given", "access", "code", "if", "any", "." ]
python
test
kyuupichan/aiorpcX
aiorpcx/socks.py
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/socks.py#L395-L427
async def create_connection(self, protocol_factory, host, port, *, resolve=False, ssl=None, family=0, proto=0, flags=0): '''Set up a connection to (host, port) through the proxy. If resolve is True then host is resolved locally with getaddrinfo using family, proto and flags, otherwise the proxy is asked to resolve host. The function signature is similar to loop.create_connection() with the same result. The attribute _address is set on the protocol to the address of the successful remote connection. Additionally raises SOCKSError if something goes wrong with the proxy handshake. ''' loop = asyncio.get_event_loop() if resolve: remote_addresses = [NetAddress(info[4][0], info[4][1]) for info in await loop.getaddrinfo(host, port, family=family, proto=proto, type=socket.SOCK_STREAM, flags=flags)] else: remote_addresses = [NetAddress(host, port)] sock, remote_address = await self._connect(remote_addresses) def set_address(): protocol = protocol_factory() protocol._proxy = self protocol._remote_address = remote_address return protocol return await loop.create_connection(set_address, sock=sock, ssl=ssl, server_hostname=host if ssl else None)
[ "async", "def", "create_connection", "(", "self", ",", "protocol_factory", ",", "host", ",", "port", ",", "*", ",", "resolve", "=", "False", ",", "ssl", "=", "None", ",", "family", "=", "0", ",", "proto", "=", "0", ",", "flags", "=", "0", ")", ":",...
Set up a connection to (host, port) through the proxy. If resolve is True then host is resolved locally with getaddrinfo using family, proto and flags, otherwise the proxy is asked to resolve host. The function signature is similar to loop.create_connection() with the same result. The attribute _address is set on the protocol to the address of the successful remote connection. Additionally raises SOCKSError if something goes wrong with the proxy handshake.
[ "Set", "up", "a", "connection", "to", "(", "host", "port", ")", "through", "the", "proxy", "." ]
python
train
bram85/topydo
topydo/lib/MultiCommand.py
https://github.com/bram85/topydo/blob/b59fcfca5361869a6b78d4c9808c7c6cd0a18b58/topydo/lib/MultiCommand.py#L64-L78
def get_todos(self): """ Gets todo objects from supplied todo IDs. """ if self.is_expression: self.get_todos_from_expr() else: if self.last_argument: numbers = self.args[:-1] else: numbers = self.args for number in numbers: try: self.todos.append(self.todolist.todo(number)) except InvalidTodoException: self.invalid_numbers.append(number)
[ "def", "get_todos", "(", "self", ")", ":", "if", "self", ".", "is_expression", ":", "self", ".", "get_todos_from_expr", "(", ")", "else", ":", "if", "self", ".", "last_argument", ":", "numbers", "=", "self", ".", "args", "[", ":", "-", "1", "]", "els...
Gets todo objects from supplied todo IDs.
[ "Gets", "todo", "objects", "from", "supplied", "todo", "IDs", "." ]
python
train
saltstack/salt
salt/utils/vsan.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/vsan.py#L85-L107
def vsan_supported(service_instance): ''' Returns whether vsan is supported on the vCenter: api version needs to be 6 or higher service_instance Service instance to the host or vCenter ''' try: api_version = service_instance.content.about.apiVersion except vim.fault.NoPermission as exc: log.exception(exc) raise VMwareApiError('Not enough permissions. Required privilege: ' '{0}'.format(exc.privilegeId)) except vim.fault.VimFault as exc: log.exception(exc) raise VMwareApiError(exc.msg) except vmodl.RuntimeFault as exc: log.exception(exc) raise VMwareRuntimeError(exc.msg) if int(api_version.split('.')[0]) < 6: return False return True
[ "def", "vsan_supported", "(", "service_instance", ")", ":", "try", ":", "api_version", "=", "service_instance", ".", "content", ".", "about", ".", "apiVersion", "except", "vim", ".", "fault", ".", "NoPermission", "as", "exc", ":", "log", ".", "exception", "(...
Returns whether vsan is supported on the vCenter: api version needs to be 6 or higher service_instance Service instance to the host or vCenter
[ "Returns", "whether", "vsan", "is", "supported", "on", "the", "vCenter", ":", "api", "version", "needs", "to", "be", "6", "or", "higher" ]
python
train
nabla-c0d3/sslyze
setup.py
https://github.com/nabla-c0d3/sslyze/blob/0fb3ae668453d7ecf616d0755f237ca7be9f62fa/setup.py#L23-L28
def get_long_description(): """Convert the README file into the long description. """ with open(path.join(root_path, 'README.md'), encoding='utf-8') as f: long_description = f.read() return long_description
[ "def", "get_long_description", "(", ")", ":", "with", "open", "(", "path", ".", "join", "(", "root_path", ",", "'README.md'", ")", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "long_description", "=", "f", ".", "read", "(", ")", "return", "lon...
Convert the README file into the long description.
[ "Convert", "the", "README", "file", "into", "the", "long", "description", "." ]
python
train
quantopian/zipline
zipline/pipeline/classifiers/classifier.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/classifiers/classifier.py#L83-L116
def eq(self, other): """ Construct a Filter returning True for asset/date pairs where the output of ``self`` matches ``other``. """ # We treat this as an error because missing_values have NaN semantics, # which means this would return an array of all False, which is almost # certainly not what the user wants. if other == self.missing_value: raise ValueError( "Comparison against self.missing_value ({value!r}) in" " {typename}.eq().\n" "Missing values have NaN semantics, so the " "requested comparison would always produce False.\n" "Use the isnull() method to check for missing values.".format( value=other, typename=(type(self).__name__), ) ) if isinstance(other, Number) != (self.dtype == int64_dtype): raise InvalidClassifierComparison(self, other) if isinstance(other, Number): return NumExprFilter.create( "x_0 == {other}".format(other=int(other)), binds=(self,), ) else: return ArrayPredicate( term=self, op=operator.eq, opargs=(other,), )
[ "def", "eq", "(", "self", ",", "other", ")", ":", "# We treat this as an error because missing_values have NaN semantics,", "# which means this would return an array of all False, which is almost", "# certainly not what the user wants.", "if", "other", "==", "self", ".", "missing_val...
Construct a Filter returning True for asset/date pairs where the output of ``self`` matches ``other``.
[ "Construct", "a", "Filter", "returning", "True", "for", "asset", "/", "date", "pairs", "where", "the", "output", "of", "self", "matches", "other", "." ]
python
train
ml4ai/delphi
delphi/analysis/comparison/utils.py
https://github.com/ml4ai/delphi/blob/6d03d8aafeab99610387c51b89c99738ff2abbe3/delphi/analysis/comparison/utils.py#L6-L10
def draw_graph(G: nx.DiGraph, filename: str): """ Draw a networkx graph with Pygraphviz. """ A = to_agraph(G) A.graph_attr["rankdir"] = "LR" A.draw(filename, prog="dot")
[ "def", "draw_graph", "(", "G", ":", "nx", ".", "DiGraph", ",", "filename", ":", "str", ")", ":", "A", "=", "to_agraph", "(", "G", ")", "A", ".", "graph_attr", "[", "\"rankdir\"", "]", "=", "\"LR\"", "A", ".", "draw", "(", "filename", ",", "prog", ...
Draw a networkx graph with Pygraphviz.
[ "Draw", "a", "networkx", "graph", "with", "Pygraphviz", "." ]
python
train
PetrochukM/PyTorch-NLP
torchnlp/nn/lock_dropout.py
https://github.com/PetrochukM/PyTorch-NLP/blob/5f7320da5c8d781df072fab3f7e421c6347e5bfa/torchnlp/nn/lock_dropout.py#L52-L64
def forward(self, x): """ Args: x (:class:`torch.FloatTensor` [batch size, sequence length, rnn hidden size]): Input to apply dropout too. """ if not self.training or not self.p: return x x = x.clone() mask = x.new_empty(1, x.size(1), x.size(2), requires_grad=False).bernoulli_(1 - self.p) mask = mask.div_(1 - self.p) mask = mask.expand_as(x) return x * mask
[ "def", "forward", "(", "self", ",", "x", ")", ":", "if", "not", "self", ".", "training", "or", "not", "self", ".", "p", ":", "return", "x", "x", "=", "x", ".", "clone", "(", ")", "mask", "=", "x", ".", "new_empty", "(", "1", ",", "x", ".", ...
Args: x (:class:`torch.FloatTensor` [batch size, sequence length, rnn hidden size]): Input to apply dropout too.
[ "Args", ":", "x", "(", ":", "class", ":", "torch", ".", "FloatTensor", "[", "batch", "size", "sequence", "length", "rnn", "hidden", "size", "]", ")", ":", "Input", "to", "apply", "dropout", "too", "." ]
python
train
DataONEorg/d1_python
gmn/src/d1_gmn/app/sciobj_store.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/sciobj_store.py#L134-L140
def open_sciobj_file_by_pid(pid, write=False): """Open the file containing the Science Object bytes at the custom location ``abs_path`` in the local filesystem for read.""" abs_path = get_abs_sciobj_file_path_by_pid(pid) if write: d1_common.utils.filesystem.create_missing_directories_for_file(abs_path) return open_sciobj_file_by_path(abs_path, write)
[ "def", "open_sciobj_file_by_pid", "(", "pid", ",", "write", "=", "False", ")", ":", "abs_path", "=", "get_abs_sciobj_file_path_by_pid", "(", "pid", ")", "if", "write", ":", "d1_common", ".", "utils", ".", "filesystem", ".", "create_missing_directories_for_file", "...
Open the file containing the Science Object bytes at the custom location ``abs_path`` in the local filesystem for read.
[ "Open", "the", "file", "containing", "the", "Science", "Object", "bytes", "at", "the", "custom", "location", "abs_path", "in", "the", "local", "filesystem", "for", "read", "." ]
python
train
pyviz/holoviews
holoviews/plotting/plot.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/plot.py#L527-L566
def _traverse_options(cls, obj, opt_type, opts, specs=None, keyfn=None, defaults=True): """ Traverses the supplied object getting all options in opts for the specified opt_type and specs. Also takes into account the plotting class defaults for plot options. If a keyfn is supplied the returned options will be grouped by the returned keys. """ def lookup(x): """ Looks up options for object, including plot defaults. keyfn determines returned key otherwise None key is used. """ options = cls.lookup_options(x, opt_type) selected = {o: options.options[o] for o in opts if o in options.options} if opt_type == 'plot' and defaults: plot = Store.registry[cls.backend].get(type(x)) selected['defaults'] = {o: getattr(plot, o) for o in opts if o not in selected and hasattr(plot, o)} key = keyfn(x) if keyfn else None return (key, selected) # Traverse object and accumulate options by key traversed = obj.traverse(lookup, specs) options = defaultdict(lambda: defaultdict(list)) default_opts = defaultdict(lambda: defaultdict(list)) for key, opts in traversed: defaults = opts.pop('defaults', {}) for opt, v in opts.items(): options[key][opt].append(v) for opt, v in defaults.items(): default_opts[key][opt].append(v) # Merge defaults into dictionary if not explicitly specified for key, opts in default_opts.items(): for opt, v in opts.items(): if opt not in options[key]: options[key][opt] = v return options if keyfn else options[None]
[ "def", "_traverse_options", "(", "cls", ",", "obj", ",", "opt_type", ",", "opts", ",", "specs", "=", "None", ",", "keyfn", "=", "None", ",", "defaults", "=", "True", ")", ":", "def", "lookup", "(", "x", ")", ":", "\"\"\"\n Looks up options for o...
Traverses the supplied object getting all options in opts for the specified opt_type and specs. Also takes into account the plotting class defaults for plot options. If a keyfn is supplied the returned options will be grouped by the returned keys.
[ "Traverses", "the", "supplied", "object", "getting", "all", "options", "in", "opts", "for", "the", "specified", "opt_type", "and", "specs", ".", "Also", "takes", "into", "account", "the", "plotting", "class", "defaults", "for", "plot", "options", ".", "If", ...
python
train
Genida/archan
src/archan/dsm.py
https://github.com/Genida/archan/blob/a026d3105c7e86f30e6c9507b93ceb736684bfdc/src/archan/dsm.py#L24-L31
def validate_square(data, message=None, exception=MatrixError): """Validate that the matrix has equal number of rows and columns.""" rows, columns = len(data), len(data[0]) if data else 0 if message is None: message = 'Number of rows: %s != number of columns: %s in matrix' % ( rows, columns) if rows != columns: raise exception(message)
[ "def", "validate_square", "(", "data", ",", "message", "=", "None", ",", "exception", "=", "MatrixError", ")", ":", "rows", ",", "columns", "=", "len", "(", "data", ")", ",", "len", "(", "data", "[", "0", "]", ")", "if", "data", "else", "0", "if", ...
Validate that the matrix has equal number of rows and columns.
[ "Validate", "that", "the", "matrix", "has", "equal", "number", "of", "rows", "and", "columns", "." ]
python
train
michael-lazar/rtv
rtv/packages/praw/settings.py
https://github.com/michael-lazar/rtv/blob/ccef2af042566ad384977028cf0bde01bc524dda/rtv/packages/praw/settings.py#L25-L43
def _load_configuration(): """Attempt to load settings from various praw.ini files.""" config = configparser.RawConfigParser() module_dir = os.path.dirname(sys.modules[__name__].__file__) if 'APPDATA' in os.environ: # Windows os_config_path = os.environ['APPDATA'] elif 'XDG_CONFIG_HOME' in os.environ: # Modern Linux os_config_path = os.environ['XDG_CONFIG_HOME'] elif 'HOME' in os.environ: # Legacy Linux os_config_path = os.path.join(os.environ['HOME'], '.config') else: os_config_path = None locations = [os.path.join(module_dir, 'praw.ini'), 'praw.ini'] if os_config_path is not None: locations.insert(1, os.path.join(os_config_path, 'praw.ini')) if not config.read(locations): raise Exception('Could not find config file in any of: {0}' .format(locations)) return config
[ "def", "_load_configuration", "(", ")", ":", "config", "=", "configparser", ".", "RawConfigParser", "(", ")", "module_dir", "=", "os", ".", "path", ".", "dirname", "(", "sys", ".", "modules", "[", "__name__", "]", ".", "__file__", ")", "if", "'APPDATA'", ...
Attempt to load settings from various praw.ini files.
[ "Attempt", "to", "load", "settings", "from", "various", "praw", ".", "ini", "files", "." ]
python
train
SheffieldML/GPy
GPy/core/gp.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/core/gp.py#L202-L238
def set_XY(self, X=None, Y=None):
    """
    Set the input / output data of the model.

    This is useful if we wish to change our existing data but maintain
    the same model.

    :param X: input observations (or None to leave X unchanged)
    :type X: np.ndarray
    :param Y: output observations (or None to leave Y unchanged)
    :type Y: np.ndarray
    """
    # Suspend model updates while the data arrays are swapped out.
    self.update_model(False)
    if Y is not None:
        if self.normalizer is not None:
            # Refit the normalizer to the new targets; keep both the raw
            # targets and the normalized copy used internally.
            self.normalizer.scale_by(Y)
            self.Y_normalized = ObsAr(self.normalizer.normalize(Y))
            self.Y = Y
        else:
            self.Y = ObsAr(Y)
            self.Y_normalized = self.Y
    if X is not None:
        if self.X in self.parameters:
            # LVM models: X is itself a model parameter and must be
            # re-linked at its original position in the parameter list.
            if isinstance(self.X, VariationalPosterior):
                assert isinstance(X, type(self.X)), "The given X must have the same type as the X in the model!"
                index = self.X._parent_index_
                self.unlink_parameter(self.X)
                self.X = X
                self.link_parameter(self.X, index=index)
            else:
                # Plain latent-space parameter: wrap the new values in a Param.
                index = self.X._parent_index_
                self.unlink_parameter(self.X)
                from ..core import Param
                self.X = Param('latent mean', X)
                self.link_parameter(self.X, index=index)
        else:
            # Ordinary (observed) inputs are stored as an observable array.
            self.X = ObsAr(X)
    # Re-enable updates; this triggers recomputation with the new data.
    self.update_model(True)
[ "def", "set_XY", "(", "self", ",", "X", "=", "None", ",", "Y", "=", "None", ")", ":", "self", ".", "update_model", "(", "False", ")", "if", "Y", "is", "not", "None", ":", "if", "self", ".", "normalizer", "is", "not", "None", ":", "self", ".", "...
Set the input / output data of the model This is useful if we wish to change our existing data but maintain the same model :param X: input observations :type X: np.ndarray :param Y: output observations :type Y: np.ndarray
[ "Set", "the", "input", "/", "output", "data", "of", "the", "model", "This", "is", "useful", "if", "we", "wish", "to", "change", "our", "existing", "data", "but", "maintain", "the", "same", "model" ]
python
train
robotools/fontParts
Lib/fontParts/base/kerning.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/kerning.py#L365-L374
def _find(self, pair, default=None):
    """
    This is the environment implementation of :attr:`BaseKerning.find`.
    This must return an :ref:`type-int-float` or `default`.
    """
    from fontTools.ufoLib.kerning import lookupKerningValue
    # Group-aware lookup: fontTools resolves class kerning via the
    # font's groups mapping.
    groups = self.font.groups
    return lookupKerningValue(pair, self, groups, fallback=default)
[ "def", "_find", "(", "self", ",", "pair", ",", "default", "=", "None", ")", ":", "from", "fontTools", ".", "ufoLib", ".", "kerning", "import", "lookupKerningValue", "font", "=", "self", ".", "font", "groups", "=", "font", ".", "groups", "return", "lookup...
This is the environment implementation of :attr:`BaseKerning.find`. This must return an :ref:`type-int-float` or `default`.
[ "This", "is", "the", "environment", "implementation", "of", ":", "attr", ":", "BaseKerning", ".", "find", ".", "This", "must", "return", "an", ":", "ref", ":", "type", "-", "int", "-", "float", "or", "default", "." ]
python
train
tensorflow/probability
tensorflow_probability/python/layers/util.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/layers/util.py#L119-L193
def default_mean_field_normal_fn(
    is_singular=False,
    loc_initializer=tf.compat.v1.initializers.random_normal(stddev=0.1),
    untransformed_scale_initializer=tf.compat.v1.initializers.random_normal(
        mean=-3., stddev=0.1),
    loc_regularizer=None,
    untransformed_scale_regularizer=None,
    loc_constraint=None,
    untransformed_scale_constraint=None):
  """Creates a function to build Normal distributions with trainable params.

  This function produces a closure which produces `tfd.Normal` parameterized
  by a `loc` and `scale` each created using `tf.get_variable`.

  Args:
    is_singular: Python `bool`; if `True`, forces the special case limit of
      `scale->0`, i.e., a `Deterministic` distribution.
    loc_initializer: Initializer function for the `loc` parameters.
      The default is `tf.random_normal_initializer(mean=0., stddev=0.1)`.
    untransformed_scale_initializer: Initializer function for the `scale`
      parameters. Default value: `tf.random_normal_initializer(mean=-3.,
      stddev=0.1)`. This implies the softplus-transformed result is
      initialized near `0`, so a `Normal` with that `scale` approximately
      acts like a point mass.
    loc_regularizer: Regularizer function for the `loc` parameters.
    untransformed_scale_regularizer: Regularizer function for the `scale`
      parameters.
    loc_constraint: Optional projection function applied to `loc` after an
      `Optimizer` update. Not safe with asynchronous distributed training.
    untransformed_scale_constraint: Optional projection function applied to
      the `scale` parameters after an `Optimizer` update. Not safe with
      asynchronous distributed training.

  Returns:
    make_normal_fn: Python `callable` which creates a `tfd.Normal` from args:
      `dtype, shape, name, trainable, add_variable_fn`.
  """
  # Delegate variable creation to the shared loc/scale factory so that all
  # initializer/regularizer/constraint plumbing lives in one place.
  loc_scale_fn = default_loc_scale_fn(
      is_singular=is_singular,
      loc_initializer=loc_initializer,
      untransformed_scale_initializer=untransformed_scale_initializer,
      loc_regularizer=loc_regularizer,
      untransformed_scale_regularizer=untransformed_scale_regularizer,
      loc_constraint=loc_constraint,
      untransformed_scale_constraint=untransformed_scale_constraint)
  def _fn(dtype, shape, name, trainable, add_variable_fn):
    """Creates multivariate `Deterministic` or `Normal` distribution.

    Args:
      dtype: Type of parameter's event.
      shape: Python `list`-like representing the parameter's event shape.
      name: Python `str` name prepended to any created (or existing)
        `tf.Variable`s.
      trainable: Python `bool` indicating all created `tf.Variable`s should
        be added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.
      add_variable_fn: `tf.get_variable`-like `callable` used to create (or
        access existing) `tf.Variable`s.

    Returns:
      Multivariate `Deterministic` or `Normal` distribution.
    """
    loc, scale = loc_scale_fn(dtype, shape, name, trainable, add_variable_fn)
    if scale is None:
      # is_singular case: scale collapsed to zero -> point mass at loc.
      dist = tfd.Deterministic(loc=loc)
    else:
      dist = tfd.Normal(loc=loc, scale=scale)
    # Fold all batch dims into the event so the result behaves as one
    # multivariate (independent-coordinates) distribution.
    batch_ndims = tf.size(input=dist.batch_shape_tensor())
    return tfd.Independent(dist, reinterpreted_batch_ndims=batch_ndims)
  return _fn
[ "def", "default_mean_field_normal_fn", "(", "is_singular", "=", "False", ",", "loc_initializer", "=", "tf", ".", "compat", ".", "v1", ".", "initializers", ".", "random_normal", "(", "stddev", "=", "0.1", ")", ",", "untransformed_scale_initializer", "=", "tf", "....
Creates a function to build Normal distributions with trainable params. This function produces a closure which produces `tfd.Normal` parameterized by a loc` and `scale` each created using `tf.get_variable`. Args: is_singular: Python `bool` if `True`, forces the special case limit of `scale->0`, i.e., a `Deterministic` distribution. loc_initializer: Initializer function for the `loc` parameters. The default is `tf.random_normal_initializer(mean=0., stddev=0.1)`. untransformed_scale_initializer: Initializer function for the `scale` parameters. Default value: `tf.random_normal_initializer(mean=-3., stddev=0.1)`. This implies the softplus transformed result is initialized near `0`. It allows a `Normal` distribution with `scale` parameter set to this value to approximately act like a point mass. loc_regularizer: Regularizer function for the `loc` parameters. untransformed_scale_regularizer: Regularizer function for the `scale` parameters. loc_constraint: An optional projection function to be applied to the loc after being updated by an `Optimizer`. The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. untransformed_scale_constraint: An optional projection function to be applied to the `scale` parameters after being updated by an `Optimizer` (e.g. used to implement norm constraints or value constraints). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. Returns: make_normal_fn: Python `callable` which creates a `tfd.Normal` using from args: `dtype, shape, name, trainable, add_variable_fn`.
[ "Creates", "a", "function", "to", "build", "Normal", "distributions", "with", "trainable", "params", "." ]
python
test
draperjames/qtpandas
qtpandas/ui/fallback/easygui/boxes/utils.py
https://github.com/draperjames/qtpandas/blob/64294fb69f1839e53dee5ea453337266bfaf24f4/qtpandas/ui/fallback/easygui/boxes/utils.py#L74-L80
def exception_format():
    """Convert the current exception info into a string suitable for display.

    Must be called from inside an ``except`` block (or while an exception
    is otherwise active); returns the full formatted traceback, matching
    what the interpreter would print.
    """
    # Call sys.exc_info() once and star-unpack the (type, value, traceback)
    # triple, instead of the original's three separate calls.
    return "".join(traceback.format_exception(*sys.exc_info()))
[ "def", "exception_format", "(", ")", ":", "return", "\"\"", ".", "join", "(", "traceback", ".", "format_exception", "(", "sys", ".", "exc_info", "(", ")", "[", "0", "]", ",", "sys", ".", "exc_info", "(", ")", "[", "1", "]", ",", "sys", ".", "exc_in...
Convert exception info into a string suitable for display.
[ "Convert", "exception", "info", "into", "a", "string", "suitable", "for", "display", "." ]
python
train
LonamiWebs/Telethon
telethon/tl/custom/conversation.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/tl/custom/conversation.py#L249-L295
async def wait_event(self, event, *, timeout=None):
    """
    Waits for a custom event to occur. Timeouts still apply.

    Unless you're certain that your code will run fast enough,
    generally you should get a "handle" of this special coroutine
    before acting. Generally, you should do this:

    >>> from telethon import TelegramClient, events
    >>>
    >>> client = TelegramClient(...)
    >>>
    >>> async def main():
    >>>     async with client.conversation(...) as conv:
    >>>         response = conv.wait_event(events.NewMessage(incoming=True))
    >>>         await conv.send_message('Hi')
    >>>         response = await response

    This way your event can be registered before acting,
    since the response may arrive before your event was registered. It
    depends on your use case since this also means the event can arrive
    before you send a previous action.
    """
    start_time = time.time()
    # Accept either an event class or an already-built event instance.
    if isinstance(event, type):
        event = event()
    await event.resolve(self._client)

    # Class-level counter gives each pending custom event a unique key.
    counter = Conversation._custom_counter
    Conversation._custom_counter += 1

    future = asyncio.Future(loop=self._client.loop)

    # We need the `async def` here because we want to block on the future
    # from `_get_result` by using `await` on it. If we returned the future
    # immediately we would `del` from `_custom` too early.
    async def result():
        try:
            return await self._get_result(future, start_time, timeout)
        finally:
            # Always unregister, even on timeout/cancellation.
            del self._custom[counter]

    # Register only after the result coroutine exists, then block on it.
    self._custom[counter] = (event, future)
    return await result()
[ "async", "def", "wait_event", "(", "self", ",", "event", ",", "*", ",", "timeout", "=", "None", ")", ":", "start_time", "=", "time", ".", "time", "(", ")", "if", "isinstance", "(", "event", ",", "type", ")", ":", "event", "=", "event", "(", ")", ...
Waits for a custom event to occur. Timeouts still apply. Unless you're certain that your code will run fast enough, generally you should get a "handle" of this special coroutine before acting. Generally, you should do this: >>> from telethon import TelegramClient, events >>> >>> client = TelegramClient(...) >>> >>> async def main(): >>> async with client.conversation(...) as conv: >>> response = conv.wait_event(events.NewMessage(incoming=True)) >>> await conv.send_message('Hi') >>> response = await response This way your event can be registered before acting, since the response may arrive before your event was registered. It depends on your use case since this also means the event can arrive before you send a previous action.
[ "Waits", "for", "a", "custom", "event", "to", "occur", ".", "Timeouts", "still", "apply", "." ]
python
train
robotpy/pyfrc
lib/pyfrc/physics/drivetrains.py
https://github.com/robotpy/pyfrc/blob/7672ea3f17c8d4b702a9f18a7372d95feee7e37d/lib/pyfrc/physics/drivetrains.py#L326-L342
def mecanum_drivetrain(
    lr_motor,
    rr_motor,
    lf_motor,
    rf_motor,
    x_wheelbase=2,
    y_wheelbase=3,
    speed=5,
    deadzone=None,
):
    """
    .. deprecated:: 2018.2.0
       Use :class:`MecanumDrivetrain` instead
    """
    # Thin compatibility shim: build the class-based drivetrain and
    # delegate the vector computation to it.
    drivetrain = MecanumDrivetrain(x_wheelbase, y_wheelbase, speed, deadzone)
    return drivetrain.get_vector(lr_motor, rr_motor, lf_motor, rf_motor)
[ "def", "mecanum_drivetrain", "(", "lr_motor", ",", "rr_motor", ",", "lf_motor", ",", "rf_motor", ",", "x_wheelbase", "=", "2", ",", "y_wheelbase", "=", "3", ",", "speed", "=", "5", ",", "deadzone", "=", "None", ",", ")", ":", "return", "MecanumDrivetrain",...
.. deprecated:: 2018.2.0 Use :class:`MecanumDrivetrain` instead
[ "..", "deprecated", "::", "2018", ".", "2", ".", "0", "Use", ":", "class", ":", "MecanumDrivetrain", "instead" ]
python
train
jstitch/MambuPy
MambuPy/rest/mambutask.py
https://github.com/jstitch/MambuPy/blob/2af98cc12e7ed5ec183b3e97644e880e70b79ee8/MambuPy/rest/mambutask.py#L107-L130
def convertDict2Attrs(self, *args, **kwargs):
    """The trick for iterable Mambu Objects comes here:

    You iterate over each element of the responded List from Mambu,
    and create a Mambu Task object for each one, initializing them one
    at a time, and changing the attrs attribute (which just holds a
    list of plain dictionaries) with a MambuTask just created.
    """
    for n,a in enumerate(self.attrs):
        # NOTE: elements of self.attrs are replaced in place while
        # iterating; safe here only because enumerate indexes by position
        # and the list length never changes.
        try:
            # Propagate connection params (if any) to each child object.
            params = self.params
        except AttributeError as aerr:
            params = {}
        kwargs.update(params)
        try:
            # urlfunc=None/entid=None builds an empty shell object; the
            # data is filled in below via init().
            task = self.mambutaskclass(urlfunc=None, entid=None, *args, **kwargs)
        except AttributeError as ae:
            # Lazily default the child class the first time through.
            self.mambutaskclass = MambuTask
            task = self.mambutaskclass(urlfunc=None, entid=None, *args, **kwargs)

        # Populate the shell from the plain dict and swap it in.
        task.init(a, *args, **kwargs)
        self.attrs[n] = task
[ "def", "convertDict2Attrs", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "n", ",", "a", "in", "enumerate", "(", "self", ".", "attrs", ")", ":", "# ok ok, I'm modifying elements of a list while iterating it. BAD PRACTICE!", "try", ":",...
The trick for iterable Mambu Objects comes here: You iterate over each element of the responded List from Mambu, and create a Mambu Task object for each one, initializing them one at a time, and changing the attrs attribute (which just holds a list of plain dictionaries) with a MambuTask just created.
[ "The", "trick", "for", "iterable", "Mambu", "Objects", "comes", "here", ":" ]
python
train
pandas-dev/pandas
pandas/core/strings.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/strings.py#L1676-L1697
def str_decode(arr, encoding, errors="strict"):
    """
    Decode character string in the Series/Index using indicated encoding.

    Equivalent to :meth:`str.decode` in python2 and :meth:`bytes.decode` in
    python3.

    Parameters
    ----------
    encoding : str
    errors : str, optional

    Returns
    -------
    Series or Index
    """
    if encoding in _cpython_optimized_decoders:
        # CPython special-cases these encodings, so calling bytes.decode
        # directly avoids the codecs-registry indirection.
        def decode_one(x):
            return x.decode(encoding, errors)
    else:
        decoder = codecs.getdecoder(encoding)

        def decode_one(x):
            return decoder(x, errors)[0]
    return _na_map(decode_one, arr)
[ "def", "str_decode", "(", "arr", ",", "encoding", ",", "errors", "=", "\"strict\"", ")", ":", "if", "encoding", "in", "_cpython_optimized_decoders", ":", "# CPython optimized implementation", "f", "=", "lambda", "x", ":", "x", ".", "decode", "(", "encoding", "...
Decode character string in the Series/Index using indicated encoding. Equivalent to :meth:`str.decode` in python2 and :meth:`bytes.decode` in python3. Parameters ---------- encoding : str errors : str, optional Returns ------- Series or Index
[ "Decode", "character", "string", "in", "the", "Series", "/", "Index", "using", "indicated", "encoding", ".", "Equivalent", "to", ":", "meth", ":", "str", ".", "decode", "in", "python2", "and", ":", "meth", ":", "bytes", ".", "decode", "in", "python3", "....
python
train
maxcountryman/atomos
atomos/atom.py
https://github.com/maxcountryman/atomos/blob/418746c69134efba3c4f999405afe9113dee4827/atomos/atom.py#L174-L183
def reset(self, newval):
    '''
    Resets the atom's value to `newval`, returning `newval`.

    :param newval: The new value to set.
    '''
    # Capture the previous value first so watchers see the transition.
    previous = self._state.get()
    self._state.set(newval)
    self.notify_watches(previous, newval)
    return newval
[ "def", "reset", "(", "self", ",", "newval", ")", ":", "oldval", "=", "self", ".", "_state", ".", "get", "(", ")", "self", ".", "_state", ".", "set", "(", "newval", ")", "self", ".", "notify_watches", "(", "oldval", ",", "newval", ")", "return", "ne...
Resets the atom's value to `newval`, returning `newval`. :param newval: The new value to set.
[ "Resets", "the", "atom", "s", "value", "to", "newval", "returning", "newval", "." ]
python
train
genialis/resolwe
resolwe/flow/managers/listener.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/managers/listener.py#L153-L158
def _queue_response_channel(self, obj):
    """Generate the feedback channel name from the object's id.

    :param obj: The Channels message object.
    """
    # Channel name is "<queue_response>.<data id>".
    base = state.MANAGER_EXECUTOR_CHANNELS.queue_response
    data_id = obj[ExecutorProtocol.DATA_ID]
    return '{}.{}'.format(base, data_id)
[ "def", "_queue_response_channel", "(", "self", ",", "obj", ")", ":", "return", "'{}.{}'", ".", "format", "(", "state", ".", "MANAGER_EXECUTOR_CHANNELS", ".", "queue_response", ",", "obj", "[", "ExecutorProtocol", ".", "DATA_ID", "]", ")" ]
Generate the feedback channel name from the object's id. :param obj: The Channels message object.
[ "Generate", "the", "feedback", "channel", "name", "from", "the", "object", "s", "id", "." ]
python
train
markovmodel/msmtools
msmtools/util/annotators.py
https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/util/annotators.py#L232-L248
def estimation_required(func, *args, **kw):
    """
    Decorator checking the self._estimated flag in an Estimator instance,
    raising a value error if the decorated function is called before
    estimator.estimate() has been called.

    If mixed with a property-annotation, this annotation needs to come first
    in the chain of function calls, i.e.,

    @property
    @estimation_required
    def func(self):
        ....
    """
    # For bound methods the first positional argument is the instance.
    instance = args[0] if args else None
    if instance and hasattr(instance, '_estimated') and not instance._estimated:
        raise ValueError("Tried calling %s on %s which requires the estimator to be estimated."
                         % (func.__name__, instance.__class__.__name__))
    return func(*args, **kw)
[ "def", "estimation_required", "(", "func", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "self", "=", "args", "[", "0", "]", "if", "len", "(", "args", ")", ">", "0", "else", "None", "if", "self", "and", "hasattr", "(", "self", ",", "'_estimate...
Decorator checking the self._estimated flag in an Estimator instance, raising a value error if the decorated function is called before estimator.estimate() has been called. If mixed with a property-annotation, this annotation needs to come first in the chain of function calls, i.e., @property @estimation_required def func(self): ....
[ "Decorator", "checking", "the", "self", ".", "_estimated", "flag", "in", "an", "Estimator", "instance", "raising", "a", "value", "error", "if", "the", "decorated", "function", "is", "called", "before", "estimator", ".", "estimate", "()", "has", "been", "called...
python
train
hobson/aima
aima/learning.py
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/learning.py#L638-L646
def Majority(k, n):
    """Return a DataSet with n k-bit examples of the majority problem:
    k random bits followed by a 1 if more than half the bits are 1, else 0."""
    examples = []
    for _ in range(n):
        # k random bits, then the majority label appended as the target.
        bits = [random.choice([0, 1]) for _ in range(k)]
        bits.append(int(sum(bits) > k / 2))
        examples.append(bits)
    return DataSet(name="majority", examples=examples)
[ "def", "Majority", "(", "k", ",", "n", ")", ":", "examples", "=", "[", "]", "for", "i", "in", "range", "(", "n", ")", ":", "bits", "=", "[", "random", ".", "choice", "(", "[", "0", ",", "1", "]", ")", "for", "i", "in", "range", "(", "k", ...
Return a DataSet with n k-bit examples of the majority problem: k random bits followed by a 1 if more than half the bits are 1, else 0.
[ "Return", "a", "DataSet", "with", "n", "k", "-", "bit", "examples", "of", "the", "majority", "problem", ":", "k", "random", "bits", "followed", "by", "a", "1", "if", "more", "than", "half", "the", "bits", "are", "1", "else", "0", "." ]
python
valid
chaoss/grimoirelab-elk
grimoire_elk/enriched/study_ceres_onion.py
https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/study_ceres_onion.py#L285-L326
def __build_dataframe(self, timing, project_name=None, org_name=None):
    """Build a DataFrame from a time bucket.

    :param timing: a date-histogram bucket (presumably an elasticsearch-dsl
        aggregation bucket -- it exposes ``key_as_string`` and nested
        sub-aggregations; TODO confirm against caller)
    :param project_name: project label for all rows ("_Global_" if None)
    :param org_name: organization label for all rows ("_Global_" if None)
    :return: pandas.DataFrame with one row per author in the bucket
    """
    date_list = []
    uuid_list = []
    name_list = []
    contribs_list = []
    latest_ts_list = []

    logger.debug(self.__log_prefix + " timing: " + timing.key_as_string)

    # One row per author aggregated inside this time frame.
    for author in timing[self.AUTHOR_UUID].buckets:
        latest_ts_list.append(timing[self.LATEST_TS].value_as_string)
        date_list.append(timing.key_as_string)
        uuid_list.append(author.key)
        # Author name may be missing from the sub-aggregation.
        if author[self.AUTHOR_NAME] and author[self.AUTHOR_NAME].buckets \
                and len(author[self.AUTHOR_NAME].buckets) > 0:
            name_list.append(author[self.AUTHOR_NAME].buckets[0].key)
        else:
            name_list.append("Unknown")
        contribs_list.append(author[self.CONTRIBUTIONS].value)

    df = pandas.DataFrame()
    df[self.TIMEFRAME] = date_list
    df[self.AUTHOR_UUID] = uuid_list
    df[self.AUTHOR_NAME] = name_list
    df[self.CONTRIBUTIONS] = contribs_list
    df[self.TIMESTAMP] = latest_ts_list

    # Global placeholders when no specific project/org was requested.
    if not project_name:
        project_name = "_Global_"
    df[self.PROJECT] = project_name

    if not org_name:
        org_name = "_Global_"
    df[self.AUTHOR_ORG] = org_name

    return df
[ "def", "__build_dataframe", "(", "self", ",", "timing", ",", "project_name", "=", "None", ",", "org_name", "=", "None", ")", ":", "date_list", "=", "[", "]", "uuid_list", "=", "[", "]", "name_list", "=", "[", "]", "contribs_list", "=", "[", "]", "lates...
Build a DataFrame from a time bucket. :param timing: :param project_name: :param org_name: :return:
[ "Build", "a", "DataFrame", "from", "a", "time", "bucket", "." ]
python
train
jjkester/django-auditlog
src/auditlog/middleware.py
https://github.com/jjkester/django-auditlog/blob/a22978e05b7ed43b87e4b6109550b86c738578fe/src/auditlog/middleware.py#L58-L65
def process_exception(self, request, exception):
    """
    Disconnects the signal receiver to prevent it from staying active in case of an exception.
    """
    # Nothing to clean up if no auditlog receiver was attached on this thread.
    if not hasattr(threadlocal, 'auditlog'):
        return None
    pre_save.disconnect(sender=LogEntry, dispatch_uid=threadlocal.auditlog['signal_duid'])
    return None
[ "def", "process_exception", "(", "self", ",", "request", ",", "exception", ")", ":", "if", "hasattr", "(", "threadlocal", ",", "'auditlog'", ")", ":", "pre_save", ".", "disconnect", "(", "sender", "=", "LogEntry", ",", "dispatch_uid", "=", "threadlocal", "."...
Disconnects the signal receiver to prevent it from staying active in case of an exception.
[ "Disconnects", "the", "signal", "receiver", "to", "prevent", "it", "from", "staying", "active", "in", "case", "of", "an", "exception", "." ]
python
train
biolink/ontobio
ontobio/lexmap.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/lexmap.py#L432-L467
def compare_to_xrefs(self, xg1, xg2):
    """
    Compares a base xref graph with another one.

    For every edge (i, j) in ``xg1``, annotates the edge in-place with
    four flags describing whether the same mapping exists in ``xg2``:
    ``left_novel`` / ``right_novel`` (no xref from that side into the
    other ontology in xg2) and ``left_consistent`` / ``right_consistent``
    (the exact same counterpart appears in xg2).
    """
    # NOTE(review): `ont` is assigned but never used below.
    ont = self.merged_ontology
    for (i,j,d) in xg1.edges(data=True):
        ont_left = self._id_to_ontology(i)
        ont_right = self._id_to_ontology(j)
        # Scan xg2 neighbors of i that land in the same ontology as j.
        unique_lr = True
        num_xrefs_left = 0
        same_left = False
        if i in xg2:
            for j2 in xg2.neighbors(i):
                ont_right2 = self._id_to_ontology(j2)
                if ont_right2 == ont_right:
                    unique_lr = False
                    num_xrefs_left += 1
                    if j2 == j:
                        same_left = True
        # Symmetric scan: xg2 neighbors of j in the same ontology as i.
        unique_rl = True
        num_xrefs_right = 0
        same_right = False
        if j in xg2:
            for i2 in xg2.neighbors(j):
                ont_left2 = self._id_to_ontology(i2)
                if ont_left2 == ont_left:
                    unique_rl = False
                    num_xrefs_right += 1
                    if i2 == i:
                        same_right = True
        # Annotate the original (pre-normalization) edge keys.
        (x,y) = d['idpair']
        xg1[x][y]['left_novel'] = num_xrefs_left==0
        xg1[x][y]['right_novel'] = num_xrefs_right==0
        xg1[x][y]['left_consistent'] = same_left
        xg1[x][y]['right_consistent'] = same_right
[ "def", "compare_to_xrefs", "(", "self", ",", "xg1", ",", "xg2", ")", ":", "ont", "=", "self", ".", "merged_ontology", "for", "(", "i", ",", "j", ",", "d", ")", "in", "xg1", ".", "edges", "(", "data", "=", "True", ")", ":", "ont_left", "=", "self"...
Compares a base xref graph with another one
[ "Compares", "a", "base", "xref", "graph", "with", "another", "one" ]
python
train
dariusbakunas/rawdisk
rawdisk/plugins/filesystems/ntfs/bootsector.py
https://github.com/dariusbakunas/rawdisk/blob/1dc9d0b377fe5da3c406ccec4abc238c54167403/rawdisk/plugins/filesystems/ntfs/bootsector.py#L63-L69
def mft_offset(self):
    """
    Returns:
        int: MFT Table offset from the beginning of the partition in bytes
    """
    # cluster size (bytes) * cluster index of the MFT = byte offset.
    cluster_size = self.bpb.bytes_per_sector * self.bpb.sectors_per_cluster
    return cluster_size * self.extended_bpb.mft_cluster
[ "def", "mft_offset", "(", "self", ")", ":", "return", "self", ".", "bpb", ".", "bytes_per_sector", "*", "self", ".", "bpb", ".", "sectors_per_cluster", "*", "self", ".", "extended_bpb", ".", "mft_cluster" ]
Returns: int: MFT Table offset from the beginning of the partition in bytes
[ "Returns", ":", "int", ":", "MFT", "Table", "offset", "from", "the", "beginning", "of", "the", "partition", "in", "bytes" ]
python
train
rootpy/rootpy
rootpy/plotting/style/cmstdr/labels.py
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/plotting/style/cmstdr/labels.py#L15-L50
def CMS_label(text="Preliminary 2012", sqrts=8, pad=None):
    """
    Add a 'CMS Preliminary' style label to the current Pad. The blurbs
    are drawn in the top margin. The label "CMS " + text is drawn in the
    upper left. If sqrts is None, it will be omitted. Otherwise, it will
    be drawn in the upper right.

    Returns the (label, sqrts_label) TLatex pair; sqrts_label is None
    when no sqrt(s) blurb was drawn.
    """
    if pad is None:
        pad = ROOT.gPad
    # Restore whatever canvas/pad was current once we're done drawing.
    with preserve_current_canvas():
        pad.cd()
        left_margin = pad.GetLeftMargin()
        top_margin = pad.GetTopMargin()
        # Vertical center of the top margin (NDC coordinates).
        ypos = 1 - top_margin / 2.
        l = ROOT.TLatex(left_margin, ypos, "CMS " + text)
        l.SetTextAlign(12)  # left-middle
        l.SetNDC()
        # The text is 90% as tall as the margin it lives in.
        l.SetTextSize(0.90 * top_margin)
        l.Draw()
        # Keep ROOT from garbage-collecting the label while the pad lives.
        keepalive(pad, l)
        # Draw sqrt(s) label, if desired
        if sqrts:
            right_margin = pad.GetRightMargin()
            p = ROOT.TLatex(1 - right_margin, ypos,
                            "#sqrt{{s}}={0:d}TeV".format(sqrts))
            p.SetTextAlign(32)  # right-middle
            p.SetNDC()
            p.SetTextSize(0.90 * top_margin)
            p.Draw()
            keepalive(pad, p)
        else:
            p = None
        pad.Modified()
        pad.Update()
    return l, p
[ "def", "CMS_label", "(", "text", "=", "\"Preliminary 2012\"", ",", "sqrts", "=", "8", ",", "pad", "=", "None", ")", ":", "if", "pad", "is", "None", ":", "pad", "=", "ROOT", ".", "gPad", "with", "preserve_current_canvas", "(", ")", ":", "pad", ".", "c...
Add a 'CMS Preliminary' style label to the current Pad. The blurbs are drawn in the top margin. The label "CMS " + text is drawn in the upper left. If sqrts is None, it will be omitted. Otherwise, it will be drawn in the upper right.
[ "Add", "a", "CMS", "Preliminary", "style", "label", "to", "the", "current", "Pad", "." ]
python
train
NetEaseGame/ATX
atx/imutils.py
https://github.com/NetEaseGame/ATX/blob/f4415c57b45cb0730e08899cbc92a2af1c047ffb/atx/imutils.py#L47-L69
def open(image):
    '''
    Args:
        - image: support many type. filepath or url or data:image/png:base64

    Return:
        Pattern

    Raises
        IOError
    '''
    # Non-string inputs are assumed to already be decoded image data.
    if not isinstance(image, six.string_types):
        return image
    name = image
    if name.startswith('data:image/'):
        return _open_data_url(name)
    if re.match(r'^https?://', name):
        return url_to_image(name)
    if os.path.isfile(name):
        img = cv2.imread(name)
        if img is None:
            # File exists but OpenCV could not decode it.
            raise IOError("Image format error: %s" % name)
        return img
    raise IOError("Open image(%s) not found" % name)
[ "def", "open", "(", "image", ")", ":", "if", "isinstance", "(", "image", ",", "six", ".", "string_types", ")", ":", "name", "=", "image", "if", "name", ".", "startswith", "(", "'data:image/'", ")", ":", "return", "_open_data_url", "(", "name", ")", "if...
Args: - image: supports many types: a filepath, a URL, or a data:image/png;base64 string Return: Pattern Raises: IOError
[ "Args", ":", "-", "image", ":", "support", "many", "type", ".", "filepath", "or", "url", "or", "data", ":", "image", "/", "png", ":", "base64", "Return", ":", "Pattern", "Raises", "IOError" ]
python
train
MacHu-GWU/angora-project
angora/markup/html.py
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/markup/html.py#L54-L83
def input_ratio(self, name, choices, labels, multi_line=False):
    """Render a Jinja2 template snippet for a radio-button group.

    Produces, for each (choice, label) pair, a pre-checked radio input
    whose checked state follows the template variable named ``name``::

        {% for value, label in [('choice1', '选项1'), ('choice2', '选项2')] %}
            {% if ratio == value %}
                {% set checked = "checked" %}
            {% else %}
                {% set checked = "" %}
            {% endif %}
            <input type="radio" name="ratio" value="{{value}}" {{checked}} /> {{label}}
        {% endfor %}
    """
    if len(choices) != len(labels):
        raise ValueError("The size of 'choices' and 'labels' doesn't match!")
    choice_label = list(zip(choices, labels))

    indent1 = self.tab
    indent2 = self.tab * 2
    # <br> after each input when rendering one option per line.
    line_break = "<br>" if multi_line else ""

    lines = [
        '{%% for value, label in %s %%}' % repr(choice_label),
        indent1 + '{%% if %s == value %%}' % name,
        indent2 + '{% set checked = "checked" %}',
        indent1 + '{% else %}',
        indent2 + '{% set checked = "" %}',
        indent1 + '{% endif %}',
        indent1 + '<input type="radio" name="%s" value="{{value}}" {{checked}} /> {{label}} %s' % (name, line_break),
        '{% endfor %}',
    ]
    return "\n".join(lines)
[ "def", "input_ratio", "(", "self", ",", "name", ",", "choices", ",", "labels", ",", "multi_line", "=", "False", ")", ":", "if", "len", "(", "choices", ")", "!=", "len", "(", "labels", ")", ":", "raise", "ValueError", "(", "\"The size of 'choices' and 'labe...
{% for value, label in [('choice1', '选项1'), ('choice2', '选项2')] %} {% if ratio == value %} {% set checked = "checked" %} {% else %} {% set checked = "" %} {% endif %} <input type="radio" name="ratio" value="{{value}}" {{checked}} /> {{label}} {% endfor %}
[ "{", "%", "for", "value", "label", "in", "[", "(", "choice1", "选项1", ")", "(", "choice2", "选项2", ")", "]", "%", "}", "{", "%", "if", "ratio", "==", "value", "%", "}", "{", "%", "set", "checked", "=", "checked", "%", "}", "{", "%", "else", "%"...
python
train
juiceinc/recipe
recipe/shelf.py
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L561-L566
def filter_ids(self):
    """ Return the ids of the Filter ingredients on this shelf in the
    order in which they were used. """
    filters = [ingredient.id for ingredient in self.values()
               if isinstance(ingredient, Filter)]
    return self._sorted_ingredients(filters)
[ "def", "filter_ids", "(", "self", ")", ":", "return", "self", ".", "_sorted_ingredients", "(", "[", "d", ".", "id", "for", "d", "in", "self", ".", "values", "(", ")", "if", "isinstance", "(", "d", ",", "Filter", ")", "]", ")" ]
Return the ids of the Filter ingredients on this shelf in the order in which they were used.
[ "Return", "the", "Metrics", "on", "this", "shelf", "in", "the", "order", "in", "which", "they", "were", "used", "." ]
python
train
jleclanche/fireplace
fireplace/player.py
https://github.com/jleclanche/fireplace/blob/d0fc0e97e185c0210de86631be20638659c0609e/fireplace/player.py#L184-L190
def can_pay_cost(self, card):
    """Return whether the player can pay the resource cost of *card*.

    When the player currently pays for spells with health, a spell is
    affordable only while the hero's health strictly exceeds its cost;
    everything else is compared against the available mana pool.
    """
    pays_with_health = self.spells_cost_health and card.type == CardType.SPELL
    if pays_with_health:
        # Must survive the payment, hence strict inequality.
        return self.hero.health > card.cost
    return self.mana >= card.cost
[ "def", "can_pay_cost", "(", "self", ",", "card", ")", ":", "if", "self", ".", "spells_cost_health", "and", "card", ".", "type", "==", "CardType", ".", "SPELL", ":", "return", "self", ".", "hero", ".", "health", ">", "card", ".", "cost", "return", "self...
Returns whether the player can pay the resource cost of a card.
[ "Returns", "whether", "the", "player", "can", "pay", "the", "resource", "cost", "of", "a", "card", "." ]
python
train
stephanepechard/projy
projy/templates/DjangoProjectTemplate.py
https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/templates/DjangoProjectTemplate.py#L23-L30
def directories(self):
    """Return the names of directories to be created.

    The layout is the project root directory plus its ``conf`` and
    ``static`` sub-directories.
    """
    root = self.project_name
    return [root, root + '/conf', root + '/static']
[ "def", "directories", "(", "self", ")", ":", "directories_description", "=", "[", "self", ".", "project_name", ",", "self", ".", "project_name", "+", "'/conf'", ",", "self", ".", "project_name", "+", "'/static'", ",", "]", "return", "directories_description" ]
Return the names of directories to be created.
[ "Return", "the", "names", "of", "directories", "to", "be", "created", "." ]
python
train
atlassian-api/atlassian-python-api
atlassian/jira.py
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/jira.py#L100-L108
def user_update(self, username, data):
    """Update user attributes based on json

    :param username: login name of the user to update
    :param data: dict of attribute values, sent as the request body
    :return: server response from the PUT call
    """
    from urllib.parse import quote
    # Percent-encode the username so logins containing reserved
    # characters (spaces, '&', '=', ...) cannot corrupt the query string.
    url = 'rest/api/2/user?username={0}'.format(quote(username))
    return self.put(url, data=data)
[ "def", "user_update", "(", "self", ",", "username", ",", "data", ")", ":", "url", "=", "'rest/api/2/user?username={0}'", ".", "format", "(", "username", ")", "return", "self", ".", "put", "(", "url", ",", "data", "=", "data", ")" ]
Update user attributes based on json :param username: :param data: :return:
[ "Update", "user", "attributes", "based", "on", "json", ":", "param", "username", ":", ":", "param", "data", ":", ":", "return", ":" ]
python
train
django-danceschool/django-danceschool
danceschool/private_events/feeds.py
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/private_events/feeds.py#L122-L159
def json_event_feed(request,location_id=None,room_id=None):
    '''
    The Jquery fullcalendar app requires a JSON news feed, so this function
    creates the feed from upcoming PrivateEvent objects.

    Only staff users receive data, and only while the private calendar
    feed feature flag is enabled; everyone else gets an empty JSON
    object.  Optional ``start``/``end`` GET parameters (``YYYY-MM-DD``)
    bound the window, and ``location_id``/``room_id`` narrow results
    to a single location or room.
    '''
    # Feature-flag and permission gate: return an empty payload rather
    # than an error so the calendar widget degrades quietly.
    if not getConstant('calendar__privateCalendarFeedEnabled') or not request.user.is_staff:
        return JsonResponse({})

    this_user = request.user
    startDate = request.GET.get('start','')
    endDate = request.GET.get('end','')
    # Timezone used when serializing occurrences; falls back to the
    # site TIME_ZONE setting, then UTC.
    timeZone = request.GET.get('timezone',getattr(settings,'TIME_ZONE','UTC'))

    # Date-range filters are collected in a dict so each bound is only
    # applied when the corresponding GET parameter was supplied.
    time_filter_dict_events = {}
    if startDate:
        time_filter_dict_events['startTime__gte'] = ensure_timezone(datetime.strptime(startDate,'%Y-%m-%d'))
    if endDate:
        # +1 day makes the end date inclusive (endTime up to midnight
        # following the requested day).
        time_filter_dict_events['endTime__lte'] = ensure_timezone(datetime.strptime(endDate,'%Y-%m-%d')) + timedelta(days=1)

    instructor_groups = list(this_user.groups.all().values_list('id',flat=True))
    # Visibility rule: an occurrence is shown if its private event is
    # targeted at one of the user's groups, at the user directly, or at
    # no one in particular (no group AND no user restriction).
    filters = Q(event__privateevent__isnull=False) & (
        Q(event__privateevent__displayToGroup__in=instructor_groups) |
        Q(event__privateevent__displayToUsers=this_user) |
        (Q(event__privateevent__displayToGroup__isnull=True) & Q(event__privateevent__displayToUsers__isnull=True))
    )

    if location_id:
        filters = filters & Q(event__location__id=location_id)
    if room_id:
        filters = filters & Q(event__room_id=room_id)

    # NOTE(review): results are ordered newest-first ('-startTime');
    # presumably the fullcalendar client re-sorts — confirm if order matters.
    occurrences = EventOccurrence.objects.filter(filters).filter(**time_filter_dict_events).order_by('-startTime')

    eventlist = [EventFeedItem(x,timeZone=timeZone).__dict__ for x in occurrences]
    # safe=False allows serializing a top-level JSON array.
    return JsonResponse(eventlist,safe=False)
[ "def", "json_event_feed", "(", "request", ",", "location_id", "=", "None", ",", "room_id", "=", "None", ")", ":", "if", "not", "getConstant", "(", "'calendar__privateCalendarFeedEnabled'", ")", "or", "not", "request", ".", "user", ".", "is_staff", ":", "return...
The Jquery fullcalendar app requires a JSON news feed, so this function creates the feed from upcoming PrivateEvent objects
[ "The", "Jquery", "fullcalendar", "app", "requires", "a", "JSON", "news", "feed", "so", "this", "function", "creates", "the", "feed", "from", "upcoming", "PrivateEvent", "objects" ]
python
train
googledatalab/pydatalab
datalab/storage/_item.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/datalab/storage/_item.py#L111-L128
def copy_to(self, new_key, bucket=None):
    """Copies this item to the specified new key.

    Args:
      new_key: the new key to copy this item to.
      bucket: the bucket of the new item; if None (the default) use the same bucket.
    Returns:
      An Item corresponding to new key.
    Raises:
      Exception if there was an error copying the item.
    """
    if bucket is None:
        bucket = self._bucket
    # The former ``try/except Exception as e: raise e`` wrapper was a
    # no-op re-raise (and degrades traceback detail on Python 2), so the
    # API call is made directly and any error propagates unchanged.
    new_info = self._api.objects_copy(self._bucket, self._key, bucket, new_key)
    return Item(bucket, new_key, new_info, context=self._context)
[ "def", "copy_to", "(", "self", ",", "new_key", ",", "bucket", "=", "None", ")", ":", "if", "bucket", "is", "None", ":", "bucket", "=", "self", ".", "_bucket", "try", ":", "new_info", "=", "self", ".", "_api", ".", "objects_copy", "(", "self", ".", ...
Copies this item to the specified new key. Args: new_key: the new key to copy this item to. bucket: the bucket of the new item; if None (the default) use the same bucket. Returns: An Item corresponding to new key. Raises: Exception if there was an error copying the item.
[ "Copies", "this", "item", "to", "the", "specified", "new", "key", "." ]
python
train