repo
stringlengths
7
55
path
stringlengths
4
223
url
stringlengths
87
315
code
stringlengths
75
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
avg_line_len
float64
7.91
980
tarzanjw/python-mysql-binlog-to-blinker
setup.py
https://github.com/tarzanjw/python-mysql-binlog-to-blinker/blob/d61ab5962345377e142a225b16f731ab4196fc26/setup.py#L150-L164
def _lint(): """Run lint and return an exit code.""" # Flake8 doesn't have an easy way to run checks using a Python function, so # just fork off another process to do it. # Python 3 compat: # - The result of subprocess call outputs are byte strings, meaning we need # to pass a byte string to endswith. project_python_files = [filename for filename in get_project_files() if filename.endswith(b'.py')] retcode = subprocess.call( ['flake8', '--max-complexity=10'] + project_python_files) if retcode == 0: print_success_message('No style errors') return retcode
[ "def", "_lint", "(", ")", ":", "# Flake8 doesn't have an easy way to run checks using a Python function, so", "# just fork off another process to do it.", "# Python 3 compat:", "# - The result of subprocess call outputs are byte strings, meaning we need", "# to pass a byte string to endswith.", ...
Run lint and return an exit code.
[ "Run", "lint", "and", "return", "an", "exit", "code", "." ]
python
train
42.133333
openstack/pyghmi
pyghmi/ipmi/console.py
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/console.py#L459-L531
def _got_sol_payload(self, payload): """SOL payload callback """ # TODO(jbjohnso) test cases to throw some likely scenarios at functions # for example, retry with new data, retry with no new data # retry with unexpected sequence number if type(payload) == dict: # we received an error condition self.activated = False self._print_error(payload) return newseq = payload[0] & 0b1111 ackseq = payload[1] & 0b1111 ackcount = payload[2] nacked = payload[3] & 0b1000000 breakdetected = payload[3] & 0b10000 # for now, ignore overrun. I assume partial NACK for this reason or # for no reason would be treated the same, new payload with partial # data. remdata = "" remdatalen = 0 flag = 0 if not self.poweredon: flag |= 0b1100000 if not self.activated: flag |= 0b1010000 if newseq != 0: # this packet at least has some data to send to us.. if len(payload) > 4: remdatalen = len(payload[4:]) # store remote len before dupe # retry logic, we must ack *this* many even if it is # a retry packet with new partial data remdata = bytes(payload[4:]) if newseq == self.remseq: # it is a retry, but could have new data if remdatalen > self.lastsize: remdata = bytes(remdata[4 + self.lastsize:]) else: # no new data... remdata = "" else: # TODO(jbjohnso) what if remote sequence number is wrong?? self.remseq = newseq self.lastsize = remdatalen ackpayload = bytearray((0, self.remseq, remdatalen, flag)) # Why not put pending data into the ack? 
because it's rare # and might be hard to decide what to do in the context of # retry situation try: self.send_payload(ackpayload, retry=False) except exc.IpmiException: # if the session is broken, then close the SOL session self.close() if remdata: # Do not subject callers to empty data self._print_data(remdata) if self.myseq != 0 and ackseq == self.myseq: # the bmc has something # to say about last xmit self.awaitingack = False if nacked and not breakdetected: # the BMC was in some way unhappy newtext = self.lastpayload[4 + ackcount:] with self.outputlock: if (self.pendingoutput and not isinstance(self.pendingoutput[0], dict)): self.pendingoutput[0] = newtext + self.pendingoutput[0] else: self.pendingoutput = [newtext] + self.pendingoutput # self._sendpendingoutput() checks len(self._sendpendingoutput) self._sendpendingoutput() elif ackseq != 0 and self.awaitingack: # if an ack packet came in, but did not match what we # expected, retry our payload now. # the situation that was triggered was a senseless retry # when data came in while we xmitted. In theory, a BMC # should handle a retry correctly, but some do not, so # try to mitigate by avoiding overeager retries # occasional retry of a packet # sooner than timeout suggests is evidently a big deal self.send_payload(payload=self.lastpayload)
[ "def", "_got_sol_payload", "(", "self", ",", "payload", ")", ":", "# TODO(jbjohnso) test cases to throw some likely scenarios at functions", "# for example, retry with new data, retry with no new data", "# retry with unexpected sequence number", "if", "type", "(", "payload", ")", "==...
SOL payload callback
[ "SOL", "payload", "callback" ]
python
train
49.041096
rodluger/everest
everest/basecamp.py
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/basecamp.py#L1077-L1178
def lnlike(self, model, refactor=False, pos_tol=2.5, neg_tol=50., full_output=False): r""" Return the likelihood of the astrophysical model `model`. Returns the likelihood of `model` marginalized over the PLD model. :param ndarray model: A vector of the same shape as `self.time` \ corresponding to the astrophysical model. :param bool refactor: Re-compute the Cholesky decomposition? This \ typically does not need to be done, except when the PLD \ model changes. Default :py:obj:`False`. :param float pos_tol: the positive (i.e., above the median) \ outlier tolerance in standard deviations. :param float neg_tol: the negative (i.e., below the median) \ outlier tolerance in standard deviations. :param bool full_output: If :py:obj:`True`, returns the maximum \ likelihood model amplitude and the variance on the amplitude \ in addition to the log-likelihood. In the case of a transit \ model, these are the transit depth and depth variance. Default \ :py:obj:`False`. """ lnl = 0 # Re-factorize the Cholesky decomposition? 
try: self._ll_info except AttributeError: refactor = True if refactor: # Smooth the light curve and reset the outlier mask t = np.delete(self.time, np.concatenate([self.nanmask, self.badmask])) f = np.delete(self.flux, np.concatenate([self.nanmask, self.badmask])) f = SavGol(f) med = np.nanmedian(f) MAD = 1.4826 * np.nanmedian(np.abs(f - med)) pos_inds = np.where((f > med + pos_tol * MAD))[0] pos_inds = np.array([np.argmax(self.time == t[i]) for i in pos_inds]) MAD = 1.4826 * np.nanmedian(np.abs(f - med)) neg_inds = np.where((f < med - neg_tol * MAD))[0] neg_inds = np.array([np.argmax(self.time == t[i]) for i in neg_inds]) outmask = np.array(self.outmask) transitmask = np.array(self.transitmask) self.outmask = np.concatenate([neg_inds, pos_inds]) self.transitmask = np.array([], dtype=int) # Now re-factorize the Cholesky decomposition self._ll_info = [None for b in self.breakpoints] for b, brkpt in enumerate(self.breakpoints): # Masks for current chunk m = self.get_masked_chunk(b, pad=False) # This block of the masked covariance matrix K = GetCovariance(self.kernel, self.kernel_params, self.time[m], self.fraw_err[m]) # The masked X.L.X^T term A = np.zeros((len(m), len(m))) for n in range(self.pld_order): XM = self.X(n, m) A += self.lam[b][n] * np.dot(XM, XM.T) K += A self._ll_info[b] = [cho_factor(K), m] # Reset the outlier masks self.outmask = outmask self.transitmask = transitmask # Compute the likelihood for each chunk amp = [None for b in self.breakpoints] var = [None for b in self.breakpoints] for b, brkpt in enumerate(self.breakpoints): # Get the inverse covariance and the mask CDK = self._ll_info[b][0] m = self._ll_info[b][1] # Compute the maximum likelihood model amplitude # (for transits, this is the transit depth) var[b] = 1. 
/ np.dot(model[m], cho_solve(CDK, model[m])) amp[b] = var[b] * np.dot(model[m], cho_solve(CDK, self.fraw[m])) # Compute the residual r = self.fraw[m] - amp[b] * model[m] # Finally, compute the likelihood lnl += -0.5 * np.dot(r, cho_solve(CDK, r)) if full_output: # We need to multiply the Gaussians for all chunks to get the # amplitude and amplitude variance for the entire dataset vari = var[0] ampi = amp[0] for v, a in zip(var[1:], amp[1:]): ampi = (ampi * v + a * vari) / (vari + v) vari = vari * v / (vari + v) med = np.nanmedian(self.fraw) return lnl, ampi / med, vari / med ** 2 else: return lnl
[ "def", "lnlike", "(", "self", ",", "model", ",", "refactor", "=", "False", ",", "pos_tol", "=", "2.5", ",", "neg_tol", "=", "50.", ",", "full_output", "=", "False", ")", ":", "lnl", "=", "0", "# Re-factorize the Cholesky decomposition?", "try", ":", "self"...
r""" Return the likelihood of the astrophysical model `model`. Returns the likelihood of `model` marginalized over the PLD model. :param ndarray model: A vector of the same shape as `self.time` \ corresponding to the astrophysical model. :param bool refactor: Re-compute the Cholesky decomposition? This \ typically does not need to be done, except when the PLD \ model changes. Default :py:obj:`False`. :param float pos_tol: the positive (i.e., above the median) \ outlier tolerance in standard deviations. :param float neg_tol: the negative (i.e., below the median) \ outlier tolerance in standard deviations. :param bool full_output: If :py:obj:`True`, returns the maximum \ likelihood model amplitude and the variance on the amplitude \ in addition to the log-likelihood. In the case of a transit \ model, these are the transit depth and depth variance. Default \ :py:obj:`False`.
[ "r", "Return", "the", "likelihood", "of", "the", "astrophysical", "model", "model", "." ]
python
train
43.980392
django-danceschool/django-danceschool
danceschool/core/classreg.py
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/core/classreg.py#L666-L699
def form_valid(self,form): ''' Even if this form is valid, the handlers for this form may have added messages to the request. In that case, then the page should be handled as if the form were invalid. Otherwise, update the session data with the form data and then move to the next view ''' reg = self.temporaryRegistration # The session expires after a period of inactivity that is specified in preferences. expiry = timezone.now() + timedelta(minutes=getConstant('registration__sessionExpiryMinutes')) self.request.session[REG_VALIDATION_STR]["temporaryRegistrationExpiry"] = \ expiry.strftime('%Y-%m-%dT%H:%M:%S%z') self.request.session.modified = True # Update the expiration date for this registration, and pass in the data from # this form. reg.expirationDate = expiry reg.firstName = form.cleaned_data.pop('firstName') reg.lastName = form.cleaned_data.pop('lastName') reg.email = form.cleaned_data.pop('email') reg.phone = form.cleaned_data.pop('phone', None) reg.student = form.cleaned_data.pop('student',False) reg.comments = form.cleaned_data.pop('comments',None) reg.howHeardAboutUs = form.cleaned_data.pop('howHeardAboutUs',None) # Anything else in the form goes to the TemporaryRegistration data. reg.data.update(form.cleaned_data) reg.save() # This signal (formerly the post_temporary_registration signal) allows # vouchers to be applied temporarily, and it can be used for other tasks post_student_info.send(sender=StudentInfoView,registration=reg) return HttpResponseRedirect(self.get_success_url())
[ "def", "form_valid", "(", "self", ",", "form", ")", ":", "reg", "=", "self", ".", "temporaryRegistration", "# The session expires after a period of inactivity that is specified in preferences.", "expiry", "=", "timezone", ".", "now", "(", ")", "+", "timedelta", "(", "...
Even if this form is valid, the handlers for this form may have added messages to the request. In that case, then the page should be handled as if the form were invalid. Otherwise, update the session data with the form data and then move to the next view
[ "Even", "if", "this", "form", "is", "valid", "the", "handlers", "for", "this", "form", "may", "have", "added", "messages", "to", "the", "request", ".", "In", "that", "case", "then", "the", "page", "should", "be", "handled", "as", "if", "the", "form", "...
python
train
50.676471
inveniosoftware/invenio-communities
invenio_communities/forms.py
https://github.com/inveniosoftware/invenio-communities/blob/5c4de6783724d276ae1b6dd13a399a9e22fadc7a/invenio_communities/forms.py#L154-L161
def validate_identifier(self, field): """Validate field identifier.""" if field.data: field.data = field.data.lower() if Community.get(field.data, with_deleted=True): raise validators.ValidationError( _('The identifier already exists. ' 'Please choose a different one.'))
[ "def", "validate_identifier", "(", "self", ",", "field", ")", ":", "if", "field", ".", "data", ":", "field", ".", "data", "=", "field", ".", "data", ".", "lower", "(", ")", "if", "Community", ".", "get", "(", "field", ".", "data", ",", "with_deleted"...
Validate field identifier.
[ "Validate", "field", "identifier", "." ]
python
train
45.25
aliyun/aliyun-odps-python-sdk
odps/models/instance.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/models/instance.py#L654-L690
def get_logview_address(self, hours=None): """ Get logview address of the instance object by hours. :param hours: :return: logview address :rtype: str """ hours = hours or options.log_view_hours project = self.project url = '%s/authorization' % project.resource() policy = { 'expires_in_hours': hours, 'policy': { 'Statement': [{ 'Action': ['odps:Read'], 'Effect': 'Allow', 'Resource': 'acs:odps:*:projects/%s/instances/%s' % \ (project.name, self.id) }], 'Version': '1', } } headers = {'Content-Type': 'application/json'} params = {'sign_bearer_token': ''} data = json.dumps(policy) res = self._client.post(url, data, headers=headers, params=params) content = res.text if six.PY3 else res.content root = ElementTree.fromstring(content) token = root.find('Result').text link = options.log_view_host + "/logview/?h=" + self._client.endpoint + "&p=" \ + project.name + "&i=" + self.id + "&token=" + token return link
[ "def", "get_logview_address", "(", "self", ",", "hours", "=", "None", ")", ":", "hours", "=", "hours", "or", "options", ".", "log_view_hours", "project", "=", "self", ".", "project", "url", "=", "'%s/authorization'", "%", "project", ".", "resource", "(", "...
Get logview address of the instance object by hours. :param hours: :return: logview address :rtype: str
[ "Get", "logview", "address", "of", "the", "instance", "object", "by", "hours", "." ]
python
train
33.351351
azavea/python-omgeo
omgeo/services/base.py
https://github.com/azavea/python-omgeo/blob/40f4e006f087dbc795a5d954ffa2c0eab433f8c9/omgeo/services/base.py#L168-L174
def _get_xml_doc(self, endpoint, query, is_post=False): """ Return False if connection could not be made. Otherwise, return a minidom Document. """ response = self._get_response(endpoint, query, is_post=is_post) return minidom.parse(response.text)
[ "def", "_get_xml_doc", "(", "self", ",", "endpoint", ",", "query", ",", "is_post", "=", "False", ")", ":", "response", "=", "self", ".", "_get_response", "(", "endpoint", ",", "query", ",", "is_post", "=", "is_post", ")", "return", "minidom", ".", "parse...
Return False if connection could not be made. Otherwise, return a minidom Document.
[ "Return", "False", "if", "connection", "could", "not", "be", "made", ".", "Otherwise", "return", "a", "minidom", "Document", "." ]
python
train
41.285714
google/mobly
mobly/controllers/android_device.py
https://github.com/google/mobly/blob/38ba2cf7d29a20e6a2fca1718eecb337df38db26/mobly/controllers/android_device.py#L937-L954
def reboot(self): """Reboots the device. Generally one should use this method to reboot the device instead of directly calling `adb.reboot`. Because this method gracefully handles the teardown and restoration of running services. This method is blocking and only returns when the reboot has completed and the services restored. Raises: Error: Waiting for completion timed out. """ if self.is_bootloader: self.fastboot.reboot() return with self.handle_reboot(): self.adb.reboot()
[ "def", "reboot", "(", "self", ")", ":", "if", "self", ".", "is_bootloader", ":", "self", ".", "fastboot", ".", "reboot", "(", ")", "return", "with", "self", ".", "handle_reboot", "(", ")", ":", "self", ".", "adb", ".", "reboot", "(", ")" ]
Reboots the device. Generally one should use this method to reboot the device instead of directly calling `adb.reboot`. Because this method gracefully handles the teardown and restoration of running services. This method is blocking and only returns when the reboot has completed and the services restored. Raises: Error: Waiting for completion timed out.
[ "Reboots", "the", "device", "." ]
python
train
32.888889
Erotemic/utool
utool/_internal/util_importer.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/_internal/util_importer.py#L410-L417
def make_initstr(modname, import_tuples, verbose=False): """ Just creates the string representation. Does no importing. """ imports = [tup[0] for tup in import_tuples] from_imports = __get_from_imports(import_tuples) inject_execstr = _inject_execstr(modname, import_tuples) return _initstr(modname, imports, from_imports, inject_execstr)
[ "def", "make_initstr", "(", "modname", ",", "import_tuples", ",", "verbose", "=", "False", ")", ":", "imports", "=", "[", "tup", "[", "0", "]", "for", "tup", "in", "import_tuples", "]", "from_imports", "=", "__get_from_imports", "(", "import_tuples", ")", ...
Just creates the string representation. Does no importing.
[ "Just", "creates", "the", "string", "representation", ".", "Does", "no", "importing", "." ]
python
train
44.75
pavoni/pyvera
pyvera/subscribe.py
https://github.com/pavoni/pyvera/blob/e05e3d13f76153444787d31948feb5419d77a8c8/pyvera/subscribe.py#L41-L53
def register(self, device, callback): """Register a callback. device: device to be updated by subscription callback: callback for notification of changes """ if not device: logger.error("Received an invalid device: %r", device) return logger.debug("Subscribing to events for %s", device.name) self._devices[device.vera_device_id].append(device) self._callbacks[device].append(callback)
[ "def", "register", "(", "self", ",", "device", ",", "callback", ")", ":", "if", "not", "device", ":", "logger", ".", "error", "(", "\"Received an invalid device: %r\"", ",", "device", ")", "return", "logger", ".", "debug", "(", "\"Subscribing to events for %s\""...
Register a callback. device: device to be updated by subscription callback: callback for notification of changes
[ "Register", "a", "callback", "." ]
python
train
35.615385
SHTOOLS/SHTOOLS
pyshtools/shclasses/shcoeffsgrid.py
https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/shclasses/shcoeffsgrid.py#L2475-L2498
def lats(self, degrees=True): """ Return the latitudes of each row of the gridded data. Usage ----- lats = x.lats([degrees]) Returns ------- lats : ndarray, shape (nlat) 1-D numpy array of size nlat containing the latitude of each row of the gridded data. Parameters ------- degrees : bool, optional, default = True If True, the output will be in degrees. If False, the output will be in radians. """ if degrees is False: return _np.radians(self._lats()) else: return self._lats()
[ "def", "lats", "(", "self", ",", "degrees", "=", "True", ")", ":", "if", "degrees", "is", "False", ":", "return", "_np", ".", "radians", "(", "self", ".", "_lats", "(", ")", ")", "else", ":", "return", "self", ".", "_lats", "(", ")" ]
Return the latitudes of each row of the gridded data. Usage ----- lats = x.lats([degrees]) Returns ------- lats : ndarray, shape (nlat) 1-D numpy array of size nlat containing the latitude of each row of the gridded data. Parameters ------- degrees : bool, optional, default = True If True, the output will be in degrees. If False, the output will be in radians.
[ "Return", "the", "latitudes", "of", "each", "row", "of", "the", "gridded", "data", "." ]
python
train
26.833333
dailymuse/oz
oz/sqlalchemy/middleware.py
https://github.com/dailymuse/oz/blob/4329f6a207dc9d2a8fbeb4d16d415dbe4570b5bd/oz/sqlalchemy/middleware.py#L20-L30
def db(self, connection_string=None): """Gets the SQLALchemy session for this request""" connection_string = connection_string or self.settings["db"] if not hasattr(self, "_db_conns"): self._db_conns = {} if not connection_string in self._db_conns: self._db_conns[connection_string] = oz.sqlalchemy.session(connection_string=connection_string) return self._db_conns[connection_string]
[ "def", "db", "(", "self", ",", "connection_string", "=", "None", ")", ":", "connection_string", "=", "connection_string", "or", "self", ".", "settings", "[", "\"db\"", "]", "if", "not", "hasattr", "(", "self", ",", "\"_db_conns\"", ")", ":", "self", ".", ...
Gets the SQLALchemy session for this request
[ "Gets", "the", "SQLALchemy", "session", "for", "this", "request" ]
python
train
40.090909
spyder-ide/spyder
spyder/app/mainwindow.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/app/mainwindow.py#L3274-L3318
def run_spyder(app, options, args): """ Create and show Spyder's main window Start QApplication event loop """ #TODO: insert here # Main window main = MainWindow(options) try: main.setup() except BaseException: if main.console is not None: try: main.console.shell.exit_interpreter() except BaseException: pass raise main.show() main.post_visible_setup() if main.console: main.console.shell.interpreter.namespace['spy'] = \ Spy(app=app, window=main) # Open external files passed as args if args: for a in args: main.open_external_file(a) # Don't show icons in menus for Mac if sys.platform == 'darwin': QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True) # Open external files with our Mac app if running_in_mac_app(): app.sig_open_external_file.connect(main.open_external_file) # To give focus again to the last focused widget after restoring # the window app.focusChanged.connect(main.change_last_focused_widget) if not running_under_pytest(): app.exec_() return main
[ "def", "run_spyder", "(", "app", ",", "options", ",", "args", ")", ":", "#TODO: insert here\r", "# Main window\r", "main", "=", "MainWindow", "(", "options", ")", "try", ":", "main", ".", "setup", "(", ")", "except", "BaseException", ":", "if", "main", "."...
Create and show Spyder's main window Start QApplication event loop
[ "Create", "and", "show", "Spyder", "s", "main", "window", "Start", "QApplication", "event", "loop" ]
python
train
28.066667
projectshift/shift-boiler
boiler/user/validators.py
https://github.com/projectshift/shift-boiler/blob/8e6f3a3e4b9493fb6c8bd16bed160ede153bfb0b/boiler/user/validators.py#L9-L26
def validate(self, value, model=None, context=None): """ Perform validation """ from boiler.user.services import user_service self_id = None if model: if isinstance(model, dict): self_id = model.get('id') else: self_id = getattr(model, 'id') params = dict() params[self.property] = value found = user_service.first(**params) if not found or (model and self_id == found.id): return Error() return Error(self.error)
[ "def", "validate", "(", "self", ",", "value", ",", "model", "=", "None", ",", "context", "=", "None", ")", ":", "from", "boiler", ".", "user", ".", "services", "import", "user_service", "self_id", "=", "None", "if", "model", ":", "if", "isinstance", "(...
Perform validation
[ "Perform", "validation" ]
python
train
29.944444
orbingol/NURBS-Python
geomdl/linalg.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L538-L562
def frange(start, stop, step=1.0): """ Implementation of Python's ``range()`` function which works with floats. Reference to this implementation: https://stackoverflow.com/a/36091634 :param start: start value :type start: float :param stop: end value :type stop: float :param step: increment :type step: float :return: float :rtype: generator """ i = 0.0 x = float(start) # Prevent yielding integers. x0 = x epsilon = step / 2.0 yield x # always yield first value while x + epsilon < stop: i += 1.0 x = x0 + i * step yield x if stop > x: yield stop
[ "def", "frange", "(", "start", ",", "stop", ",", "step", "=", "1.0", ")", ":", "i", "=", "0.0", "x", "=", "float", "(", "start", ")", "# Prevent yielding integers.", "x0", "=", "x", "epsilon", "=", "step", "/", "2.0", "yield", "x", "# always yield firs...
Implementation of Python's ``range()`` function which works with floats. Reference to this implementation: https://stackoverflow.com/a/36091634 :param start: start value :type start: float :param stop: end value :type stop: float :param step: increment :type step: float :return: float :rtype: generator
[ "Implementation", "of", "Python", "s", "range", "()", "function", "which", "works", "with", "floats", "." ]
python
train
25.28
delph-in/pydelphin
delphin/itsdb.py
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/itsdb.py#L1298-L1311
def _prepare_source(selector, source): """Normalize source rows and selectors.""" tablename, fields = get_data_specifier(selector) if len(fields) != 1: raise ItsdbError( 'Selector must specify exactly one data column: {}' .format(selector) ) if isinstance(source, TestSuite): if not tablename: tablename = source.relations.find(fields[0])[0] source = source[tablename] cols = list(source.fields.keys()) + fields return source, cols
[ "def", "_prepare_source", "(", "selector", ",", "source", ")", ":", "tablename", ",", "fields", "=", "get_data_specifier", "(", "selector", ")", "if", "len", "(", "fields", ")", "!=", "1", ":", "raise", "ItsdbError", "(", "'Selector must specify exactly one data...
Normalize source rows and selectors.
[ "Normalize", "source", "rows", "and", "selectors", "." ]
python
train
36.428571
edibledinos/pwnypack
pwnypack/target.py
https://github.com/edibledinos/pwnypack/blob/e0a5a8e6ef3f4f1f7e1b91ee379711f4a49cb0e6/pwnypack/target.py#L112-L122
def bits(self): """ The target architecture word size. One of :class:`Target.Bits`. """ if self._bits is None: value = self._DEFAULT_BITS.get(self.arch) if value is None: raise NotImplementedError('Could not determine the default word size of %s architecture.' % self.arch) return value else: return self._bits
[ "def", "bits", "(", "self", ")", ":", "if", "self", ".", "_bits", "is", "None", ":", "value", "=", "self", ".", "_DEFAULT_BITS", ".", "get", "(", "self", ".", "arch", ")", "if", "value", "is", "None", ":", "raise", "NotImplementedError", "(", "'Could...
The target architecture word size. One of :class:`Target.Bits`.
[ "The", "target", "architecture", "word", "size", ".", "One", "of", ":", "class", ":", "Target", ".", "Bits", "." ]
python
train
36.727273
blockcypher/blockcypher-python
blockcypher/api.py
https://github.com/blockcypher/blockcypher-python/blob/7601ea21916957ff279384fd699527ff9c28a56e/blockcypher/api.py#L1143-L1171
def get_wallet_addresses(wallet_name, api_key, is_hd_wallet=False, zero_balance=None, used=None, omit_addresses=False, coin_symbol='btc'): ''' Returns a list of wallet addresses as well as some meta-data ''' assert is_valid_coin_symbol(coin_symbol) assert api_key assert len(wallet_name) <= 25, wallet_name assert zero_balance in (None, True, False) assert used in (None, True, False) assert isinstance(omit_addresses, bool), omit_addresses params = {'token': api_key} kwargs = {'hd/' if is_hd_wallet else '': wallet_name} # hack! url = make_url(coin_symbol, 'wallets', **kwargs) if zero_balance is True: params['zerobalance'] = 'true' elif zero_balance is False: params['zerobalance'] = 'false' if used is True: params['used'] = 'true' elif used is False: params['used'] = 'false' if omit_addresses: params['omitWalletAddresses'] = 'true' r = requests.get(url, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS) return get_valid_json(r)
[ "def", "get_wallet_addresses", "(", "wallet_name", ",", "api_key", ",", "is_hd_wallet", "=", "False", ",", "zero_balance", "=", "None", ",", "used", "=", "None", ",", "omit_addresses", "=", "False", ",", "coin_symbol", "=", "'btc'", ")", ":", "assert", "is_v...
Returns a list of wallet addresses as well as some meta-data
[ "Returns", "a", "list", "of", "wallet", "addresses", "as", "well", "as", "some", "meta", "-", "data" ]
python
train
35.896552
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAUtil/QADate.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAUtil/QADate.py#L111-L124
def QA_util_datetime_to_strdatetime(dt): """ :param dt: pythone datetime.datetime :return: 1999-02-01 09:30:91 string type """ strdatetime = "%04d-%02d-%02d %02d:%02d:%02d" % ( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second ) return strdatetime
[ "def", "QA_util_datetime_to_strdatetime", "(", "dt", ")", ":", "strdatetime", "=", "\"%04d-%02d-%02d %02d:%02d:%02d\"", "%", "(", "dt", ".", "year", ",", "dt", ".", "month", ",", "dt", ".", "day", ",", "dt", ".", "hour", ",", "dt", ".", "minute", ",", "d...
:param dt: pythone datetime.datetime :return: 1999-02-01 09:30:91 string type
[ ":", "param", "dt", ":", "pythone", "datetime", ".", "datetime", ":", "return", ":", "1999", "-", "02", "-", "01", "09", ":", "30", ":", "91", "string", "type" ]
python
train
22.785714
materialsproject/pymatgen-db
matgendb/creator.py
https://github.com/materialsproject/pymatgen-db/blob/02e4351c2cea431407644f49193e8bf43ed39b9a/matgendb/creator.py#L513-L587
def generate_doc(self, dir_name, vasprun_files): """ Process aflow style runs, where each run is actually a combination of two vasp runs. """ try: fullpath = os.path.abspath(dir_name) # Defensively copy the additional fields first. This is a MUST. # Otherwise, parallel updates will see the same object and inserts # will be overridden!! d = {k: v for k, v in self.additional_fields.items()} d["dir_name"] = fullpath d["schema_version"] = VaspToDbTaskDrone.__version__ d["calculations"] = [ self.process_vasprun(dir_name, taskname, filename) for taskname, filename in vasprun_files.items()] d1 = d["calculations"][0] d2 = d["calculations"][-1] # Now map some useful info to the root level. for root_key in ["completed_at", "nsites", "unit_cell_formula", "reduced_cell_formula", "pretty_formula", "elements", "nelements", "cif", "density", "is_hubbard", "hubbards", "run_type"]: d[root_key] = d2[root_key] d["chemsys"] = "-".join(sorted(d2["elements"])) # store any overrides to the exchange correlation functional xc = d2["input"]["incar"].get("GGA") if xc: xc = xc.upper() d["input"] = {"crystal": d1["input"]["crystal"], "is_lasph": d2["input"]["incar"].get("LASPH", False), "potcar_spec": d1["input"].get("potcar_spec"), "xc_override": xc} vals = sorted(d2["reduced_cell_formula"].values()) d["anonymous_formula"] = {string.ascii_uppercase[i]: float(vals[i]) for i in range(len(vals))} d["output"] = { "crystal": d2["output"]["crystal"], "final_energy": d2["output"]["final_energy"], "final_energy_per_atom": d2["output"]["final_energy_per_atom"]} d["name"] = "aflow" p = d2["input"]["potcar_type"][0].split("_") pot_type = p[0] functional = "lda" if len(pot_type) == 1 else "_".join(p[1:]) d["pseudo_potential"] = {"functional": functional.lower(), "pot_type": pot_type.lower(), "labels": d2["input"]["potcar"]} if len(d["calculations"]) == len(self.runs) or \ list(vasprun_files.keys())[0] != "relax1": d["state"] = "successful" if d2["has_vasp_completed"] \ else "unsuccessful" else: 
d["state"] = "stopped" d["analysis"] = get_basic_analysis_and_error_checks(d) sg = SpacegroupAnalyzer(Structure.from_dict(d["output"]["crystal"]), 0.1) d["spacegroup"] = {"symbol": sg.get_space_group_symbol(), "number": sg.get_space_group_number(), "point_group": sg.get_point_group_symbol(), "source": "spglib", "crystal_system": sg.get_crystal_system(), "hall": sg.get_hall()} d["oxide_type"] = d2["oxide_type"] d["last_updated"] = datetime.datetime.today() return d except Exception as ex: import traceback print(traceback.format_exc()) logger.error("Error in " + os.path.abspath(dir_name) + ".\n" + traceback.format_exc()) return None
[ "def", "generate_doc", "(", "self", ",", "dir_name", ",", "vasprun_files", ")", ":", "try", ":", "fullpath", "=", "os", ".", "path", ".", "abspath", "(", "dir_name", ")", "# Defensively copy the additional fields first. This is a MUST.", "# Otherwise, parallel updates ...
Process aflow style runs, where each run is actually a combination of two vasp runs.
[ "Process", "aflow", "style", "runs", "where", "each", "run", "is", "actually", "a", "combination", "of", "two", "vasp", "runs", "." ]
python
train
49.706667
cdgriffith/puremagic
puremagic/main.py
https://github.com/cdgriffith/puremagic/blob/ae2c4c400930b8a19519e787f61dd779db7e415b/puremagic/main.py#L105-L113
def _magic(header, footer, mime, ext=None): """ Discover what type of file it is based on the incoming string """ if not header: raise ValueError("Input was empty") info = _identify_all(header, footer, ext)[0] if mime: return info.mime_type return info.extension if not \ isinstance(info.extension, list) else info[0].extension
[ "def", "_magic", "(", "header", ",", "footer", ",", "mime", ",", "ext", "=", "None", ")", ":", "if", "not", "header", ":", "raise", "ValueError", "(", "\"Input was empty\"", ")", "info", "=", "_identify_all", "(", "header", ",", "footer", ",", "ext", "...
Discover what type of file it is based on the incoming string
[ "Discover", "what", "type", "of", "file", "it", "is", "based", "on", "the", "incoming", "string" ]
python
train
40.333333
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/extensions_v1beta1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/extensions_v1beta1_api.py#L6383-L6407
def read_namespaced_replica_set(self, name, namespace, **kwargs): # noqa: E501 """read_namespaced_replica_set # noqa: E501 read the specified ReplicaSet # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_replica_set(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ReplicaSet (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1beta1ReplicaSet If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.read_namespaced_replica_set_with_http_info(name, namespace, **kwargs) # noqa: E501 else: (data) = self.read_namespaced_replica_set_with_http_info(name, namespace, **kwargs) # noqa: E501 return data
[ "def", "read_namespaced_replica_set", "(", "self", ",", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "retur...
read_namespaced_replica_set # noqa: E501 read the specified ReplicaSet # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.read_namespaced_replica_set(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ReplicaSet (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. :param bool export: Should this value be exported. Export strips fields that a user can not specify. :return: V1beta1ReplicaSet If the method is called asynchronously, returns the request thread.
[ "read_namespaced_replica_set", "#", "noqa", ":", "E501" ]
python
train
56.08
robotools/fontParts
Lib/fontParts/base/info.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/info.py#L182-L188
def _round(self, **kwargs): """ Subclasses may override this method. """ mathInfo = self._toMathInfo(guidelines=False) mathInfo = mathInfo.round() self._fromMathInfo(mathInfo, guidelines=False)
[ "def", "_round", "(", "self", ",", "*", "*", "kwargs", ")", ":", "mathInfo", "=", "self", ".", "_toMathInfo", "(", "guidelines", "=", "False", ")", "mathInfo", "=", "mathInfo", ".", "round", "(", ")", "self", ".", "_fromMathInfo", "(", "mathInfo", ",",...
Subclasses may override this method.
[ "Subclasses", "may", "override", "this", "method", "." ]
python
train
33.571429
SmartTeleMax/iktomi
iktomi/utils/url.py
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/utils/url.py#L64-L77
def uri_to_iri_parts(path, query, fragment): r""" Converts a URI parts to corresponding IRI parts in a given charset. Examples for URI versus IRI: :param path: The path of URI to convert. :param query: The query string of URI to convert. :param fragment: The fragment of URI to convert. """ path = url_unquote(path, '%/;?') query = url_unquote(query, '%;/?:@&=+,$#') fragment = url_unquote(fragment, '%;/?:@&=+,$#') return path, query, fragment
[ "def", "uri_to_iri_parts", "(", "path", ",", "query", ",", "fragment", ")", ":", "path", "=", "url_unquote", "(", "path", ",", "'%/;?'", ")", "query", "=", "url_unquote", "(", "query", ",", "'%;/?:@&=+,$#'", ")", "fragment", "=", "url_unquote", "(", "fragm...
r""" Converts a URI parts to corresponding IRI parts in a given charset. Examples for URI versus IRI: :param path: The path of URI to convert. :param query: The query string of URI to convert. :param fragment: The fragment of URI to convert.
[ "r", "Converts", "a", "URI", "parts", "to", "corresponding", "IRI", "parts", "in", "a", "given", "charset", "." ]
python
train
34.071429
iotile/coretools
transport_plugins/native_ble/iotile_transport_native_ble/virtual_ble.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/transport_plugins/native_ble/iotile_transport_native_ble/virtual_ble.py#L503-L512
def process(self): """Periodic nonblocking processes""" super(NativeBLEVirtualInterface, self).process() if (not self._stream_sm_running) and (not self.reports.empty()): self._stream_data() if (not self._trace_sm_running) and (not self.traces.empty()): self._send_trace()
[ "def", "process", "(", "self", ")", ":", "super", "(", "NativeBLEVirtualInterface", ",", "self", ")", ".", "process", "(", ")", "if", "(", "not", "self", ".", "_stream_sm_running", ")", "and", "(", "not", "self", ".", "reports", ".", "empty", "(", ")",...
Periodic nonblocking processes
[ "Periodic", "nonblocking", "processes" ]
python
train
32.1
edx/edx-enterprise
enterprise/views.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/views.py#L384-L552
def get(self, request): """ Render a form to collect user input about data sharing consent. """ enterprise_customer_uuid = request.GET.get('enterprise_customer_uuid') success_url = request.GET.get('next') failure_url = request.GET.get('failure_url') course_id = request.GET.get('course_id', '') program_uuid = request.GET.get('program_uuid', '') self.preview_mode = bool(request.GET.get('preview_mode', False)) # Get enterprise_customer to start in case we need to render a custom 404 page # Then go through other business logic to determine (and potentially overwrite) the enterprise customer enterprise_customer = get_enterprise_customer_or_404(enterprise_customer_uuid) context_data = get_global_context(request, enterprise_customer) if not self.preview_mode: if not self.course_or_program_exist(course_id, program_uuid): error_code = 'ENTGDS000' log_message = ( 'Neither the course with course_id: {course_id} ' 'or program with {program_uuid} exist for ' 'enterprise customer {enterprise_customer_uuid}' 'Error code {error_code} presented to user {userid}'.format( course_id=course_id, program_uuid=program_uuid, error_code=error_code, userid=request.user.id, enterprise_customer_uuid=enterprise_customer_uuid, ) ) return render_page_with_error_code_message(request, context_data, error_code, log_message) try: consent_record = get_data_sharing_consent( request.user.username, enterprise_customer_uuid, program_uuid=program_uuid, course_id=course_id ) except NotConnectedToOpenEdX as error: error_code = 'ENTGDS001' log_message = ( 'The was a problem with getting the consent record of user {userid} with ' 'uuid {enterprise_customer_uuid}. get_data_sharing_consent threw ' 'the following NotConnectedToOpenEdX error: {error}' 'for course_id {course_id}.' 
'Error code {error_code} presented to user'.format( userid=request.user.id, enterprise_customer_uuid=enterprise_customer_uuid, error=error, error_code=error_code, course_id=course_id, ) ) return render_page_with_error_code_message(request, context_data, error_code, log_message) try: consent_required = consent_record.consent_required() except AttributeError: consent_required = None if consent_record is None or not consent_required: error_code = 'ENTGDS002' log_message = ( 'The was a problem with the consent record of user {userid} with ' 'enterprise_customer_uuid {enterprise_customer_uuid}. consent_record has a value ' 'of {consent_record} and consent_record.consent_required() a ' 'value of {consent_required} for course_id {course_id}. ' 'Error code {error_code} presented to user'.format( userid=request.user.id, enterprise_customer_uuid=enterprise_customer_uuid, consent_record=consent_record, consent_required=consent_required, error_code=error_code, course_id=course_id, ) ) return render_page_with_error_code_message(request, context_data, error_code, log_message) else: enterprise_customer = consent_record.enterprise_customer elif not request.user.is_staff: raise PermissionDenied() # Retrieve context data again now that enterprise_customer logic has been run context_data = get_global_context(request, enterprise_customer) if not (enterprise_customer_uuid and success_url and failure_url): error_code = 'ENTGDS003' log_message = ( 'Error: one or more of the following values was falsy: ' 'enterprise_customer_uuid: {enterprise_customer_uuid}, ' 'success_url: {success_url}, ' 'failure_url: {failure_url} for course id {course_id}' 'The following error code was reported to user {userid}: {error_code}'.format( userid=request.user.id, enterprise_customer_uuid=enterprise_customer_uuid, success_url=success_url, failure_url=failure_url, error_code=error_code, course_id=course_id, ) ) return render_page_with_error_code_message(request, context_data, error_code, log_message) try: 
updated_context_dict = self.get_course_or_program_context( enterprise_customer, course_id=course_id, program_uuid=program_uuid ) context_data.update(updated_context_dict) except Http404: error_code = 'ENTGDS004' log_message = ( 'CourseCatalogApiServiceClient is improperly configured. ' 'Returned error code {error_code} to user {userid} ' 'and enterprise_customer {enterprise_customer} ' 'for course_id {course_id}'.format( error_code=error_code, userid=request.user.id, enterprise_customer=enterprise_customer.uuid, course_id=course_id, ) ) return render_page_with_error_code_message(request, context_data, error_code, log_message) item = 'course' if course_id else 'program' # Translators: bold_start and bold_end are HTML tags for specifying enterprise name in bold text. context_data.update({ 'consent_request_prompt': _( 'To access this {item}, you must first consent to share your learning achievements ' 'with {bold_start}{enterprise_customer_name}{bold_end}.' ).format( enterprise_customer_name=enterprise_customer.name, bold_start='<b>', bold_end='</b>', item=item, ), 'confirmation_alert_prompt': _( 'In order to start this {item} and use your discount, {bold_start}you must{bold_end} consent ' 'to share your {item} data with {enterprise_customer_name}.' 
).format( enterprise_customer_name=enterprise_customer.name, bold_start='<b>', bold_end='</b>', item=item, ), 'redirect_url': success_url, 'failure_url': failure_url, 'defer_creation': request.GET.get('defer_creation') is not None, 'requested_permissions': [ _('your enrollment in this {item}').format(item=item), _('your learning progress'), _('course completion'), ], 'policy_link_template': '', }) platform_name = context_data['platform_name'] published_only = False if self.preview_mode else True enterprise_consent_page = enterprise_customer.get_data_sharing_consent_text_overrides( published_only=published_only ) if enterprise_consent_page: context_data.update(self.get_context_from_db(enterprise_consent_page, platform_name, item, context_data)) else: context_data.update(self.get_default_context(enterprise_customer, platform_name)) return render(request, 'enterprise/grant_data_sharing_permissions.html', context=context_data)
[ "def", "get", "(", "self", ",", "request", ")", ":", "enterprise_customer_uuid", "=", "request", ".", "GET", ".", "get", "(", "'enterprise_customer_uuid'", ")", "success_url", "=", "request", ".", "GET", ".", "get", "(", "'next'", ")", "failure_url", "=", ...
Render a form to collect user input about data sharing consent.
[ "Render", "a", "form", "to", "collect", "user", "input", "about", "data", "sharing", "consent", "." ]
python
valid
48.514793
quantopian/alphalens
alphalens/utils.py
https://github.com/quantopian/alphalens/blob/d43eac871bb061e956df936794d3dd514da99e44/alphalens/utils.py#L865-L895
def timedelta_to_string(timedelta): """ Utility that converts a pandas.Timedelta to a string representation compatible with pandas.Timedelta constructor format Parameters ---------- timedelta: pd.Timedelta Returns ------- string string representation of 'timedelta' """ c = timedelta.components format = '' if c.days != 0: format += '%dD' % c.days if c.hours > 0: format += '%dh' % c.hours if c.minutes > 0: format += '%dm' % c.minutes if c.seconds > 0: format += '%ds' % c.seconds if c.milliseconds > 0: format += '%dms' % c.milliseconds if c.microseconds > 0: format += '%dus' % c.microseconds if c.nanoseconds > 0: format += '%dns' % c.nanoseconds return format
[ "def", "timedelta_to_string", "(", "timedelta", ")", ":", "c", "=", "timedelta", ".", "components", "format", "=", "''", "if", "c", ".", "days", "!=", "0", ":", "format", "+=", "'%dD'", "%", "c", ".", "days", "if", "c", ".", "hours", ">", "0", ":",...
Utility that converts a pandas.Timedelta to a string representation compatible with pandas.Timedelta constructor format Parameters ---------- timedelta: pd.Timedelta Returns ------- string string representation of 'timedelta'
[ "Utility", "that", "converts", "a", "pandas", ".", "Timedelta", "to", "a", "string", "representation", "compatible", "with", "pandas", ".", "Timedelta", "constructor", "format" ]
python
train
25.16129
econ-ark/HARK
HARK/ConsumptionSaving/ConsIndShockModel.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/ConsumptionSaving/ConsIndShockModel.py#L1320-L1380
def prepareToCalcEndOfPrdvP(self): ''' Prepare to calculate end-of-period marginal value by creating an array of market resources that the agent could have next period, considering the grid of end-of-period assets and the distribution of shocks he might experience next period. This differs from the baseline case because different savings choices yield different interest rates. Parameters ---------- none Returns ------- aNrmNow : np.array A 1D array of end-of-period assets; also stored as attribute of self. ''' KinkBool = self.Rboro > self.Rsave # Boolean indicating that there is actually a kink. # When Rboro == Rsave, this method acts just like it did in IndShock. # When Rboro < Rsave, the solver would have terminated when it was called. # Make a grid of end-of-period assets, including *two* copies of a=0 if KinkBool: aNrmNow = np.sort(np.hstack((np.asarray(self.aXtraGrid) + self.mNrmMinNow, np.array([0.0,0.0])))) else: aNrmNow = np.asarray(self.aXtraGrid) + self.mNrmMinNow aXtraCount = aNrmNow.size # Make tiled versions of the assets grid and income shocks ShkCount = self.TranShkValsNext.size aNrm_temp = np.tile(aNrmNow,(ShkCount,1)) PermShkVals_temp = (np.tile(self.PermShkValsNext,(aXtraCount,1))).transpose() TranShkVals_temp = (np.tile(self.TranShkValsNext,(aXtraCount,1))).transpose() ShkPrbs_temp = (np.tile(self.ShkPrbsNext,(aXtraCount,1))).transpose() # Make a 1D array of the interest factor at each asset gridpoint Rfree_vec = self.Rsave*np.ones(aXtraCount) if KinkBool: Rfree_vec[0:(np.sum(aNrmNow<=0)-1)] = self.Rboro self.Rfree = Rfree_vec Rfree_temp = np.tile(Rfree_vec,(ShkCount,1)) # Make an array of market resources that we could have next period, # considering the grid of assets and the income shocks that could occur mNrmNext = Rfree_temp/(self.PermGroFac*PermShkVals_temp)*aNrm_temp + TranShkVals_temp # Recalculate the minimum MPC and human wealth using the interest factor on saving. 
# This overwrites values from setAndUpdateValues, which were based on Rboro instead. if KinkBool: PatFacTop = ((self.Rsave*self.DiscFacEff)**(1.0/self.CRRA))/self.Rsave self.MPCminNow = 1.0/(1.0 + PatFacTop/self.solution_next.MPCmin) self.hNrmNow = self.PermGroFac/self.Rsave*(np.dot(self.ShkPrbsNext, self.TranShkValsNext*self.PermShkValsNext) + self.solution_next.hNrm) # Store some of the constructed arrays for later use and return the assets grid self.PermShkVals_temp = PermShkVals_temp self.ShkPrbs_temp = ShkPrbs_temp self.mNrmNext = mNrmNext self.aNrmNow = aNrmNow return aNrmNow
[ "def", "prepareToCalcEndOfPrdvP", "(", "self", ")", ":", "KinkBool", "=", "self", ".", "Rboro", ">", "self", ".", "Rsave", "# Boolean indicating that there is actually a kink.", "# When Rboro == Rsave, this method acts just like it did in IndShock.", "# When Rboro < Rsave, the solv...
Prepare to calculate end-of-period marginal value by creating an array of market resources that the agent could have next period, considering the grid of end-of-period assets and the distribution of shocks he might experience next period. This differs from the baseline case because different savings choices yield different interest rates. Parameters ---------- none Returns ------- aNrmNow : np.array A 1D array of end-of-period assets; also stored as attribute of self.
[ "Prepare", "to", "calculate", "end", "-", "of", "-", "period", "marginal", "value", "by", "creating", "an", "array", "of", "market", "resources", "that", "the", "agent", "could", "have", "next", "period", "considering", "the", "grid", "of", "end", "-", "of...
python
train
50.590164
TorkamaniLab/metapipe
metapipe/models/grammar.py
https://github.com/TorkamaniLab/metapipe/blob/15592e5b0c217afb00ac03503f8d0d7453d4baf4/metapipe/models/grammar.py#L64-L70
def file(): """ Grammar for files found in the overall input files. """ return ( Optional(Word(alphanums).setResultsName('alias') + Suppress(Literal('.'))) + Suppress(White()) + Word(approved_printables).setResultsName('filename') )
[ "def", "file", "(", ")", ":", "return", "(", "Optional", "(", "Word", "(", "alphanums", ")", ".", "setResultsName", "(", "'alias'", ")", "+", "Suppress", "(", "Literal", "(", "'.'", ")", ")", ")", "+", "Suppress", "(", "White", "(", ")", ")", "+", ...
Grammar for files found in the overall input files.
[ "Grammar", "for", "files", "found", "in", "the", "overall", "input", "files", "." ]
python
train
42
delph-in/pydelphin
delphin/mrs/compare.py
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/mrs/compare.py#L225-L284
def _node_isomorphic(a, b, check_varprops=True): """ Two Xmrs objects are isomorphic if they have the same structure as determined by variable linkages between preds. """ # first some quick checks a_var_refs = sorted(len(vd['refs']) for vd in a._vars.values()) b_var_refs = sorted(len(vd['refs']) for vd in b._vars.values()) if a_var_refs != b_var_refs: return False print() # these signature: [node] indices are meant to avoid unnecessary # comparisons; they also take care of "semantic feasibility" # constraints (comparing node values and properties). All that's # left is the "syntactic feasibility", or node-edge shapes. # nodedicts are {sig: [(id, edges), ...], ...} a_nd = _node_isomorphic_build_nodedict(a, check_varprops) #print('a', a_nd) b_nd = _node_isomorphic_build_nodedict(b, check_varprops) #print('b', b_nd) #return a_sigs = {} # for node -> sig mapping # don't recurse when things are unique agenda = [] isomap = {} for sig, a_pairs in sorted(a_nd.items(), key=lambda x: len(x[1])): b_pairs = b_nd.get(sig, []) if len(a_pairs) != len(b_pairs): return False if len(a_pairs) == 1: a_, a_edges = a_pairs[0] b_, b_edges = b_pairs[0] if len(a_edges) != len(b_edges): return False a_sigs[a_] = sig isomap[a_] = b_ for edge, a_tgt in a_edges.items(): if edge not in b_edges: return False isomap[a_tgt] = b_edges[edge] else: for a_, ed in a_pairs: a_sigs[a_] = sig agenda.append((a_, sig, ed)) #print(agenda) #return isomaps = _node_isomorphic(agenda, a_sigs, b_nd, isomap, {}) # for sig, a_candidates in sorted(a_nodes.items(), key=lambda x: len(x[1])): # b_candidates = b_nodes.get(sig, []) # if len(a_candidates) != len(b_candidates): return False # candidates.append((a_candidates, b_candidates)) # # nodemaps = _isomorphic(a, b, candidates, {}) try: next(isomaps) return True except StopIteration: return False
[ "def", "_node_isomorphic", "(", "a", ",", "b", ",", "check_varprops", "=", "True", ")", ":", "# first some quick checks", "a_var_refs", "=", "sorted", "(", "len", "(", "vd", "[", "'refs'", "]", ")", "for", "vd", "in", "a", ".", "_vars", ".", "values", ...
Two Xmrs objects are isomorphic if they have the same structure as determined by variable linkages between preds.
[ "Two", "Xmrs", "objects", "are", "isomorphic", "if", "they", "have", "the", "same", "structure", "as", "determined", "by", "variable", "linkages", "between", "preds", "." ]
python
train
36.35
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/ext/_bundled/mplexporter.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/ext/_bundled/mplexporter.py#L584-L613
def draw_markers(self, data, coordinates, style, label, mplobj=None): """ Draw a set of markers. By default, this is done by repeatedly calling draw_path(), but renderers should generally overload this method to provide a more efficient implementation. In matplotlib, markers are created using the plt.plot() command. Parameters ---------- data : array_like A shape (N, 2) array of datapoints. coordinates : string A string code, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. style : dictionary a dictionary specifying the appearance of the markers. mplobj : matplotlib object the matplotlib plot element which generated this marker collection """ vertices, pathcodes = style['markerpath'] pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor', 'facecolor', 'zorder', 'edgewidth']) pathstyle['dasharray'] = "10,0" for vertex in data: self.draw_path(data=vertices, coordinates="points", pathcodes=pathcodes, style=pathstyle, offset=vertex, offset_coordinates=coordinates, mplobj=mplobj)
[ "def", "draw_markers", "(", "self", ",", "data", ",", "coordinates", ",", "style", ",", "label", ",", "mplobj", "=", "None", ")", ":", "vertices", ",", "pathcodes", "=", "style", "[", "'markerpath'", "]", "pathstyle", "=", "dict", "(", "(", "key", ",",...
Draw a set of markers. By default, this is done by repeatedly calling draw_path(), but renderers should generally overload this method to provide a more efficient implementation. In matplotlib, markers are created using the plt.plot() command. Parameters ---------- data : array_like A shape (N, 2) array of datapoints. coordinates : string A string code, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. style : dictionary a dictionary specifying the appearance of the markers. mplobj : matplotlib object the matplotlib plot element which generated this marker collection
[ "Draw", "a", "set", "of", "markers", ".", "By", "default", "this", "is", "done", "by", "repeatedly", "calling", "draw_path", "()", "but", "renderers", "should", "generally", "overload", "this", "method", "to", "provide", "a", "more", "efficient", "implementati...
python
train
47.1
floyernick/fleep-py
fleep/__init__.py
https://github.com/floyernick/fleep-py/blob/994bc2c274482d80ab13d89d8f7343eb316d3e44/fleep/__init__.py#L50-L82
def get(obj): """ Determines file format and picks suitable file types, extensions and MIME types Takes: obj (bytes) -> byte sequence (128 bytes are enough) Returns: (<class 'fleep.Info'>) -> Class instance """ if not isinstance(obj, bytes): raise TypeError("object type must be bytes") info = { "type": dict(), "extension": dict(), "mime": dict() } stream = " ".join(['{:02X}'.format(byte) for byte in obj]) for element in data: for signature in element["signature"]: offset = element["offset"] * 2 + element["offset"] if signature == stream[offset:len(signature) + offset]: for key in ["type", "extension", "mime"]: info[key][element[key]] = len(signature) for key in ["type", "extension", "mime"]: info[key] = [element for element in sorted(info[key], key=info[key].get, reverse=True)] return Info(info["type"], info["extension"], info["mime"])
[ "def", "get", "(", "obj", ")", ":", "if", "not", "isinstance", "(", "obj", ",", "bytes", ")", ":", "raise", "TypeError", "(", "\"object type must be bytes\"", ")", "info", "=", "{", "\"type\"", ":", "dict", "(", ")", ",", "\"extension\"", ":", "dict", ...
Determines file format and picks suitable file types, extensions and MIME types Takes: obj (bytes) -> byte sequence (128 bytes are enough) Returns: (<class 'fleep.Info'>) -> Class instance
[ "Determines", "file", "format", "and", "picks", "suitable", "file", "types", "extensions", "and", "MIME", "types" ]
python
train
30.151515
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/386asm.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/386asm.py#L42-L52
def generate(env): """Add Builders and construction variables for ar to an Environment.""" as_module.generate(env) env['AS'] = '386asm' env['ASFLAGS'] = SCons.Util.CLVar('') env['ASPPFLAGS'] = '$ASFLAGS' env['ASCOM'] = '$AS $ASFLAGS $SOURCES -o $TARGET' env['ASPPCOM'] = '$CC $ASPPFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS $SOURCES -o $TARGET' addPharLapPaths(env)
[ "def", "generate", "(", "env", ")", ":", "as_module", ".", "generate", "(", "env", ")", "env", "[", "'AS'", "]", "=", "'386asm'", "env", "[", "'ASFLAGS'", "]", "=", "SCons", ".", "Util", ".", "CLVar", "(", "''", ")", "env", "[", "'ASPPFLAGS'", "]",...
Add Builders and construction variables for ar to an Environment.
[ "Add", "Builders", "and", "construction", "variables", "for", "ar", "to", "an", "Environment", "." ]
python
train
36.818182
eagleamon/pynetio
pynetio.py
https://github.com/eagleamon/pynetio/blob/3bc212cae18608de0214b964e395877d3ca4aa7b/pynetio.py#L46-L49
def update(self): """ Update all the switch values """ self.states = [bool(int(x)) for x in self.get('port list') or '0000']
[ "def", "update", "(", "self", ")", ":", "self", ".", "states", "=", "[", "bool", "(", "int", "(", "x", ")", ")", "for", "x", "in", "self", ".", "get", "(", "'port list'", ")", "or", "'0000'", "]" ]
Update all the switch values
[ "Update", "all", "the", "switch", "values" ]
python
train
34.5
h2non/filetype.py
filetype/filetype.py
https://github.com/h2non/filetype.py/blob/37e7fd1a9eed1a9eab55ac43f62da98f10970675/filetype/filetype.py#L67-L82
def get_type(mime=None, ext=None): """ Returns the file type instance searching by MIME type or file extension. Args: ext: file extension string. E.g: jpg, png, mp4, mp3 mime: MIME string. E.g: image/jpeg, video/mpeg Returns: The matched file type instance. Otherwise None. """ for kind in types: if kind.extension is ext or kind.mime is mime: return kind return None
[ "def", "get_type", "(", "mime", "=", "None", ",", "ext", "=", "None", ")", ":", "for", "kind", "in", "types", ":", "if", "kind", ".", "extension", "is", "ext", "or", "kind", ".", "mime", "is", "mime", ":", "return", "kind", "return", "None" ]
Returns the file type instance searching by MIME type or file extension. Args: ext: file extension string. E.g: jpg, png, mp4, mp3 mime: MIME string. E.g: image/jpeg, video/mpeg Returns: The matched file type instance. Otherwise None.
[ "Returns", "the", "file", "type", "instance", "searching", "by", "MIME", "type", "or", "file", "extension", "." ]
python
train
26.875
DLR-RM/RAFCON
source/rafcon/gui/models/selection.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/models/selection.py#L206-L229
def handle_prepared_selection_of_core_class_elements(self, core_class, models): """Handles the selection for TreeStore widgets maintaining lists of a specific `core_class` elements If widgets hold a TreeStore with elements of a specific `core_class`, the local selection of that element type is handled by that widget. This method is called to integrate the local selection with the overall selection of the state machine. If no modifier key (indicating to extend the selection) is pressed, the state machine selection is set to the passed selection. If the selection is to be extended, the state machine collection will consist of the widget selection plus all previously selected elements not having the core class `core_class`. :param State | StateElement core_class: The core class of the elements the widget handles :param models: The list of models that are currently being selected locally """ if extend_selection(): self._selected.difference_update(self.get_selected_elements_of_core_class(core_class)) else: self._selected.clear() models = self._check_model_types(models) if len(models) > 1: models = reduce_to_parent_states(models) self._selected.update(models)
[ "def", "handle_prepared_selection_of_core_class_elements", "(", "self", ",", "core_class", ",", "models", ")", ":", "if", "extend_selection", "(", ")", ":", "self", ".", "_selected", ".", "difference_update", "(", "self", ".", "get_selected_elements_of_core_class", "(...
Handles the selection for TreeStore widgets maintaining lists of a specific `core_class` elements If widgets hold a TreeStore with elements of a specific `core_class`, the local selection of that element type is handled by that widget. This method is called to integrate the local selection with the overall selection of the state machine. If no modifier key (indicating to extend the selection) is pressed, the state machine selection is set to the passed selection. If the selection is to be extended, the state machine collection will consist of the widget selection plus all previously selected elements not having the core class `core_class`. :param State | StateElement core_class: The core class of the elements the widget handles :param models: The list of models that are currently being selected locally
[ "Handles", "the", "selection", "for", "TreeStore", "widgets", "maintaining", "lists", "of", "a", "specific", "core_class", "elements" ]
python
train
54.541667
potash/drain
drain/aggregation.py
https://github.com/potash/drain/blob/ddd62081cb9317beb5d21f86c8b4bb196ca3d222/drain/aggregation.py#L121-L151
def select(self, df, args, inplace=False): """ After joining, selects a subset of arguments df: the result of a call to self.join(left) args: a collcetion of arguments to select, as accepted by drain.util.list_expand: - a tuple corresponding to concat_args, e.g. [('District', '12h'), ('Distict', '24h')] - a dict to be exanded into the above, e.g. {'District': ['12h', '24h']} """ if self.prefix is None: raise ValueError('Cannot do selection on an Aggregation without a prefix') # run list_expand and ensure all args to tuples for validation args = [tuple(i) for i in util.list_expand(args)] # check that the args passed are valid for a in args: has_arg = False for argument in self.arguments: if a == tuple(argument[k] for k in self.concat_args): has_arg = True break if not has_arg: raise ValueError('Invalid argument for selection: %s' % str(a)) df = data.select_features( df, exclude=[self.prefix + '_.*'], include=map(lambda a: self.args_prefix(a) + '.*', args), inplace=inplace) return df
[ "def", "select", "(", "self", ",", "df", ",", "args", ",", "inplace", "=", "False", ")", ":", "if", "self", ".", "prefix", "is", "None", ":", "raise", "ValueError", "(", "'Cannot do selection on an Aggregation without a prefix'", ")", "# run list_expand and ensure...
After joining, selects a subset of arguments df: the result of a call to self.join(left) args: a collcetion of arguments to select, as accepted by drain.util.list_expand: - a tuple corresponding to concat_args, e.g. [('District', '12h'), ('Distict', '24h')] - a dict to be exanded into the above, e.g. {'District': ['12h', '24h']}
[ "After", "joining", "selects", "a", "subset", "of", "arguments", "df", ":", "the", "result", "of", "a", "call", "to", "self", ".", "join", "(", "left", ")", "args", ":", "a", "collcetion", "of", "arguments", "to", "select", "as", "accepted", "by", "dra...
python
train
41.096774
anti1869/sunhead
src/sunhead/events/transports/amqp.py
https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/events/transports/amqp.py#L87-L119
async def connect(self): """ Create new asynchronous connection to the RabbitMQ instance. This will connect, declare exchange and bind itself to the configured queue. After that, client is ready to publish or consume messages. :return: Does not return anything. """ if self.connected or self.is_connecting: return self._is_connecting = True try: logger.info("Connecting to RabbitMQ...") self._transport, self._protocol = await aioamqp.connect(**self._connection_parameters) logger.info("Getting channel...") self._channel = await self._protocol.channel() if self._global_qos is not None: logger.info("Setting prefetch count on connection (%s)", self._global_qos) await self._channel.basic_qos(0, self._global_qos, 1) logger.info("Connecting to exchange '%s (%s)'", self._exchange_name, self._exchange_type) await self._channel.exchange(self._exchange_name, self._exchange_type) except (aioamqp.AmqpClosedConnection, Exception): logger.error("Error initializing RabbitMQ connection", exc_info=True) self._is_connecting = False raise exceptions.StreamConnectionError self._is_connecting = False
[ "async", "def", "connect", "(", "self", ")", ":", "if", "self", ".", "connected", "or", "self", ".", "is_connecting", ":", "return", "self", ".", "_is_connecting", "=", "True", "try", ":", "logger", ".", "info", "(", "\"Connecting to RabbitMQ...\"", ")", "...
Create new asynchronous connection to the RabbitMQ instance. This will connect, declare exchange and bind itself to the configured queue. After that, client is ready to publish or consume messages. :return: Does not return anything.
[ "Create", "new", "asynchronous", "connection", "to", "the", "RabbitMQ", "instance", ".", "This", "will", "connect", "declare", "exchange", "and", "bind", "itself", "to", "the", "configured", "queue", "." ]
python
train
40
insanum/gcalcli
gcalcli/validators.py
https://github.com/insanum/gcalcli/blob/428378a88f89d154c8d4046deb9bdb5eb4e81019/gcalcli/validators.py#L31-L45
def color_validator(input_str): """ A filter allowing only the particular colors used by the Google Calendar API Raises ValidationError otherwise. """ try: assert input_str in VALID_OVERRIDE_COLORS + [''] return input_str except AssertionError: raise ValidationError( 'Expected colors are: ' + ', '.join(color for color in VALID_OVERRIDE_COLORS) + '. (Ctrl-C to exit)\n')
[ "def", "color_validator", "(", "input_str", ")", ":", "try", ":", "assert", "input_str", "in", "VALID_OVERRIDE_COLORS", "+", "[", "''", "]", "return", "input_str", "except", "AssertionError", ":", "raise", "ValidationError", "(", "'Expected colors are: '", "+", "'...
A filter allowing only the particular colors used by the Google Calendar API Raises ValidationError otherwise.
[ "A", "filter", "allowing", "only", "the", "particular", "colors", "used", "by", "the", "Google", "Calendar", "API" ]
python
train
30.533333
czepluch/pysecp256k1
c_secp256k1/__init__.py
https://github.com/czepluch/pysecp256k1/blob/164cb305857c5ba7a26adb6bd85459c5ea32ddd1/c_secp256k1/__init__.py#L236-L266
def ecdsa_recover_compact(msg32, sig): """ Takes the a message and a parsed recoverable signature Returns the serialized public key from the private key in the sign function """ assert isinstance(msg32, bytes) assert len(msg32) == 32 _check_signature(sig) # Check that recid is of valid value recid = ord(sig[64:65]) if not (recid >= 0 and recid <= 3): raise InvalidSignatureError() # Setting the pubkey array pubkey = ffi.new("secp256k1_pubkey *") lib.secp256k1_ecdsa_recover( ctx, pubkey, _parse_to_recoverable_signature(sig), msg32 ) serialized_pubkey = _serialize_pubkey(pubkey) buf = ffi.buffer(serialized_pubkey, 65) r = buf[:] assert isinstance(r, bytes) assert len(r) == 65, len(r) return r
[ "def", "ecdsa_recover_compact", "(", "msg32", ",", "sig", ")", ":", "assert", "isinstance", "(", "msg32", ",", "bytes", ")", "assert", "len", "(", "msg32", ")", "==", "32", "_check_signature", "(", "sig", ")", "# Check that recid is of valid value", "recid", "...
Takes the a message and a parsed recoverable signature Returns the serialized public key from the private key in the sign function
[ "Takes", "the", "a", "message", "and", "a", "parsed", "recoverable", "signature", "Returns", "the", "serialized", "public", "key", "from", "the", "private", "key", "in", "the", "sign", "function" ]
python
train
25.83871
greenbone/ospd
ospd/ospd.py
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/ospd.py#L1040-L1055
def elements_as_text(self, elems, indent=2): """ Returns the elems dictionary as formatted plain text. """ assert elems text = "" for elename, eledesc in elems.items(): if isinstance(eledesc, dict): desc_txt = self.elements_as_text(eledesc, indent + 2) desc_txt = ''.join(['\n', desc_txt]) elif isinstance(eledesc, str): desc_txt = ''.join([eledesc, '\n']) else: assert False, "Only string or dictionary" ele_txt = "\t{0}{1: <22} {2}".format(' ' * indent, elename, desc_txt) text = ''.join([text, ele_txt]) return text
[ "def", "elements_as_text", "(", "self", ",", "elems", ",", "indent", "=", "2", ")", ":", "assert", "elems", "text", "=", "\"\"", "for", "elename", ",", "eledesc", "in", "elems", ".", "items", "(", ")", ":", "if", "isinstance", "(", "eledesc", ",", "d...
Returns the elems dictionary as formatted plain text.
[ "Returns", "the", "elems", "dictionary", "as", "formatted", "plain", "text", "." ]
python
train
44.75
juju/charm-helpers
charmhelpers/contrib/charmsupport/nrpe.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/charmsupport/nrpe.py#L341-L352
def get_nagios_unit_name(relation_name='nrpe-external-master'): """ Return the nagios unit name prepended with host_context if needed :param str relation_name: Name of relation nrpe sub joined to """ host_context = get_nagios_hostcontext(relation_name) if host_context: unit = "%s:%s" % (host_context, local_unit()) else: unit = local_unit() return unit
[ "def", "get_nagios_unit_name", "(", "relation_name", "=", "'nrpe-external-master'", ")", ":", "host_context", "=", "get_nagios_hostcontext", "(", "relation_name", ")", "if", "host_context", ":", "unit", "=", "\"%s:%s\"", "%", "(", "host_context", ",", "local_unit", ...
Return the nagios unit name prepended with host_context if needed :param str relation_name: Name of relation nrpe sub joined to
[ "Return", "the", "nagios", "unit", "name", "prepended", "with", "host_context", "if", "needed" ]
python
train
32.583333
jingw/pyhdfs
pyhdfs.py
https://github.com/jingw/pyhdfs/blob/b382b34f7cb28b41559f5be73102beb1732cd933/pyhdfs.py#L568-L576
def set_permission(self, path, **kwargs): """Set permission of a path. :param permission: The permission of a file/directory. Any radix-8 integer (leading zeros may be omitted.) :type permission: octal """ response = self._put(path, 'SETPERMISSION', **kwargs) assert not response.content
[ "def", "set_permission", "(", "self", ",", "path", ",", "*", "*", "kwargs", ")", ":", "response", "=", "self", ".", "_put", "(", "path", ",", "'SETPERMISSION'", ",", "*", "*", "kwargs", ")", "assert", "not", "response", ".", "content" ]
Set permission of a path. :param permission: The permission of a file/directory. Any radix-8 integer (leading zeros may be omitted.) :type permission: octal
[ "Set", "permission", "of", "a", "path", "." ]
python
train
37.777778
tcalmant/ipopo
pelix/ipopo/handlers/requires.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/handlers/requires.py#L492-L500
def clear(self): """ Cleans up the manager. The manager can't be used after this method has been called """ self.services.clear() self.services = None self._future_value = None super(AggregateDependency, self).clear()
[ "def", "clear", "(", "self", ")", ":", "self", ".", "services", ".", "clear", "(", ")", "self", ".", "services", "=", "None", "self", ".", "_future_value", "=", "None", "super", "(", "AggregateDependency", ",", "self", ")", ".", "clear", "(", ")" ]
Cleans up the manager. The manager can't be used after this method has been called
[ "Cleans", "up", "the", "manager", ".", "The", "manager", "can", "t", "be", "used", "after", "this", "method", "has", "been", "called" ]
python
train
30.333333
lobocv/crashreporter
crashreporter/crashreporter.py
https://github.com/lobocv/crashreporter/blob/a5bbb3f37977dc64bc865dfedafc365fd5469ef8/crashreporter/crashreporter.py#L328-L352
def delete_offline_reports(self): """ Delete all stored offline reports :return: List of reports that still require submission """ reports = self.get_offline_reports() remaining_reports = reports[:] for report in reports: with open(report, 'r') as _f: try: js = json.load(_f) except ValueError as e: logging.error("%s. Deleting crash report.") os.remove(report) continue if js['SMTP Submission'] in ('Sent', 'Disabled') and js['HQ Submission'] in ('Sent', 'Disabled'): # Only delete the reports which have been sent or who's upload method is disabled. remaining_reports.remove(report) try: os.remove(report) except OSError as e: logging.error(e) self.logger.info('CrashReporter: Deleting offline reports. %d reports remaining.' % len(remaining_reports)) return remaining_reports
[ "def", "delete_offline_reports", "(", "self", ")", ":", "reports", "=", "self", ".", "get_offline_reports", "(", ")", "remaining_reports", "=", "reports", "[", ":", "]", "for", "report", "in", "reports", ":", "with", "open", "(", "report", ",", "'r'", ")",...
Delete all stored offline reports :return: List of reports that still require submission
[ "Delete", "all", "stored", "offline", "reports", ":", "return", ":", "List", "of", "reports", "that", "still", "require", "submission" ]
python
train
43.8
UpCloudLtd/upcloud-python-api
upcloud_api/cloud_manager/server_mixin.py
https://github.com/UpCloudLtd/upcloud-python-api/blob/954b0ad7c4b932b2be31a95d88975f6b0eeac8ed/upcloud_api/cloud_manager/server_mixin.py#L80-L127
def create_server(self, server): """ Create a server and its storages based on a (locally created) Server object. Populates the given Server instance with the API response. 0.3.0: also supports giving the entire POST body as a dict that is directly serialised into JSON. Refer to the REST API documentation for correct format. Example: server1 = Server( core_number = 1, memory_amount = 1024, hostname = "my.example.1", zone = ZONE.London, storage_devices = [ Storage(os = "Ubuntu 14.04", size=10, tier=maxiops, title='The OS drive'), Storage(size=10), Storage() title = "My Example Server" ]) manager.create_server(server1) One storage should contain an OS. Otherwise storage fields are optional. - size defaults to 10, - title defaults to hostname + " OS disk" and hostname + " storage disk id" (id is a running starting from 1) - tier defaults to maxiops - valid operating systems are: "CentOS 6.5", "CentOS 7.0" "Debian 7.8" "Ubuntu 12.04", "Ubuntu 14.04" "Windows 2003","Windows 2008" ,"Windows 2012" """ if isinstance(server, Server): body = server.prepare_post_body() else: server = Server._create_server_obj(server, cloud_manager=self) body = server.prepare_post_body() res = self.post_request('/server', body) server_to_return = server server_to_return._reset( res['server'], cloud_manager=self, populated=True ) return server_to_return
[ "def", "create_server", "(", "self", ",", "server", ")", ":", "if", "isinstance", "(", "server", ",", "Server", ")", ":", "body", "=", "server", ".", "prepare_post_body", "(", ")", "else", ":", "server", "=", "Server", ".", "_create_server_obj", "(", "se...
Create a server and its storages based on a (locally created) Server object. Populates the given Server instance with the API response. 0.3.0: also supports giving the entire POST body as a dict that is directly serialised into JSON. Refer to the REST API documentation for correct format. Example: server1 = Server( core_number = 1, memory_amount = 1024, hostname = "my.example.1", zone = ZONE.London, storage_devices = [ Storage(os = "Ubuntu 14.04", size=10, tier=maxiops, title='The OS drive'), Storage(size=10), Storage() title = "My Example Server" ]) manager.create_server(server1) One storage should contain an OS. Otherwise storage fields are optional. - size defaults to 10, - title defaults to hostname + " OS disk" and hostname + " storage disk id" (id is a running starting from 1) - tier defaults to maxiops - valid operating systems are: "CentOS 6.5", "CentOS 7.0" "Debian 7.8" "Ubuntu 12.04", "Ubuntu 14.04" "Windows 2003","Windows 2008" ,"Windows 2012"
[ "Create", "a", "server", "and", "its", "storages", "based", "on", "a", "(", "locally", "created", ")", "Server", "object", "." ]
python
train
35.916667
Azure/azure-cosmos-python
azure/cosmos/range.py
https://github.com/Azure/azure-cosmos-python/blob/dd01b3c5d308c6da83cfcaa0ab7083351a476353/azure/cosmos/range.py#L57-L68
def Contains(self, other): """Checks if the passed parameter is in the range of this object. """ if other is None: raise ValueError("other is None.") if isinstance(other, Range): if other.low >= self.low and other.high <= self.high: return True return False else: return self.Contains(Range(other, other))
[ "def", "Contains", "(", "self", ",", "other", ")", ":", "if", "other", "is", "None", ":", "raise", "ValueError", "(", "\"other is None.\"", ")", "if", "isinstance", "(", "other", ",", "Range", ")", ":", "if", "other", ".", "low", ">=", "self", ".", "...
Checks if the passed parameter is in the range of this object.
[ "Checks", "if", "the", "passed", "parameter", "is", "in", "the", "range", "of", "this", "object", "." ]
python
train
33.25
juju/python-libjuju
juju/client/_client1.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/client/_client1.py#L548-L561
async def Restore(self, backup_id): ''' backup_id : str Returns -> None ''' # map input types to rpc msg _params = dict() msg = dict(type='Backups', request='Restore', version=1, params=_params) _params['backup-id'] = backup_id reply = await self.rpc(msg) return reply
[ "async", "def", "Restore", "(", "self", ",", "backup_id", ")", ":", "# map input types to rpc msg", "_params", "=", "dict", "(", ")", "msg", "=", "dict", "(", "type", "=", "'Backups'", ",", "request", "=", "'Restore'", ",", "version", "=", "1", ",", "par...
backup_id : str Returns -> None
[ "backup_id", ":", "str", "Returns", "-", ">", "None" ]
python
train
28
SmileyChris/easy-thumbnails
easy_thumbnails/templatetags/thumbnail.py
https://github.com/SmileyChris/easy-thumbnails/blob/b08ab44883bf7b221a98dadb9b589cb95d35b0bf/easy_thumbnails/templatetags/thumbnail.py#L287-L302
def thumbnail_url(source, alias): """ Return the thumbnail url for a source file using an aliased set of thumbnail options. If no matching alias is found, returns an empty string. Example usage:: <img src="{{ person.photo|thumbnail_url:'small' }}" alt=""> """ try: thumb = get_thumbnailer(source)[alias] except Exception: return '' return thumb.url
[ "def", "thumbnail_url", "(", "source", ",", "alias", ")", ":", "try", ":", "thumb", "=", "get_thumbnailer", "(", "source", ")", "[", "alias", "]", "except", "Exception", ":", "return", "''", "return", "thumb", ".", "url" ]
Return the thumbnail url for a source file using an aliased set of thumbnail options. If no matching alias is found, returns an empty string. Example usage:: <img src="{{ person.photo|thumbnail_url:'small' }}" alt="">
[ "Return", "the", "thumbnail", "url", "for", "a", "source", "file", "using", "an", "aliased", "set", "of", "thumbnail", "options", "." ]
python
train
24.75
limix/limix-core
limix_core/mean/linear.py
https://github.com/limix/limix-core/blob/5c590b4d351409f83ca320844b4897ce92203814/limix_core/mean/linear.py#L567-L572
def getParams(self): """ get params """ rv = np.array([]) if self.n_terms>0: rv = np.concatenate([np.reshape(self.B[term_i],self.B[term_i].size, order='F') for term_i in range(self.n_terms)]) return rv
[ "def", "getParams", "(", "self", ")", ":", "rv", "=", "np", ".", "array", "(", "[", "]", ")", "if", "self", ".", "n_terms", ">", "0", ":", "rv", "=", "np", ".", "concatenate", "(", "[", "np", ".", "reshape", "(", "self", ".", "B", "[", "term_...
get params
[ "get", "params" ]
python
train
40
polyaxon/polyaxon
polyaxon/scheduler/spawners/templates/volumes.py
https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/scheduler/spawners/templates/volumes.py#L128-L144
def get_shm_volumes(): """ Mount an tmpfs volume to /dev/shm. This will set /dev/shm size to half of the RAM of node. By default, /dev/shm is very small, only 64MB. Some experiments will fail due to lack of share memory, such as some experiments running on Pytorch. """ volumes, volume_mounts = [], [] shm_volume = client.V1Volume( name=constants.SHM_VOLUME, empty_dir=client.V1EmptyDirVolumeSource(medium='Memory') ) volumes.append(shm_volume) shm_volume_mount = client.V1VolumeMount(name=shm_volume.name, mount_path='/dev/shm') volume_mounts.append(shm_volume_mount) return volumes, volume_mounts
[ "def", "get_shm_volumes", "(", ")", ":", "volumes", ",", "volume_mounts", "=", "[", "]", ",", "[", "]", "shm_volume", "=", "client", ".", "V1Volume", "(", "name", "=", "constants", ".", "SHM_VOLUME", ",", "empty_dir", "=", "client", ".", "V1EmptyDirVolumeS...
Mount an tmpfs volume to /dev/shm. This will set /dev/shm size to half of the RAM of node. By default, /dev/shm is very small, only 64MB. Some experiments will fail due to lack of share memory, such as some experiments running on Pytorch.
[ "Mount", "an", "tmpfs", "volume", "to", "/", "dev", "/", "shm", ".", "This", "will", "set", "/", "dev", "/", "shm", "size", "to", "half", "of", "the", "RAM", "of", "node", ".", "By", "default", "/", "dev", "/", "shm", "is", "very", "small", "only...
python
train
38.470588
pypa/pipenv
pipenv/patched/notpip/_vendor/html5lib/_inputstream.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/html5lib/_inputstream.py#L240-L253
def char(self): """ Read one character from the stream or queue if available. Return EOF when EOF is reached. """ # Read a new chunk from the input stream if necessary if self.chunkOffset >= self.chunkSize: if not self.readChunk(): return EOF chunkOffset = self.chunkOffset char = self.chunk[chunkOffset] self.chunkOffset = chunkOffset + 1 return char
[ "def", "char", "(", "self", ")", ":", "# Read a new chunk from the input stream if necessary", "if", "self", ".", "chunkOffset", ">=", "self", ".", "chunkSize", ":", "if", "not", "self", ".", "readChunk", "(", ")", ":", "return", "EOF", "chunkOffset", "=", "se...
Read one character from the stream or queue if available. Return EOF when EOF is reached.
[ "Read", "one", "character", "from", "the", "stream", "or", "queue", "if", "available", ".", "Return", "EOF", "when", "EOF", "is", "reached", "." ]
python
train
31.714286
praekelt/django-export
export/fields.py
https://github.com/praekelt/django-export/blob/e2facdd53c9cbfa84d1409c7f0efe5d638812946/export/fields.py#L506-L556
def to_python(self, value): """ Validates that the input can be converted to a time. Returns a Python datetime.time object. """ if value in validators.EMPTY_VALUES: return None if isinstance(value, datetime.datetime): return value.time() if isinstance(value, datetime.time): return value if isinstance(value, list): # Input comes from a 2 SplitTimeWidgets, for example. So, it's two # components: start time and end time. if len(value) != 2: raise ValidationError(self.error_messages['invalid']) if value[0] in validators.EMPTY_VALUES and value[1] in \ validators.EMPTY_VALUES: return None start_value = value[0] end_value = value[1] start_time = None end_time = None for format in self.input_formats or formats.get_format(\ 'TIME_INPUT_FORMATS'): try: start_time = datetime.datetime( *time.strptime(start_value, format)[:6] ).time() except ValueError: if start_time: continue else: raise ValidationError(self.error_messages['invalid']) for format in self.input_formats or formats.get_format(\ 'TIME_INPUT_FORMATS'): try: end_time = datetime.datetime( *time.strptime(end_value, format)[:6] ).time() except ValueError: if end_time: continue else: raise ValidationError(self.error_messages['invalid']) return (start_time, end_time)
[ "def", "to_python", "(", "self", ",", "value", ")", ":", "if", "value", "in", "validators", ".", "EMPTY_VALUES", ":", "return", "None", "if", "isinstance", "(", "value", ",", "datetime", ".", "datetime", ")", ":", "return", "value", ".", "time", "(", "...
Validates that the input can be converted to a time. Returns a Python datetime.time object.
[ "Validates", "that", "the", "input", "can", "be", "converted", "to", "a", "time", ".", "Returns", "a", "Python", "datetime", ".", "time", "object", "." ]
python
train
34.823529
APSL/transmanager
transmanager/manager.py
https://github.com/APSL/transmanager/blob/79157085840008e146b264521681913090197ed1/transmanager/manager.py#L257-L288
def get_languages(self, include_main=False): """ Get all the languages except the main. Try to get in order: 1.- item languages 2.- model languages 3.- application model languages # 4.- default languages :param master: :param include_main: :return: """ if not self.master: raise Exception('TransManager - No master set') item_languages = self.get_languages_from_item(self.ct_master, self.master) languages = self.get_languages_from_model(self.ct_master.app_label, self.ct_master.model) if not languages: languages = self.get_languages_from_application(self.ct_master.app_label) # if not languages: # languages = self.get_languages_default() if not include_main: main_language = self.get_main_language() if main_language in languages: languages.remove(main_language) return list(set(item_languages + languages))
[ "def", "get_languages", "(", "self", ",", "include_main", "=", "False", ")", ":", "if", "not", "self", ".", "master", ":", "raise", "Exception", "(", "'TransManager - No master set'", ")", "item_languages", "=", "self", ".", "get_languages_from_item", "(", "self...
Get all the languages except the main. Try to get in order: 1.- item languages 2.- model languages 3.- application model languages # 4.- default languages :param master: :param include_main: :return:
[ "Get", "all", "the", "languages", "except", "the", "main", "." ]
python
train
32.1875
JamesRamm/longclaw
longclaw/contrib/productrequests/api.py
https://github.com/JamesRamm/longclaw/blob/8bbf2e6d703271b815ec111813c7c5d1d4e4e810/longclaw/contrib/productrequests/api.py#L35-L40
def requests_for_variant(self, request, variant_id=None): """Get all the requests for a single variant """ requests = ProductRequest.objects.filter(variant__id=variant_id) serializer = self.serializer_class(requests, many=True) return Response(data=serializer.data, status=status.HTTP_200_OK)
[ "def", "requests_for_variant", "(", "self", ",", "request", ",", "variant_id", "=", "None", ")", ":", "requests", "=", "ProductRequest", ".", "objects", ".", "filter", "(", "variant__id", "=", "variant_id", ")", "serializer", "=", "self", ".", "serializer_clas...
Get all the requests for a single variant
[ "Get", "all", "the", "requests", "for", "a", "single", "variant" ]
python
train
54.5
Spinmob/spinmob
_pylab_colormap.py
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_pylab_colormap.py#L488-L494
def _slider_changed(self, n): """ updates the colormap / plot """ self._button_save.setEnabled(True) self.modify_colorpoint(n, self._sliders[n].value()*0.001, self._colorpoint_list[n][1], self._colorpoint_list[n][2])
[ "def", "_slider_changed", "(", "self", ",", "n", ")", ":", "self", ".", "_button_save", ".", "setEnabled", "(", "True", ")", "self", ".", "modify_colorpoint", "(", "n", ",", "self", ".", "_sliders", "[", "n", "]", ".", "value", "(", ")", "*", "0.001"...
updates the colormap / plot
[ "updates", "the", "colormap", "/", "plot" ]
python
train
35.857143
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/cursor.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/cursor.py#L823-L848
def where(self, code): """Adds a $where clause to this query. The `code` argument must be an instance of :class:`basestring` (:class:`str` in python 3) or :class:`~bson.code.Code` containing a JavaScript expression. This expression will be evaluated for each document scanned. Only those documents for which the expression evaluates to *true* will be returned as results. The keyword *this* refers to the object currently being scanned. Raises :class:`TypeError` if `code` is not an instance of :class:`basestring` (:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has already been used. Only the last call to :meth:`where` applied to a :class:`Cursor` has any effect. :Parameters: - `code`: JavaScript expression to use as a filter """ self.__check_okay_to_chain() if not isinstance(code, Code): code = Code(code) self.__spec["$where"] = code return self
[ "def", "where", "(", "self", ",", "code", ")", ":", "self", ".", "__check_okay_to_chain", "(", ")", "if", "not", "isinstance", "(", "code", ",", "Code", ")", ":", "code", "=", "Code", "(", "code", ")", "self", ".", "__spec", "[", "\"$where\"", "]", ...
Adds a $where clause to this query. The `code` argument must be an instance of :class:`basestring` (:class:`str` in python 3) or :class:`~bson.code.Code` containing a JavaScript expression. This expression will be evaluated for each document scanned. Only those documents for which the expression evaluates to *true* will be returned as results. The keyword *this* refers to the object currently being scanned. Raises :class:`TypeError` if `code` is not an instance of :class:`basestring` (:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has already been used. Only the last call to :meth:`where` applied to a :class:`Cursor` has any effect. :Parameters: - `code`: JavaScript expression to use as a filter
[ "Adds", "a", "$where", "clause", "to", "this", "query", "." ]
python
train
40.730769
cuihantao/andes
andes/utils/tab.py
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/utils/tab.py#L65-L70
def add_left_space(self, nspace=1): """elem_add n cols of spaces before the first col. (for texttable 0.8.3)""" sp = ' ' * nspace for item in self._rows: item[0] = sp + item[0]
[ "def", "add_left_space", "(", "self", ",", "nspace", "=", "1", ")", ":", "sp", "=", "' '", "*", "nspace", "for", "item", "in", "self", ".", "_rows", ":", "item", "[", "0", "]", "=", "sp", "+", "item", "[", "0", "]" ]
elem_add n cols of spaces before the first col. (for texttable 0.8.3)
[ "elem_add", "n", "cols", "of", "spaces", "before", "the", "first", "col", ".", "(", "for", "texttable", "0", ".", "8", ".", "3", ")" ]
python
train
36.333333
minhhoit/yacms
yacms/generic/views.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/generic/views.py#L26-L43
def admin_keywords_submit(request): """ Adds any new given keywords from the custom keywords field in the admin, and returns their IDs for use when saving a model with a keywords field. """ keyword_ids, titles = [], [] remove = punctuation.replace("-", "") # Strip punctuation, allow dashes. for title in request.POST.get("text_keywords", "").split(","): title = "".join([c for c in title if c not in remove]).strip() if title: kw, created = Keyword.objects.get_or_create_iexact(title=title) keyword_id = str(kw.id) if keyword_id not in keyword_ids: keyword_ids.append(keyword_id) titles.append(title) return HttpResponse("%s|%s" % (",".join(keyword_ids), ", ".join(titles)), content_type='text/plain')
[ "def", "admin_keywords_submit", "(", "request", ")", ":", "keyword_ids", ",", "titles", "=", "[", "]", ",", "[", "]", "remove", "=", "punctuation", ".", "replace", "(", "\"-\"", ",", "\"\"", ")", "# Strip punctuation, allow dashes.", "for", "title", "in", "r...
Adds any new given keywords from the custom keywords field in the admin, and returns their IDs for use when saving a model with a keywords field.
[ "Adds", "any", "new", "given", "keywords", "from", "the", "custom", "keywords", "field", "in", "the", "admin", "and", "returns", "their", "IDs", "for", "use", "when", "saving", "a", "model", "with", "a", "keywords", "field", "." ]
python
train
45.222222
tanghaibao/jcvi
jcvi/assembly/postprocess.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/postprocess.py#L87-L99
def fasta2bed(fastafile): """ Alternative BED generation from FASTA file. Used for sanity check. """ dustfasta = fastafile.rsplit(".", 1)[0] + ".dust.fasta" for name, seq in parse_fasta(dustfasta): for islower, ss in groupby(enumerate(seq), key=lambda x: x[-1].islower()): if not islower: continue ss = list(ss) ms, mn = min(ss) xs, xn = max(ss) print("\t".join(str(x) for x in (name, ms, xs)))
[ "def", "fasta2bed", "(", "fastafile", ")", ":", "dustfasta", "=", "fastafile", ".", "rsplit", "(", "\".\"", ",", "1", ")", "[", "0", "]", "+", "\".dust.fasta\"", "for", "name", ",", "seq", "in", "parse_fasta", "(", "dustfasta", ")", ":", "for", "islowe...
Alternative BED generation from FASTA file. Used for sanity check.
[ "Alternative", "BED", "generation", "from", "FASTA", "file", ".", "Used", "for", "sanity", "check", "." ]
python
train
37.384615
facebookresearch/fastText
python/fastText/FastText.py
https://github.com/facebookresearch/fastText/blob/6dd2e11b5fe82854c4529d2a58d699b2cb182b1b/python/fastText/FastText.py#L163-L174
def get_words(self, include_freq=False, on_unicode_error='strict'): """ Get the entire list of words of the dictionary optionally including the frequency of the individual words. This does not include any subwords. For that please consult the function get_subwords. """ pair = self.f.getVocab(on_unicode_error) if include_freq: return (pair[0], np.array(pair[1])) else: return pair[0]
[ "def", "get_words", "(", "self", ",", "include_freq", "=", "False", ",", "on_unicode_error", "=", "'strict'", ")", ":", "pair", "=", "self", ".", "f", ".", "getVocab", "(", "on_unicode_error", ")", "if", "include_freq", ":", "return", "(", "pair", "[", "...
Get the entire list of words of the dictionary optionally including the frequency of the individual words. This does not include any subwords. For that please consult the function get_subwords.
[ "Get", "the", "entire", "list", "of", "words", "of", "the", "dictionary", "optionally", "including", "the", "frequency", "of", "the", "individual", "words", ".", "This", "does", "not", "include", "any", "subwords", ".", "For", "that", "please", "consult", "t...
python
train
39.083333
pricingassistant/mongokat
mongokat/document.py
https://github.com/pricingassistant/mongokat/blob/61eaf4bc1c4cc359c6f9592ec97b9a04d9561411/mongokat/document.py#L125-L146
def reload(self): """ allow to refresh the document, so after using update(), it could reload its value from the database. Be carreful : reload() will erase all unsaved values. If no _id is set in the document, a KeyError is raised. """ old_doc = self.mongokat_collection.find_one({"_id": self['_id']}, read_use="primary") if not old_doc: raise OperationFailure('Can not reload an unsaved document.' ' %s is not found in the database. Maybe _id was a string and not ObjectId?' % self['_id']) else: for k in list(self.keys()): del self[k] self.update(dotdict(old_doc)) self._initialized_with_doc = False
[ "def", "reload", "(", "self", ")", ":", "old_doc", "=", "self", ".", "mongokat_collection", ".", "find_one", "(", "{", "\"_id\"", ":", "self", "[", "'_id'", "]", "}", ",", "read_use", "=", "\"primary\"", ")", "if", "not", "old_doc", ":", "raise", "Oper...
allow to refresh the document, so after using update(), it could reload its value from the database. Be carreful : reload() will erase all unsaved values. If no _id is set in the document, a KeyError is raised.
[ "allow", "to", "refresh", "the", "document", "so", "after", "using", "update", "()", "it", "could", "reload", "its", "value", "from", "the", "database", "." ]
python
train
34.272727
YosaiProject/yosai
yosai/core/subject/subject.py
https://github.com/YosaiProject/yosai/blob/7f96aa6b837ceae9bf3d7387cd7e35f5ab032575/yosai/core/subject/subject.py#L495-L507
def run_as(self, identifiers): """ :type identifiers: subject_abcs.IdentifierCollection """ if (not self.has_identifiers): msg = ("This subject does not yet have an identity. Assuming the " "identity of another Subject is only allowed for Subjects " "with an existing identity. Try logging this subject in " "first, or using the DelegatingSubject.Builder " "to build ad hoc Subject instances with identities as " "necessary.") raise ValueError(msg) self.push_identity(identifiers)
[ "def", "run_as", "(", "self", ",", "identifiers", ")", ":", "if", "(", "not", "self", ".", "has_identifiers", ")", ":", "msg", "=", "(", "\"This subject does not yet have an identity. Assuming the \"", "\"identity of another Subject is only allowed for Subjects \"", "\"wit...
:type identifiers: subject_abcs.IdentifierCollection
[ ":", "type", "identifiers", ":", "subject_abcs", ".", "IdentifierCollection" ]
python
train
48.461538
kobejohn/PQHelper
pqhelper/base.py
https://github.com/kobejohn/PQHelper/blob/d2b78a22dcb631794295e6a159b06f39c3f10db6/pqhelper/base.py#L160-L179
def _board_from_game_image(self, game_image): """Return a board object matching the board in the game image. Return None if any tiles are not identified. """ # board image board_rect = self._board_tools['board_region'].region_in(game_image) t, l, b, r = board_rect board_image = game_image[t:b, l:r] # board grid and tiles --> fill in a Board object board = Board() grid = self._board_tools['grid'] tile_id = self._board_tools['tile_id'] for p, borders in grid.borders_by_grid_position(board_image): t, l, b, r = borders tile = board_image[t:b, l:r] tile_character = tile_id.identify(tile) if tile_character is None: return None # soft failure board[p] = Tile.singleton(tile_character) return board
[ "def", "_board_from_game_image", "(", "self", ",", "game_image", ")", ":", "# board image", "board_rect", "=", "self", ".", "_board_tools", "[", "'board_region'", "]", ".", "region_in", "(", "game_image", ")", "t", ",", "l", ",", "b", ",", "r", "=", "board...
Return a board object matching the board in the game image. Return None if any tiles are not identified.
[ "Return", "a", "board", "object", "matching", "the", "board", "in", "the", "game", "image", ".", "Return", "None", "if", "any", "tiles", "are", "not", "identified", "." ]
python
train
43
notsorandomname/pytb
py-modules/cpython.py
https://github.com/notsorandomname/pytb/blob/8f3544b6b72917f6b9b493a3a06025f88671aff7/py-modules/cpython.py#L745-L759
def get_all_objects(self): "Return pointers to all GC tracked objects" for i, generation in enumerate(self.gc_generations): generation_head_ptr = pygc_head_ptr = generation.head.get_pointer() generation_head_addr = generation_head_ptr._value while True: # _PyObjectBase_GC_UNTRACK macro says that # gc_prev always points to some value # there is still a race condition if PyGC_Head # gets free'd and overwritten just before we look # at him pygc_head_ptr = pygc_head_ptr.deref().gc_next if pygc_head_ptr._value == generation_head_addr: break yield pygc_head_ptr.deref().get_object_ptr()
[ "def", "get_all_objects", "(", "self", ")", ":", "for", "i", ",", "generation", "in", "enumerate", "(", "self", ".", "gc_generations", ")", ":", "generation_head_ptr", "=", "pygc_head_ptr", "=", "generation", ".", "head", ".", "get_pointer", "(", ")", "gener...
Return pointers to all GC tracked objects
[ "Return", "pointers", "to", "all", "GC", "tracked", "objects" ]
python
train
51.466667
anttttti/Wordbatch
wordbatch/feature_union.py
https://github.com/anttttti/Wordbatch/blob/ef57b5c1d87d9c82fb096598125c2511f9819e4d/wordbatch/feature_union.py#L139-L171
def fit_transform(self, X, y=None, **fit_params): """Fit all transformers, transform the data and concatenate results. Parameters ---------- X : iterable or array-like, depending on transformers Input data to be transformed. y : array-like, shape (n_samples, ...), optional Targets for supervised learning. Returns ------- X_t : array-like or sparse matrix, shape (n_samples, sum_n_components) hstack of results of transformers. sum_n_components is the sum of n_components (output dimension) over transformers. """ self._validate_transformers() with Pool(self.n_jobs) as pool: result = pool.starmap(_fit_transform_one, ((trans, weight, X[trans['col_pick']] if hasattr(trans, 'col_pick') else X, y) for name, trans, weight in self._iter())) if not result: # All transformers are None return np.zeros((X.shape[0], 0)) Xs, transformers = zip(*result) self._update_transformer_list(transformers) if self.concatenate: if any(sparse.issparse(f) for f in Xs): Xs = sparse.hstack(Xs).tocsr() else: Xs = np.hstack(Xs) return Xs
[ "def", "fit_transform", "(", "self", ",", "X", ",", "y", "=", "None", ",", "*", "*", "fit_params", ")", ":", "self", ".", "_validate_transformers", "(", ")", "with", "Pool", "(", "self", ".", "n_jobs", ")", "as", "pool", ":", "result", "=", "pool", ...
Fit all transformers, transform the data and concatenate results. Parameters ---------- X : iterable or array-like, depending on transformers Input data to be transformed. y : array-like, shape (n_samples, ...), optional Targets for supervised learning. Returns ------- X_t : array-like or sparse matrix, shape (n_samples, sum_n_components) hstack of results of transformers. sum_n_components is the sum of n_components (output dimension) over transformers.
[ "Fit", "all", "transformers", "transform", "the", "data", "and", "concatenate", "results", "." ]
python
train
39.636364
sassoo/goldman
goldman/resources/models.py
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/resources/models.py#L20-L39
def on_get(resc, req, resp): """ Get the models identified by query parameters We return an empty list if no models are found. """ signals.pre_req.send(resc.model) signals.pre_req_search.send(resc.model) models = goldman.sess.store.search(resc.rtype, **{ 'filters': req.filters, 'pages': req.pages, 'sorts': req.sorts, }) props = to_rest_models(models, includes=req.includes) resp.serialize(props) signals.post_req.send(resc.model) signals.post_req_search.send(resc.model)
[ "def", "on_get", "(", "resc", ",", "req", ",", "resp", ")", ":", "signals", ".", "pre_req", ".", "send", "(", "resc", ".", "model", ")", "signals", ".", "pre_req_search", ".", "send", "(", "resc", ".", "model", ")", "models", "=", "goldman", ".", "...
Get the models identified by query parameters We return an empty list if no models are found.
[ "Get", "the", "models", "identified", "by", "query", "parameters" ]
python
train
26.3
Azure/azure-cosmos-table-python
azure-cosmosdb-table/samples/table/table_usage.py
https://github.com/Azure/azure-cosmos-table-python/blob/a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0/azure-cosmosdb-table/samples/table/table_usage.py#L203-L225
def create_entity_class(self): ''' Creates a class-based entity with fixed values, using all of the supported data types. ''' entity = Entity() # Partition key and row key must be strings and are required entity.PartitionKey = 'pk{}'.format(str(uuid.uuid4()).replace('-', '')) entity.RowKey = 'rk{}'.format(str(uuid.uuid4()).replace('-', '')) # Some basic types are inferred entity.age = 39 # EdmType.INT64 entity.large = 933311100 # EdmType.INT64 entity.sex = 'male' # EdmType.STRING entity.married = True # EdmType.BOOLEAN entity.ratio = 3.1 # EdmType.DOUBLE entity.birthday = datetime(1970, 10, 4) # EdmType.DATETIME # Binary, Int32 and GUID must be explicitly typed entity.binary = EntityProperty(EdmType.BINARY, b'xyz') entity.other = EntityProperty(EdmType.INT32, 20) entity.clsid = EntityProperty(EdmType.GUID, 'c9da6455-213d-42c9-9a79-3e9149a57833') return entity
[ "def", "create_entity_class", "(", "self", ")", ":", "entity", "=", "Entity", "(", ")", "# Partition key and row key must be strings and are required", "entity", ".", "PartitionKey", "=", "'pk{}'", ".", "format", "(", "str", "(", "uuid", ".", "uuid4", "(", ")", ...
Creates a class-based entity with fixed values, using all of the supported data types.
[ "Creates", "a", "class", "-", "based", "entity", "with", "fixed", "values", "using", "all", "of", "the", "supported", "data", "types", "." ]
python
train
43.913043
dtmilano/AndroidViewClient
src/com/dtmilano/android/viewclient.py
https://github.com/dtmilano/AndroidViewClient/blob/7e6e83fde63af99e5e4ab959712ecf94f9881aa2/src/com/dtmilano/android/viewclient.py#L920-L967
def touch(self, eventType=adbclient.DOWN_AND_UP, deltaX=0, deltaY=0): ''' Touches the center of this C{View}. The touch can be displaced from the center by using C{deltaX} and C{deltaY} values. @param eventType: The event type @type eventType: L{adbclient.DOWN}, L{adbclient.UP} or L{adbclient.DOWN_AND_UP} @param deltaX: Displacement from center (X axis) @type deltaX: int @param deltaY: Displacement from center (Y axis) @type deltaY: int ''' (x, y) = self.getCenter() if deltaX: x += deltaX if deltaY: y += deltaY if DEBUG_TOUCH: print >>sys.stderr, "should touch @ (%d, %d)" % (x, y) if VIEW_CLIENT_TOUCH_WORKAROUND_ENABLED and eventType == adbclient.DOWN_AND_UP: if WARNINGS: print >> sys.stderr, "ViewClient: touch workaround enabled" self.device.touch(x, y, eventType=adbclient.DOWN) time.sleep(50/1000.0) self.device.touch(x+10, y+10, eventType=adbclient.UP) else: if self.uiAutomatorHelper: selector = self.obtainSelectorForView() if selector: try: oid = self.uiAutomatorHelper.findObject(bySelector=selector) if DEBUG_UI_AUTOMATOR_HELPER: print >> sys.stderr, "oid=", oid print >> sys.stderr, "ignoring click delta to click View as UiObject" oid.click(); except RuntimeError as e: print >> sys.stderr, e.message print >> sys.stderr, "UiObject click failed, using co-ordinates" self.uiAutomatorHelper.click(x=x, y=y) else: # FIXME: # The View has no CD, TEXT or ID so we cannot use it in a selector to findObject() # We should try content description, text, and perhaps other properties before surrendering. # For now, tet's fall back to click(x, y) self.uiAutomatorHelper.click(x=x, y=y) else: self.device.touch(x, y, eventType=eventType)
[ "def", "touch", "(", "self", ",", "eventType", "=", "adbclient", ".", "DOWN_AND_UP", ",", "deltaX", "=", "0", ",", "deltaY", "=", "0", ")", ":", "(", "x", ",", "y", ")", "=", "self", ".", "getCenter", "(", ")", "if", "deltaX", ":", "x", "+=", "...
Touches the center of this C{View}. The touch can be displaced from the center by using C{deltaX} and C{deltaY} values. @param eventType: The event type @type eventType: L{adbclient.DOWN}, L{adbclient.UP} or L{adbclient.DOWN_AND_UP} @param deltaX: Displacement from center (X axis) @type deltaX: int @param deltaY: Displacement from center (Y axis) @type deltaY: int
[ "Touches", "the", "center", "of", "this", "C", "{", "View", "}", ".", "The", "touch", "can", "be", "displaced", "from", "the", "center", "by", "using", "C", "{", "deltaX", "}", "and", "C", "{", "deltaY", "}", "values", "." ]
python
train
47.0625
fuzeman/PyUPnP
pyupnp/lict.py
https://github.com/fuzeman/PyUPnP/blob/6dea64be299952346a14300ab6cc7dac42736433/pyupnp/lict.py#L52-L72
def _get_object_key(self, p_object): """Get key from object""" matched_key = None matched_index = None if hasattr(p_object, self._searchNames[0]): return getattr(p_object, self._searchNames[0]) for x in xrange(len(self._searchNames)): key = self._searchNames[x] if hasattr(p_object, key): matched_key = key matched_index = x if matched_key is None: raise KeyError() if matched_index != 0 and self._searchOptimize: self._searchNames.insert(0, self._searchNames.pop(matched_index)) return getattr(p_object, matched_key)
[ "def", "_get_object_key", "(", "self", ",", "p_object", ")", ":", "matched_key", "=", "None", "matched_index", "=", "None", "if", "hasattr", "(", "p_object", ",", "self", ".", "_searchNames", "[", "0", "]", ")", ":", "return", "getattr", "(", "p_object", ...
Get key from object
[ "Get", "key", "from", "object" ]
python
train
31.333333
cloudmesh/cloudmesh-common
cloudmesh/common/Shell.py
https://github.com/cloudmesh/cloudmesh-common/blob/ae4fae09cd78205d179ea692dc58f0b0c8fea2b8/cloudmesh/common/Shell.py#L213-L224
def get_python(cls): """ returns the python and pip version :return: python version, pip version """ python_version = sys.version_info[:3] v_string = [str(i) for i in python_version] python_version_s = '.'.join(v_string) # pip_version = pip.__version__ pip_version = Shell.pip("--version").split()[1] return python_version_s, pip_version
[ "def", "get_python", "(", "cls", ")", ":", "python_version", "=", "sys", ".", "version_info", "[", ":", "3", "]", "v_string", "=", "[", "str", "(", "i", ")", "for", "i", "in", "python_version", "]", "python_version_s", "=", "'.'", ".", "join", "(", "...
returns the python and pip version :return: python version, pip version
[ "returns", "the", "python", "and", "pip", "version", ":", "return", ":", "python", "version", "pip", "version" ]
python
train
33.916667
PythonCharmers/python-future
src/future/backports/http/client.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/http/client.py#L852-L900
def send(self, data): """Send `data' to the server. ``data`` can be a string object, a bytes object, an array object, a file-like object that supports a .read() method, or an iterable object. """ if self.sock is None: if self.auto_open: self.connect() else: raise NotConnected() if self.debuglevel > 0: print("send:", repr(data)) blocksize = 8192 # Python 2.7 array objects have a read method which is incompatible # with the 2-arg calling syntax below. if hasattr(data, "read") and not isinstance(data, array): if self.debuglevel > 0: print("sendIng a read()able") encode = False try: mode = data.mode except AttributeError: # io.BytesIO and other file-like objects don't have a `mode` # attribute. pass else: if "b" not in mode: encode = True if self.debuglevel > 0: print("encoding file using iso-8859-1") while 1: datablock = data.read(blocksize) if not datablock: break if encode: datablock = datablock.encode("iso-8859-1") self.sock.sendall(datablock) return try: self.sock.sendall(data) except TypeError: if isinstance(data, collections.Iterable): for d in data: self.sock.sendall(d) else: raise TypeError("data should be a bytes-like object " "or an iterable, got %r" % type(data))
[ "def", "send", "(", "self", ",", "data", ")", ":", "if", "self", ".", "sock", "is", "None", ":", "if", "self", ".", "auto_open", ":", "self", ".", "connect", "(", ")", "else", ":", "raise", "NotConnected", "(", ")", "if", "self", ".", "debuglevel",...
Send `data' to the server. ``data`` can be a string object, a bytes object, an array object, a file-like object that supports a .read() method, or an iterable object.
[ "Send", "data", "to", "the", "server", ".", "data", "can", "be", "a", "string", "object", "a", "bytes", "object", "an", "array", "object", "a", "file", "-", "like", "object", "that", "supports", "a", ".", "read", "()", "method", "or", "an", "iterable",...
python
train
36.22449
pypa/pipenv
pipenv/vendor/urllib3/util/timeout.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/urllib3/util/timeout.py#L196-L211
def connect_timeout(self): """ Get the value to use when setting a connection timeout. This will be a positive float or integer, the value None (never timeout), or the default system timeout. :return: Connect timeout. :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None """ if self.total is None: return self._connect if self._connect is None or self._connect is self.DEFAULT_TIMEOUT: return self.total return min(self._connect, self.total)
[ "def", "connect_timeout", "(", "self", ")", ":", "if", "self", ".", "total", "is", "None", ":", "return", "self", ".", "_connect", "if", "self", ".", "_connect", "is", "None", "or", "self", ".", "_connect", "is", "self", ".", "DEFAULT_TIMEOUT", ":", "r...
Get the value to use when setting a connection timeout. This will be a positive float or integer, the value None (never timeout), or the default system timeout. :return: Connect timeout. :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
[ "Get", "the", "value", "to", "use", "when", "setting", "a", "connection", "timeout", "." ]
python
train
33.3125
kensho-technologies/graphql-compiler
graphql_compiler/compiler/ir_lowering_match/ir_lowering.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_lowering_match/ir_lowering.py#L224-L248
def _flatten_location_translations(location_translations): """If location A translates to B, and B to C, then make A translate directly to C. Args: location_translations: dict of Location -> Location, where the key translates to the value. Mutated in place for efficiency and simplicity of implementation. """ sources_to_process = set(six.iterkeys(location_translations)) def _update_translation(source): """Return the proper (fully-flattened) translation for the given location.""" destination = location_translations[source] if destination not in location_translations: # "destination" cannot be translated, no further flattening required. return destination else: # "destination" can itself be translated -- do so, # and then flatten "source" to the final translation as well. sources_to_process.discard(destination) final_destination = _update_translation(destination) location_translations[source] = final_destination return final_destination while sources_to_process: _update_translation(sources_to_process.pop())
[ "def", "_flatten_location_translations", "(", "location_translations", ")", ":", "sources_to_process", "=", "set", "(", "six", ".", "iterkeys", "(", "location_translations", ")", ")", "def", "_update_translation", "(", "source", ")", ":", "\"\"\"Return the proper (fully...
If location A translates to B, and B to C, then make A translate directly to C. Args: location_translations: dict of Location -> Location, where the key translates to the value. Mutated in place for efficiency and simplicity of implementation.
[ "If", "location", "A", "translates", "to", "B", "and", "B", "to", "C", "then", "make", "A", "translate", "directly", "to", "C", "." ]
python
train
47.88
quantmind/pulsar
pulsar/async/monitor.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/async/monitor.py#L73-L87
def spawn(self, monitor, kind=None, **params): '''Spawn a new :class:`Actor` and return its :class:`.ActorProxyMonitor`. ''' proxy = _spawn_actor(kind, monitor, **params) # Add to the list of managed actors if this is a remote actor if isinstance(proxy, Actor): self._register(proxy) return proxy else: proxy.monitor = monitor self.managed_actors[proxy.aid] = proxy future = actor_proxy_future(proxy) proxy.start() return future
[ "def", "spawn", "(", "self", ",", "monitor", ",", "kind", "=", "None", ",", "*", "*", "params", ")", ":", "proxy", "=", "_spawn_actor", "(", "kind", ",", "monitor", ",", "*", "*", "params", ")", "# Add to the list of managed actors if this is a remote actor", ...
Spawn a new :class:`Actor` and return its :class:`.ActorProxyMonitor`.
[ "Spawn", "a", "new", ":", "class", ":", "Actor", "and", "return", "its", ":", "class", ":", ".", "ActorProxyMonitor", "." ]
python
train
36.933333
google/grr
grr/client/grr_response_client/client_actions/standard.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_actions/standard.py#L167-L179
def Run(self, args): """Lists a directory.""" try: directory = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress) except (IOError, OSError) as e: self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.IOERROR, e) return files = list(directory.ListFiles()) files.sort(key=lambda x: x.pathspec.path) for response in files: self.SendReply(response)
[ "def", "Run", "(", "self", ",", "args", ")", ":", "try", ":", "directory", "=", "vfs", ".", "VFSOpen", "(", "args", ".", "pathspec", ",", "progress_callback", "=", "self", ".", "Progress", ")", "except", "(", "IOError", ",", "OSError", ")", "as", "e"...
Lists a directory.
[ "Lists", "a", "directory", "." ]
python
train
29.769231
dgketchum/satellite_image
sat_image/image.py
https://github.com/dgketchum/satellite_image/blob/0207fbb7b2bbf14f4307db65489bb4d4c5b92f52/sat_image/image.py#L329-L336
def ndvi(self): """ Normalized difference vegetation index. :return: NDVI """ red, nir = self.reflectance(3), self.reflectance(4) ndvi = self._divide_zero((nir - red), (nir + red), nan) return ndvi
[ "def", "ndvi", "(", "self", ")", ":", "red", ",", "nir", "=", "self", ".", "reflectance", "(", "3", ")", ",", "self", ".", "reflectance", "(", "4", ")", "ndvi", "=", "self", ".", "_divide_zero", "(", "(", "nir", "-", "red", ")", ",", "(", "nir"...
Normalized difference vegetation index. :return: NDVI
[ "Normalized", "difference", "vegetation", "index", ".", ":", "return", ":", "NDVI" ]
python
train
29.875
viswa-swami/python-foscam
libpyfoscam/foscam.py
https://github.com/viswa-swami/python-foscam/blob/d76f2f7016959b7b758751637fad103c9032e488/libpyfoscam/foscam.py#L286-L292
def set_sub_stream_format(self, format, callback=None): ''' Set the stream fromat of sub stream???? ''' params = {'format': format} return self.execute_command('setSubStreamFormat', params, callback=callback)
[ "def", "set_sub_stream_format", "(", "self", ",", "format", ",", "callback", "=", "None", ")", ":", "params", "=", "{", "'format'", ":", "format", "}", "return", "self", ".", "execute_command", "(", "'setSubStreamFormat'", ",", "params", ",", "callback", "="...
Set the stream fromat of sub stream????
[ "Set", "the", "stream", "fromat", "of", "sub", "stream????" ]
python
train
40.285714
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/genconutils/forminput.py
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/genconutils/forminput.py#L345-L365
def set_generation_type(self, num_processors=-1, num_splits=1000, verbose=-1): """Change generation type. Choose weather to generate the data in parallel or on a single processor. Args: num_processors (int or None, optional): Number of parallel processors to use. If ``num_processors==-1``, this will use multiprocessing module and use available cpus. If single generation is desired, num_processors is set to ``None``. Default is -1. num_splits (int, optional): Number of binaries to run during each process. Default is 1000. verbose (int, optional): Describes the notification of when parallel processes are finished. Value describes cadence of process completion notifications. If ``verbose == -1``, no notifications are given. Default is -1. """ self.parallel_input.num_processors = num_processors self.parallel_input.num_splits = num_splits self.parallel_input.verbose = verbose return
[ "def", "set_generation_type", "(", "self", ",", "num_processors", "=", "-", "1", ",", "num_splits", "=", "1000", ",", "verbose", "=", "-", "1", ")", ":", "self", ".", "parallel_input", ".", "num_processors", "=", "num_processors", "self", ".", "parallel_inpu...
Change generation type. Choose weather to generate the data in parallel or on a single processor. Args: num_processors (int or None, optional): Number of parallel processors to use. If ``num_processors==-1``, this will use multiprocessing module and use available cpus. If single generation is desired, num_processors is set to ``None``. Default is -1. num_splits (int, optional): Number of binaries to run during each process. Default is 1000. verbose (int, optional): Describes the notification of when parallel processes are finished. Value describes cadence of process completion notifications. If ``verbose == -1``, no notifications are given. Default is -1.
[ "Change", "generation", "type", "." ]
python
train
50.904762
OpenMath/py-openmath
openmath/convert_pickle.py
https://github.com/OpenMath/py-openmath/blob/4906aa9ccf606f533675c28823772e07c30fd220/openmath/convert_pickle.py#L167-L178
def OMSymbol(self, module, name): r""" Helper function to build an OMS object EXAMPLES:: >>> from openmath.convert_pickle import PickleConverter >>> converter = PickleConverter() >>> o = converter.OMSymbol(module="foo.bar", name="baz"); o OMSymbol(name='baz', cd='foo.bar', id=None, cdbase='http://python.org/') """ return om.OMSymbol(cdbase=self._cdbase, cd=module, name=name)
[ "def", "OMSymbol", "(", "self", ",", "module", ",", "name", ")", ":", "return", "om", ".", "OMSymbol", "(", "cdbase", "=", "self", ".", "_cdbase", ",", "cd", "=", "module", ",", "name", "=", "name", ")" ]
r""" Helper function to build an OMS object EXAMPLES:: >>> from openmath.convert_pickle import PickleConverter >>> converter = PickleConverter() >>> o = converter.OMSymbol(module="foo.bar", name="baz"); o OMSymbol(name='baz', cd='foo.bar', id=None, cdbase='http://python.org/')
[ "r", "Helper", "function", "to", "build", "an", "OMS", "object", "EXAMPLES", "::" ]
python
test
38.666667
exosite-labs/pyonep
pyonep/onep.py
https://github.com/exosite-labs/pyonep/blob/d27b621b00688a542e0adcc01f3e3354c05238a1/pyonep/onep.py#L438-L449
def recordbatch(self, auth, resource, entries, defer=False): """ Records a list of historical entries to the resource specified. Calls a function that bulids a request that writes a list of historical entries to the specified resource. Args: auth: Takes the device cik resource: Takes the dataport alias or rid. entries: A list of entries to write to the resource. """ return self._call('recordbatch', auth, [resource, entries], defer)
[ "def", "recordbatch", "(", "self", ",", "auth", ",", "resource", ",", "entries", ",", "defer", "=", "False", ")", ":", "return", "self", ".", "_call", "(", "'recordbatch'", ",", "auth", ",", "[", "resource", ",", "entries", "]", ",", "defer", ")" ]
Records a list of historical entries to the resource specified. Calls a function that bulids a request that writes a list of historical entries to the specified resource. Args: auth: Takes the device cik resource: Takes the dataport alias or rid. entries: A list of entries to write to the resource.
[ "Records", "a", "list", "of", "historical", "entries", "to", "the", "resource", "specified", "." ]
python
train
42.5
B2W-BIT/aiologger
aiologger/formatters/json.py
https://github.com/B2W-BIT/aiologger/blob/0b366597a8305d5577a267305e81d5e4784cd398/aiologger/formatters/json.py#L44-L59
def format(self, record: logging.LogRecord) -> str: """ Formats a record and serializes it as a JSON str. If record message isnt already a dict, initializes a new dict and uses `default_msg_fieldname` as a key as the record msg as the value. """ msg: Union[str, dict] = record.msg if not isinstance(record.msg, dict): msg = {self.default_msg_fieldname: msg} if record.exc_info: # type: ignore msg["exc_info"] = record.exc_info if record.exc_text: # type: ignore msg["exc_text"] = record.exc_text # type: ignore return self.serializer(msg, default=self._default_handler)
[ "def", "format", "(", "self", ",", "record", ":", "logging", ".", "LogRecord", ")", "->", "str", ":", "msg", ":", "Union", "[", "str", ",", "dict", "]", "=", "record", ".", "msg", "if", "not", "isinstance", "(", "record", ".", "msg", ",", "dict", ...
Formats a record and serializes it as a JSON str. If record message isnt already a dict, initializes a new dict and uses `default_msg_fieldname` as a key as the record msg as the value.
[ "Formats", "a", "record", "and", "serializes", "it", "as", "a", "JSON", "str", ".", "If", "record", "message", "isnt", "already", "a", "dict", "initializes", "a", "new", "dict", "and", "uses", "default_msg_fieldname", "as", "a", "key", "as", "the", "record...
python
train
42.1875
saltstack/salt
salt/runners/vault.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/vault.py#L262-L269
def _get_token_create_url(config): ''' Create Vault url for token creation ''' role_name = config.get('role_name', None) auth_path = '/v1/auth/token/create' base_url = config['url'] return '/'.join(x.strip('/') for x in (base_url, auth_path, role_name) if x)
[ "def", "_get_token_create_url", "(", "config", ")", ":", "role_name", "=", "config", ".", "get", "(", "'role_name'", ",", "None", ")", "auth_path", "=", "'/v1/auth/token/create'", "base_url", "=", "config", "[", "'url'", "]", "return", "'/'", ".", "join", "(...
Create Vault url for token creation
[ "Create", "Vault", "url", "for", "token", "creation" ]
python
train
34.875
stevearc/dql
bin/install.py
https://github.com/stevearc/dql/blob/e9d3aa22873076dae5ebd02e35318aa996b1e56a/bin/install.py#L39-L55
def main(): """ Build a standalone dql executable """ venv_dir = tempfile.mkdtemp() try: make_virtualenv(venv_dir) print("Downloading dependencies") pip = os.path.join(venv_dir, "bin", "pip") subprocess.check_call([pip, "install", "pex"]) print("Building executable") pex = os.path.join(venv_dir, "bin", "pex") subprocess.check_call([pex, "dql", "-m", "dql:main", "-o", "dql"]) print("dql executable written to %s" % os.path.abspath("dql")) finally: shutil.rmtree(venv_dir)
[ "def", "main", "(", ")", ":", "venv_dir", "=", "tempfile", ".", "mkdtemp", "(", ")", "try", ":", "make_virtualenv", "(", "venv_dir", ")", "print", "(", "\"Downloading dependencies\"", ")", "pip", "=", "os", ".", "path", ".", "join", "(", "venv_dir", ",",...
Build a standalone dql executable
[ "Build", "a", "standalone", "dql", "executable" ]
python
train
32.235294
pandas-dev/pandas
doc/make.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/doc/make.py#L256-L262
def clean(): """ Clean documentation generated files. """ shutil.rmtree(BUILD_PATH, ignore_errors=True) shutil.rmtree(os.path.join(SOURCE_PATH, 'reference', 'api'), ignore_errors=True)
[ "def", "clean", "(", ")", ":", "shutil", ".", "rmtree", "(", "BUILD_PATH", ",", "ignore_errors", "=", "True", ")", "shutil", ".", "rmtree", "(", "os", ".", "path", ".", "join", "(", "SOURCE_PATH", ",", "'reference'", ",", "'api'", ")", ",", "ignore_err...
Clean documentation generated files.
[ "Clean", "documentation", "generated", "files", "." ]
python
train
34.285714
cloud9ers/gurumate
environment/lib/python2.7/site-packages/nose/suite.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/nose/suite.py#L196-L232
def run(self, result): """Run tests in suite inside of suite fixtures. """ # proxy the result for myself log.debug("suite %s (%s) run called, tests: %s", id(self), self, self._tests) #import pdb #pdb.set_trace() if self.resultProxy: result, orig = self.resultProxy(result, self), result else: result, orig = result, result try: self.setUp() except KeyboardInterrupt: raise except: self.error_context = 'setup' result.addError(self, self._exc_info()) return try: for test in self._tests: if result.shouldStop: log.debug("stopping") break # each nose.case.Test will create its own result proxy # so the cases need the original result, to avoid proxy # chains test(orig) finally: self.has_run = True try: self.tearDown() except KeyboardInterrupt: raise except: self.error_context = 'teardown' result.addError(self, self._exc_info())
[ "def", "run", "(", "self", ",", "result", ")", ":", "# proxy the result for myself", "log", ".", "debug", "(", "\"suite %s (%s) run called, tests: %s\"", ",", "id", "(", "self", ")", ",", "self", ",", "self", ".", "_tests", ")", "#import pdb", "#pdb.set_trace()"...
Run tests in suite inside of suite fixtures.
[ "Run", "tests", "in", "suite", "inside", "of", "suite", "fixtures", "." ]
python
test
33.108108
dcramer/logan
logan/runner.py
https://github.com/dcramer/logan/blob/8b18456802d631a822e2823bf9a4e9810a15a20e/logan/runner.py#L63-L141
def configure_app(config_path=None, project=None, default_config_path=None, default_settings=None, settings_initializer=None, settings_envvar=None, initializer=None, allow_extras=True, config_module_name=None, runner_name=None, on_configure=None): """ :param project: should represent the canonical name for the project, generally the same name it assigned in distutils. :param default_config_path: the default location for the configuration file. :param default_settings: default settings to load (think inheritence). :param settings_initializer: a callback function which should return a string representing the default settings template to generate. :param initializer: a callback function which will be executed before the command is executed. It is passed a dictionary of various configuration attributes. """ global __configured project_filename = sanitize_name(project) if default_config_path is None: default_config_path = '~/%s/%s.conf.py' % (project_filename, project_filename) if settings_envvar is None: settings_envvar = project_filename.upper() + '_CONF' if config_module_name is None: config_module_name = project_filename + '_config' # normalize path if settings_envvar in os.environ: default_config_path = os.environ.get(settings_envvar) else: default_config_path = os.path.normpath(os.path.abspath(os.path.expanduser(default_config_path))) if not config_path: config_path = default_config_path config_path = os.path.expanduser(config_path) if not os.path.exists(config_path): if runner_name: raise ValueError("Configuration file does not exist. Use '%s init' to initialize the file." 
% (runner_name,)) raise ValueError("Configuration file does not exist at %r" % (config_path,)) os.environ['DJANGO_SETTINGS_MODULE'] = config_module_name def settings_callback(settings): if initializer is None: return try: initializer({ 'project': project, 'config_path': config_path, 'settings': settings, }) except Exception: # XXX: Django doesn't like various errors in this path import sys import traceback traceback.print_exc() sys.exit(1) importer.install( config_module_name, config_path, default_settings, allow_extras=allow_extras, callback=settings_callback) __configured = True # HACK(dcramer): we need to force access of django.conf.settings to # ensure we don't hit any import-driven recursive behavior from django.conf import settings hasattr(settings, 'INSTALLED_APPS') if on_configure: on_configure({ 'project': project, 'config_path': config_path, 'settings': settings, })
[ "def", "configure_app", "(", "config_path", "=", "None", ",", "project", "=", "None", ",", "default_config_path", "=", "None", ",", "default_settings", "=", "None", ",", "settings_initializer", "=", "None", ",", "settings_envvar", "=", "None", ",", "initializer"...
:param project: should represent the canonical name for the project, generally the same name it assigned in distutils. :param default_config_path: the default location for the configuration file. :param default_settings: default settings to load (think inheritence). :param settings_initializer: a callback function which should return a string representing the default settings template to generate. :param initializer: a callback function which will be executed before the command is executed. It is passed a dictionary of various configuration attributes.
[ ":", "param", "project", ":", "should", "represent", "the", "canonical", "name", "for", "the", "project", "generally", "the", "same", "name", "it", "assigned", "in", "distutils", ".", ":", "param", "default_config_path", ":", "the", "default", "location", "for...
python
train
37.025316
yunojuno/elasticsearch-django
elasticsearch_django/models.py
https://github.com/yunojuno/elasticsearch-django/blob/e8d98d32bcd77f1bedb8f1a22b6523ca44ffd489/elasticsearch_django/models.py#L308-L342
def as_search_action(self, *, index, action): """ Return an object as represented in a bulk api operation. Bulk API operations have a very specific format. This function will call the standard `as_search_document` method on the object and then wrap that up in the correct format for the action specified. https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html Args: index: string, the name of the index in which the action is to be taken. Bulk operations are only every carried out on a single index at a time. action: string ['index' | 'update' | 'delete'] - this decides how the final document is formatted. Returns a dictionary. """ if action not in ("index", "update", "delete"): raise ValueError("Action must be 'index', 'update' or 'delete'.") document = { "_index": index, "_type": self.search_doc_type, "_op_type": action, "_id": self.pk, } if action == "index": document["_source"] = self.as_search_document(index=index) elif action == "update": document["doc"] = self.as_search_document(index=index) return document
[ "def", "as_search_action", "(", "self", ",", "*", ",", "index", ",", "action", ")", ":", "if", "action", "not", "in", "(", "\"index\"", ",", "\"update\"", ",", "\"delete\"", ")", ":", "raise", "ValueError", "(", "\"Action must be 'index', 'update' or 'delete'.\"...
Return an object as represented in a bulk api operation. Bulk API operations have a very specific format. This function will call the standard `as_search_document` method on the object and then wrap that up in the correct format for the action specified. https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html Args: index: string, the name of the index in which the action is to be taken. Bulk operations are only every carried out on a single index at a time. action: string ['index' | 'update' | 'delete'] - this decides how the final document is formatted. Returns a dictionary.
[ "Return", "an", "object", "as", "represented", "in", "a", "bulk", "api", "operation", "." ]
python
train
37.085714
yyuu/botornado
boto/vpc/__init__.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/vpc/__init__.py#L489-L524
def get_all_subnets(self, subnet_ids=None, filters=None): """ Retrieve information about your Subnets. You can filter results to return information only about those Subnets that match your search parameters. Otherwise, all Subnets associated with your account are returned. :type subnet_ids: list :param subnet_ids: A list of strings with the desired Subnet ID's :type filters: list of tuples :param filters: A list of tuples containing filters. Each tuple consists of a filter key and a filter value. Possible filter keys are: - *state*, the state of the Subnet (pending,available) - *vpdId*, the ID of teh VPC the subnet is in. - *cidrBlock*, CIDR block of the subnet - *availabilityZone*, the Availability Zone the subnet is in. :rtype: list :return: A list of :class:`boto.vpc.subnet.Subnet` """ params = {} if subnet_ids: self.build_list_params(params, subnet_ids, 'SubnetId') if filters: i = 1 for filter in filters: params[('Filter.%d.Name' % i)] = filter[0] params[('Filter.%d.Value.1' % i)] = filter[1] i += 1 return self.get_list('DescribeSubnets', params, [('item', Subnet)])
[ "def", "get_all_subnets", "(", "self", ",", "subnet_ids", "=", "None", ",", "filters", "=", "None", ")", ":", "params", "=", "{", "}", "if", "subnet_ids", ":", "self", ".", "build_list_params", "(", "params", ",", "subnet_ids", ",", "'SubnetId'", ")", "i...
Retrieve information about your Subnets. You can filter results to return information only about those Subnets that match your search parameters. Otherwise, all Subnets associated with your account are returned. :type subnet_ids: list :param subnet_ids: A list of strings with the desired Subnet ID's :type filters: list of tuples :param filters: A list of tuples containing filters. Each tuple consists of a filter key and a filter value. Possible filter keys are: - *state*, the state of the Subnet (pending,available) - *vpdId*, the ID of teh VPC the subnet is in. - *cidrBlock*, CIDR block of the subnet - *availabilityZone*, the Availability Zone the subnet is in. :rtype: list :return: A list of :class:`boto.vpc.subnet.Subnet`
[ "Retrieve", "information", "about", "your", "Subnets", ".", "You", "can", "filter", "results", "to", "return", "information", "only", "about", "those", "Subnets", "that", "match", "your", "search", "parameters", ".", "Otherwise", "all", "Subnets", "associated", ...
python
train
40.75
hapyak/flask-peewee-swagger
flask_peewee_swagger/swagger.py
https://github.com/hapyak/flask-peewee-swagger/blob/1b7dd54a5e823401b80e04ac421ee15c9fab3f06/flask_peewee_swagger/swagger.py#L181-L200
def get_listing_api(self, resource): """ Generates the meta descriptor for the resource listing api. """ get_all_params = self.get_listing_parameters(resource) get_all_api = { 'path': '/%s/' % resource.get_api_name(), 'description': 'Operations on %s' % resource.model.__name__, 'operations': [ { 'httpMethod': 'GET', 'nickname': 'list%ss' % resource.model .__name__, 'summary': 'Find %ss' % resource.model.__name__, 'parameters': get_all_params, } ] } return get_all_api
[ "def", "get_listing_api", "(", "self", ",", "resource", ")", ":", "get_all_params", "=", "self", ".", "get_listing_parameters", "(", "resource", ")", "get_all_api", "=", "{", "'path'", ":", "'/%s/'", "%", "resource", ".", "get_api_name", "(", ")", ",", "'des...
Generates the meta descriptor for the resource listing api.
[ "Generates", "the", "meta", "descriptor", "for", "the", "resource", "listing", "api", "." ]
python
train
33.75
google/grr
grr/server/grr_response_server/databases/mysql_flows.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_flows.py#L437-L498
def LeaseFlowForProcessing(self, client_id, flow_id, processing_time, cursor=None): """Marks a flow as being processed on this worker and returns it.""" query = ("SELECT " + self.FLOW_DB_FIELDS + "FROM flows WHERE client_id=%s AND flow_id=%s") cursor.execute( query, [db_utils.ClientIDToInt(client_id), db_utils.FlowIDToInt(flow_id)]) response = cursor.fetchall() if not response: raise db.UnknownFlowError(client_id, flow_id) row, = response rdf_flow = self._FlowObjectFromRow(row) now = rdfvalue.RDFDatetime.Now() if rdf_flow.processing_on and rdf_flow.processing_deadline > now: raise ValueError("Flow %s on client %s is already being processed." % (client_id, flow_id)) if (rdf_flow.parent_hunt_id is not None and # TODO(user): remove the check for a legacy hunt prefix as soon as # AFF4 is gone. not rdf_flow.parent_hunt_id.startswith("H:")): query = "SELECT hunt_state FROM hunts WHERE hunt_id=%s" args = [db_utils.HuntIDToInt(rdf_flow.parent_hunt_id)] rows_found = cursor.execute(query, args) if rows_found == 1: hunt_state, = cursor.fetchone() if (hunt_state is not None and not rdf_hunt_objects.IsHuntSuitableForFlowProcessing(hunt_state)): raise db.ParentHuntIsNotRunningError(client_id, flow_id, rdf_flow.parent_hunt_id, hunt_state) update_query = ("UPDATE flows SET " "processing_on=%s, " "processing_since=FROM_UNIXTIME(%s), " "processing_deadline=FROM_UNIXTIME(%s) " "WHERE client_id=%s and flow_id=%s") processing_deadline = now + processing_time process_id_string = utils.ProcessIdString() args = [ process_id_string, mysql_utils.RDFDatetimeToTimestamp(now), mysql_utils.RDFDatetimeToTimestamp(processing_deadline), db_utils.ClientIDToInt(client_id), db_utils.FlowIDToInt(flow_id) ] cursor.execute(update_query, args) # This needs to happen after we are sure that the write has succeeded. rdf_flow.processing_on = process_id_string rdf_flow.processing_since = now rdf_flow.processing_deadline = processing_deadline return rdf_flow
[ "def", "LeaseFlowForProcessing", "(", "self", ",", "client_id", ",", "flow_id", ",", "processing_time", ",", "cursor", "=", "None", ")", ":", "query", "=", "(", "\"SELECT \"", "+", "self", ".", "FLOW_DB_FIELDS", "+", "\"FROM flows WHERE client_id=%s AND flow_id=%s\"...
Marks a flow as being processed on this worker and returns it.
[ "Marks", "a", "flow", "as", "being", "processed", "on", "this", "worker", "and", "returns", "it", "." ]
python
train
39.758065
saltstack/salt
salt/modules/virt.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L170-L243
def __get_conn(**kwargs): ''' Detects what type of dom this node is and attempts to connect to the correct hypervisor via libvirt. :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults ''' # This has only been tested on kvm and xen, it needs to be expanded to # support all vm layers supported by libvirt # Connection string works on bhyve, but auth is not tested. username = kwargs.get('username', None) password = kwargs.get('password', None) conn_str = kwargs.get('connection', None) if not conn_str: conn_str = __salt__['config.get']('virt.connect', None) if conn_str is not None: salt.utils.versions.warn_until( 'Sodium', '\'virt.connect\' configuration property has been deprecated in favor ' 'of \'virt:connection:uri\'. \'virt.connect\' will stop being used in ' '{version}.' ) else: conn_str = __salt__['config.get']('libvirt:connection', None) if conn_str is not None: salt.utils.versions.warn_until( 'Sodium', '\'libvirt.connection\' configuration property has been deprecated in favor ' 'of \'virt:connection:uri\'. \'libvirt.connection\' will stop being used in ' '{version}.' ) conn_str = __salt__['config.get']('virt:connection:uri', conn_str) hypervisor = __salt__['config.get']('libvirt:hypervisor', None) if hypervisor is not None: salt.utils.versions.warn_until( 'Sodium', '\'libvirt.hypervisor\' configuration property has been deprecated. ' 'Rather use the \'virt:connection:uri\' to properly define the libvirt ' 'URI or alias of the host to connect to. \'libvirt:hypervisor\' will ' 'stop being used in {version}.' ) if hypervisor == 'esxi' and conn_str is None: salt.utils.versions.warn_until( 'Sodium', 'esxi hypervisor default with no default connection URI detected, ' 'please set \'virt:connection:uri\' to \'esx\' for keep the legacy ' 'behavior. 
Will default to libvirt guess once \'libvirt:hypervisor\' ' 'configuration is removed in {version}.' ) conn_str = 'esx' try: auth_types = [libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT, libvirt.VIR_CRED_ECHOPROMPT, libvirt.VIR_CRED_PASSPHRASE, libvirt.VIR_CRED_EXTERNAL] conn = libvirt.openAuth(conn_str, [auth_types, __get_request_auth(username, password), None], 0) except Exception: raise CommandExecutionError( 'Sorry, {0} failed to open a connection to the hypervisor ' 'software at {1}'.format( __grains__['fqdn'], conn_str ) ) return conn
[ "def", "__get_conn", "(", "*", "*", "kwargs", ")", ":", "# This has only been tested on kvm and xen, it needs to be expanded to", "# support all vm layers supported by libvirt", "# Connection string works on bhyve, but auth is not tested.", "username", "=", "kwargs", ".", "get", "(",...
Detects what type of dom this node is and attempts to connect to the correct hypervisor via libvirt. :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults
[ "Detects", "what", "type", "of", "dom", "this", "node", "is", "and", "attempts", "to", "connect", "to", "the", "correct", "hypervisor", "via", "libvirt", "." ]
python
train
41.689189
christophertbrown/bioscripts
ctbBio/cluster_ani.py
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/cluster_ani.py#L215-L235
def parse_checkM_tables(tables): """ convert checkM genome info tables to dictionary """ g2info = {} for table in tables: for line in open(table): line = line.strip().split('\t') if line[0].startswith('Bin Id'): header = line header[8] = 'genome size (bp)' header[5] = '#SCGs' header[6] = '#SCG duplicates' continue ID, info = line[0], line info = [to_int(i) for i in info] ID = ID.replace(' ', '') g2info[ID] = {item:stat for item, stat in zip(header, info)} if g2info[ID]['genome size (bp)'] == '': g2info[ID]['genome size (bp)'] = 0 return g2info
[ "def", "parse_checkM_tables", "(", "tables", ")", ":", "g2info", "=", "{", "}", "for", "table", "in", "tables", ":", "for", "line", "in", "open", "(", "table", ")", ":", "line", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "'\\t'", ")", ...
convert checkM genome info tables to dictionary
[ "convert", "checkM", "genome", "info", "tables", "to", "dictionary" ]
python
train
35.285714
dmwm/DBS
Server/Python/src/dbs/web/DBSWriterModel.py
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Server/Python/src/dbs/web/DBSWriterModel.py#L342-L400
def insertFile(self, qInserts=False): """ API to insert a list of file into DBS in DBS. Up to 10 files can be inserted in one request. :param qInserts: True means that inserts will be queued instead of done immediately. INSERT QUEUE Manager will perform the inserts, within few minutes. :type qInserts: bool :param filesList: List of dictionaries containing following information :type filesList: list of dicts :key logical_file_name: File to be inserted (str) (Required) :key is_file_valid: (optional, default = 1): (bool) :key block: required: /a/b/c#d (str) :key dataset: required: /a/b/c (str) :key file_type: (optional, default = EDM) one of the predefined types, (str) :key check_sum: (optional, default = '-1') (str) :key event_count: (optional, default = -1) (int) :key file_size: (optional, default = -1.) (float) :key adler32: (optional, default = '') (str) :key md5: (optional, default = '') (str) :key auto_cross_section: (optional, default = -1.) (float) :key file_lumi_list: (optional, default = []) [{'run_num': 123, 'lumi_section_num': 12},{}....] :key file_parent_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....] :key file_assoc_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....] :key file_output_config_list: (optional, default = []) [{'app_name':..., 'release_version':..., 'pset_hash':...., output_module_label':...},{}.....] 
""" if qInserts in (False, 'False'): qInserts=False try: body = request.body.read() indata = cjson.decode(body)["files"] if not isinstance(indata, (list, dict)): dbsExceptionHandler("dbsException-invalid-input", "Invalid Input DataType", self.logger.exception, \ "insertFile expects input as list or dirc") businput = [] if isinstance(indata, dict): indata = [indata] indata = validateJSONInputNoCopy("files", indata) for f in indata: f.update({ #"dataset":f["dataset"], "creation_date": f.get("creation_date", dbsUtils().getTime()), "create_by" : dbsUtils().getCreateBy(), "last_modification_date": f.get("last_modification_date", dbsUtils().getTime()), "last_modified_by": f.get("last_modified_by", dbsUtils().getCreateBy()), "file_lumi_list":f.get("file_lumi_list", []), "file_parent_list":f.get("file_parent_list", []), "file_assoc_list":f.get("assoc_list", []), "file_output_config_list":f.get("file_output_config_list", [])}) businput.append(f) self.dbsFile.insertFile(businput, qInserts) except cjson.DecodeError as dc: dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert File input", self.logger.exception, str(dc)) except dbsException as de: dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message) except HTTPError as he: raise he except Exception as ex: sError = "DBSWriterModel/insertFile. %s\n. Exception trace: \n %s" \ % (ex, traceback.format_exc()) dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
[ "def", "insertFile", "(", "self", ",", "qInserts", "=", "False", ")", ":", "if", "qInserts", "in", "(", "False", ",", "'False'", ")", ":", "qInserts", "=", "False", "try", ":", "body", "=", "request", ".", "body", ".", "read", "(", ")", "indata", "...
API to insert a list of file into DBS in DBS. Up to 10 files can be inserted in one request. :param qInserts: True means that inserts will be queued instead of done immediately. INSERT QUEUE Manager will perform the inserts, within few minutes. :type qInserts: bool :param filesList: List of dictionaries containing following information :type filesList: list of dicts :key logical_file_name: File to be inserted (str) (Required) :key is_file_valid: (optional, default = 1): (bool) :key block: required: /a/b/c#d (str) :key dataset: required: /a/b/c (str) :key file_type: (optional, default = EDM) one of the predefined types, (str) :key check_sum: (optional, default = '-1') (str) :key event_count: (optional, default = -1) (int) :key file_size: (optional, default = -1.) (float) :key adler32: (optional, default = '') (str) :key md5: (optional, default = '') (str) :key auto_cross_section: (optional, default = -1.) (float) :key file_lumi_list: (optional, default = []) [{'run_num': 123, 'lumi_section_num': 12},{}....] :key file_parent_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....] :key file_assoc_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....] :key file_output_config_list: (optional, default = []) [{'app_name':..., 'release_version':..., 'pset_hash':...., output_module_label':...},{}.....]
[ "API", "to", "insert", "a", "list", "of", "file", "into", "DBS", "in", "DBS", ".", "Up", "to", "10", "files", "can", "be", "inserted", "in", "one", "request", "." ]
python
train
60.372881
droope/droopescan
dscan/common/output.py
https://github.com/droope/droopescan/blob/424c48a0f9d12b4536dbef5a786f0fbd4ce9519a/dscan/common/output.py#L82-L107
def result(self, result, functionality): """ For the final result of the scan. @param result: as returned by BasePluginInternal.url_scan @param functionality: functionality as returned by BasePluginInternal._general_init """ for enumerate in result: # The host is a special header, we must not attempt to display it. if enumerate == "host" or enumerate == "cms_name": continue result_ind = result[enumerate] finds = result_ind['finds'] is_empty = result_ind['is_empty'] template_str = functionality[enumerate]['template'] template_params = { 'noun': enumerate, 'Noun': enumerate.capitalize(), 'items': finds, 'empty': is_empty, } self.echo(template(template_str, template_params))
[ "def", "result", "(", "self", ",", "result", ",", "functionality", ")", ":", "for", "enumerate", "in", "result", ":", "# The host is a special header, we must not attempt to display it.", "if", "enumerate", "==", "\"host\"", "or", "enumerate", "==", "\"cms_name\"", ":...
For the final result of the scan. @param result: as returned by BasePluginInternal.url_scan @param functionality: functionality as returned by BasePluginInternal._general_init
[ "For", "the", "final", "result", "of", "the", "scan", "." ]
python
train
35.653846
ternaris/marv
marv/cli.py
https://github.com/ternaris/marv/blob/c221354d912ff869bbdb4f714a86a70be30d823e/marv/cli.py#L234-L244
def marvcli_undiscard(datasets): """Undiscard DATASETS previously discarded.""" create_app() setids = parse_setids(datasets, discarded=True) dataset = Dataset.__table__ stmt = dataset.update()\ .where(dataset.c.setid.in_(setids))\ .values(discarded=False) db.session.execute(stmt) db.session.commit()
[ "def", "marvcli_undiscard", "(", "datasets", ")", ":", "create_app", "(", ")", "setids", "=", "parse_setids", "(", "datasets", ",", "discarded", "=", "True", ")", "dataset", "=", "Dataset", ".", "__table__", "stmt", "=", "dataset", ".", "update", "(", ")",...
Undiscard DATASETS previously discarded.
[ "Undiscard", "DATASETS", "previously", "discarded", "." ]
python
train
32.272727
maweigert/gputools
gputools/transforms/scale.py
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/transforms/scale.py#L23-L88
def scale(data, scale = (1.,1.,1.), interpolation = "linear"): """ returns a interpolated, scaled version of data the output shape is scaled too. Parameters ---------- data: ndarray 3d input array scale: float, tuple scaling factor along each axis (x,y,z) interpolation: str either "nearest" or "linear" Returns ------- scaled output """ if not (isinstance(data, np.ndarray) and data.ndim == 3): raise ValueError("input data has to be a 3d array!") interpolation_defines = {"linear": ["-D", "SAMPLER_FILTER=CLK_FILTER_LINEAR"], "nearest": ["-D", "SAMPLER_FILTER=CLK_FILTER_NEAREST"]} if not interpolation in interpolation_defines: raise KeyError( "interpolation = '%s' not defined ,valid: %s" % (interpolation, list(interpolation_defines.keys()))) options_types = {np.uint8:["-D","TYPENAME=uchar","-D","READ_IMAGE=read_imageui"], np.uint16: ["-D","TYPENAME=short","-D", "READ_IMAGE=read_imageui"], np.float32: ["-D","TYPENAME=float", "-D","READ_IMAGE=read_imagef"], } dtype = data.dtype.type if not dtype in options_types: raise ValueError("type %s not supported! Available: %s"%(dtype ,str(list(options_types.keys())))) if not isinstance(scale,(tuple, list, np.ndarray)): scale = (scale,)*3 if len(scale) != 3: raise ValueError("scale = %s misformed"%scale) d_im = OCLImage.from_array(data) nshape = _scale_shape(data.shape,scale) res_g = OCLArray.empty(nshape,dtype) prog = OCLProgram(abspath("kernels/scale.cl"), build_options=interpolation_defines[interpolation]+options_types[dtype ]) prog.run_kernel("scale", res_g.shape[::-1],None, d_im,res_g.data) return res_g.get()
[ "def", "scale", "(", "data", ",", "scale", "=", "(", "1.", ",", "1.", ",", "1.", ")", ",", "interpolation", "=", "\"linear\"", ")", ":", "if", "not", "(", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", "and", "data", ".", "ndim", "=="...
returns a interpolated, scaled version of data the output shape is scaled too. Parameters ---------- data: ndarray 3d input array scale: float, tuple scaling factor along each axis (x,y,z) interpolation: str either "nearest" or "linear" Returns ------- scaled output
[ "returns", "a", "interpolated", "scaled", "version", "of", "data", "the", "output", "shape", "is", "scaled", "too", ".", "Parameters", "----------", "data", ":", "ndarray", "3d", "input", "array", "scale", ":", "float", "tuple", "scaling", "factor", "along", ...
python
train
28.439394
dturanski/springcloudstream
springcloudstream/component.py
https://github.com/dturanski/springcloudstream/blob/208b542f9eba82e97882d52703af8e965a62a980/springcloudstream/component.py#L47-L57
def get_message_handler(self, message_handlers):
    """Look up the MessageHandler registered for the configured encoder.

    :param message_handlers: a dictionary of MessageHandler keyed by encoder
    :return: the MessageHandler for ``self.options.encoder``
    :raises NotImplementedError: if no handler is registered for the encoder
    """
    encoder = self.options.encoder
    if encoder not in message_handlers:
        raise NotImplementedError('No RequestHandler defined for given encoder (%s).' % encoder)
    return message_handlers[encoder]
[ "def", "get_message_handler", "(", "self", ",", "message_handlers", ")", ":", "encoder", "=", "self", ".", "options", ".", "encoder", "try", ":", "return", "message_handlers", "[", "encoder", "]", "except", "KeyError", ":", "raise", "NotImplementedError", "(", ...
Create a MessageHandler for the configured Encoder :param message_handlers: a dictionart of MessageHandler keyed by encoder :return: a MessageHandler
[ "Create", "a", "MessageHandler", "for", "the", "configured", "Encoder", ":", "param", "message_handlers", ":", "a", "dictionart", "of", "MessageHandler", "keyed", "by", "encoder", ":", "return", ":", "a", "MessageHandler" ]
python
train
41.727273
gem/oq-engine
openquake/risklib/scientific.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/risklib/scientific.py#L1369-L1378
def pair(self, array, stats):
    """
    :return (array, array_stats) if stats, else (array, None)
    """
    # stats are only computed when there is more than one weight and
    # a non-empty list of (name, func) pairs was supplied
    if len(self.weights) <= 1 or not stats:
        return array, None
    _, statfuncs = zip(*stats)
    return array, compute_stats2(array, statfuncs, self.weights)
[ "def", "pair", "(", "self", ",", "array", ",", "stats", ")", ":", "if", "len", "(", "self", ".", "weights", ")", ">", "1", "and", "stats", ":", "statnames", ",", "statfuncs", "=", "zip", "(", "*", "stats", ")", "array_stats", "=", "compute_stats2", ...
:return (array, array_stats) if stats, else (array, None)
[ ":", "return", "(", "array", "array_stats", ")", "if", "stats", "else", "(", "array", "None", ")" ]
python
train
35.3
romanz/trezor-agent
libagent/util.py
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/util.py#L201-L216
def memoize(func):
    """Simple caching decorator."""
    cache = {}

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        """Caching wrapper."""
        # sorting kwargs canonicalizes keyword order so f(a=1, b=2)
        # and f(b=2, a=1) share one cache entry
        key = (args, tuple(sorted(kwargs.items())))
        try:
            return cache[key]
        except KeyError:
            cache[key] = result = func(*args, **kwargs)
            return result

    return wrapper
[ "def", "memoize", "(", "func", ")", ":", "cache", "=", "{", "}", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Caching wrapper.\"\"\"", "key", "=", "(", "args", ",", ...
Simple caching decorator.
[ "Simple", "caching", "decorator", "." ]
python
train
24.3125