Dataset columns: code (string, lengths 75 to 104k characters), docstring (string, lengths 1 to 46.9k characters), text (string, lengths 164 to 112k characters).
def _lint():
    """Run lint and return an exit code."""
    # Flake8 doesn't have an easy way to run checks using a Python function, so
    # just fork off another process to do it.
    # Python 3 compat:
    # - The result of subprocess call outputs are byte strings, meaning we need
    #   to pass a byte string to endswith.
    project_python_files = [filename for filename in get_project_files()
                            if filename.endswith(b'.py')]
    retcode = subprocess.call(
        ['flake8', '--max-complexity=10'] + project_python_files)
    if retcode == 0:
        print_success_message('No style errors')
    return retcode
Run lint and return an exit code.
Below is the instruction that describes the task:

### Input:
Run lint and return an exit code.

### Response:
def _lint():
    """Run lint and return an exit code."""
    # Flake8 doesn't have an easy way to run checks using a Python function, so
    # just fork off another process to do it.
    # Python 3 compat:
    # - The result of subprocess call outputs are byte strings, meaning we need
    #   to pass a byte string to endswith.
    project_python_files = [filename for filename in get_project_files()
                            if filename.endswith(b'.py')]
    retcode = subprocess.call(
        ['flake8', '--max-complexity=10'] + project_python_files)
    if retcode == 0:
        print_success_message('No style errors')
    return retcode
def _got_sol_payload(self, payload):
    """SOL payload callback
    """
    # TODO(jbjohnso) test cases to throw some likely scenarios at functions
    # for example, retry with new data, retry with no new data
    # retry with unexpected sequence number
    if type(payload) == dict:  # we received an error condition
        self.activated = False
        self._print_error(payload)
        return
    newseq = payload[0] & 0b1111
    ackseq = payload[1] & 0b1111
    ackcount = payload[2]
    nacked = payload[3] & 0b1000000
    breakdetected = payload[3] & 0b10000
    # for now, ignore overrun. I assume partial NACK for this reason or
    # for no reason would be treated the same, new payload with partial
    # data.
    remdata = ""
    remdatalen = 0
    flag = 0
    if not self.poweredon:
        flag |= 0b1100000
    if not self.activated:
        flag |= 0b1010000
    if newseq != 0:  # this packet at least has some data to send to us..
        if len(payload) > 4:
            remdatalen = len(payload[4:])  # store remote len before dupe
            # retry logic, we must ack *this* many even if it is
            # a retry packet with new partial data
            remdata = bytes(payload[4:])
        if newseq == self.remseq:  # it is a retry, but could have new data
            if remdatalen > self.lastsize:
                remdata = bytes(remdata[4 + self.lastsize:])
            else:  # no new data...
                remdata = ""
        else:  # TODO(jbjohnso) what if remote sequence number is wrong??
            self.remseq = newseq
        self.lastsize = remdatalen
        ackpayload = bytearray((0, self.remseq, remdatalen, flag))
        # Why not put pending data into the ack? because it's rare
        # and might be hard to decide what to do in the context of
        # retry situation
        try:
            self.send_payload(ackpayload, retry=False)
        except exc.IpmiException:
            # if the session is broken, then close the SOL session
            self.close()
    if remdata:  # Do not subject callers to empty data
        self._print_data(remdata)
    if self.myseq != 0 and ackseq == self.myseq:  # the bmc has something
        # to say about last xmit
        self.awaitingack = False
        if nacked and not breakdetected:  # the BMC was in some way unhappy
            newtext = self.lastpayload[4 + ackcount:]
            with self.outputlock:
                if (self.pendingoutput
                        and not isinstance(self.pendingoutput[0], dict)):
                    self.pendingoutput[0] = newtext + self.pendingoutput[0]
                else:
                    self.pendingoutput = [newtext] + self.pendingoutput
            # self._sendpendingoutput() checks len(self._sendpendingoutput)
            self._sendpendingoutput()
    elif ackseq != 0 and self.awaitingack:
        # if an ack packet came in, but did not match what we
        # expected, retry our payload now.
        # the situation that was triggered was a senseless retry
        # when data came in while we xmitted. In theory, a BMC
        # should handle a retry correctly, but some do not, so
        # try to mitigate by avoiding overeager retries
        # occasional retry of a packet
        # sooner than timeout suggests is evidently a big deal
        self.send_payload(payload=self.lastpayload)
SOL payload callback
Below is the instruction that describes the task:

### Input:
SOL payload callback

### Response:
def _got_sol_payload(self, payload):
    """SOL payload callback
    """
    # TODO(jbjohnso) test cases to throw some likely scenarios at functions
    # for example, retry with new data, retry with no new data
    # retry with unexpected sequence number
    if type(payload) == dict:  # we received an error condition
        self.activated = False
        self._print_error(payload)
        return
    newseq = payload[0] & 0b1111
    ackseq = payload[1] & 0b1111
    ackcount = payload[2]
    nacked = payload[3] & 0b1000000
    breakdetected = payload[3] & 0b10000
    # for now, ignore overrun. I assume partial NACK for this reason or
    # for no reason would be treated the same, new payload with partial
    # data.
    remdata = ""
    remdatalen = 0
    flag = 0
    if not self.poweredon:
        flag |= 0b1100000
    if not self.activated:
        flag |= 0b1010000
    if newseq != 0:  # this packet at least has some data to send to us..
        if len(payload) > 4:
            remdatalen = len(payload[4:])  # store remote len before dupe
            # retry logic, we must ack *this* many even if it is
            # a retry packet with new partial data
            remdata = bytes(payload[4:])
        if newseq == self.remseq:  # it is a retry, but could have new data
            if remdatalen > self.lastsize:
                remdata = bytes(remdata[4 + self.lastsize:])
            else:  # no new data...
                remdata = ""
        else:  # TODO(jbjohnso) what if remote sequence number is wrong??
            self.remseq = newseq
        self.lastsize = remdatalen
        ackpayload = bytearray((0, self.remseq, remdatalen, flag))
        # Why not put pending data into the ack? because it's rare
        # and might be hard to decide what to do in the context of
        # retry situation
        try:
            self.send_payload(ackpayload, retry=False)
        except exc.IpmiException:
            # if the session is broken, then close the SOL session
            self.close()
    if remdata:  # Do not subject callers to empty data
        self._print_data(remdata)
    if self.myseq != 0 and ackseq == self.myseq:  # the bmc has something
        # to say about last xmit
        self.awaitingack = False
        if nacked and not breakdetected:  # the BMC was in some way unhappy
            newtext = self.lastpayload[4 + ackcount:]
            with self.outputlock:
                if (self.pendingoutput
                        and not isinstance(self.pendingoutput[0], dict)):
                    self.pendingoutput[0] = newtext + self.pendingoutput[0]
                else:
                    self.pendingoutput = [newtext] + self.pendingoutput
            # self._sendpendingoutput() checks len(self._sendpendingoutput)
            self._sendpendingoutput()
    elif ackseq != 0 and self.awaitingack:
        # if an ack packet came in, but did not match what we
        # expected, retry our payload now.
        # the situation that was triggered was a senseless retry
        # when data came in while we xmitted. In theory, a BMC
        # should handle a retry correctly, but some do not, so
        # try to mitigate by avoiding overeager retries
        # occasional retry of a packet
        # sooner than timeout suggests is evidently a big deal
        self.send_payload(payload=self.lastpayload)
def lnlike(self, model, refactor=False, pos_tol=2.5, neg_tol=50.,
           full_output=False):
    r"""
    Return the likelihood of the astrophysical model `model`.

    Returns the likelihood of `model` marginalized over the PLD model.

    :param ndarray model: A vector of the same shape as `self.time` \
           corresponding to the astrophysical model.
    :param bool refactor: Re-compute the Cholesky decomposition? This \
           typically does not need to be done, except when the PLD \
           model changes. Default :py:obj:`False`.
    :param float pos_tol: the positive (i.e., above the median) \
           outlier tolerance in standard deviations.
    :param float neg_tol: the negative (i.e., below the median) \
           outlier tolerance in standard deviations.
    :param bool full_output: If :py:obj:`True`, returns the maximum \
           likelihood model amplitude and the variance on the amplitude \
           in addition to the log-likelihood. In the case of a transit \
           model, these are the transit depth and depth variance. Default \
           :py:obj:`False`.
    """
    lnl = 0
    # Re-factorize the Cholesky decomposition?
    try:
        self._ll_info
    except AttributeError:
        refactor = True
    if refactor:
        # Smooth the light curve and reset the outlier mask
        t = np.delete(self.time,
                      np.concatenate([self.nanmask, self.badmask]))
        f = np.delete(self.flux,
                      np.concatenate([self.nanmask, self.badmask]))
        f = SavGol(f)
        med = np.nanmedian(f)
        MAD = 1.4826 * np.nanmedian(np.abs(f - med))
        pos_inds = np.where((f > med + pos_tol * MAD))[0]
        pos_inds = np.array([np.argmax(self.time == t[i])
                             for i in pos_inds])
        MAD = 1.4826 * np.nanmedian(np.abs(f - med))
        neg_inds = np.where((f < med - neg_tol * MAD))[0]
        neg_inds = np.array([np.argmax(self.time == t[i])
                             for i in neg_inds])
        outmask = np.array(self.outmask)
        transitmask = np.array(self.transitmask)
        self.outmask = np.concatenate([neg_inds, pos_inds])
        self.transitmask = np.array([], dtype=int)
        # Now re-factorize the Cholesky decomposition
        self._ll_info = [None for b in self.breakpoints]
        for b, brkpt in enumerate(self.breakpoints):
            # Masks for current chunk
            m = self.get_masked_chunk(b, pad=False)
            # This block of the masked covariance matrix
            K = GetCovariance(self.kernel, self.kernel_params,
                              self.time[m], self.fraw_err[m])
            # The masked X.L.X^T term
            A = np.zeros((len(m), len(m)))
            for n in range(self.pld_order):
                XM = self.X(n, m)
                A += self.lam[b][n] * np.dot(XM, XM.T)
            K += A
            self._ll_info[b] = [cho_factor(K), m]
        # Reset the outlier masks
        self.outmask = outmask
        self.transitmask = transitmask
    # Compute the likelihood for each chunk
    amp = [None for b in self.breakpoints]
    var = [None for b in self.breakpoints]
    for b, brkpt in enumerate(self.breakpoints):
        # Get the inverse covariance and the mask
        CDK = self._ll_info[b][0]
        m = self._ll_info[b][1]
        # Compute the maximum likelihood model amplitude
        # (for transits, this is the transit depth)
        var[b] = 1. / np.dot(model[m], cho_solve(CDK, model[m]))
        amp[b] = var[b] * np.dot(model[m], cho_solve(CDK, self.fraw[m]))
        # Compute the residual
        r = self.fraw[m] - amp[b] * model[m]
        # Finally, compute the likelihood
        lnl += -0.5 * np.dot(r, cho_solve(CDK, r))
    if full_output:
        # We need to multiply the Gaussians for all chunks to get the
        # amplitude and amplitude variance for the entire dataset
        vari = var[0]
        ampi = amp[0]
        for v, a in zip(var[1:], amp[1:]):
            ampi = (ampi * v + a * vari) / (vari + v)
            vari = vari * v / (vari + v)
        med = np.nanmedian(self.fraw)
        return lnl, ampi / med, vari / med ** 2
    else:
        return lnl
r""" Return the likelihood of the astrophysical model `model`. Returns the likelihood of `model` marginalized over the PLD model. :param ndarray model: A vector of the same shape as `self.time` \ corresponding to the astrophysical model. :param bool refactor: Re-compute the Cholesky decomposition? This \ typically does not need to be done, except when the PLD \ model changes. Default :py:obj:`False`. :param float pos_tol: the positive (i.e., above the median) \ outlier tolerance in standard deviations. :param float neg_tol: the negative (i.e., below the median) \ outlier tolerance in standard deviations. :param bool full_output: If :py:obj:`True`, returns the maximum \ likelihood model amplitude and the variance on the amplitude \ in addition to the log-likelihood. In the case of a transit \ model, these are the transit depth and depth variance. Default \ :py:obj:`False`.
Below is the instruction that describes the task:

### Input:
r""" Return the likelihood of the astrophysical model `model`.

Returns the likelihood of `model` marginalized over the PLD model.

:param ndarray model: A vector of the same shape as `self.time` \
       corresponding to the astrophysical model.
:param bool refactor: Re-compute the Cholesky decomposition? This \
       typically does not need to be done, except when the PLD \
       model changes. Default :py:obj:`False`.
:param float pos_tol: the positive (i.e., above the median) \
       outlier tolerance in standard deviations.
:param float neg_tol: the negative (i.e., below the median) \
       outlier tolerance in standard deviations.
:param bool full_output: If :py:obj:`True`, returns the maximum \
       likelihood model amplitude and the variance on the amplitude \
       in addition to the log-likelihood. In the case of a transit \
       model, these are the transit depth and depth variance. Default \
       :py:obj:`False`.

### Response:
def lnlike(self, model, refactor=False, pos_tol=2.5, neg_tol=50.,
           full_output=False):
    r"""
    Return the likelihood of the astrophysical model `model`.

    Returns the likelihood of `model` marginalized over the PLD model.

    :param ndarray model: A vector of the same shape as `self.time` \
           corresponding to the astrophysical model.
    :param bool refactor: Re-compute the Cholesky decomposition? This \
           typically does not need to be done, except when the PLD \
           model changes. Default :py:obj:`False`.
    :param float pos_tol: the positive (i.e., above the median) \
           outlier tolerance in standard deviations.
    :param float neg_tol: the negative (i.e., below the median) \
           outlier tolerance in standard deviations.
    :param bool full_output: If :py:obj:`True`, returns the maximum \
           likelihood model amplitude and the variance on the amplitude \
           in addition to the log-likelihood. In the case of a transit \
           model, these are the transit depth and depth variance. Default \
           :py:obj:`False`.
    """
    lnl = 0
    # Re-factorize the Cholesky decomposition?
    try:
        self._ll_info
    except AttributeError:
        refactor = True
    if refactor:
        # Smooth the light curve and reset the outlier mask
        t = np.delete(self.time,
                      np.concatenate([self.nanmask, self.badmask]))
        f = np.delete(self.flux,
                      np.concatenate([self.nanmask, self.badmask]))
        f = SavGol(f)
        med = np.nanmedian(f)
        MAD = 1.4826 * np.nanmedian(np.abs(f - med))
        pos_inds = np.where((f > med + pos_tol * MAD))[0]
        pos_inds = np.array([np.argmax(self.time == t[i])
                             for i in pos_inds])
        MAD = 1.4826 * np.nanmedian(np.abs(f - med))
        neg_inds = np.where((f < med - neg_tol * MAD))[0]
        neg_inds = np.array([np.argmax(self.time == t[i])
                             for i in neg_inds])
        outmask = np.array(self.outmask)
        transitmask = np.array(self.transitmask)
        self.outmask = np.concatenate([neg_inds, pos_inds])
        self.transitmask = np.array([], dtype=int)
        # Now re-factorize the Cholesky decomposition
        self._ll_info = [None for b in self.breakpoints]
        for b, brkpt in enumerate(self.breakpoints):
            # Masks for current chunk
            m = self.get_masked_chunk(b, pad=False)
            # This block of the masked covariance matrix
            K = GetCovariance(self.kernel, self.kernel_params,
                              self.time[m], self.fraw_err[m])
            # The masked X.L.X^T term
            A = np.zeros((len(m), len(m)))
            for n in range(self.pld_order):
                XM = self.X(n, m)
                A += self.lam[b][n] * np.dot(XM, XM.T)
            K += A
            self._ll_info[b] = [cho_factor(K), m]
        # Reset the outlier masks
        self.outmask = outmask
        self.transitmask = transitmask
    # Compute the likelihood for each chunk
    amp = [None for b in self.breakpoints]
    var = [None for b in self.breakpoints]
    for b, brkpt in enumerate(self.breakpoints):
        # Get the inverse covariance and the mask
        CDK = self._ll_info[b][0]
        m = self._ll_info[b][1]
        # Compute the maximum likelihood model amplitude
        # (for transits, this is the transit depth)
        var[b] = 1. / np.dot(model[m], cho_solve(CDK, model[m]))
        amp[b] = var[b] * np.dot(model[m], cho_solve(CDK, self.fraw[m]))
        # Compute the residual
        r = self.fraw[m] - amp[b] * model[m]
        # Finally, compute the likelihood
        lnl += -0.5 * np.dot(r, cho_solve(CDK, r))
    if full_output:
        # We need to multiply the Gaussians for all chunks to get the
        # amplitude and amplitude variance for the entire dataset
        vari = var[0]
        ampi = amp[0]
        for v, a in zip(var[1:], amp[1:]):
            ampi = (ampi * v + a * vari) / (vari + v)
            vari = vari * v / (vari + v)
        med = np.nanmedian(self.fraw)
        return lnl, ampi / med, vari / med ** 2
    else:
        return lnl
def form_valid(self, form):
    '''
    Even if this form is valid, the handlers for this form may have added
    messages to the request. In that case, the page should be handled as
    if the form were invalid. Otherwise, update the session data with the
    form data and then move to the next view.
    '''
    reg = self.temporaryRegistration

    # The session expires after a period of inactivity that is specified in preferences.
    expiry = timezone.now() + timedelta(minutes=getConstant('registration__sessionExpiryMinutes'))
    self.request.session[REG_VALIDATION_STR]["temporaryRegistrationExpiry"] = \
        expiry.strftime('%Y-%m-%dT%H:%M:%S%z')
    self.request.session.modified = True

    # Update the expiration date for this registration, and pass in the data from
    # this form.
    reg.expirationDate = expiry
    reg.firstName = form.cleaned_data.pop('firstName')
    reg.lastName = form.cleaned_data.pop('lastName')
    reg.email = form.cleaned_data.pop('email')
    reg.phone = form.cleaned_data.pop('phone', None)
    reg.student = form.cleaned_data.pop('student', False)
    reg.comments = form.cleaned_data.pop('comments', None)
    reg.howHeardAboutUs = form.cleaned_data.pop('howHeardAboutUs', None)

    # Anything else in the form goes to the TemporaryRegistration data.
    reg.data.update(form.cleaned_data)
    reg.save()

    # This signal (formerly the post_temporary_registration signal) allows
    # vouchers to be applied temporarily, and it can be used for other tasks
    post_student_info.send(sender=StudentInfoView, registration=reg)
    return HttpResponseRedirect(self.get_success_url())
Even if this form is valid, the handlers for this form may have added messages to the request. In that case, the page should be handled as if the form were invalid. Otherwise, update the session data with the form data and then move to the next view.
Below is the instruction that describes the task:

### Input:
Even if this form is valid, the handlers for this form may have added messages to the request. In that case, the page should be handled as if the form were invalid. Otherwise, update the session data with the form data and then move to the next view.

### Response:
def form_valid(self, form):
    '''
    Even if this form is valid, the handlers for this form may have added
    messages to the request. In that case, the page should be handled as
    if the form were invalid. Otherwise, update the session data with the
    form data and then move to the next view.
    '''
    reg = self.temporaryRegistration

    # The session expires after a period of inactivity that is specified in preferences.
    expiry = timezone.now() + timedelta(minutes=getConstant('registration__sessionExpiryMinutes'))
    self.request.session[REG_VALIDATION_STR]["temporaryRegistrationExpiry"] = \
        expiry.strftime('%Y-%m-%dT%H:%M:%S%z')
    self.request.session.modified = True

    # Update the expiration date for this registration, and pass in the data from
    # this form.
    reg.expirationDate = expiry
    reg.firstName = form.cleaned_data.pop('firstName')
    reg.lastName = form.cleaned_data.pop('lastName')
    reg.email = form.cleaned_data.pop('email')
    reg.phone = form.cleaned_data.pop('phone', None)
    reg.student = form.cleaned_data.pop('student', False)
    reg.comments = form.cleaned_data.pop('comments', None)
    reg.howHeardAboutUs = form.cleaned_data.pop('howHeardAboutUs', None)

    # Anything else in the form goes to the TemporaryRegistration data.
    reg.data.update(form.cleaned_data)
    reg.save()

    # This signal (formerly the post_temporary_registration signal) allows
    # vouchers to be applied temporarily, and it can be used for other tasks
    post_student_info.send(sender=StudentInfoView, registration=reg)
    return HttpResponseRedirect(self.get_success_url())
def validate_identifier(self, field):
    """Validate field identifier."""
    if field.data:
        field.data = field.data.lower()
        if Community.get(field.data, with_deleted=True):
            raise validators.ValidationError(
                _('The identifier already exists. '
                  'Please choose a different one.'))
Validate field identifier.
Below is the instruction that describes the task:

### Input:
Validate field identifier.

### Response:
def validate_identifier(self, field):
    """Validate field identifier."""
    if field.data:
        field.data = field.data.lower()
        if Community.get(field.data, with_deleted=True):
            raise validators.ValidationError(
                _('The identifier already exists. '
                  'Please choose a different one.'))
def get_logview_address(self, hours=None):
    """
    Get logview address of the instance object by hours.

    :param hours:
    :return: logview address
    :rtype: str
    """
    hours = hours or options.log_view_hours
    project = self.project
    url = '%s/authorization' % project.resource()

    policy = {
        'expires_in_hours': hours,
        'policy': {
            'Statement': [{
                'Action': ['odps:Read'],
                'Effect': 'Allow',
                'Resource': 'acs:odps:*:projects/%s/instances/%s' %
                            (project.name, self.id)
            }],
            'Version': '1',
        }
    }
    headers = {'Content-Type': 'application/json'}
    params = {'sign_bearer_token': ''}
    data = json.dumps(policy)
    res = self._client.post(url, data, headers=headers, params=params)

    content = res.text if six.PY3 else res.content
    root = ElementTree.fromstring(content)
    token = root.find('Result').text

    link = options.log_view_host + "/logview/?h=" + self._client.endpoint + "&p=" \
           + project.name + "&i=" + self.id + "&token=" + token
    return link
Get logview address of the instance object by hours.

:param hours:
:return: logview address
:rtype: str
Below is the instruction that describes the task:

### Input:
Get logview address of the instance object by hours.

:param hours:
:return: logview address
:rtype: str

### Response:
def get_logview_address(self, hours=None):
    """
    Get logview address of the instance object by hours.

    :param hours:
    :return: logview address
    :rtype: str
    """
    hours = hours or options.log_view_hours
    project = self.project
    url = '%s/authorization' % project.resource()

    policy = {
        'expires_in_hours': hours,
        'policy': {
            'Statement': [{
                'Action': ['odps:Read'],
                'Effect': 'Allow',
                'Resource': 'acs:odps:*:projects/%s/instances/%s' %
                            (project.name, self.id)
            }],
            'Version': '1',
        }
    }
    headers = {'Content-Type': 'application/json'}
    params = {'sign_bearer_token': ''}
    data = json.dumps(policy)
    res = self._client.post(url, data, headers=headers, params=params)

    content = res.text if six.PY3 else res.content
    root = ElementTree.fromstring(content)
    token = root.find('Result').text

    link = options.log_view_host + "/logview/?h=" + self._client.endpoint + "&p=" \
           + project.name + "&i=" + self.id + "&token=" + token
    return link
def _get_xml_doc(self, endpoint, query, is_post=False):
    """
    Return False if connection could not be made.
    Otherwise, return a minidom Document.
    """
    response = self._get_response(endpoint, query, is_post=is_post)
    if not response:
        # Honor the documented contract when the connection failed.
        return False
    # response.text is a string, so parseString is required here;
    # minidom.parse expects a filename or file object.
    return minidom.parseString(response.text)
Return False if connection could not be made. Otherwise, return a minidom Document.
Below is the instruction that describes the task:

### Input:
Return False if connection could not be made. Otherwise, return a minidom Document.

### Response:
def _get_xml_doc(self, endpoint, query, is_post=False):
    """
    Return False if connection could not be made.
    Otherwise, return a minidom Document.
    """
    response = self._get_response(endpoint, query, is_post=is_post)
    if not response:
        # Honor the documented contract when the connection failed.
        return False
    # response.text is a string, so parseString is required here;
    # minidom.parse expects a filename or file object.
    return minidom.parseString(response.text)
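As a side note, here is a minimal self-contained sketch of the string-parsing call used above; the XML payload and tag names are made up for illustration:

from xml.dom import minidom

# parseString accepts the document as a str, unlike parse, which
# expects a filename or file object.
doc = minidom.parseString('<result><token>abc123</token></result>')
token = doc.getElementsByTagName('token')[0].firstChild.nodeValue
print(token)  # -> abc123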
def reboot(self):
    """Reboots the device.

    Generally one should use this method to reboot the device instead of
    directly calling `adb.reboot`, because this method gracefully handles
    the teardown and restoration of running services.

    This method is blocking and only returns when the reboot has completed
    and the services restored.

    Raises:
      Error: Waiting for completion timed out.
    """
    if self.is_bootloader:
        self.fastboot.reboot()
        return
    with self.handle_reboot():
        self.adb.reboot()
Reboots the device.

Generally one should use this method to reboot the device instead of directly calling `adb.reboot`, because this method gracefully handles the teardown and restoration of running services.

This method is blocking and only returns when the reboot has completed and the services restored.

Raises:
  Error: Waiting for completion timed out.
Below is the instruction that describes the task:

### Input:
Reboots the device.

Generally one should use this method to reboot the device instead of directly calling `adb.reboot`, because this method gracefully handles the teardown and restoration of running services.

This method is blocking and only returns when the reboot has completed and the services restored.

Raises:
  Error: Waiting for completion timed out.

### Response:
def reboot(self):
    """Reboots the device.

    Generally one should use this method to reboot the device instead of
    directly calling `adb.reboot`, because this method gracefully handles
    the teardown and restoration of running services.

    This method is blocking and only returns when the reboot has completed
    and the services restored.

    Raises:
      Error: Waiting for completion timed out.
    """
    if self.is_bootloader:
        self.fastboot.reboot()
        return
    with self.handle_reboot():
        self.adb.reboot()
def make_initstr(modname, import_tuples, verbose=False):
    """
    Just creates the string representation. Does no importing.
    """
    imports = [tup[0] for tup in import_tuples]
    from_imports = __get_from_imports(import_tuples)
    inject_execstr = _inject_execstr(modname, import_tuples)
    return _initstr(modname, imports, from_imports, inject_execstr)
Just creates the string representation. Does no importing.
Below is the instruction that describes the task:

### Input:
Just creates the string representation. Does no importing.

### Response:
def make_initstr(modname, import_tuples, verbose=False):
    """
    Just creates the string representation. Does no importing.
    """
    imports = [tup[0] for tup in import_tuples]
    from_imports = __get_from_imports(import_tuples)
    inject_execstr = _inject_execstr(modname, import_tuples)
    return _initstr(modname, imports, from_imports, inject_execstr)
def register(self, device, callback):
    """Register a callback.

    device: device to be updated by subscription
    callback: callback for notification of changes
    """
    if not device:
        logger.error("Received an invalid device: %r", device)
        return

    logger.debug("Subscribing to events for %s", device.name)
    self._devices[device.vera_device_id].append(device)
    self._callbacks[device].append(callback)
Register a callback.

device: device to be updated by subscription
callback: callback for notification of changes
Below is the instruction that describes the task:

### Input:
Register a callback.

device: device to be updated by subscription
callback: callback for notification of changes

### Response:
def register(self, device, callback):
    """Register a callback.

    device: device to be updated by subscription
    callback: callback for notification of changes
    """
    if not device:
        logger.error("Received an invalid device: %r", device)
        return

    logger.debug("Subscribing to events for %s", device.name)
    self._devices[device.vera_device_id].append(device)
    self._callbacks[device].append(callback)
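The bare `append` calls above suggest `_devices` and `_callbacks` are mappings that default to empty lists. A minimal sketch of that registry pattern with `collections.defaultdict` (the class and attribute names here are illustrative, not taken from the original library):

from collections import defaultdict

class CallbackRegistry:
    def __init__(self):
        self._devices = defaultdict(list)    # device id -> list of devices
        self._callbacks = defaultdict(list)  # device -> list of callbacks

    def register(self, device_id, device, callback):
        # Missing keys start out as empty lists, so append never fails.
        self._devices[device_id].append(device)
        self._callbacks[device].append(callback)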
def lats(self, degrees=True):
    """
    Return the latitudes of each row of the gridded data.

    Usage
    -----
    lats = x.lats([degrees])

    Returns
    -------
    lats : ndarray, shape (nlat)
        1-D numpy array of size nlat containing the latitude of each row
        of the gridded data.

    Parameters
    -------
    degrees : bool, optional, default = True
        If True, the output will be in degrees. If False, the output
        will be in radians.
    """
    if degrees is False:
        return _np.radians(self._lats())
    else:
        return self._lats()
Return the latitudes of each row of the gridded data.

Usage
-----
lats = x.lats([degrees])

Returns
-------
lats : ndarray, shape (nlat)
    1-D numpy array of size nlat containing the latitude of each row
    of the gridded data.

Parameters
-------
degrees : bool, optional, default = True
    If True, the output will be in degrees. If False, the output
    will be in radians.
Below is the instruction that describes the task:

### Input:
Return the latitudes of each row of the gridded data.

Usage
-----
lats = x.lats([degrees])

Returns
-------
lats : ndarray, shape (nlat)
    1-D numpy array of size nlat containing the latitude of each row
    of the gridded data.

Parameters
-------
degrees : bool, optional, default = True
    If True, the output will be in degrees. If False, the output
    will be in radians.

### Response:
def lats(self, degrees=True):
    """
    Return the latitudes of each row of the gridded data.

    Usage
    -----
    lats = x.lats([degrees])

    Returns
    -------
    lats : ndarray, shape (nlat)
        1-D numpy array of size nlat containing the latitude of each row
        of the gridded data.

    Parameters
    -------
    degrees : bool, optional, default = True
        If True, the output will be in degrees. If False, the output
        will be in radians.
    """
    if degrees is False:
        return _np.radians(self._lats())
    else:
        return self._lats()
def db(self, connection_string=None):
    """Gets the SQLAlchemy session for this request"""
    connection_string = connection_string or self.settings["db"]

    if not hasattr(self, "_db_conns"):
        self._db_conns = {}

    if connection_string not in self._db_conns:
        self._db_conns[connection_string] = oz.sqlalchemy.session(connection_string=connection_string)

    return self._db_conns[connection_string]
Gets the SQLAlchemy session for this request
Below is the instruction that describes the task:

### Input:
Gets the SQLAlchemy session for this request

### Response:
def db(self, connection_string=None):
    """Gets the SQLAlchemy session for this request"""
    connection_string = connection_string or self.settings["db"]

    if not hasattr(self, "_db_conns"):
        self._db_conns = {}

    if connection_string not in self._db_conns:
        self._db_conns[connection_string] = oz.sqlalchemy.session(connection_string=connection_string)

    return self._db_conns[connection_string]
def run_spyder(app, options, args):
    """
    Create and show Spyder's main window
    Start QApplication event loop
    """
    #TODO: insert here
    # Main window
    main = MainWindow(options)
    try:
        main.setup()
    except BaseException:
        if main.console is not None:
            try:
                main.console.shell.exit_interpreter()
            except BaseException:
                pass
        raise
    main.show()
    main.post_visible_setup()

    if main.console:
        main.console.shell.interpreter.namespace['spy'] = \
            Spy(app=app, window=main)

    # Open external files passed as args
    if args:
        for a in args:
            main.open_external_file(a)

    # Don't show icons in menus for Mac
    if sys.platform == 'darwin':
        QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)

    # Open external files with our Mac app
    if running_in_mac_app():
        app.sig_open_external_file.connect(main.open_external_file)

    # To give focus again to the last focused widget after restoring
    # the window
    app.focusChanged.connect(main.change_last_focused_widget)

    if not running_under_pytest():
        app.exec_()
    return main
Create and show Spyder's main window
Start QApplication event loop
Below is the instruction that describes the task:

### Input:
Create and show Spyder's main window
Start QApplication event loop

### Response:
def run_spyder(app, options, args):
    """
    Create and show Spyder's main window
    Start QApplication event loop
    """
    #TODO: insert here
    # Main window
    main = MainWindow(options)
    try:
        main.setup()
    except BaseException:
        if main.console is not None:
            try:
                main.console.shell.exit_interpreter()
            except BaseException:
                pass
        raise
    main.show()
    main.post_visible_setup()

    if main.console:
        main.console.shell.interpreter.namespace['spy'] = \
            Spy(app=app, window=main)

    # Open external files passed as args
    if args:
        for a in args:
            main.open_external_file(a)

    # Don't show icons in menus for Mac
    if sys.platform == 'darwin':
        QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)

    # Open external files with our Mac app
    if running_in_mac_app():
        app.sig_open_external_file.connect(main.open_external_file)

    # To give focus again to the last focused widget after restoring
    # the window
    app.focusChanged.connect(main.change_last_focused_widget)

    if not running_under_pytest():
        app.exec_()
    return main
def validate(self, value, model=None, context=None):
    """ Perform validation """
    from boiler.user.services import user_service

    self_id = None
    if model:
        if isinstance(model, dict):
            self_id = model.get('id')
        else:
            self_id = getattr(model, 'id')

    params = dict()
    params[self.property] = value
    found = user_service.first(**params)
    if not found or (model and self_id == found.id):
        return Error()

    return Error(self.error)
Perform validation
Below is the instruction that describes the task:

### Input:
Perform validation

### Response:
def validate(self, value, model=None, context=None):
    """ Perform validation """
    from boiler.user.services import user_service

    self_id = None
    if model:
        if isinstance(model, dict):
            self_id = model.get('id')
        else:
            self_id = getattr(model, 'id')

    params = dict()
    params[self.property] = value
    found = user_service.first(**params)
    if not found or (model and self_id == found.id):
        return Error()

    return Error(self.error)
def frange(start, stop, step=1.0):
    """
    Implementation of Python's ``range()`` function which works with floats.

    Reference to this implementation: https://stackoverflow.com/a/36091634

    :param start: start value
    :type start: float
    :param stop: end value
    :type stop: float
    :param step: increment
    :type step: float
    :return: float
    :rtype: generator
    """
    i = 0.0
    x = float(start)  # Prevent yielding integers.
    x0 = x
    epsilon = step / 2.0
    yield x  # always yield first value
    while x + epsilon < stop:
        i += 1.0
        x = x0 + i * step
        yield x
    if stop > x:
        yield stop
Implementation of Python's ``range()`` function which works with floats.

Reference to this implementation: https://stackoverflow.com/a/36091634

:param start: start value
:type start: float
:param stop: end value
:type stop: float
:param step: increment
:type step: float
:return: float
:rtype: generator
Below is the instruction that describes the task:

### Input:
Implementation of Python's ``range()`` function which works with floats.

Reference to this implementation: https://stackoverflow.com/a/36091634

:param start: start value
:type start: float
:param stop: end value
:type stop: float
:param step: increment
:type step: float
:return: float
:rtype: generator

### Response:
def frange(start, stop, step=1.0):
    """
    Implementation of Python's ``range()`` function which works with floats.

    Reference to this implementation: https://stackoverflow.com/a/36091634

    :param start: start value
    :type start: float
    :param stop: end value
    :type stop: float
    :param step: increment
    :type step: float
    :return: float
    :rtype: generator
    """
    i = 0.0
    x = float(start)  # Prevent yielding integers.
    x0 = x
    epsilon = step / 2.0
    yield x  # always yield first value
    while x + epsilon < stop:
        i += 1.0
        x = x0 + i * step
        yield x
    if stop > x:
        yield stop
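A quick usage sketch of `frange`; the printed values are shown in the comment. Note that `stop` itself is yielded when the last step lands short of it:

for x in frange(0.0, 1.0, 0.25):
    print(x)  # -> 0.0, 0.25, 0.5, 0.75, 1.0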
def _prepare_source(selector, source):
    """Normalize source rows and selectors."""
    tablename, fields = get_data_specifier(selector)
    if len(fields) != 1:
        raise ItsdbError(
            'Selector must specify exactly one data column: {}'
            .format(selector)
        )
    if isinstance(source, TestSuite):
        if not tablename:
            tablename = source.relations.find(fields[0])[0]
        source = source[tablename]
    cols = list(source.fields.keys()) + fields
    return source, cols
Normalize source rows and selectors.
Below is the instruction that describes the task:

### Input:
Normalize source rows and selectors.

### Response:
def _prepare_source(selector, source):
    """Normalize source rows and selectors."""
    tablename, fields = get_data_specifier(selector)
    if len(fields) != 1:
        raise ItsdbError(
            'Selector must specify exactly one data column: {}'
            .format(selector)
        )
    if isinstance(source, TestSuite):
        if not tablename:
            tablename = source.relations.find(fields[0])[0]
        source = source[tablename]
    cols = list(source.fields.keys()) + fields
    return source, cols
def bits(self):
    """
    The target architecture word size. One of :class:`Target.Bits`.
    """
    if self._bits is None:
        value = self._DEFAULT_BITS.get(self.arch)
        if value is None:
            raise NotImplementedError('Could not determine the default word size of %s architecture.' % self.arch)
        return value
    else:
        return self._bits
The target architecture word size. One of :class:`Target.Bits`.
Below is the instruction that describes the task:

### Input:
The target architecture word size. One of :class:`Target.Bits`.

### Response:
def bits(self):
    """
    The target architecture word size. One of :class:`Target.Bits`.
    """
    if self._bits is None:
        value = self._DEFAULT_BITS.get(self.arch)
        if value is None:
            raise NotImplementedError('Could not determine the default word size of %s architecture.' % self.arch)
        return value
    else:
        return self._bits
def get_wallet_addresses(wallet_name, api_key, is_hd_wallet=False,
                         zero_balance=None, used=None, omit_addresses=False,
                         coin_symbol='btc'):
    '''
    Returns a list of wallet addresses as well as some meta-data
    '''
    assert is_valid_coin_symbol(coin_symbol)
    assert api_key
    assert len(wallet_name) <= 25, wallet_name
    assert zero_balance in (None, True, False)
    assert used in (None, True, False)
    assert isinstance(omit_addresses, bool), omit_addresses

    params = {'token': api_key}
    kwargs = {'hd/' if is_hd_wallet else '': wallet_name}  # hack!
    url = make_url(coin_symbol, 'wallets', **kwargs)

    if zero_balance is True:
        params['zerobalance'] = 'true'
    elif zero_balance is False:
        params['zerobalance'] = 'false'
    if used is True:
        params['used'] = 'true'
    elif used is False:
        params['used'] = 'false'
    if omit_addresses:
        params['omitWalletAddresses'] = 'true'

    r = requests.get(url, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)

    return get_valid_json(r)
Returns a list of wallet addresses as well as some meta-data
Below is the instruction that describes the task:

### Input:
Returns a list of wallet addresses as well as some meta-data

### Response:
def get_wallet_addresses(wallet_name, api_key, is_hd_wallet=False,
                         zero_balance=None, used=None, omit_addresses=False,
                         coin_symbol='btc'):
    '''
    Returns a list of wallet addresses as well as some meta-data
    '''
    assert is_valid_coin_symbol(coin_symbol)
    assert api_key
    assert len(wallet_name) <= 25, wallet_name
    assert zero_balance in (None, True, False)
    assert used in (None, True, False)
    assert isinstance(omit_addresses, bool), omit_addresses

    params = {'token': api_key}
    kwargs = {'hd/' if is_hd_wallet else '': wallet_name}  # hack!
    url = make_url(coin_symbol, 'wallets', **kwargs)

    if zero_balance is True:
        params['zerobalance'] = 'true'
    elif zero_balance is False:
        params['zerobalance'] = 'false'
    if used is True:
        params['used'] = 'true'
    elif used is False:
        params['used'] = 'false'
    if omit_addresses:
        params['omitWalletAddresses'] = 'true'

    r = requests.get(url, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)

    return get_valid_json(r)
def QA_util_datetime_to_strdatetime(dt):
    """
    :param dt: python datetime.datetime
    :return: 1999-02-01 09:30:51 string type
    """
    strdatetime = "%04d-%02d-%02d %02d:%02d:%02d" % (
        dt.year,
        dt.month,
        dt.day,
        dt.hour,
        dt.minute,
        dt.second
    )
    return strdatetime
:param dt: python datetime.datetime
:return: 1999-02-01 09:30:51 string type
Below is the instruction that describes the task:

### Input:
:param dt: python datetime.datetime
:return: 1999-02-01 09:30:51 string type

### Response:
def QA_util_datetime_to_strdatetime(dt):
    """
    :param dt: python datetime.datetime
    :return: 1999-02-01 09:30:51 string type
    """
    strdatetime = "%04d-%02d-%02d %02d:%02d:%02d" % (
        dt.year,
        dt.month,
        dt.day,
        dt.hour,
        dt.minute,
        dt.second
    )
    return strdatetime
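For comparison, the manual %-formatting above should agree with `datetime.strftime`; a small self-checking example (the sample timestamp is illustrative):

import datetime

dt = datetime.datetime(1999, 2, 1, 9, 30, 51)
assert QA_util_datetime_to_strdatetime(dt) == dt.strftime("%Y-%m-%d %H:%M:%S")
print(QA_util_datetime_to_strdatetime(dt))  # -> 1999-02-01 09:30:51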
def generate_doc(self, dir_name, vasprun_files):
    """
    Process aflow style runs, where each run is actually a combination of
    two vasp runs.
    """
    try:
        fullpath = os.path.abspath(dir_name)
        # Defensively copy the additional fields first. This is a MUST.
        # Otherwise, parallel updates will see the same object and inserts
        # will be overridden!!
        d = {k: v for k, v in self.additional_fields.items()}
        d["dir_name"] = fullpath
        d["schema_version"] = VaspToDbTaskDrone.__version__
        d["calculations"] = [
            self.process_vasprun(dir_name, taskname, filename)
            for taskname, filename in vasprun_files.items()]
        d1 = d["calculations"][0]
        d2 = d["calculations"][-1]

        # Now map some useful info to the root level.
        for root_key in ["completed_at", "nsites", "unit_cell_formula",
                         "reduced_cell_formula", "pretty_formula",
                         "elements", "nelements", "cif", "density",
                         "is_hubbard", "hubbards", "run_type"]:
            d[root_key] = d2[root_key]
        d["chemsys"] = "-".join(sorted(d2["elements"]))

        # store any overrides to the exchange correlation functional
        xc = d2["input"]["incar"].get("GGA")
        if xc:
            xc = xc.upper()
        d["input"] = {"crystal": d1["input"]["crystal"],
                      "is_lasph": d2["input"]["incar"].get("LASPH", False),
                      "potcar_spec": d1["input"].get("potcar_spec"),
                      "xc_override": xc}
        vals = sorted(d2["reduced_cell_formula"].values())
        d["anonymous_formula"] = {string.ascii_uppercase[i]: float(vals[i])
                                  for i in range(len(vals))}
        d["output"] = {
            "crystal": d2["output"]["crystal"],
            "final_energy": d2["output"]["final_energy"],
            "final_energy_per_atom": d2["output"]["final_energy_per_atom"]}
        d["name"] = "aflow"
        p = d2["input"]["potcar_type"][0].split("_")
        pot_type = p[0]
        functional = "lda" if len(pot_type) == 1 else "_".join(p[1:])
        d["pseudo_potential"] = {"functional": functional.lower(),
                                 "pot_type": pot_type.lower(),
                                 "labels": d2["input"]["potcar"]}
        if len(d["calculations"]) == len(self.runs) or \
                list(vasprun_files.keys())[0] != "relax1":
            d["state"] = "successful" if d2["has_vasp_completed"] \
                else "unsuccessful"
        else:
            d["state"] = "stopped"
        d["analysis"] = get_basic_analysis_and_error_checks(d)
        sg = SpacegroupAnalyzer(Structure.from_dict(d["output"]["crystal"]),
                                0.1)
        d["spacegroup"] = {"symbol": sg.get_space_group_symbol(),
                           "number": sg.get_space_group_number(),
                           "point_group": sg.get_point_group_symbol(),
                           "source": "spglib",
                           "crystal_system": sg.get_crystal_system(),
                           "hall": sg.get_hall()}
        d["oxide_type"] = d2["oxide_type"]
        d["last_updated"] = datetime.datetime.today()
        return d
    except Exception as ex:
        import traceback
        print(traceback.format_exc())
        logger.error("Error in " + os.path.abspath(dir_name) +
                     ".\n" + traceback.format_exc())
        return None
Process aflow style runs, where each run is actually a combination of two vasp runs.
Below is the instruction that describes the task:

### Input:
Process aflow style runs, where each run is actually a combination of two vasp runs.

### Response:
def generate_doc(self, dir_name, vasprun_files):
    """
    Process aflow style runs, where each run is actually a combination of
    two vasp runs.
    """
    try:
        fullpath = os.path.abspath(dir_name)
        # Defensively copy the additional fields first. This is a MUST.
        # Otherwise, parallel updates will see the same object and inserts
        # will be overridden!!
        d = {k: v for k, v in self.additional_fields.items()}
        d["dir_name"] = fullpath
        d["schema_version"] = VaspToDbTaskDrone.__version__
        d["calculations"] = [
            self.process_vasprun(dir_name, taskname, filename)
            for taskname, filename in vasprun_files.items()]
        d1 = d["calculations"][0]
        d2 = d["calculations"][-1]

        # Now map some useful info to the root level.
        for root_key in ["completed_at", "nsites", "unit_cell_formula",
                         "reduced_cell_formula", "pretty_formula",
                         "elements", "nelements", "cif", "density",
                         "is_hubbard", "hubbards", "run_type"]:
            d[root_key] = d2[root_key]
        d["chemsys"] = "-".join(sorted(d2["elements"]))

        # store any overrides to the exchange correlation functional
        xc = d2["input"]["incar"].get("GGA")
        if xc:
            xc = xc.upper()
        d["input"] = {"crystal": d1["input"]["crystal"],
                      "is_lasph": d2["input"]["incar"].get("LASPH", False),
                      "potcar_spec": d1["input"].get("potcar_spec"),
                      "xc_override": xc}
        vals = sorted(d2["reduced_cell_formula"].values())
        d["anonymous_formula"] = {string.ascii_uppercase[i]: float(vals[i])
                                  for i in range(len(vals))}
        d["output"] = {
            "crystal": d2["output"]["crystal"],
            "final_energy": d2["output"]["final_energy"],
            "final_energy_per_atom": d2["output"]["final_energy_per_atom"]}
        d["name"] = "aflow"
        p = d2["input"]["potcar_type"][0].split("_")
        pot_type = p[0]
        functional = "lda" if len(pot_type) == 1 else "_".join(p[1:])
        d["pseudo_potential"] = {"functional": functional.lower(),
                                 "pot_type": pot_type.lower(),
                                 "labels": d2["input"]["potcar"]}
        if len(d["calculations"]) == len(self.runs) or \
                list(vasprun_files.keys())[0] != "relax1":
            d["state"] = "successful" if d2["has_vasp_completed"] \
                else "unsuccessful"
        else:
            d["state"] = "stopped"
        d["analysis"] = get_basic_analysis_and_error_checks(d)
        sg = SpacegroupAnalyzer(Structure.from_dict(d["output"]["crystal"]),
                                0.1)
        d["spacegroup"] = {"symbol": sg.get_space_group_symbol(),
                           "number": sg.get_space_group_number(),
                           "point_group": sg.get_point_group_symbol(),
                           "source": "spglib",
                           "crystal_system": sg.get_crystal_system(),
                           "hall": sg.get_hall()}
        d["oxide_type"] = d2["oxide_type"]
        d["last_updated"] = datetime.datetime.today()
        return d
    except Exception as ex:
        import traceback
        print(traceback.format_exc())
        logger.error("Error in " + os.path.abspath(dir_name) +
                     ".\n" + traceback.format_exc())
        return None
def _magic(header, footer, mime, ext=None):
    """ Discover what type of file it is based on the incoming string """
    if not header:
        raise ValueError("Input was empty")
    info = _identify_all(header, footer, ext)[0]
    if mime:
        return info.mime_type
    return info.extension if not \
        isinstance(info.extension, list) else info[0].extension
Discover what type of file it is based on the incoming string
Below is the instruction that describes the task:

### Input:
Discover what type of file it is based on the incoming string

### Response:
def _magic(header, footer, mime, ext=None):
    """ Discover what type of file it is based on the incoming string """
    if not header:
        raise ValueError("Input was empty")
    info = _identify_all(header, footer, ext)[0]
    if mime:
        return info.mime_type
    return info.extension if not \
        isinstance(info.extension, list) else info[0].extension
def read_namespaced_replica_set(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_replica_set  # noqa: E501

    read the specified ReplicaSet  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_replica_set(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the ReplicaSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
    :param bool export: Should this value be exported. Export strips fields that a user can not specify.
    :return: V1beta1ReplicaSet
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.read_namespaced_replica_set_with_http_info(name, namespace, **kwargs)  # noqa: E501
    else:
        (data) = self.read_namespaced_replica_set_with_http_info(name, namespace, **kwargs)  # noqa: E501
        return data
read_namespaced_replica_set  # noqa: E501

read the specified ReplicaSet  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_replica_set(name, namespace, async_req=True)
>>> result = thread.get()

:param async_req bool
:param str name: name of the ReplicaSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1beta1ReplicaSet
         If the method is called asynchronously,
         returns the request thread.
Below is the instruction that describes the task:

### Input:
read_namespaced_replica_set  # noqa: E501

read the specified ReplicaSet  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_replica_set(name, namespace, async_req=True)
>>> result = thread.get()

:param async_req bool
:param str name: name of the ReplicaSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1beta1ReplicaSet
         If the method is called asynchronously,
         returns the request thread.

### Response:
def read_namespaced_replica_set(self, name, namespace, **kwargs):  # noqa: E501
    """read_namespaced_replica_set  # noqa: E501

    read the specified ReplicaSet  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_replica_set(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the ReplicaSet (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
    :param bool export: Should this value be exported. Export strips fields that a user can not specify.
    :return: V1beta1ReplicaSet
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.read_namespaced_replica_set_with_http_info(name, namespace, **kwargs)  # noqa: E501
    else:
        (data) = self.read_namespaced_replica_set_with_http_info(name, namespace, **kwargs)  # noqa: E501
        return data
def _round(self, **kwargs):
    """
    Subclasses may override this method.
    """
    mathInfo = self._toMathInfo(guidelines=False)
    mathInfo = mathInfo.round()
    self._fromMathInfo(mathInfo, guidelines=False)
Subclasses may override this method.
Below is the instruction that describes the task:

### Input:
Subclasses may override this method.

### Response:
def _round(self, **kwargs):
    """
    Subclasses may override this method.
    """
    mathInfo = self._toMathInfo(guidelines=False)
    mathInfo = mathInfo.round()
    self._fromMathInfo(mathInfo, guidelines=False)
def uri_to_iri_parts(path, query, fragment):
    r"""
    Converts URI parts to corresponding IRI parts in a given charset.

    Examples for URI versus IRI:

    :param path: The path of URI to convert.
    :param query: The query string of URI to convert.
    :param fragment: The fragment of URI to convert.
    """
    path = url_unquote(path, '%/;?')
    query = url_unquote(query, '%;/?:@&=+,$#')
    fragment = url_unquote(fragment, '%;/?:@&=+,$#')
    return path, query, fragment
r""" Converts a URI parts to corresponding IRI parts in a given charset. Examples for URI versus IRI: :param path: The path of URI to convert. :param query: The query string of URI to convert. :param fragment: The fragment of URI to convert.
Below is the instruction that describes the task:

### Input:
r""" Converts URI parts to corresponding IRI parts in a given charset.

Examples for URI versus IRI:

:param path: The path of URI to convert.
:param query: The query string of URI to convert.
:param fragment: The fragment of URI to convert.

### Response:
def uri_to_iri_parts(path, query, fragment):
    r"""
    Converts URI parts to corresponding IRI parts in a given charset.

    Examples for URI versus IRI:

    :param path: The path of URI to convert.
    :param query: The query string of URI to convert.
    :param fragment: The fragment of URI to convert.
    """
    path = url_unquote(path, '%/;?')
    query = url_unquote(query, '%;/?:@&=+,$#')
    fragment = url_unquote(fragment, '%;/?:@&=+,$#')
    return path, query, fragment
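For intuition, the heart of the URI-to-IRI step is percent-decoding back to Unicode text. A rough stand-in using the standard library (unlike `url_unquote` above, `urllib.parse.unquote` takes no list of characters to leave encoded, so this is only an approximation):

from urllib.parse import unquote

print(unquote('/caf%C3%A9/menu'))  # -> /café/menu
print(unquote('q=%C3%BCber'))      # -> q=über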
def process(self):
    """Periodic nonblocking processes"""
    super(NativeBLEVirtualInterface, self).process()

    if (not self._stream_sm_running) and (not self.reports.empty()):
        self._stream_data()

    if (not self._trace_sm_running) and (not self.traces.empty()):
        self._send_trace()
Periodic nonblocking processes
Below is the instruction that describes the task:

### Input:
Periodic nonblocking processes

### Response:
def process(self):
    """Periodic nonblocking processes"""
    super(NativeBLEVirtualInterface, self).process()

    if (not self._stream_sm_running) and (not self.reports.empty()):
        self._stream_data()

    if (not self._trace_sm_running) and (not self.traces.empty()):
        self._send_trace()
def get(self, request):
    """
    Render a form to collect user input about data sharing consent.
    """
    enterprise_customer_uuid = request.GET.get('enterprise_customer_uuid')
    success_url = request.GET.get('next')
    failure_url = request.GET.get('failure_url')
    course_id = request.GET.get('course_id', '')
    program_uuid = request.GET.get('program_uuid', '')
    self.preview_mode = bool(request.GET.get('preview_mode', False))

    # Get enterprise_customer to start in case we need to render a custom 404 page
    # Then go through other business logic to determine (and potentially overwrite) the enterprise customer
    enterprise_customer = get_enterprise_customer_or_404(enterprise_customer_uuid)
    context_data = get_global_context(request, enterprise_customer)

    if not self.preview_mode:
        if not self.course_or_program_exist(course_id, program_uuid):
            error_code = 'ENTGDS000'
            log_message = (
                'Neither the course with course_id: {course_id} '
                'or program with {program_uuid} exist for '
                'enterprise customer {enterprise_customer_uuid}. '
                'Error code {error_code} presented to user {userid}'.format(
                    course_id=course_id,
                    program_uuid=program_uuid,
                    error_code=error_code,
                    userid=request.user.id,
                    enterprise_customer_uuid=enterprise_customer_uuid,
                )
            )
            return render_page_with_error_code_message(request, context_data, error_code, log_message)

        try:
            consent_record = get_data_sharing_consent(
                request.user.username,
                enterprise_customer_uuid,
                program_uuid=program_uuid,
                course_id=course_id
            )
        except NotConnectedToOpenEdX as error:
            error_code = 'ENTGDS001'
            log_message = (
                'There was a problem with getting the consent record of user {userid} with '
                'uuid {enterprise_customer_uuid}. get_data_sharing_consent threw '
                'the following NotConnectedToOpenEdX error: {error} '
                'for course_id {course_id}. '
                'Error code {error_code} presented to user'.format(
                    userid=request.user.id,
                    enterprise_customer_uuid=enterprise_customer_uuid,
                    error=error,
                    error_code=error_code,
                    course_id=course_id,
                )
            )
            return render_page_with_error_code_message(request, context_data, error_code, log_message)

        try:
            consent_required = consent_record.consent_required()
        except AttributeError:
            consent_required = None

        if consent_record is None or not consent_required:
            error_code = 'ENTGDS002'
            log_message = (
                'There was a problem with the consent record of user {userid} with '
                'enterprise_customer_uuid {enterprise_customer_uuid}. consent_record has a value '
                'of {consent_record} and consent_record.consent_required() a '
                'value of {consent_required} for course_id {course_id}. '
                'Error code {error_code} presented to user'.format(
                    userid=request.user.id,
                    enterprise_customer_uuid=enterprise_customer_uuid,
                    consent_record=consent_record,
                    consent_required=consent_required,
                    error_code=error_code,
                    course_id=course_id,
                )
            )
            return render_page_with_error_code_message(request, context_data, error_code, log_message)
        else:
            enterprise_customer = consent_record.enterprise_customer
    elif not request.user.is_staff:
        raise PermissionDenied()

    # Retrieve context data again now that enterprise_customer logic has been run
    context_data = get_global_context(request, enterprise_customer)

    if not (enterprise_customer_uuid and success_url and failure_url):
        error_code = 'ENTGDS003'
        log_message = (
            'Error: one or more of the following values was falsy: '
            'enterprise_customer_uuid: {enterprise_customer_uuid}, '
            'success_url: {success_url}, '
            'failure_url: {failure_url} for course id {course_id}. '
            'The following error code was reported to user {userid}: {error_code}'.format(
                userid=request.user.id,
                enterprise_customer_uuid=enterprise_customer_uuid,
                success_url=success_url,
                failure_url=failure_url,
                error_code=error_code,
                course_id=course_id,
            )
        )
        return render_page_with_error_code_message(request, context_data, error_code, log_message)

    try:
        updated_context_dict = self.get_course_or_program_context(
            enterprise_customer,
            course_id=course_id,
            program_uuid=program_uuid
        )
        context_data.update(updated_context_dict)
    except Http404:
        error_code = 'ENTGDS004'
        log_message = (
            'CourseCatalogApiServiceClient is improperly configured. '
            'Returned error code {error_code} to user {userid} '
            'and enterprise_customer {enterprise_customer} '
            'for course_id {course_id}'.format(
                error_code=error_code,
                userid=request.user.id,
                enterprise_customer=enterprise_customer.uuid,
                course_id=course_id,
            )
        )
        return render_page_with_error_code_message(request, context_data, error_code, log_message)

    item = 'course' if course_id else 'program'
    # Translators: bold_start and bold_end are HTML tags for specifying enterprise name in bold text.
    context_data.update({
        'consent_request_prompt': _(
            'To access this {item}, you must first consent to share your learning achievements '
            'with {bold_start}{enterprise_customer_name}{bold_end}.'
        ).format(
            enterprise_customer_name=enterprise_customer.name,
            bold_start='<b>',
            bold_end='</b>',
            item=item,
        ),
        'confirmation_alert_prompt': _(
            'In order to start this {item} and use your discount, {bold_start}you must{bold_end} consent '
            'to share your {item} data with {enterprise_customer_name}.'
        ).format(
            enterprise_customer_name=enterprise_customer.name,
            bold_start='<b>',
            bold_end='</b>',
            item=item,
        ),
        'redirect_url': success_url,
        'failure_url': failure_url,
        'defer_creation': request.GET.get('defer_creation') is not None,
        'requested_permissions': [
            _('your enrollment in this {item}').format(item=item),
            _('your learning progress'),
            _('course completion'),
        ],
        'policy_link_template': '',
    })

    platform_name = context_data['platform_name']
    published_only = False if self.preview_mode else True
    enterprise_consent_page = enterprise_customer.get_data_sharing_consent_text_overrides(
        published_only=published_only
    )
    if enterprise_consent_page:
        context_data.update(self.get_context_from_db(enterprise_consent_page, platform_name, item, context_data))
    else:
        context_data.update(self.get_default_context(enterprise_customer, platform_name))

    return render(request, 'enterprise/grant_data_sharing_permissions.html', context=context_data)
Render a form to collect user input about data sharing consent.
Below is the the instruction that describes the task: ### Input: Render a form to collect user input about data sharing consent. ### Response: def get(self, request): """ Render a form to collect user input about data sharing consent. """ enterprise_customer_uuid = request.GET.get('enterprise_customer_uuid') success_url = request.GET.get('next') failure_url = request.GET.get('failure_url') course_id = request.GET.get('course_id', '') program_uuid = request.GET.get('program_uuid', '') self.preview_mode = bool(request.GET.get('preview_mode', False)) # Get enterprise_customer to start in case we need to render a custom 404 page # Then go through other business logic to determine (and potentially overwrite) the enterprise customer enterprise_customer = get_enterprise_customer_or_404(enterprise_customer_uuid) context_data = get_global_context(request, enterprise_customer) if not self.preview_mode: if not self.course_or_program_exist(course_id, program_uuid): error_code = 'ENTGDS000' log_message = ( 'Neither the course with course_id: {course_id} ' 'nor the program with {program_uuid} exists for ' 'enterprise customer {enterprise_customer_uuid}. ' 'Error code {error_code} presented to user {userid}'.format( course_id=course_id, program_uuid=program_uuid, error_code=error_code, userid=request.user.id, enterprise_customer_uuid=enterprise_customer_uuid, ) ) return render_page_with_error_code_message(request, context_data, error_code, log_message) try: consent_record = get_data_sharing_consent( request.user.username, enterprise_customer_uuid, program_uuid=program_uuid, course_id=course_id ) except NotConnectedToOpenEdX as error: error_code = 'ENTGDS001' log_message = ( 'There was a problem getting the consent record of user {userid} with ' 'uuid {enterprise_customer_uuid}. get_data_sharing_consent threw ' 'the following NotConnectedToOpenEdX error: {error} ' 'for course_id {course_id}. ' 'Error code {error_code} presented to user'.format( userid=request.user.id, enterprise_customer_uuid=enterprise_customer_uuid, error=error, error_code=error_code, course_id=course_id, ) ) return render_page_with_error_code_message(request, context_data, error_code, log_message) try: consent_required = consent_record.consent_required() except AttributeError: consent_required = None if consent_record is None or not consent_required: error_code = 'ENTGDS002' log_message = ( 'There was a problem with the consent record of user {userid} with ' 'enterprise_customer_uuid {enterprise_customer_uuid}. consent_record has a value ' 'of {consent_record} and consent_record.consent_required() a ' 'value of {consent_required} for course_id {course_id}. ' 'Error code {error_code} presented to user'.format( userid=request.user.id, enterprise_customer_uuid=enterprise_customer_uuid, consent_record=consent_record, consent_required=consent_required, error_code=error_code, course_id=course_id, ) ) return render_page_with_error_code_message(request, context_data, error_code, log_message) else: enterprise_customer = consent_record.enterprise_customer elif not request.user.is_staff: raise PermissionDenied() # Retrieve context data again now that enterprise_customer logic has been run context_data = get_global_context(request, enterprise_customer) if not (enterprise_customer_uuid and success_url and failure_url): error_code = 'ENTGDS003' log_message = ( 'Error: one or more of the following values was falsy: ' 'enterprise_customer_uuid: {enterprise_customer_uuid}, ' 'success_url: {success_url}, ' 'failure_url: {failure_url} for course id {course_id}. ' 'The following error code was reported to user {userid}: {error_code}'.format( userid=request.user.id, enterprise_customer_uuid=enterprise_customer_uuid, success_url=success_url, failure_url=failure_url, error_code=error_code, course_id=course_id, ) ) return render_page_with_error_code_message(request, context_data, error_code, log_message) try: updated_context_dict = self.get_course_or_program_context( enterprise_customer, course_id=course_id, program_uuid=program_uuid ) context_data.update(updated_context_dict) except Http404: error_code = 'ENTGDS004' log_message = ( 'CourseCatalogApiServiceClient is improperly configured. ' 'Returned error code {error_code} to user {userid} ' 'and enterprise_customer {enterprise_customer} ' 'for course_id {course_id}'.format( error_code=error_code, userid=request.user.id, enterprise_customer=enterprise_customer.uuid, course_id=course_id, ) ) return render_page_with_error_code_message(request, context_data, error_code, log_message) item = 'course' if course_id else 'program' # Translators: bold_start and bold_end are HTML tags for specifying enterprise name in bold text. context_data.update({ 'consent_request_prompt': _( 'To access this {item}, you must first consent to share your learning achievements ' 'with {bold_start}{enterprise_customer_name}{bold_end}.' ).format( enterprise_customer_name=enterprise_customer.name, bold_start='<b>', bold_end='</b>', item=item, ), 'confirmation_alert_prompt': _( 'In order to start this {item} and use your discount, {bold_start}you must{bold_end} consent ' 'to share your {item} data with {enterprise_customer_name}.' ).format( enterprise_customer_name=enterprise_customer.name, bold_start='<b>', bold_end='</b>', item=item, ), 'redirect_url': success_url, 'failure_url': failure_url, 'defer_creation': request.GET.get('defer_creation') is not None, 'requested_permissions': [ _('your enrollment in this {item}').format(item=item), _('your learning progress'), _('course completion'), ], 'policy_link_template': '', }) platform_name = context_data['platform_name'] published_only = False if self.preview_mode else True enterprise_consent_page = enterprise_customer.get_data_sharing_consent_text_overrides( published_only=published_only ) if enterprise_consent_page: context_data.update(self.get_context_from_db(enterprise_consent_page, platform_name, item, context_data)) else: context_data.update(self.get_default_context(enterprise_customer, platform_name)) return render(request, 'enterprise/grant_data_sharing_permissions.html', context=context_data)
def timedelta_to_string(timedelta): """ Utility that converts a pandas.Timedelta to a string representation compatible with pandas.Timedelta constructor format Parameters ---------- timedelta: pd.Timedelta Returns ------- string string representation of 'timedelta' """ c = timedelta.components format = '' if c.days != 0: format += '%dD' % c.days if c.hours > 0: format += '%dh' % c.hours if c.minutes > 0: format += '%dm' % c.minutes if c.seconds > 0: format += '%ds' % c.seconds if c.milliseconds > 0: format += '%dms' % c.milliseconds if c.microseconds > 0: format += '%dus' % c.microseconds if c.nanoseconds > 0: format += '%dns' % c.nanoseconds return format
Utility that converts a pandas.Timedelta to a string representation compatible with pandas.Timedelta constructor format Parameters ---------- timedelta: pd.Timedelta Returns ------- string string representation of 'timedelta'
Below is the the instruction that describes the task: ### Input: Utility that converts a pandas.Timedelta to a string representation compatible with pandas.Timedelta constructor format Parameters ---------- timedelta: pd.Timedelta Returns ------- string string representation of 'timedelta' ### Response: def timedelta_to_string(timedelta): """ Utility that converts a pandas.Timedelta to a string representation compatible with pandas.Timedelta constructor format Parameters ---------- timedelta: pd.Timedelta Returns ------- string string representation of 'timedelta' """ c = timedelta.components format = '' if c.days != 0: format += '%dD' % c.days if c.hours > 0: format += '%dh' % c.hours if c.minutes > 0: format += '%dm' % c.minutes if c.seconds > 0: format += '%ds' % c.seconds if c.milliseconds > 0: format += '%dms' % c.milliseconds if c.microseconds > 0: format += '%dus' % c.microseconds if c.nanoseconds > 0: format += '%dns' % c.nanoseconds return format
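A quick round-trip check of the helper above, relying on the docstring's claim that the output is compatible with the pandas.Timedelta constructor (assumes pandas is installed and timedelta_to_string is in scope):

import pandas as pd

td = pd.Timedelta(days=1, hours=2, minutes=3, milliseconds=4)
s = timedelta_to_string(td)   # '1D2h3m4ms'
assert pd.Timedelta(s) == td  # the string parses back to the same value
print(s)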
def prepareToCalcEndOfPrdvP(self): ''' Prepare to calculate end-of-period marginal value by creating an array of market resources that the agent could have next period, considering the grid of end-of-period assets and the distribution of shocks he might experience next period. This differs from the baseline case because different savings choices yield different interest rates. Parameters ---------- none Returns ------- aNrmNow : np.array A 1D array of end-of-period assets; also stored as attribute of self. ''' KinkBool = self.Rboro > self.Rsave # Boolean indicating that there is actually a kink. # When Rboro == Rsave, this method acts just like it did in IndShock. # When Rboro < Rsave, the solver would have terminated when it was called. # Make a grid of end-of-period assets, including *two* copies of a=0 if KinkBool: aNrmNow = np.sort(np.hstack((np.asarray(self.aXtraGrid) + self.mNrmMinNow, np.array([0.0,0.0])))) else: aNrmNow = np.asarray(self.aXtraGrid) + self.mNrmMinNow aXtraCount = aNrmNow.size # Make tiled versions of the assets grid and income shocks ShkCount = self.TranShkValsNext.size aNrm_temp = np.tile(aNrmNow,(ShkCount,1)) PermShkVals_temp = (np.tile(self.PermShkValsNext,(aXtraCount,1))).transpose() TranShkVals_temp = (np.tile(self.TranShkValsNext,(aXtraCount,1))).transpose() ShkPrbs_temp = (np.tile(self.ShkPrbsNext,(aXtraCount,1))).transpose() # Make a 1D array of the interest factor at each asset gridpoint Rfree_vec = self.Rsave*np.ones(aXtraCount) if KinkBool: Rfree_vec[0:(np.sum(aNrmNow<=0)-1)] = self.Rboro self.Rfree = Rfree_vec Rfree_temp = np.tile(Rfree_vec,(ShkCount,1)) # Make an array of market resources that we could have next period, # considering the grid of assets and the income shocks that could occur mNrmNext = Rfree_temp/(self.PermGroFac*PermShkVals_temp)*aNrm_temp + TranShkVals_temp # Recalculate the minimum MPC and human wealth using the interest factor on saving. # This overwrites values from setAndUpdateValues, which were based on Rboro instead. if KinkBool: PatFacTop = ((self.Rsave*self.DiscFacEff)**(1.0/self.CRRA))/self.Rsave self.MPCminNow = 1.0/(1.0 + PatFacTop/self.solution_next.MPCmin) self.hNrmNow = self.PermGroFac/self.Rsave*(np.dot(self.ShkPrbsNext, self.TranShkValsNext*self.PermShkValsNext) + self.solution_next.hNrm) # Store some of the constructed arrays for later use and return the assets grid self.PermShkVals_temp = PermShkVals_temp self.ShkPrbs_temp = ShkPrbs_temp self.mNrmNext = mNrmNext self.aNrmNow = aNrmNow return aNrmNow
Prepare to calculate end-of-period marginal value by creating an array of market resources that the agent could have next period, considering the grid of end-of-period assets and the distribution of shocks he might experience next period. This differs from the baseline case because different savings choices yield different interest rates. Parameters ---------- none Returns ------- aNrmNow : np.array A 1D array of end-of-period assets; also stored as attribute of self.
Below is the the instruction that describes the task: ### Input: Prepare to calculate end-of-period marginal value by creating an array of market resources that the agent could have next period, considering the grid of end-of-period assets and the distribution of shocks he might experience next period. This differs from the baseline case because different savings choices yield different interest rates. Parameters ---------- none Returns ------- aNrmNow : np.array A 1D array of end-of-period assets; also stored as attribute of self. ### Response: def prepareToCalcEndOfPrdvP(self): ''' Prepare to calculate end-of-period marginal value by creating an array of market resources that the agent could have next period, considering the grid of end-of-period assets and the distribution of shocks he might experience next period. This differs from the baseline case because different savings choices yield different interest rates. Parameters ---------- none Returns ------- aNrmNow : np.array A 1D array of end-of-period assets; also stored as attribute of self. ''' KinkBool = self.Rboro > self.Rsave # Boolean indicating that there is actually a kink. # When Rboro == Rsave, this method acts just like it did in IndShock. # When Rboro < Rsave, the solver would have terminated when it was called. # Make a grid of end-of-period assets, including *two* copies of a=0 if KinkBool: aNrmNow = np.sort(np.hstack((np.asarray(self.aXtraGrid) + self.mNrmMinNow, np.array([0.0,0.0])))) else: aNrmNow = np.asarray(self.aXtraGrid) + self.mNrmMinNow aXtraCount = aNrmNow.size # Make tiled versions of the assets grid and income shocks ShkCount = self.TranShkValsNext.size aNrm_temp = np.tile(aNrmNow,(ShkCount,1)) PermShkVals_temp = (np.tile(self.PermShkValsNext,(aXtraCount,1))).transpose() TranShkVals_temp = (np.tile(self.TranShkValsNext,(aXtraCount,1))).transpose() ShkPrbs_temp = (np.tile(self.ShkPrbsNext,(aXtraCount,1))).transpose() # Make a 1D array of the interest factor at each asset gridpoint Rfree_vec = self.Rsave*np.ones(aXtraCount) if KinkBool: Rfree_vec[0:(np.sum(aNrmNow<=0)-1)] = self.Rboro self.Rfree = Rfree_vec Rfree_temp = np.tile(Rfree_vec,(ShkCount,1)) # Make an array of market resources that we could have next period, # considering the grid of assets and the income shocks that could occur mNrmNext = Rfree_temp/(self.PermGroFac*PermShkVals_temp)*aNrm_temp + TranShkVals_temp # Recalculate the minimum MPC and human wealth using the interest factor on saving. # This overwrites values from setAndUpdateValues, which were based on Rboro instead. if KinkBool: PatFacTop = ((self.Rsave*self.DiscFacEff)**(1.0/self.CRRA))/self.Rsave self.MPCminNow = 1.0/(1.0 + PatFacTop/self.solution_next.MPCmin) self.hNrmNow = self.PermGroFac/self.Rsave*(np.dot(self.ShkPrbsNext, self.TranShkValsNext*self.PermShkValsNext) + self.solution_next.hNrm) # Store some of the constructed arrays for later use and return the assets grid self.PermShkVals_temp = PermShkVals_temp self.ShkPrbs_temp = ShkPrbs_temp self.mNrmNext = mNrmNext self.aNrmNow = aNrmNow return aNrmNow
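The kink handling above hinges on applying Rboro to the negative part of the asset grid and Rsave elsewhere, with the boundary falling between the two copies of a=0. A minimal standalone numpy sketch of that interest-factor vector, using toy numbers rather than HARK itself:

import numpy as np

Rboro, Rsave = 1.20, 1.02
aNrmNow = np.array([-0.5, -0.25, 0.0, 0.0, 0.5, 1.0])  # note the two copies of a=0

Rfree_vec = Rsave * np.ones(aNrmNow.size)
Rfree_vec[0:(np.sum(aNrmNow <= 0) - 1)] = Rboro  # borrowing rate up to the first a=0 copy
print(Rfree_vec)  # [1.2  1.2  1.2  1.02 1.02 1.02]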
def file(): """ Grammar for files found in the overall input files. """ return ( Optional(Word(alphanums).setResultsName('alias') + Suppress(Literal('.'))) + Suppress(White()) + Word(approved_printables).setResultsName('filename') )
Grammar for files found in the overall input files.
Below is the the instruction that describes the task: ### Input: Grammar for files found in the overall input files. ### Response: def file(): """ Grammar for files found in the overall input files. """ return ( Optional(Word(alphanums).setResultsName('alias') + Suppress(Literal('.'))) + Suppress(White()) + Word(approved_printables).setResultsName('filename') )
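A hedged usage sketch of the grammar above; approved_printables is assumed here to be pyparsing's printables, standing in for the project's own character set:

from pyparsing import Literal, Optional, Suppress, White, Word, alphanums, printables

approved_printables = printables  # assumption: the real definition lives elsewhere in the source

grammar = (
    Optional(Word(alphanums).setResultsName('alias') + Suppress(Literal('.')))
    + Suppress(White())
    + Word(approved_printables).setResultsName('filename')
)
result = grammar.parseString('db1. /data/input.csv')
print(result.alias, result.filename)  # db1 /data/input.csv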
def _node_isomorphic(a, b, check_varprops=True): """ Two Xmrs objects are isomorphic if they have the same structure as determined by variable linkages between preds. """ # first some quick checks a_var_refs = sorted(len(vd['refs']) for vd in a._vars.values()) b_var_refs = sorted(len(vd['refs']) for vd in b._vars.values()) if a_var_refs != b_var_refs: return False print() # these signature: [node] indices are meant to avoid unnecessary # comparisons; they also take care of "semantic feasibility" # constraints (comparing node values and properties). All that's # left is the "syntactic feasibility", or node-edge shapes. # nodedicts are {sig: [(id, edges), ...], ...} a_nd = _node_isomorphic_build_nodedict(a, check_varprops) #print('a', a_nd) b_nd = _node_isomorphic_build_nodedict(b, check_varprops) #print('b', b_nd) #return a_sigs = {} # for node -> sig mapping # don't recurse when things are unique agenda = [] isomap = {} for sig, a_pairs in sorted(a_nd.items(), key=lambda x: len(x[1])): b_pairs = b_nd.get(sig, []) if len(a_pairs) != len(b_pairs): return False if len(a_pairs) == 1: a_, a_edges = a_pairs[0] b_, b_edges = b_pairs[0] if len(a_edges) != len(b_edges): return False a_sigs[a_] = sig isomap[a_] = b_ for edge, a_tgt in a_edges.items(): if edge not in b_edges: return False isomap[a_tgt] = b_edges[edge] else: for a_, ed in a_pairs: a_sigs[a_] = sig agenda.append((a_, sig, ed)) #print(agenda) #return isomaps = _node_isomorphic(agenda, a_sigs, b_nd, isomap, {}) # for sig, a_candidates in sorted(a_nodes.items(), key=lambda x: len(x[1])): # b_candidates = b_nodes.get(sig, []) # if len(a_candidates) != len(b_candidates): return False # candidates.append((a_candidates, b_candidates)) # # nodemaps = _isomorphic(a, b, candidates, {}) try: next(isomaps) return True except StopIteration: return False
Two Xmrs objects are isomorphic if they have the same structure as determined by variable linkages between preds.
Below is the the instruction that describes the task: ### Input: Two Xmrs objects are isomorphic if they have the same structure as determined by variable linkages between preds. ### Response: def _node_isomorphic(a, b, check_varprops=True): """ Two Xmrs objects are isomorphic if they have the same structure as determined by variable linkages between preds. """ # first some quick checks a_var_refs = sorted(len(vd['refs']) for vd in a._vars.values()) b_var_refs = sorted(len(vd['refs']) for vd in b._vars.values()) if a_var_refs != b_var_refs: return False print() # these signature: [node] indices are meant to avoid unnecessary # comparisons; they also take care of "semantic feasibility" # constraints (comparing node values and properties). All that's # left is the "syntactic feasibility", or node-edge shapes. # nodedicts are {sig: [(id, edges), ...], ...} a_nd = _node_isomorphic_build_nodedict(a, check_varprops) #print('a', a_nd) b_nd = _node_isomorphic_build_nodedict(b, check_varprops) #print('b', b_nd) #return a_sigs = {} # for node -> sig mapping # don't recurse when things are unique agenda = [] isomap = {} for sig, a_pairs in sorted(a_nd.items(), key=lambda x: len(x[1])): b_pairs = b_nd.get(sig, []) if len(a_pairs) != len(b_pairs): return False if len(a_pairs) == 1: a_, a_edges = a_pairs[0] b_, b_edges = b_pairs[0] if len(a_edges) != len(b_edges): return False a_sigs[a_] = sig isomap[a_] = b_ for edge, a_tgt in a_edges.items(): if edge not in b_edges: return False isomap[a_tgt] = b_edges[edge] else: for a_, ed in a_pairs: a_sigs[a_] = sig agenda.append((a_, sig, ed)) #print(agenda) #return isomaps = _node_isomorphic(agenda, a_sigs, b_nd, isomap, {}) # for sig, a_candidates in sorted(a_nodes.items(), key=lambda x: len(x[1])): # b_candidates = b_nodes.get(sig, []) # if len(a_candidates) != len(b_candidates): return False # candidates.append((a_candidates, b_candidates)) # # nodemaps = _isomorphic(a, b, candidates, {}) try: next(isomaps) return True except StopIteration: return False
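The opening "quick check" in the function compares sorted variable-reference counts, which is the graph-theoretic degree-sequence test: a cheap necessary (but not sufficient) condition for isomorphism. Standalone illustration:

a_degrees = sorted([1, 2, 2, 3])  # e.g. reference counts per variable in graph a
b_degrees = sorted([1, 2, 3, 3])  # reference counts per variable in graph b
print(a_degrees == b_degrees)     # False -> the graphs cannot be isomorphic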
def draw_markers(self, data, coordinates, style, label, mplobj=None): """ Draw a set of markers. By default, this is done by repeatedly calling draw_path(), but renderers should generally overload this method to provide a more efficient implementation. In matplotlib, markers are created using the plt.plot() command. Parameters ---------- data : array_like A shape (N, 2) array of datapoints. coordinates : string A string code, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. style : dictionary a dictionary specifying the appearance of the markers. mplobj : matplotlib object the matplotlib plot element which generated this marker collection """ vertices, pathcodes = style['markerpath'] pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor', 'facecolor', 'zorder', 'edgewidth']) pathstyle['dasharray'] = "10,0" for vertex in data: self.draw_path(data=vertices, coordinates="points", pathcodes=pathcodes, style=pathstyle, offset=vertex, offset_coordinates=coordinates, mplobj=mplobj)
Draw a set of markers. By default, this is done by repeatedly calling draw_path(), but renderers should generally overload this method to provide a more efficient implementation. In matplotlib, markers are created using the plt.plot() command. Parameters ---------- data : array_like A shape (N, 2) array of datapoints. coordinates : string A string code, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. style : dictionary a dictionary specifying the appearance of the markers. mplobj : matplotlib object the matplotlib plot element which generated this marker collection
Below is the the instruction that describes the task: ### Input: Draw a set of markers. By default, this is done by repeatedly calling draw_path(), but renderers should generally overload this method to provide a more efficient implementation. In matplotlib, markers are created using the plt.plot() command. Parameters ---------- data : array_like A shape (N, 2) array of datapoints. coordinates : string A string code, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. style : dictionary a dictionary specifying the appearance of the markers. mplobj : matplotlib object the matplotlib plot element which generated this marker collection ### Response: def draw_markers(self, data, coordinates, style, label, mplobj=None): """ Draw a set of markers. By default, this is done by repeatedly calling draw_path(), but renderers should generally overload this method to provide a more efficient implementation. In matplotlib, markers are created using the plt.plot() command. Parameters ---------- data : array_like A shape (N, 2) array of datapoints. coordinates : string A string code, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. style : dictionary a dictionary specifying the appearance of the markers. mplobj : matplotlib object the matplotlib plot element which generated this marker collection """ vertices, pathcodes = style['markerpath'] pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor', 'facecolor', 'zorder', 'edgewidth']) pathstyle['dasharray'] = "10,0" for vertex in data: self.draw_path(data=vertices, coordinates="points", pathcodes=pathcodes, style=pathstyle, offset=vertex, offset_coordinates=coordinates, mplobj=mplobj)
def get(obj): """ Determines file format and picks suitable file types, extensions and MIME types Takes: obj (bytes) -> byte sequence (128 bytes are enough) Returns: (<class 'fleep.Info'>) -> Class instance """ if not isinstance(obj, bytes): raise TypeError("object type must be bytes") info = { "type": dict(), "extension": dict(), "mime": dict() } stream = " ".join(['{:02X}'.format(byte) for byte in obj]) for element in data: for signature in element["signature"]: offset = element["offset"] * 2 + element["offset"] if signature == stream[offset:len(signature) + offset]: for key in ["type", "extension", "mime"]: info[key][element[key]] = len(signature) for key in ["type", "extension", "mime"]: info[key] = [element for element in sorted(info[key], key=info[key].get, reverse=True)] return Info(info["type"], info["extension"], info["mime"])
Determines file format and picks suitable file types, extensions and MIME types Takes: obj (bytes) -> byte sequence (128 bytes are enough) Returns: (<class 'fleep.Info'>) -> Class instance
Below is the the instruction that describes the task: ### Input: Determines file format and picks suitable file types, extensions and MIME types Takes: obj (bytes) -> byte sequence (128 bytes are enough) Returns: (<class 'fleep.Info'>) -> Class instance ### Response: def get(obj): """ Determines file format and picks suitable file types, extensions and MIME types Takes: obj (bytes) -> byte sequence (128 bytes are enough) Returns: (<class 'fleep.Info'>) -> Class instance """ if not isinstance(obj, bytes): raise TypeError("object type must be bytes") info = { "type": dict(), "extension": dict(), "mime": dict() } stream = " ".join(['{:02X}'.format(byte) for byte in obj]) for element in data: for signature in element["signature"]: offset = element["offset"] * 2 + element["offset"] if signature == stream[offset:len(signature) + offset]: for key in ["type", "extension", "mime"]: info[key][element[key]] = len(signature) for key in ["type", "extension", "mime"]: info[key] = [element for element in sorted(info[key], key=info[key].get, reverse=True)] return Info(info["type"], info["extension"], info["mime"])
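Typical use of fleep's top-level get(): read the first 128 bytes of a file and inspect the ranked matches (the file path is a placeholder):

import fleep

with open('example.png', 'rb') as f:   # placeholder path
    info = fleep.get(f.read(128))      # 128 bytes are enough per the docstring

print(info.type)       # e.g. ['raster-image']
print(info.extension)  # e.g. ['png']
print(info.mime)       # e.g. ['image/png']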
def generate(env): """Add Builders and construction variables for ar to an Environment.""" as_module.generate(env) env['AS'] = '386asm' env['ASFLAGS'] = SCons.Util.CLVar('') env['ASPPFLAGS'] = '$ASFLAGS' env['ASCOM'] = '$AS $ASFLAGS $SOURCES -o $TARGET' env['ASPPCOM'] = '$CC $ASPPFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS $SOURCES -o $TARGET' addPharLapPaths(env)
Add Builders and construction variables for ar to an Environment.
Below is the the instruction that describes the task: ### Input: Add Builders and construction variables for ar to an Environment. ### Response: def generate(env): """Add Builders and construction variables for ar to an Environment.""" as_module.generate(env) env['AS'] = '386asm' env['ASFLAGS'] = SCons.Util.CLVar('') env['ASPPFLAGS'] = '$ASFLAGS' env['ASCOM'] = '$AS $ASFLAGS $SOURCES -o $TARGET' env['ASPPCOM'] = '$CC $ASPPFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS $SOURCES -o $TARGET' addPharLapPaths(env)
def update(self): """ Update all the switch values """ self.states = [bool(int(x)) for x in self.get('port list') or '0000']
Update all the switch values
Below is the the instruction that describes the task: ### Input: Update all the switch values ### Response: def update(self): """ Update all the switch values """ self.states = [bool(int(x)) for x in self.get('port list') or '0000']
def get_type(mime=None, ext=None): """ Returns the file type instance searching by MIME type or file extension. Args: ext: file extension string. E.g: jpg, png, mp4, mp3 mime: MIME string. E.g: image/jpeg, video/mpeg Returns: The matched file type instance. Otherwise None. """ for kind in types: # compare with ==, not "is": identity checks on strings are unreliable if kind.extension == ext or kind.mime == mime: return kind return None
Returns the file type instance searching by MIME type or file extension. Args: ext: file extension string. E.g: jpg, png, mp4, mp3 mime: MIME string. E.g: image/jpeg, video/mpeg Returns: The matched file type instance. Otherwise None.
Below is the the instruction that describes the task: ### Input: Returns the file type instance searching by MIME type or file extension. Args: ext: file extension string. E.g: jpg, png, mp4, mp3 mime: MIME string. E.g: image/jpeg, video/mpeg Returns: The matched file type instance. Otherwise None. ### Response: def get_type(mime=None, ext=None): """ Returns the file type instance searching by MIME type or file extension. Args: ext: file extension string. E.g: jpg, png, mp4, mp3 mime: MIME string. E.g: image/jpeg, video/mpeg Returns: The matched file type instance. Otherwise None. """ for kind in types: # compare with ==, not "is": identity checks on strings are unreliable if kind.extension == ext or kind.mime == mime: return kind return None
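Looking up a registered type by extension or MIME, as with the filetype package's public filetype.get_type helper:

import filetype

kind = filetype.get_type(ext='jpg')
print(kind.mime)                            # image/jpeg
print(filetype.get_type(mime='video/mp4'))  # the matching Type instance for mp4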
def handle_prepared_selection_of_core_class_elements(self, core_class, models): """Handles the selection for TreeStore widgets maintaining lists of a specific `core_class` elements If widgets hold a TreeStore with elements of a specific `core_class`, the local selection of that element type is handled by that widget. This method is called to integrate the local selection with the overall selection of the state machine. If no modifier key (indicating to extend the selection) is pressed, the state machine selection is set to the passed selection. If the selection is to be extended, the state machine collection will consist of the widget selection plus all previously selected elements not having the core class `core_class`. :param State | StateElement core_class: The core class of the elements the widget handles :param models: The list of models that are currently being selected locally """ if extend_selection(): self._selected.difference_update(self.get_selected_elements_of_core_class(core_class)) else: self._selected.clear() models = self._check_model_types(models) if len(models) > 1: models = reduce_to_parent_states(models) self._selected.update(models)
Handles the selection for TreeStore widgets maintaining lists of a specific `core_class` elements If widgets hold a TreeStore with elements of a specific `core_class`, the local selection of that element type is handled by that widget. This method is called to integrate the local selection with the overall selection of the state machine. If no modifier key (indicating to extend the selection) is pressed, the state machine selection is set to the passed selection. If the selection is to be extended, the state machine collection will consist of the widget selection plus all previously selected elements not having the core class `core_class`. :param State | StateElement core_class: The core class of the elements the widget handles :param models: The list of models that are currently being selected locally
Below is the the instruction that describes the task: ### Input: Handles the selection for TreeStore widgets maintaining lists of a specific `core_class` elements If widgets hold a TreeStore with elements of a specific `core_class`, the local selection of that element type is handled by that widget. This method is called to integrate the local selection with the overall selection of the state machine. If no modifier key (indicating to extend the selection) is pressed, the state machine selection is set to the passed selection. If the selection is to be extended, the state machine collection will consist of the widget selection plus all previously selected elements not having the core class `core_class`. :param State | StateElement core_class: The core class of the elements the widget handles :param models: The list of models that are currently being selected locally ### Response: def handle_prepared_selection_of_core_class_elements(self, core_class, models): """Handles the selection for TreeStore widgets maintaining lists of a specific `core_class` elements If widgets hold a TreeStore with elements of a specific `core_class`, the local selection of that element type is handled by that widget. This method is called to integrate the local selection with the overall selection of the state machine. If no modifier key (indicating to extend the selection) is pressed, the state machine selection is set to the passed selection. If the selection is to be extended, the state machine collection will consist of the widget selection plus all previously selected elements not having the core class `core_class`. :param State | StateElement core_class: The core class of the elements the widget handles :param models: The list of models that are currently being selected locally """ if extend_selection(): self._selected.difference_update(self.get_selected_elements_of_core_class(core_class)) else: self._selected.clear() models = self._check_model_types(models) if len(models) > 1: models = reduce_to_parent_states(models) self._selected.update(models)
def select(self, df, args, inplace=False): """ After joining, selects a subset of arguments df: the result of a call to self.join(left) args: a collection of arguments to select, as accepted by drain.util.list_expand: - a tuple corresponding to concat_args, e.g. [('District', '12h'), ('District', '24h')] - a dict to be expanded into the above, e.g. {'District': ['12h', '24h']} """ if self.prefix is None: raise ValueError('Cannot do selection on an Aggregation without a prefix') # run list_expand and ensure all args are tuples for validation args = [tuple(i) for i in util.list_expand(args)] # check that the args passed are valid for a in args: has_arg = False for argument in self.arguments: if a == tuple(argument[k] for k in self.concat_args): has_arg = True break if not has_arg: raise ValueError('Invalid argument for selection: %s' % str(a)) df = data.select_features( df, exclude=[self.prefix + '_.*'], include=map(lambda a: self.args_prefix(a) + '.*', args), inplace=inplace) return df
After joining, selects a subset of arguments df: the result of a call to self.join(left) args: a collection of arguments to select, as accepted by drain.util.list_expand: - a tuple corresponding to concat_args, e.g. [('District', '12h'), ('District', '24h')] - a dict to be expanded into the above, e.g. {'District': ['12h', '24h']}
Below is the the instruction that describes the task: ### Input: After joining, selects a subset of arguments df: the result of a call to self.join(left) args: a collection of arguments to select, as accepted by drain.util.list_expand: - a tuple corresponding to concat_args, e.g. [('District', '12h'), ('District', '24h')] - a dict to be expanded into the above, e.g. {'District': ['12h', '24h']} ### Response: def select(self, df, args, inplace=False): """ After joining, selects a subset of arguments df: the result of a call to self.join(left) args: a collection of arguments to select, as accepted by drain.util.list_expand: - a tuple corresponding to concat_args, e.g. [('District', '12h'), ('District', '24h')] - a dict to be expanded into the above, e.g. {'District': ['12h', '24h']} """ if self.prefix is None: raise ValueError('Cannot do selection on an Aggregation without a prefix') # run list_expand and ensure all args are tuples for validation args = [tuple(i) for i in util.list_expand(args)] # check that the args passed are valid for a in args: has_arg = False for argument in self.arguments: if a == tuple(argument[k] for k in self.concat_args): has_arg = True break if not has_arg: raise ValueError('Invalid argument for selection: %s' % str(a)) df = data.select_features( df, exclude=[self.prefix + '_.*'], include=map(lambda a: self.args_prefix(a) + '.*', args), inplace=inplace) return df
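The docstring's dict-to-tuples expansion is the key convention here. A standalone sketch of that semantics as described above (an assumption about drain.util.list_expand's behavior, not its actual implementation):

def expand(d):
    # assumed semantics: each key pairs with each of its values
    return [(k, v) for k, vs in d.items() for v in vs]

print(expand({'District': ['12h', '24h']}))
# [('District', '12h'), ('District', '24h')]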
async def connect(self): """ Create new asynchronous connection to the RabbitMQ instance. This will connect, declare exchange and bind itself to the configured queue. After that, client is ready to publish or consume messages. :return: Does not return anything. """ if self.connected or self.is_connecting: return self._is_connecting = True try: logger.info("Connecting to RabbitMQ...") self._transport, self._protocol = await aioamqp.connect(**self._connection_parameters) logger.info("Getting channel...") self._channel = await self._protocol.channel() if self._global_qos is not None: logger.info("Setting prefetch count on connection (%s)", self._global_qos) await self._channel.basic_qos(0, self._global_qos, 1) logger.info("Connecting to exchange '%s (%s)'", self._exchange_name, self._exchange_type) await self._channel.exchange(self._exchange_name, self._exchange_type) except (aioamqp.AmqpClosedConnection, Exception): logger.error("Error initializing RabbitMQ connection", exc_info=True) self._is_connecting = False raise exceptions.StreamConnectionError self._is_connecting = False
Create new asynchronous connection to the RabbitMQ instance. This will connect, declare exchange and bind itself to the configured queue. After that, client is ready to publish or consume messages. :return: Does not return anything.
Below is the the instruction that describes the task: ### Input: Create new asynchronous connection to the RabbitMQ instance. This will connect, declare exchange and bind itself to the configured queue. After that, client is ready to publish or consume messages. :return: Does not return anything. ### Response: async def connect(self): """ Create new asynchronous connection to the RabbitMQ instance. This will connect, declare exchange and bind itself to the configured queue. After that, client is ready to publish or consume messages. :return: Does not return anything. """ if self.connected or self.is_connecting: return self._is_connecting = True try: logger.info("Connecting to RabbitMQ...") self._transport, self._protocol = await aioamqp.connect(**self._connection_parameters) logger.info("Getting channel...") self._channel = await self._protocol.channel() if self._global_qos is not None: logger.info("Setting prefetch count on connection (%s)", self._global_qos) await self._channel.basic_qos(0, self._global_qos, 1) logger.info("Connecting to exchange '%s (%s)'", self._exchange_name, self._exchange_type) await self._channel.exchange(self._exchange_name, self._exchange_type) except (aioamqp.AmqpClosedConnection, Exception): logger.error("Error initializing RabbitMQ connection", exc_info=True) self._is_connecting = False raise exceptions.StreamConnectionError self._is_connecting = False
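A hedged sketch of the same connection sequence with raw aioamqp, mirroring the steps in the method above (host, port, and exchange name are placeholders):

import asyncio
import aioamqp

async def main():
    # aioamqp.connect() returns a (transport, protocol) pair, as stored by the class above
    transport, protocol = await aioamqp.connect(host='localhost', port=5672)  # placeholders
    channel = await protocol.channel()
    await channel.exchange('events', 'topic')  # declare the exchange, as in the class above
    await protocol.close()
    transport.close()

asyncio.run(main())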
def color_validator(input_str): """ A filter allowing only the particular colors used by the Google Calendar API Raises ValidationError otherwise. """ try: assert input_str in VALID_OVERRIDE_COLORS + [''] return input_str except AssertionError: raise ValidationError( 'Expected colors are: ' + ', '.join(color for color in VALID_OVERRIDE_COLORS) + '. (Ctrl-C to exit)\n')
A filter allowing only the particular colors used by the Google Calendar API Raises ValidationError otherwise.
Below is the the instruction that describes the task: ### Input: A filter allowing only the particular colors used by the Google Calendar API Raises ValidationError otherwise. ### Response: def color_validator(input_str): """ A filter allowing only the particular colors used by the Google Calendar API Raises ValidationError otherwise. """ try: assert input_str in VALID_OVERRIDE_COLORS + [''] return input_str except AssertionError: raise ValidationError( 'Expected colors are: ' + ', '.join(color for color in VALID_OVERRIDE_COLORS) + '. (Ctrl-C to exit)\n')
def ecdsa_recover_compact(msg32, sig): """ Takes a message and a parsed recoverable signature Returns the serialized public key corresponding to the private key used in the sign function """ assert isinstance(msg32, bytes) assert len(msg32) == 32 _check_signature(sig) # Check that recid is of valid value recid = ord(sig[64:65]) if not (recid >= 0 and recid <= 3): raise InvalidSignatureError() # Setting the pubkey array pubkey = ffi.new("secp256k1_pubkey *") lib.secp256k1_ecdsa_recover( ctx, pubkey, _parse_to_recoverable_signature(sig), msg32 ) serialized_pubkey = _serialize_pubkey(pubkey) buf = ffi.buffer(serialized_pubkey, 65) r = buf[:] assert isinstance(r, bytes) assert len(r) == 65, len(r) return r
Takes a message and a parsed recoverable signature Returns the serialized public key corresponding to the private key used in the sign function
Below is the the instruction that describes the task: ### Input: Takes a message and a parsed recoverable signature Returns the serialized public key corresponding to the private key used in the sign function ### Response: def ecdsa_recover_compact(msg32, sig): """ Takes a message and a parsed recoverable signature Returns the serialized public key corresponding to the private key used in the sign function """ assert isinstance(msg32, bytes) assert len(msg32) == 32 _check_signature(sig) # Check that recid is of valid value recid = ord(sig[64:65]) if not (recid >= 0 and recid <= 3): raise InvalidSignatureError() # Setting the pubkey array pubkey = ffi.new("secp256k1_pubkey *") lib.secp256k1_ecdsa_recover( ctx, pubkey, _parse_to_recoverable_signature(sig), msg32 ) serialized_pubkey = _serialize_pubkey(pubkey) buf = ffi.buffer(serialized_pubkey, 65) r = buf[:] assert isinstance(r, bytes) assert len(r) == 65, len(r) return r
def elements_as_text(self, elems, indent=2): """ Returns the elems dictionary as formatted plain text. """ assert elems text = "" for elename, eledesc in elems.items(): if isinstance(eledesc, dict): desc_txt = self.elements_as_text(eledesc, indent + 2) desc_txt = ''.join(['\n', desc_txt]) elif isinstance(eledesc, str): desc_txt = ''.join([eledesc, '\n']) else: assert False, "Only string or dictionary" ele_txt = "\t{0}{1: <22} {2}".format(' ' * indent, elename, desc_txt) text = ''.join([text, ele_txt]) return text
Returns the elems dictionary as formatted plain text.
Below is the the instruction that describes the task: ### Input: Returns the elems dictionary as formatted plain text. ### Response: def elements_as_text(self, elems, indent=2): """ Returns the elems dictionary as formatted plain text. """ assert elems text = "" for elename, eledesc in elems.items(): if isinstance(eledesc, dict): desc_txt = self.elements_as_text(eledesc, indent + 2) desc_txt = ''.join(['\n', desc_txt]) elif isinstance(eledesc, str): desc_txt = ''.join([eledesc, '\n']) else: assert False, "Only string or dictionary" ele_txt = "\t{0}{1: <22} {2}".format(' ' * indent, elename, desc_txt) text = ''.join([text, ele_txt]) return text
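Standalone demonstration of the recursive formatting above — the same body lifted out of its class so it can run on its own:

def elements_as_text(elems, indent=2):
    text = ""
    for elename, eledesc in elems.items():
        if isinstance(eledesc, dict):
            desc_txt = '\n' + elements_as_text(eledesc, indent + 2)  # recurse, indenting deeper
        else:
            desc_txt = eledesc + '\n'
        text += "\t{0}{1: <22} {2}".format(' ' * indent, elename, desc_txt)
    return text

print(elements_as_text({'server': {'host': 'bind address', 'port': 'TCP port'}}))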
def get_nagios_unit_name(relation_name='nrpe-external-master'): """ Return the nagios unit name prepended with host_context if needed :param str relation_name: Name of relation nrpe sub joined to """ host_context = get_nagios_hostcontext(relation_name) if host_context: unit = "%s:%s" % (host_context, local_unit()) else: unit = local_unit() return unit
Return the nagios unit name prepended with host_context if needed :param str relation_name: Name of relation nrpe sub joined to
Below is the the instruction that describes the task: ### Input: Return the nagios unit name prepended with host_context if needed :param str relation_name: Name of relation nrpe sub joined to ### Response: def get_nagios_unit_name(relation_name='nrpe-external-master'): """ Return the nagios unit name prepended with host_context if needed :param str relation_name: Name of relation nrpe sub joined to """ host_context = get_nagios_hostcontext(relation_name) if host_context: unit = "%s:%s" % (host_context, local_unit()) else: unit = local_unit() return unit
def set_permission(self, path, **kwargs): """Set permission of a path. :param permission: The permission of a file/directory. Any radix-8 integer (leading zeros may be omitted.) :type permission: octal """ response = self._put(path, 'SETPERMISSION', **kwargs) assert not response.content
Set permission of a path. :param permission: The permission of a file/directory. Any radix-8 integer (leading zeros may be omitted.) :type permission: octal
Below is the the instruction that describes the task: ### Input: Set permission of a path. :param permission: The permission of a file/directory. Any radix-8 integer (leading zeros may be omitted.) :type permission: octal ### Response: def set_permission(self, path, **kwargs): """Set permission of a path. :param permission: The permission of a file/directory. Any radix-8 integer (leading zeros may be omitted.) :type permission: octal """ response = self._put(path, 'SETPERMISSION', **kwargs) assert not response.content
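The method wraps WebHDFS's SETPERMISSION operation; the equivalent raw REST call per the Hadoop WebHDFS API looks like this (the namenode URL is a placeholder):

import requests

# PUT /webhdfs/v1/<path>?op=SETPERMISSION&permission=<octal>
resp = requests.put(
    'http://namenode:50070/webhdfs/v1/tmp/data.txt',  # placeholder host and path
    params={'op': 'SETPERMISSION', 'permission': '755'},
)
resp.raise_for_status()  # WebHDFS returns an empty body on success, as asserted above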
def clear(self): """ Cleans up the manager. The manager can't be used after this method has been called """ self.services.clear() self.services = None self._future_value = None super(AggregateDependency, self).clear()
Cleans up the manager. The manager can't be used after this method has been called
Below is the the instruction that describes the task: ### Input: Cleans up the manager. The manager can't be used after this method has been called ### Response: def clear(self): """ Cleans up the manager. The manager can't be used after this method has been called """ self.services.clear() self.services = None self._future_value = None super(AggregateDependency, self).clear()
def delete_offline_reports(self): """ Delete all stored offline reports :return: List of reports that still require submission """ reports = self.get_offline_reports() remaining_reports = reports[:] for report in reports: with open(report, 'r') as _f: try: js = json.load(_f) except ValueError as e: logging.error("%s. Deleting crash report.", e) os.remove(report) continue if js['SMTP Submission'] in ('Sent', 'Disabled') and js['HQ Submission'] in ('Sent', 'Disabled'): # Only delete the reports which have been sent or whose upload method is disabled. remaining_reports.remove(report) try: os.remove(report) except OSError as e: logging.error(e) self.logger.info('CrashReporter: Deleting offline reports. %d reports remaining.' % len(remaining_reports)) return remaining_reports
Delete all stored offline reports :return: List of reports that still require submission
Below is the the instruction that describes the task: ### Input: Delete all stored offline reports :return: List of reports that still require submission ### Response: def delete_offline_reports(self): """ Delete all stored offline reports :return: List of reports that still require submission """ reports = self.get_offline_reports() remaining_reports = reports[:] for report in reports: with open(report, 'r') as _f: try: js = json.load(_f) except ValueError as e: logging.error("%s. Deleting crash report.", e) os.remove(report) continue if js['SMTP Submission'] in ('Sent', 'Disabled') and js['HQ Submission'] in ('Sent', 'Disabled'): # Only delete the reports which have been sent or whose upload method is disabled. remaining_reports.remove(report) try: os.remove(report) except OSError as e: logging.error(e) self.logger.info('CrashReporter: Deleting offline reports. %d reports remaining.' % len(remaining_reports)) return remaining_reports
def create_server(self, server): """ Create a server and its storages based on a (locally created) Server object. Populates the given Server instance with the API response. 0.3.0: also supports giving the entire POST body as a dict that is directly serialised into JSON. Refer to the REST API documentation for correct format. Example: server1 = Server( core_number = 1, memory_amount = 1024, hostname = "my.example.1", zone = ZONE.London, storage_devices = [ Storage(os = "Ubuntu 14.04", size=10, tier=maxiops, title='The OS drive'), Storage(size=10), Storage() title = "My Example Server" ]) manager.create_server(server1) One storage should contain an OS. Otherwise storage fields are optional. - size defaults to 10, - title defaults to hostname + " OS disk" and hostname + " storage disk id" (id is a running starting from 1) - tier defaults to maxiops - valid operating systems are: "CentOS 6.5", "CentOS 7.0" "Debian 7.8" "Ubuntu 12.04", "Ubuntu 14.04" "Windows 2003","Windows 2008" ,"Windows 2012" """ if isinstance(server, Server): body = server.prepare_post_body() else: server = Server._create_server_obj(server, cloud_manager=self) body = server.prepare_post_body() res = self.post_request('/server', body) server_to_return = server server_to_return._reset( res['server'], cloud_manager=self, populated=True ) return server_to_return
Create a server and its storages based on a (locally created) Server object. Populates the given Server instance with the API response. 0.3.0: also supports giving the entire POST body as a dict that is directly serialised into JSON. Refer to the REST API documentation for correct format. Example: server1 = Server( core_number = 1, memory_amount = 1024, hostname = "my.example.1", zone = ZONE.London, storage_devices = [ Storage(os = "Ubuntu 14.04", size=10, tier=maxiops, title='The OS drive'), Storage(size=10), Storage() title = "My Example Server" ]) manager.create_server(server1) One storage should contain an OS. Otherwise storage fields are optional. - size defaults to 10, - title defaults to hostname + " OS disk" and hostname + " storage disk id" (id is a running starting from 1) - tier defaults to maxiops - valid operating systems are: "CentOS 6.5", "CentOS 7.0" "Debian 7.8" "Ubuntu 12.04", "Ubuntu 14.04" "Windows 2003","Windows 2008" ,"Windows 2012"
Below is the the instruction that describes the task: ### Input: Create a server and its storages based on a (locally created) Server object. Populates the given Server instance with the API response. 0.3.0: also supports giving the entire POST body as a dict that is directly serialised into JSON. Refer to the REST API documentation for correct format. Example: server1 = Server( core_number = 1, memory_amount = 1024, hostname = "my.example.1", zone = ZONE.London, storage_devices = [ Storage(os = "Ubuntu 14.04", size=10, tier=maxiops, title='The OS drive'), Storage(size=10), Storage() title = "My Example Server" ]) manager.create_server(server1) One storage should contain an OS. Otherwise storage fields are optional. - size defaults to 10, - title defaults to hostname + " OS disk" and hostname + " storage disk id" (id is a running starting from 1) - tier defaults to maxiops - valid operating systems are: "CentOS 6.5", "CentOS 7.0" "Debian 7.8" "Ubuntu 12.04", "Ubuntu 14.04" "Windows 2003","Windows 2008" ,"Windows 2012" ### Response: def create_server(self, server): """ Create a server and its storages based on a (locally created) Server object. Populates the given Server instance with the API response. 0.3.0: also supports giving the entire POST body as a dict that is directly serialised into JSON. Refer to the REST API documentation for correct format. Example: server1 = Server( core_number = 1, memory_amount = 1024, hostname = "my.example.1", zone = ZONE.London, storage_devices = [ Storage(os = "Ubuntu 14.04", size=10, tier=maxiops, title='The OS drive'), Storage(size=10), Storage() title = "My Example Server" ]) manager.create_server(server1) One storage should contain an OS. Otherwise storage fields are optional. - size defaults to 10, - title defaults to hostname + " OS disk" and hostname + " storage disk id" (id is a running starting from 1) - tier defaults to maxiops - valid operating systems are: "CentOS 6.5", "CentOS 7.0" "Debian 7.8" "Ubuntu 12.04", "Ubuntu 14.04" "Windows 2003","Windows 2008" ,"Windows 2012" """ if isinstance(server, Server): body = server.prepare_post_body() else: server = Server._create_server_obj(server, cloud_manager=self) body = server.prepare_post_body() res = self.post_request('/server', body) server_to_return = server server_to_return._reset( res['server'], cloud_manager=self, populated=True ) return server_to_return
def Contains(self, other): """Checks if the passed parameter is in the range of this object. """ if other is None: raise ValueError("other is None.") if isinstance(other, Range): if other.low >= self.low and other.high <= self.high: return True return False else: return self.Contains(Range(other, other))
Checks if the passed parameter is in the range of this object.
Below is the the instruction that describes the task: ### Input: Checks if the passed parameter is in the range of this object. ### Response: def Contains(self, other): """Checks if the passed parameter is in the range of this object. """ if other is None: raise ValueError("other is None.") if isinstance(other, Range): if other.low >= self.low and other.high <= self.high: return True return False else: return self.Contains(Range(other, other))
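A minimal Range sketch to exercise Contains — just enough class around the method above (the real class presumably defines more):

class Range(object):
    def __init__(self, low, high):
        self.low, self.high = low, high

    def Contains(self, other):
        if other is None:
            raise ValueError("other is None.")
        if isinstance(other, Range):
            return other.low >= self.low and other.high <= self.high
        return self.Contains(Range(other, other))  # scalar: treat as a degenerate range

r = Range(1, 10)
print(r.Contains(5))             # True
print(r.Contains(Range(3, 12)))  # False -- upper bound exceeds 10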
async def Restore(self, backup_id): ''' backup_id : str Returns -> None ''' # map input types to rpc msg _params = dict() msg = dict(type='Backups', request='Restore', version=1, params=_params) _params['backup-id'] = backup_id reply = await self.rpc(msg) return reply
backup_id : str Returns -> None
Below is the the instruction that describes the task: ### Input: backup_id : str Returns -> None ### Response: async def Restore(self, backup_id): ''' backup_id : str Returns -> None ''' # map input types to rpc msg _params = dict() msg = dict(type='Backups', request='Restore', version=1, params=_params) _params['backup-id'] = backup_id reply = await self.rpc(msg) return reply
def thumbnail_url(source, alias): """ Return the thumbnail url for a source file using an aliased set of thumbnail options. If no matching alias is found, returns an empty string. Example usage:: <img src="{{ person.photo|thumbnail_url:'small' }}" alt=""> """ try: thumb = get_thumbnailer(source)[alias] except Exception: return '' return thumb.url
Return the thumbnail url for a source file using an aliased set of thumbnail options. If no matching alias is found, returns an empty string. Example usage:: <img src="{{ person.photo|thumbnail_url:'small' }}" alt="">
Below is the the instruction that describes the task: ### Input: Return the thumbnail url for a source file using an aliased set of thumbnail options. If no matching alias is found, returns an empty string. Example usage:: <img src="{{ person.photo|thumbnail_url:'small' }}" alt=""> ### Response: def thumbnail_url(source, alias): """ Return the thumbnail url for a source file using an aliased set of thumbnail options. If no matching alias is found, returns an empty string. Example usage:: <img src="{{ person.photo|thumbnail_url:'small' }}" alt=""> """ try: thumb = get_thumbnailer(source)[alias] except Exception: return '' return thumb.url
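For the alias lookup to succeed, easy_thumbnails needs the alias defined in settings, in the standard THUMBNAIL_ALIASES form (sizes here are illustrative):

# settings.py -- the '' key applies the aliases project-wide
THUMBNAIL_ALIASES = {
    '': {
        'small': {'size': (50, 50), 'crop': True},
    },
}

After which {{ person.photo|thumbnail_url:'small' }} resolves as in the docstring.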
def getParams(self): """ get params """ rv = np.array([]) if self.n_terms>0: rv = np.concatenate([np.reshape(self.B[term_i],self.B[term_i].size, order='F') for term_i in range(self.n_terms)]) return rv
get params
Below is the the instruction that describes the task: ### Input: get params ### Response: def getParams(self): """ get params """ rv = np.array([]) if self.n_terms>0: rv = np.concatenate([np.reshape(self.B[term_i],self.B[term_i].size, order='F') for term_i in range(self.n_terms)]) return rv
def get_shm_volumes(): """ Mount a tmpfs volume to /dev/shm. This will set /dev/shm size to half of the RAM of the node. By default, /dev/shm is very small, only 64MB. Some experiments will fail due to lack of shared memory, such as experiments running on PyTorch. """ volumes, volume_mounts = [], [] shm_volume = client.V1Volume( name=constants.SHM_VOLUME, empty_dir=client.V1EmptyDirVolumeSource(medium='Memory') ) volumes.append(shm_volume) shm_volume_mount = client.V1VolumeMount(name=shm_volume.name, mount_path='/dev/shm') volume_mounts.append(shm_volume_mount) return volumes, volume_mounts
Mount a tmpfs volume to /dev/shm. This will set /dev/shm size to half of the RAM of the node. By default, /dev/shm is very small, only 64MB. Some experiments will fail due to lack of shared memory, such as experiments running on PyTorch.
Below is the the instruction that describes the task: ### Input: Mount a tmpfs volume to /dev/shm. This will set /dev/shm size to half of the RAM of the node. By default, /dev/shm is very small, only 64MB. Some experiments will fail due to lack of shared memory, such as experiments running on PyTorch. ### Response: def get_shm_volumes(): """ Mount a tmpfs volume to /dev/shm. This will set /dev/shm size to half of the RAM of the node. By default, /dev/shm is very small, only 64MB. Some experiments will fail due to lack of shared memory, such as experiments running on PyTorch. """ volumes, volume_mounts = [], [] shm_volume = client.V1Volume( name=constants.SHM_VOLUME, empty_dir=client.V1EmptyDirVolumeSource(medium='Memory') ) volumes.append(shm_volume) shm_volume_mount = client.V1VolumeMount(name=shm_volume.name, mount_path='/dev/shm') volume_mounts.append(shm_volume_mount) return volumes, volume_mounts
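Wiring such a volume/mount pair into a pod spec with the official kubernetes client; shown inline and self-contained, with the volume name, container name, and image as placeholders:

from kubernetes import client

shm_volume = client.V1Volume(
    name='shm',  # placeholder for constants.SHM_VOLUME
    empty_dir=client.V1EmptyDirVolumeSource(medium='Memory'),
)
shm_mount = client.V1VolumeMount(name='shm', mount_path='/dev/shm')

container = client.V1Container(
    name='worker',            # placeholder
    image='pytorch/pytorch',  # placeholder
    volume_mounts=[shm_mount],
)
pod_spec = client.V1PodSpec(containers=[container], volumes=[shm_volume])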
def char(self): """ Read one character from the stream or queue if available. Return EOF when EOF is reached. """ # Read a new chunk from the input stream if necessary if self.chunkOffset >= self.chunkSize: if not self.readChunk(): return EOF chunkOffset = self.chunkOffset char = self.chunk[chunkOffset] self.chunkOffset = chunkOffset + 1 return char
Read one character from the stream or queue if available. Return EOF when EOF is reached.
Below is the the instruction that describes the task: ### Input: Read one character from the stream or queue if available. Return EOF when EOF is reached. ### Response: def char(self): """ Read one character from the stream or queue if available. Return EOF when EOF is reached. """ # Read a new chunk from the input stream if necessary if self.chunkOffset >= self.chunkSize: if not self.readChunk(): return EOF chunkOffset = self.chunkOffset char = self.chunk[chunkOffset] self.chunkOffset = chunkOffset + 1 return char
def to_python(self, value): """ Validates that the input can be converted to a time. Returns a Python datetime.time object. """ if value in validators.EMPTY_VALUES: return None if isinstance(value, datetime.datetime): return value.time() if isinstance(value, datetime.time): return value if isinstance(value, list): # Input comes from a 2 SplitTimeWidgets, for example. So, it's two # components: start time and end time. if len(value) != 2: raise ValidationError(self.error_messages['invalid']) if value[0] in validators.EMPTY_VALUES and value[1] in \ validators.EMPTY_VALUES: return None start_value = value[0] end_value = value[1] start_time = None end_time = None for format in self.input_formats or formats.get_format(\ 'TIME_INPUT_FORMATS'): try: start_time = datetime.datetime( *time.strptime(start_value, format)[:6] ).time() except ValueError: if start_time: continue else: raise ValidationError(self.error_messages['invalid']) for format in self.input_formats or formats.get_format(\ 'TIME_INPUT_FORMATS'): try: end_time = datetime.datetime( *time.strptime(end_value, format)[:6] ).time() except ValueError: if end_time: continue else: raise ValidationError(self.error_messages['invalid']) return (start_time, end_time)
Validates that the input can be converted to a time. Returns a Python datetime.time object.
Below is the the instruction that describes the task: ### Input: Validates that the input can be converted to a time. Returns a Python datetime.time object. ### Response: def to_python(self, value): """ Validates that the input can be converted to a time. Returns a Python datetime.time object. """ if value in validators.EMPTY_VALUES: return None if isinstance(value, datetime.datetime): return value.time() if isinstance(value, datetime.time): return value if isinstance(value, list): # Input comes from a 2 SplitTimeWidgets, for example. So, it's two # components: start time and end time. if len(value) != 2: raise ValidationError(self.error_messages['invalid']) if value[0] in validators.EMPTY_VALUES and value[1] in \ validators.EMPTY_VALUES: return None start_value = value[0] end_value = value[1] start_time = None end_time = None for format in self.input_formats or formats.get_format(\ 'TIME_INPUT_FORMATS'): try: start_time = datetime.datetime( *time.strptime(start_value, format)[:6] ).time() except ValueError: if start_time: continue else: raise ValidationError(self.error_messages['invalid']) for format in self.input_formats or formats.get_format(\ 'TIME_INPUT_FORMATS'): try: end_time = datetime.datetime( *time.strptime(end_value, format)[:6] ).time() except ValueError: if end_time: continue else: raise ValidationError(self.error_messages['invalid']) return (start_time, end_time)
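The heart of the list branch is the strptime fallback loop; stripped of Django, it reduces to the sketch below (the format list is an assumption mirroring Django's TIME_INPUT_FORMATS):

import datetime, time

def parse_time(value, formats=('%H:%M:%S', '%H:%M')):  # assumed format list
    for fmt in formats:
        try:
            return datetime.datetime(*time.strptime(value, fmt)[:6]).time()
        except ValueError:
            continue
    raise ValueError('invalid time: %r' % value)

print(parse_time('09:00'), parse_time('17:30:15'))  # 09:00:00 17:30:15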
def get_languages(self, include_main=False): """ Get all the languages except the main. Try to get in order: 1.- item languages 2.- model languages 3.- application model languages # 4.- default languages :param master: :param include_main: :return: """ if not self.master: raise Exception('TransManager - No master set') item_languages = self.get_languages_from_item(self.ct_master, self.master) languages = self.get_languages_from_model(self.ct_master.app_label, self.ct_master.model) if not languages: languages = self.get_languages_from_application(self.ct_master.app_label) # if not languages: # languages = self.get_languages_default() if not include_main: main_language = self.get_main_language() if main_language in languages: languages.remove(main_language) return list(set(item_languages + languages))
Get all the languages except the main. Try to get in order: 1.- item languages 2.- model languages 3.- application model languages # 4.- default languages :param master: :param include_main: :return:
Below is the the instruction that describes the task: ### Input: Get all the languages except the main. Try to get in order: 1.- item languages 2.- model languages 3.- application model languages # 4.- default languages :param master: :param include_main: :return: ### Response: def get_languages(self, include_main=False): """ Get all the languages except the main. Try to get in order: 1.- item languages 2.- model languages 3.- application model languages # 4.- default languages :param master: :param include_main: :return: """ if not self.master: raise Exception('TransManager - No master set') item_languages = self.get_languages_from_item(self.ct_master, self.master) languages = self.get_languages_from_model(self.ct_master.app_label, self.ct_master.model) if not languages: languages = self.get_languages_from_application(self.ct_master.app_label) # if not languages: # languages = self.get_languages_default() if not include_main: main_language = self.get_main_language() if main_language in languages: languages.remove(main_language) return list(set(item_languages + languages))
def requests_for_variant(self, request, variant_id=None): """Get all the requests for a single variant """ requests = ProductRequest.objects.filter(variant__id=variant_id) serializer = self.serializer_class(requests, many=True) return Response(data=serializer.data, status=status.HTTP_200_OK)
Get all the requests for a single variant
Below is the the instruction that describes the task: ### Input: Get all the requests for a single variant ### Response: def requests_for_variant(self, request, variant_id=None): """Get all the requests for a single variant """ requests = ProductRequest.objects.filter(variant__id=variant_id) serializer = self.serializer_class(requests, many=True) return Response(data=serializer.data, status=status.HTTP_200_OK)
def _slider_changed(self, n): """ updates the colormap / plot """ self._button_save.setEnabled(True) self.modify_colorpoint(n, self._sliders[n].value()*0.001, self._colorpoint_list[n][1], self._colorpoint_list[n][2])
updates the colormap / plot
Below is the the instruction that describes the task: ### Input: updates the colormap / plot ### Response: def _slider_changed(self, n): """ updates the colormap / plot """ self._button_save.setEnabled(True) self.modify_colorpoint(n, self._sliders[n].value()*0.001, self._colorpoint_list[n][1], self._colorpoint_list[n][2])
def where(self, code): """Adds a $where clause to this query. The `code` argument must be an instance of :class:`basestring` (:class:`str` in python 3) or :class:`~bson.code.Code` containing a JavaScript expression. This expression will be evaluated for each document scanned. Only those documents for which the expression evaluates to *true* will be returned as results. The keyword *this* refers to the object currently being scanned. Raises :class:`TypeError` if `code` is not an instance of :class:`basestring` (:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has already been used. Only the last call to :meth:`where` applied to a :class:`Cursor` has any effect. :Parameters: - `code`: JavaScript expression to use as a filter """ self.__check_okay_to_chain() if not isinstance(code, Code): code = Code(code) self.__spec["$where"] = code return self
Adds a $where clause to this query. The `code` argument must be an instance of :class:`basestring` (:class:`str` in python 3) or :class:`~bson.code.Code` containing a JavaScript expression. This expression will be evaluated for each document scanned. Only those documents for which the expression evaluates to *true* will be returned as results. The keyword *this* refers to the object currently being scanned. Raises :class:`TypeError` if `code` is not an instance of :class:`basestring` (:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has already been used. Only the last call to :meth:`where` applied to a :class:`Cursor` has any effect. :Parameters: - `code`: JavaScript expression to use as a filter
Below is the the instruction that describes the task: ### Input: Adds a $where clause to this query. The `code` argument must be an instance of :class:`basestring` (:class:`str` in python 3) or :class:`~bson.code.Code` containing a JavaScript expression. This expression will be evaluated for each document scanned. Only those documents for which the expression evaluates to *true* will be returned as results. The keyword *this* refers to the object currently being scanned. Raises :class:`TypeError` if `code` is not an instance of :class:`basestring` (:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has already been used. Only the last call to :meth:`where` applied to a :class:`Cursor` has any effect. :Parameters: - `code`: JavaScript expression to use as a filter ### Response: def where(self, code): """Adds a $where clause to this query. The `code` argument must be an instance of :class:`basestring` (:class:`str` in python 3) or :class:`~bson.code.Code` containing a JavaScript expression. This expression will be evaluated for each document scanned. Only those documents for which the expression evaluates to *true* will be returned as results. The keyword *this* refers to the object currently being scanned. Raises :class:`TypeError` if `code` is not an instance of :class:`basestring` (:class:`str` in python 3). Raises :class:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has already been used. Only the last call to :meth:`where` applied to a :class:`Cursor` has any effect. :Parameters: - `code`: JavaScript expression to use as a filter """ self.__check_okay_to_chain() if not isinstance(code, Code): code = Code(code) self.__spec["$where"] = code return self
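A short usage sketch for the entry above. It assumes `collection` is an existing pymongo Collection; the JavaScript string is evaluated once per scanned document, and the call chains because it returns the cursor itself:

# Keep only documents whose computed total exceeds 100.
for doc in collection.find().where('this.price * this.qty > 100'):
    print(doc['_id'])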
def add_left_space(self, nspace=1):
        """Add n cols of spaces before the first col. (for texttable 0.8.3)"""
        sp = ' ' * nspace
        for item in self._rows:
            item[0] = sp + item[0]
Add n cols of spaces before the first col. (for texttable 0.8.3)
Below is the the instruction that describes the task: ### Input: Add n cols of spaces before the first col. (for texttable 0.8.3) ### Response: def add_left_space(self, nspace=1):
        """Add n cols of spaces before the first col. (for texttable 0.8.3)"""
        sp = ' ' * nspace
        for item in self._rows:
            item[0] = sp + item[0]
def admin_keywords_submit(request): """ Adds any new given keywords from the custom keywords field in the admin, and returns their IDs for use when saving a model with a keywords field. """ keyword_ids, titles = [], [] remove = punctuation.replace("-", "") # Strip punctuation, allow dashes. for title in request.POST.get("text_keywords", "").split(","): title = "".join([c for c in title if c not in remove]).strip() if title: kw, created = Keyword.objects.get_or_create_iexact(title=title) keyword_id = str(kw.id) if keyword_id not in keyword_ids: keyword_ids.append(keyword_id) titles.append(title) return HttpResponse("%s|%s" % (",".join(keyword_ids), ", ".join(titles)), content_type='text/plain')
Adds any new given keywords from the custom keywords field in the admin, and returns their IDs for use when saving a model with a keywords field.
Below is the the instruction that describes the task: ### Input: Adds any new given keywords from the custom keywords field in the admin, and returns their IDs for use when saving a model with a keywords field. ### Response: def admin_keywords_submit(request): """ Adds any new given keywords from the custom keywords field in the admin, and returns their IDs for use when saving a model with a keywords field. """ keyword_ids, titles = [], [] remove = punctuation.replace("-", "") # Strip punctuation, allow dashes. for title in request.POST.get("text_keywords", "").split(","): title = "".join([c for c in title if c not in remove]).strip() if title: kw, created = Keyword.objects.get_or_create_iexact(title=title) keyword_id = str(kw.id) if keyword_id not in keyword_ids: keyword_ids.append(keyword_id) titles.append(title) return HttpResponse("%s|%s" % (",".join(keyword_ids), ", ".join(titles)), content_type='text/plain')
def fasta2bed(fastafile): """ Alternative BED generation from FASTA file. Used for sanity check. """ dustfasta = fastafile.rsplit(".", 1)[0] + ".dust.fasta" for name, seq in parse_fasta(dustfasta): for islower, ss in groupby(enumerate(seq), key=lambda x: x[-1].islower()): if not islower: continue ss = list(ss) ms, mn = min(ss) xs, xn = max(ss) print("\t".join(str(x) for x in (name, ms, xs)))
Alternative BED generation from FASTA file. Used for sanity check.
Below is the the instruction that describes the task: ### Input: Alternative BED generation from FASTA file. Used for sanity check. ### Response: def fasta2bed(fastafile): """ Alternative BED generation from FASTA file. Used for sanity check. """ dustfasta = fastafile.rsplit(".", 1)[0] + ".dust.fasta" for name, seq in parse_fasta(dustfasta): for islower, ss in groupby(enumerate(seq), key=lambda x: x[-1].islower()): if not islower: continue ss = list(ss) ms, mn = min(ss) xs, xn = max(ss) print("\t".join(str(x) for x in (name, ms, xs)))
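The core trick in the entry above is itertools.groupby keyed on character case, so each run of lowercase (dust-masked) bases becomes one interval. A self-contained sketch of just that step, on a toy sequence:

from itertools import groupby

seq = 'ACGTacgtACGT'  # lowercase marks masked bases
for islower, run in groupby(enumerate(seq), key=lambda x: x[-1].islower()):
    if not islower:
        continue
    run = list(run)
    start, _ = min(run)  # (index, char) tuples sort by index
    end, _ = max(run)
    print(start, end)  # prints: 4 7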
def get_words(self, include_freq=False, on_unicode_error='strict'): """ Get the entire list of words of the dictionary optionally including the frequency of the individual words. This does not include any subwords. For that please consult the function get_subwords. """ pair = self.f.getVocab(on_unicode_error) if include_freq: return (pair[0], np.array(pair[1])) else: return pair[0]
Get the entire list of words of the dictionary optionally including the frequency of the individual words. This does not include any subwords. For that please consult the function get_subwords.
Below is the the instruction that describes the task: ### Input: Get the entire list of words of the dictionary optionally including the frequency of the individual words. This does not include any subwords. For that please consult the function get_subwords. ### Response: def get_words(self, include_freq=False, on_unicode_error='strict'): """ Get the entire list of words of the dictionary optionally including the frequency of the individual words. This does not include any subwords. For that please consult the function get_subwords. """ pair = self.f.getVocab(on_unicode_error) if include_freq: return (pair[0], np.array(pair[1])) else: return pair[0]
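For context, a hedged usage sketch against the official fasttext Python bindings, which this entry appears to come from; the model path is illustrative:

import fasttext

model = fasttext.load_model('model.bin')  # hypothetical path
words, freqs = model.get_words(include_freq=True)
for w, f in zip(words[:3], freqs[:3]):
    print(w, f)  # vocabulary entries with their corpus counts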
def reload(self):
        """ Allows refreshing the document, so that after using update() it can
            reload its values from the database.

            Be careful: reload() will erase all unsaved values.

            If no _id is set in the document, a KeyError is raised.
        """
        old_doc = self.mongokat_collection.find_one({"_id": self['_id']}, read_use="primary")
        if not old_doc:
            raise OperationFailure('Can not reload an unsaved document.'
                                   ' %s is not found in the database. Maybe _id was a string and not ObjectId?' % self['_id'])
        else:
            for k in list(self.keys()):
                del self[k]
            self.update(dotdict(old_doc))
        self._initialized_with_doc = False
Allows refreshing the document, so that after using update() it can reload its values from the database. Be careful: reload() will erase all unsaved values. If no _id is set in the document, a KeyError is raised.
Below is the the instruction that describes the task: ### Input: Allows refreshing the document, so that after using update() it can reload its values from the database. Be careful: reload() will erase all unsaved values. If no _id is set in the document, a KeyError is raised. ### Response: def reload(self):
        """ Allows refreshing the document, so that after using update() it can
            reload its values from the database.

            Be careful: reload() will erase all unsaved values.

            If no _id is set in the document, a KeyError is raised.
        """
        old_doc = self.mongokat_collection.find_one({"_id": self['_id']}, read_use="primary")
        if not old_doc:
            raise OperationFailure('Can not reload an unsaved document.'
                                   ' %s is not found in the database. Maybe _id was a string and not ObjectId?' % self['_id'])
        else:
            for k in list(self.keys()):
                del self[k]
            self.update(dotdict(old_doc))
        self._initialized_with_doc = False
def run_as(self, identifiers): """ :type identifiers: subject_abcs.IdentifierCollection """ if (not self.has_identifiers): msg = ("This subject does not yet have an identity. Assuming the " "identity of another Subject is only allowed for Subjects " "with an existing identity. Try logging this subject in " "first, or using the DelegatingSubject.Builder " "to build ad hoc Subject instances with identities as " "necessary.") raise ValueError(msg) self.push_identity(identifiers)
:type identifiers: subject_abcs.IdentifierCollection
Below is the the instruction that describes the task: ### Input: :type identifiers: subject_abcs.IdentifierCollection ### Response: def run_as(self, identifiers): """ :type identifiers: subject_abcs.IdentifierCollection """ if (not self.has_identifiers): msg = ("This subject does not yet have an identity. Assuming the " "identity of another Subject is only allowed for Subjects " "with an existing identity. Try logging this subject in " "first, or using the DelegatingSubject.Builder " "to build ad hoc Subject instances with identities as " "necessary.") raise ValueError(msg) self.push_identity(identifiers)
def _board_from_game_image(self, game_image): """Return a board object matching the board in the game image. Return None if any tiles are not identified. """ # board image board_rect = self._board_tools['board_region'].region_in(game_image) t, l, b, r = board_rect board_image = game_image[t:b, l:r] # board grid and tiles --> fill in a Board object board = Board() grid = self._board_tools['grid'] tile_id = self._board_tools['tile_id'] for p, borders in grid.borders_by_grid_position(board_image): t, l, b, r = borders tile = board_image[t:b, l:r] tile_character = tile_id.identify(tile) if tile_character is None: return None # soft failure board[p] = Tile.singleton(tile_character) return board
Return a board object matching the board in the game image. Return None if any tiles are not identified.
Below is the the instruction that describes the task: ### Input: Return a board object matching the board in the game image. Return None if any tiles are not identified. ### Response: def _board_from_game_image(self, game_image): """Return a board object matching the board in the game image. Return None if any tiles are not identified. """ # board image board_rect = self._board_tools['board_region'].region_in(game_image) t, l, b, r = board_rect board_image = game_image[t:b, l:r] # board grid and tiles --> fill in a Board object board = Board() grid = self._board_tools['grid'] tile_id = self._board_tools['tile_id'] for p, borders in grid.borders_by_grid_position(board_image): t, l, b, r = borders tile = board_image[t:b, l:r] tile_character = tile_id.identify(tile) if tile_character is None: return None # soft failure board[p] = Tile.singleton(tile_character) return board
def get_all_objects(self):
        "Return pointers to all GC tracked objects"
        for i, generation in enumerate(self.gc_generations):
            generation_head_ptr = pygc_head_ptr = generation.head.get_pointer()
            generation_head_addr = generation_head_ptr._value
            while True:
                # _PyObjectBase_GC_UNTRACK macro says that
                # gc_prev always points to some value
                # there is still a race condition if PyGC_Head
                # gets free'd and overwritten just before we look
                # at it
                pygc_head_ptr = pygc_head_ptr.deref().gc_next
                if pygc_head_ptr._value == generation_head_addr:
                    break
                yield pygc_head_ptr.deref().get_object_ptr()
Return pointers to all GC tracked objects
Below is the the instruction that describes the task: ### Input: Return pointers to all GC tracked objects ### Response: def get_all_objects(self):
        "Return pointers to all GC tracked objects"
        for i, generation in enumerate(self.gc_generations):
            generation_head_ptr = pygc_head_ptr = generation.head.get_pointer()
            generation_head_addr = generation_head_ptr._value
            while True:
                # _PyObjectBase_GC_UNTRACK macro says that
                # gc_prev always points to some value
                # there is still a race condition if PyGC_Head
                # gets free'd and overwritten just before we look
                # at it
                pygc_head_ptr = pygc_head_ptr.deref().gc_next
                if pygc_head_ptr._value == generation_head_addr:
                    break
                yield pygc_head_ptr.deref().get_object_ptr()
def fit_transform(self, X, y=None, **fit_params): """Fit all transformers, transform the data and concatenate results. Parameters ---------- X : iterable or array-like, depending on transformers Input data to be transformed. y : array-like, shape (n_samples, ...), optional Targets for supervised learning. Returns ------- X_t : array-like or sparse matrix, shape (n_samples, sum_n_components) hstack of results of transformers. sum_n_components is the sum of n_components (output dimension) over transformers. """ self._validate_transformers() with Pool(self.n_jobs) as pool: result = pool.starmap(_fit_transform_one, ((trans, weight, X[trans['col_pick']] if hasattr(trans, 'col_pick') else X, y) for name, trans, weight in self._iter())) if not result: # All transformers are None return np.zeros((X.shape[0], 0)) Xs, transformers = zip(*result) self._update_transformer_list(transformers) if self.concatenate: if any(sparse.issparse(f) for f in Xs): Xs = sparse.hstack(Xs).tocsr() else: Xs = np.hstack(Xs) return Xs
Fit all transformers, transform the data and concatenate results. Parameters ---------- X : iterable or array-like, depending on transformers Input data to be transformed. y : array-like, shape (n_samples, ...), optional Targets for supervised learning. Returns ------- X_t : array-like or sparse matrix, shape (n_samples, sum_n_components) hstack of results of transformers. sum_n_components is the sum of n_components (output dimension) over transformers.
Below is the the instruction that describes the task: ### Input: Fit all transformers, transform the data and concatenate results. Parameters ---------- X : iterable or array-like, depending on transformers Input data to be transformed. y : array-like, shape (n_samples, ...), optional Targets for supervised learning. Returns ------- X_t : array-like or sparse matrix, shape (n_samples, sum_n_components) hstack of results of transformers. sum_n_components is the sum of n_components (output dimension) over transformers. ### Response: def fit_transform(self, X, y=None, **fit_params): """Fit all transformers, transform the data and concatenate results. Parameters ---------- X : iterable or array-like, depending on transformers Input data to be transformed. y : array-like, shape (n_samples, ...), optional Targets for supervised learning. Returns ------- X_t : array-like or sparse matrix, shape (n_samples, sum_n_components) hstack of results of transformers. sum_n_components is the sum of n_components (output dimension) over transformers. """ self._validate_transformers() with Pool(self.n_jobs) as pool: result = pool.starmap(_fit_transform_one, ((trans, weight, X[trans['col_pick']] if hasattr(trans, 'col_pick') else X, y) for name, trans, weight in self._iter())) if not result: # All transformers are None return np.zeros((X.shape[0], 0)) Xs, transformers = zip(*result) self._update_transformer_list(transformers) if self.concatenate: if any(sparse.issparse(f) for f in Xs): Xs = sparse.hstack(Xs).tocsr() else: Xs = np.hstack(Xs) return Xs
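The entry above mirrors scikit-learn's FeatureUnion but fans the per-transformer fits out over a multiprocessing Pool. A hedged sketch of the call pattern, assuming the class is importable as ParallelFeatureUnion and that its constructor follows the FeatureUnion convention; both the name and the signature are guesses, neither is given in the entry:

import numpy as np
from sklearn.preprocessing import MinMaxScaler, StandardScaler

X = np.random.rand(10, 3)
union = ParallelFeatureUnion([('std', StandardScaler()),
                              ('minmax', MinMaxScaler())], n_jobs=2)
Xt = union.fit_transform(X)
# Both outputs are hstacked column-wise: Xt.shape == (10, 6)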
def on_get(resc, req, resp): """ Get the models identified by query parameters We return an empty list if no models are found. """ signals.pre_req.send(resc.model) signals.pre_req_search.send(resc.model) models = goldman.sess.store.search(resc.rtype, **{ 'filters': req.filters, 'pages': req.pages, 'sorts': req.sorts, }) props = to_rest_models(models, includes=req.includes) resp.serialize(props) signals.post_req.send(resc.model) signals.post_req_search.send(resc.model)
Get the models identified by query parameters We return an empty list if no models are found.
Below is the the instruction that describes the task: ### Input: Get the models identified by query parameters We return an empty list if no models are found. ### Response: def on_get(resc, req, resp): """ Get the models identified by query parameters We return an empty list if no models are found. """ signals.pre_req.send(resc.model) signals.pre_req_search.send(resc.model) models = goldman.sess.store.search(resc.rtype, **{ 'filters': req.filters, 'pages': req.pages, 'sorts': req.sorts, }) props = to_rest_models(models, includes=req.includes) resp.serialize(props) signals.post_req.send(resc.model) signals.post_req_search.send(resc.model)
def create_entity_class(self): ''' Creates a class-based entity with fixed values, using all of the supported data types. ''' entity = Entity() # Partition key and row key must be strings and are required entity.PartitionKey = 'pk{}'.format(str(uuid.uuid4()).replace('-', '')) entity.RowKey = 'rk{}'.format(str(uuid.uuid4()).replace('-', '')) # Some basic types are inferred entity.age = 39 # EdmType.INT64 entity.large = 933311100 # EdmType.INT64 entity.sex = 'male' # EdmType.STRING entity.married = True # EdmType.BOOLEAN entity.ratio = 3.1 # EdmType.DOUBLE entity.birthday = datetime(1970, 10, 4) # EdmType.DATETIME # Binary, Int32 and GUID must be explicitly typed entity.binary = EntityProperty(EdmType.BINARY, b'xyz') entity.other = EntityProperty(EdmType.INT32, 20) entity.clsid = EntityProperty(EdmType.GUID, 'c9da6455-213d-42c9-9a79-3e9149a57833') return entity
Creates a class-based entity with fixed values, using all of the supported data types.
Below is the the instruction that describes the task: ### Input: Creates a class-based entity with fixed values, using all of the supported data types. ### Response: def create_entity_class(self): ''' Creates a class-based entity with fixed values, using all of the supported data types. ''' entity = Entity() # Partition key and row key must be strings and are required entity.PartitionKey = 'pk{}'.format(str(uuid.uuid4()).replace('-', '')) entity.RowKey = 'rk{}'.format(str(uuid.uuid4()).replace('-', '')) # Some basic types are inferred entity.age = 39 # EdmType.INT64 entity.large = 933311100 # EdmType.INT64 entity.sex = 'male' # EdmType.STRING entity.married = True # EdmType.BOOLEAN entity.ratio = 3.1 # EdmType.DOUBLE entity.birthday = datetime(1970, 10, 4) # EdmType.DATETIME # Binary, Int32 and GUID must be explicitly typed entity.binary = EntityProperty(EdmType.BINARY, b'xyz') entity.other = EntityProperty(EdmType.INT32, 20) entity.clsid = EntityProperty(EdmType.GUID, 'c9da6455-213d-42c9-9a79-3e9149a57833') return entity
def touch(self, eventType=adbclient.DOWN_AND_UP, deltaX=0, deltaY=0):
        '''
        Touches the center of this C{View}. The touch can be displaced from the center by
        using C{deltaX} and C{deltaY} values.

        @param eventType: The event type
        @type eventType: L{adbclient.DOWN}, L{adbclient.UP} or L{adbclient.DOWN_AND_UP}
        @param deltaX: Displacement from center (X axis)
        @type deltaX: int
        @param deltaY: Displacement from center (Y axis)
        @type deltaY: int
        '''

        (x, y) = self.getCenter()
        if deltaX:
            x += deltaX
        if deltaY:
            y += deltaY
        if DEBUG_TOUCH:
            print >>sys.stderr, "should touch @ (%d, %d)" % (x, y)
        if VIEW_CLIENT_TOUCH_WORKAROUND_ENABLED and eventType == adbclient.DOWN_AND_UP:
            if WARNINGS:
                print >> sys.stderr, "ViewClient: touch workaround enabled"
            self.device.touch(x, y, eventType=adbclient.DOWN)
            time.sleep(50/1000.0)
            self.device.touch(x+10, y+10, eventType=adbclient.UP)
        else:
            if self.uiAutomatorHelper:
                selector = self.obtainSelectorForView()
                if selector:
                    try:
                        oid = self.uiAutomatorHelper.findObject(bySelector=selector)
                        if DEBUG_UI_AUTOMATOR_HELPER:
                            print >> sys.stderr, "oid=", oid
                            print >> sys.stderr, "ignoring click delta to click View as UiObject"
                        oid.click();
                    except RuntimeError as e:
                        print >> sys.stderr, e.message
                        print >> sys.stderr, "UiObject click failed, using co-ordinates"
                        self.uiAutomatorHelper.click(x=x, y=y)
                else:
                    # FIXME:
                    # The View has no CD, TEXT or ID so we cannot use it in a selector to findObject()
                    # We should try content description, text, and perhaps other properties before surrendering.
                    # For now, let's fall back to click(x, y)
                    self.uiAutomatorHelper.click(x=x, y=y)
            else:
                self.device.touch(x, y, eventType=eventType)
Touches the center of this C{View}. The touch can be displaced from the center by using C{deltaX} and C{deltaY} values. @param eventType: The event type @type eventType: L{adbclient.DOWN}, L{adbclient.UP} or L{adbclient.DOWN_AND_UP} @param deltaX: Displacement from center (X axis) @type deltaX: int @param deltaY: Displacement from center (Y axis) @type deltaY: int
Below is the the instruction that describes the task: ### Input: Touches the center of this C{View}. The touch can be displaced from the center by using C{deltaX} and C{deltaY} values. @param eventType: The event type @type eventType: L{adbclient.DOWN}, L{adbclient.UP} or L{adbclient.DOWN_AND_UP} @param deltaX: Displacement from center (X axis) @type deltaX: int @param deltaY: Displacement from center (Y axis) @type deltaY: int ### Response: def touch(self, eventType=adbclient.DOWN_AND_UP, deltaX=0, deltaY=0):
        '''
        Touches the center of this C{View}. The touch can be displaced from the center by
        using C{deltaX} and C{deltaY} values.

        @param eventType: The event type
        @type eventType: L{adbclient.DOWN}, L{adbclient.UP} or L{adbclient.DOWN_AND_UP}
        @param deltaX: Displacement from center (X axis)
        @type deltaX: int
        @param deltaY: Displacement from center (Y axis)
        @type deltaY: int
        '''

        (x, y) = self.getCenter()
        if deltaX:
            x += deltaX
        if deltaY:
            y += deltaY
        if DEBUG_TOUCH:
            print >>sys.stderr, "should touch @ (%d, %d)" % (x, y)
        if VIEW_CLIENT_TOUCH_WORKAROUND_ENABLED and eventType == adbclient.DOWN_AND_UP:
            if WARNINGS:
                print >> sys.stderr, "ViewClient: touch workaround enabled"
            self.device.touch(x, y, eventType=adbclient.DOWN)
            time.sleep(50/1000.0)
            self.device.touch(x+10, y+10, eventType=adbclient.UP)
        else:
            if self.uiAutomatorHelper:
                selector = self.obtainSelectorForView()
                if selector:
                    try:
                        oid = self.uiAutomatorHelper.findObject(bySelector=selector)
                        if DEBUG_UI_AUTOMATOR_HELPER:
                            print >> sys.stderr, "oid=", oid
                            print >> sys.stderr, "ignoring click delta to click View as UiObject"
                        oid.click();
                    except RuntimeError as e:
                        print >> sys.stderr, e.message
                        print >> sys.stderr, "UiObject click failed, using co-ordinates"
                        self.uiAutomatorHelper.click(x=x, y=y)
                else:
                    # FIXME:
                    # The View has no CD, TEXT or ID so we cannot use it in a selector to findObject()
                    # We should try content description, text, and perhaps other properties before surrendering.
                    # For now, let's fall back to click(x, y)
                    self.uiAutomatorHelper.click(x=x, y=y)
            else:
                self.device.touch(x, y, eventType=eventType)
def _get_object_key(self, p_object): """Get key from object""" matched_key = None matched_index = None if hasattr(p_object, self._searchNames[0]): return getattr(p_object, self._searchNames[0]) for x in xrange(len(self._searchNames)): key = self._searchNames[x] if hasattr(p_object, key): matched_key = key matched_index = x if matched_key is None: raise KeyError() if matched_index != 0 and self._searchOptimize: self._searchNames.insert(0, self._searchNames.pop(matched_index)) return getattr(p_object, matched_key)
Get key from object
Below is the the instruction that describes the task: ### Input: Get key from object ### Response: def _get_object_key(self, p_object): """Get key from object""" matched_key = None matched_index = None if hasattr(p_object, self._searchNames[0]): return getattr(p_object, self._searchNames[0]) for x in xrange(len(self._searchNames)): key = self._searchNames[x] if hasattr(p_object, key): matched_key = key matched_index = x if matched_key is None: raise KeyError() if matched_index != 0 and self._searchOptimize: self._searchNames.insert(0, self._searchNames.pop(matched_index)) return getattr(p_object, matched_key)
def get_python(cls): """ returns the python and pip version :return: python version, pip version """ python_version = sys.version_info[:3] v_string = [str(i) for i in python_version] python_version_s = '.'.join(v_string) # pip_version = pip.__version__ pip_version = Shell.pip("--version").split()[1] return python_version_s, pip_version
returns the python and pip version :return: python version, pip version
Below is the the instruction that describes the task: ### Input: returns the python and pip version :return: python version, pip version ### Response: def get_python(cls): """ returns the python and pip version :return: python version, pip version """ python_version = sys.version_info[:3] v_string = [str(i) for i in python_version] python_version_s = '.'.join(v_string) # pip_version = pip.__version__ pip_version = Shell.pip("--version").split()[1] return python_version_s, pip_version
def send(self, data): """Send `data' to the server. ``data`` can be a string object, a bytes object, an array object, a file-like object that supports a .read() method, or an iterable object. """ if self.sock is None: if self.auto_open: self.connect() else: raise NotConnected() if self.debuglevel > 0: print("send:", repr(data)) blocksize = 8192 # Python 2.7 array objects have a read method which is incompatible # with the 2-arg calling syntax below. if hasattr(data, "read") and not isinstance(data, array): if self.debuglevel > 0: print("sendIng a read()able") encode = False try: mode = data.mode except AttributeError: # io.BytesIO and other file-like objects don't have a `mode` # attribute. pass else: if "b" not in mode: encode = True if self.debuglevel > 0: print("encoding file using iso-8859-1") while 1: datablock = data.read(blocksize) if not datablock: break if encode: datablock = datablock.encode("iso-8859-1") self.sock.sendall(datablock) return try: self.sock.sendall(data) except TypeError: if isinstance(data, collections.Iterable): for d in data: self.sock.sendall(d) else: raise TypeError("data should be a bytes-like object " "or an iterable, got %r" % type(data))
Send `data' to the server. ``data`` can be a string object, a bytes object, an array object, a file-like object that supports a .read() method, or an iterable object.
Below is the the instruction that describes the task: ### Input: Send `data' to the server. ``data`` can be a string object, a bytes object, an array object, a file-like object that supports a .read() method, or an iterable object. ### Response: def send(self, data): """Send `data' to the server. ``data`` can be a string object, a bytes object, an array object, a file-like object that supports a .read() method, or an iterable object. """ if self.sock is None: if self.auto_open: self.connect() else: raise NotConnected() if self.debuglevel > 0: print("send:", repr(data)) blocksize = 8192 # Python 2.7 array objects have a read method which is incompatible # with the 2-arg calling syntax below. if hasattr(data, "read") and not isinstance(data, array): if self.debuglevel > 0: print("sendIng a read()able") encode = False try: mode = data.mode except AttributeError: # io.BytesIO and other file-like objects don't have a `mode` # attribute. pass else: if "b" not in mode: encode = True if self.debuglevel > 0: print("encoding file using iso-8859-1") while 1: datablock = data.read(blocksize) if not datablock: break if encode: datablock = datablock.encode("iso-8859-1") self.sock.sendall(datablock) return try: self.sock.sendall(data) except TypeError: if isinstance(data, collections.Iterable): for d in data: self.sock.sendall(d) else: raise TypeError("data should be a bytes-like object " "or an iterable, got %r" % type(data))
def connect_timeout(self): """ Get the value to use when setting a connection timeout. This will be a positive float or integer, the value None (never timeout), or the default system timeout. :return: Connect timeout. :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None """ if self.total is None: return self._connect if self._connect is None or self._connect is self.DEFAULT_TIMEOUT: return self.total return min(self._connect, self.total)
Get the value to use when setting a connection timeout. This will be a positive float or integer, the value None (never timeout), or the default system timeout. :return: Connect timeout. :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
Below is the the instruction that describes the task: ### Input: Get the value to use when setting a connection timeout. This will be a positive float or integer, the value None (never timeout), or the default system timeout. :return: Connect timeout. :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None ### Response: def connect_timeout(self): """ Get the value to use when setting a connection timeout. This will be a positive float or integer, the value None (never timeout), or the default system timeout. :return: Connect timeout. :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None """ if self.total is None: return self._connect if self._connect is None or self._connect is self.DEFAULT_TIMEOUT: return self.total return min(self._connect, self.total)
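The property above clamps the connect phase by the total budget: an explicit connect value wins only when it is smaller. This matches urllib3's Timeout, which the entry resembles (possibly a vendored copy); a quick check of the three cases against that library:

from urllib3.util.timeout import Timeout

print(Timeout(total=5).connect_timeout)             # 5 -- connect unset, falls back to total
print(Timeout(connect=2, total=5).connect_timeout)  # 2 -- min(connect, total)
print(Timeout(connect=9, total=5).connect_timeout)  # 5 -- total caps connect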
def _flatten_location_translations(location_translations): """If location A translates to B, and B to C, then make A translate directly to C. Args: location_translations: dict of Location -> Location, where the key translates to the value. Mutated in place for efficiency and simplicity of implementation. """ sources_to_process = set(six.iterkeys(location_translations)) def _update_translation(source): """Return the proper (fully-flattened) translation for the given location.""" destination = location_translations[source] if destination not in location_translations: # "destination" cannot be translated, no further flattening required. return destination else: # "destination" can itself be translated -- do so, # and then flatten "source" to the final translation as well. sources_to_process.discard(destination) final_destination = _update_translation(destination) location_translations[source] = final_destination return final_destination while sources_to_process: _update_translation(sources_to_process.pop())
If location A translates to B, and B to C, then make A translate directly to C. Args: location_translations: dict of Location -> Location, where the key translates to the value. Mutated in place for efficiency and simplicity of implementation.
Below is the the instruction that describes the task: ### Input: If location A translates to B, and B to C, then make A translate directly to C. Args: location_translations: dict of Location -> Location, where the key translates to the value. Mutated in place for efficiency and simplicity of implementation. ### Response: def _flatten_location_translations(location_translations): """If location A translates to B, and B to C, then make A translate directly to C. Args: location_translations: dict of Location -> Location, where the key translates to the value. Mutated in place for efficiency and simplicity of implementation. """ sources_to_process = set(six.iterkeys(location_translations)) def _update_translation(source): """Return the proper (fully-flattened) translation for the given location.""" destination = location_translations[source] if destination not in location_translations: # "destination" cannot be translated, no further flattening required. return destination else: # "destination" can itself be translated -- do so, # and then flatten "source" to the final translation as well. sources_to_process.discard(destination) final_destination = _update_translation(destination) location_translations[source] = final_destination return final_destination while sources_to_process: _update_translation(sources_to_process.pop())
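A worked example for the entry above, using plain strings in place of Location objects (the function only needs hashable dict keys, so strings work):

translations = {'A': 'B', 'B': 'C', 'C': 'D'}
_flatten_location_translations(translations)
print(translations)  # {'A': 'D', 'B': 'D', 'C': 'D'}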
def spawn(self, monitor, kind=None, **params): '''Spawn a new :class:`Actor` and return its :class:`.ActorProxyMonitor`. ''' proxy = _spawn_actor(kind, monitor, **params) # Add to the list of managed actors if this is a remote actor if isinstance(proxy, Actor): self._register(proxy) return proxy else: proxy.monitor = monitor self.managed_actors[proxy.aid] = proxy future = actor_proxy_future(proxy) proxy.start() return future
Spawn a new :class:`Actor` and return its :class:`.ActorProxyMonitor`.
Below is the the instruction that describes the task: ### Input: Spawn a new :class:`Actor` and return its :class:`.ActorProxyMonitor`. ### Response: def spawn(self, monitor, kind=None, **params): '''Spawn a new :class:`Actor` and return its :class:`.ActorProxyMonitor`. ''' proxy = _spawn_actor(kind, monitor, **params) # Add to the list of managed actors if this is a remote actor if isinstance(proxy, Actor): self._register(proxy) return proxy else: proxy.monitor = monitor self.managed_actors[proxy.aid] = proxy future = actor_proxy_future(proxy) proxy.start() return future
def Run(self, args): """Lists a directory.""" try: directory = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress) except (IOError, OSError) as e: self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.IOERROR, e) return files = list(directory.ListFiles()) files.sort(key=lambda x: x.pathspec.path) for response in files: self.SendReply(response)
Lists a directory.
Below is the the instruction that describes the task: ### Input: Lists a directory. ### Response: def Run(self, args): """Lists a directory.""" try: directory = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress) except (IOError, OSError) as e: self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.IOERROR, e) return files = list(directory.ListFiles()) files.sort(key=lambda x: x.pathspec.path) for response in files: self.SendReply(response)
def ndvi(self): """ Normalized difference vegetation index. :return: NDVI """ red, nir = self.reflectance(3), self.reflectance(4) ndvi = self._divide_zero((nir - red), (nir + red), nan) return ndvi
Normalized difference vegetation index. :return: NDVI
Below is the the instruction that describes the task: ### Input: Normalized difference vegetation index. :return: NDVI ### Response: def ndvi(self): """ Normalized difference vegetation index. :return: NDVI """ red, nir = self.reflectance(3), self.reflectance(4) ndvi = self._divide_zero((nir - red), (nir + red), nan) return ndvi
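For reference, the quantity computed above is NDVI = (NIR - Red) / (NIR + Red), with zero-denominator pixels mapped to NaN by the _divide_zero helper. A self-contained numeric sketch of the same arithmetic:

import numpy as np

red = np.array([0.10, 0.05, 0.0])
nir = np.array([0.50, 0.45, 0.0])
with np.errstate(invalid='ignore'):
    ndvi = np.where(nir + red == 0, np.nan, (nir - red) / (nir + red))
print(ndvi)  # [0.6667 0.8 nan] -- values near 1 indicate dense vegetation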
def set_sub_stream_format(self, format, callback=None):
        '''
        Set the stream format of the sub stream.
        '''
        params = {'format': format}
        return self.execute_command('setSubStreamFormat', params, callback=callback)
Set the stream format of the sub stream.
Below is the the instruction that describes the task: ### Input: Set the stream format of the sub stream. ### Response: def set_sub_stream_format(self, format, callback=None):
        '''
        Set the stream format of the sub stream.
        '''
        params = {'format': format}
        return self.execute_command('setSubStreamFormat', params, callback=callback)
def set_generation_type(self, num_processors=-1, num_splits=1000, verbose=-1):
        """Change generation type.

        Choose whether to generate the data in parallel or on a single processor.

        Args:
            num_processors (int or None, optional): Number of parallel
                processors to use. If ``num_processors==-1``, this will use
                multiprocessing module and use available cpus. If single
                generation is desired, num_processors is set to ``None``.
                Default is -1.
            num_splits (int, optional): Number of binaries to run during each process.
                Default is 1000.
            verbose (int, optional): Describes the notification of when parallel
                processes are finished. Value describes cadence of process completion
                notifications. If ``verbose == -1``, no notifications are given.
                Default is -1.

        """
        self.parallel_input.num_processors = num_processors
        self.parallel_input.num_splits = num_splits
        self.parallel_input.verbose = verbose
        return
Change generation type. Choose whether to generate the data in parallel or on a single processor. Args: num_processors (int or None, optional): Number of parallel processors to use. If ``num_processors==-1``, this will use multiprocessing module and use available cpus. If single generation is desired, num_processors is set to ``None``. Default is -1. num_splits (int, optional): Number of binaries to run during each process. Default is 1000. verbose (int, optional): Describes the notification of when parallel processes are finished. Value describes cadence of process completion notifications. If ``verbose == -1``, no notifications are given. Default is -1.
Below is the the instruction that describes the task: ### Input: Change generation type. Choose whether to generate the data in parallel or on a single processor. Args: num_processors (int or None, optional): Number of parallel processors to use. If ``num_processors==-1``, this will use multiprocessing module and use available cpus. If single generation is desired, num_processors is set to ``None``. Default is -1. num_splits (int, optional): Number of binaries to run during each process. Default is 1000. verbose (int, optional): Describes the notification of when parallel processes are finished. Value describes cadence of process completion notifications. If ``verbose == -1``, no notifications are given. Default is -1. ### Response: def set_generation_type(self, num_processors=-1, num_splits=1000, verbose=-1):
        """Change generation type.

        Choose whether to generate the data in parallel or on a single processor.

        Args:
            num_processors (int or None, optional): Number of parallel
                processors to use. If ``num_processors==-1``, this will use
                multiprocessing module and use available cpus. If single
                generation is desired, num_processors is set to ``None``.
                Default is -1.
            num_splits (int, optional): Number of binaries to run during each process.
                Default is 1000.
            verbose (int, optional): Describes the notification of when parallel
                processes are finished. Value describes cadence of process completion
                notifications. If ``verbose == -1``, no notifications are given.
                Default is -1.

        """
        self.parallel_input.num_processors = num_processors
        self.parallel_input.num_splits = num_splits
        self.parallel_input.verbose = verbose
        return
def OMSymbol(self, module, name): r""" Helper function to build an OMS object EXAMPLES:: >>> from openmath.convert_pickle import PickleConverter >>> converter = PickleConverter() >>> o = converter.OMSymbol(module="foo.bar", name="baz"); o OMSymbol(name='baz', cd='foo.bar', id=None, cdbase='http://python.org/') """ return om.OMSymbol(cdbase=self._cdbase, cd=module, name=name)
r""" Helper function to build an OMS object EXAMPLES:: >>> from openmath.convert_pickle import PickleConverter >>> converter = PickleConverter() >>> o = converter.OMSymbol(module="foo.bar", name="baz"); o OMSymbol(name='baz', cd='foo.bar', id=None, cdbase='http://python.org/')
Below is the the instruction that describes the task: ### Input: r""" Helper function to build an OMS object EXAMPLES:: >>> from openmath.convert_pickle import PickleConverter >>> converter = PickleConverter() >>> o = converter.OMSymbol(module="foo.bar", name="baz"); o OMSymbol(name='baz', cd='foo.bar', id=None, cdbase='http://python.org/') ### Response: def OMSymbol(self, module, name): r""" Helper function to build an OMS object EXAMPLES:: >>> from openmath.convert_pickle import PickleConverter >>> converter = PickleConverter() >>> o = converter.OMSymbol(module="foo.bar", name="baz"); o OMSymbol(name='baz', cd='foo.bar', id=None, cdbase='http://python.org/') """ return om.OMSymbol(cdbase=self._cdbase, cd=module, name=name)
def recordbatch(self, auth, resource, entries, defer=False):
        """ Records a list of historical entries to the resource specified.

            Calls a function that builds a request that writes a list of
            historical entries to the specified resource.

            Args:
                auth: Takes the device cik
                resource: Takes the dataport alias or rid.
                entries: A list of entries to write to the resource.
        """
        return self._call('recordbatch', auth, [resource, entries], defer)
Records a list of historical entries to the resource specified. Calls a function that builds a request that writes a list of historical entries to the specified resource. Args: auth: Takes the device cik resource: Takes the dataport alias or rid. entries: A list of entries to write to the resource.
Below is the the instruction that describes the task: ### Input: Records a list of historical entries to the resource specified. Calls a function that builds a request that writes a list of historical entries to the specified resource. Args: auth: Takes the device cik resource: Takes the dataport alias or rid. entries: A list of entries to write to the resource. ### Response: def recordbatch(self, auth, resource, entries, defer=False):
        """ Records a list of historical entries to the resource specified.

            Calls a function that builds a request that writes a list of
            historical entries to the specified resource.

            Args:
                auth: Takes the device cik
                resource: Takes the dataport alias or rid.
                entries: A list of entries to write to the resource.
        """
        return self._call('recordbatch', auth, [resource, entries], defer)
def format(self, record: logging.LogRecord) -> str:
        """
        Formats a record and serializes it as a JSON str. If record message
        isn't already a dict, initializes a new dict and uses
        `default_msg_fieldname` as a key and the record msg as the value.
        """
        msg: Union[str, dict] = record.msg
        if not isinstance(record.msg, dict):
            msg = {self.default_msg_fieldname: msg}
        if record.exc_info:  # type: ignore
            msg["exc_info"] = record.exc_info
        if record.exc_text:  # type: ignore
            msg["exc_text"] = record.exc_text  # type: ignore
        return self.serializer(msg, default=self._default_handler)
Formats a record and serializes it as a JSON str. If record message isn't already a dict, initializes a new dict and uses `default_msg_fieldname` as a key and the record msg as the value.
Below is the the instruction that describes the task: ### Input: Formats a record and serializes it as a JSON str. If record message isn't already a dict, initializes a new dict and uses `default_msg_fieldname` as a key and the record msg as the value. ### Response: def format(self, record: logging.LogRecord) -> str:
        """
        Formats a record and serializes it as a JSON str. If record message
        isn't already a dict, initializes a new dict and uses
        `default_msg_fieldname` as a key and the record msg as the value.
        """
        msg: Union[str, dict] = record.msg
        if not isinstance(record.msg, dict):
            msg = {self.default_msg_fieldname: msg}
        if record.exc_info:  # type: ignore
            msg["exc_info"] = record.exc_info
        if record.exc_text:  # type: ignore
            msg["exc_text"] = record.exc_text  # type: ignore
        return self.serializer(msg, default=self._default_handler)
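Wiring the formatter above into stdlib logging, assuming the class is named JSONFormatter and its serializer defaults to json.dumps; both are assumptions, since the entry does not show the constructor:

import logging

handler = logging.StreamHandler()
handler.setFormatter(JSONFormatter())  # assumed class name
log = logging.getLogger('demo')
log.addHandler(handler)
log.warning('disk low')
# -> {"message": "disk low"}  (if default_msg_fieldname is 'message')
log.warning({'event': 'disk', 'free_mb': 120})  # dict messages pass through as-is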
def _get_token_create_url(config): ''' Create Vault url for token creation ''' role_name = config.get('role_name', None) auth_path = '/v1/auth/token/create' base_url = config['url'] return '/'.join(x.strip('/') for x in (base_url, auth_path, role_name) if x)
Create Vault url for token creation
Below is the the instruction that describes the task: ### Input: Create Vault url for token creation ### Response: def _get_token_create_url(config): ''' Create Vault url for token creation ''' role_name = config.get('role_name', None) auth_path = '/v1/auth/token/create' base_url = config['url'] return '/'.join(x.strip('/') for x in (base_url, auth_path, role_name) if x)
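The join-over-stripped-parts idiom above both normalizes stray slashes and silently drops the role segment when role_name is None. A standalone sketch of just that expression:

def build_url(base_url, role_name=None):
    auth_path = '/v1/auth/token/create'
    return '/'.join(x.strip('/') for x in (base_url, auth_path, role_name) if x)

print(build_url('https://vault.local:8200/'))
# https://vault.local:8200/v1/auth/token/create
print(build_url('https://vault.local:8200', 'nomad'))
# https://vault.local:8200/v1/auth/token/create/nomad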
def main(): """ Build a standalone dql executable """ venv_dir = tempfile.mkdtemp() try: make_virtualenv(venv_dir) print("Downloading dependencies") pip = os.path.join(venv_dir, "bin", "pip") subprocess.check_call([pip, "install", "pex"]) print("Building executable") pex = os.path.join(venv_dir, "bin", "pex") subprocess.check_call([pex, "dql", "-m", "dql:main", "-o", "dql"]) print("dql executable written to %s" % os.path.abspath("dql")) finally: shutil.rmtree(venv_dir)
Build a standalone dql executable
Below is the the instruction that describes the task: ### Input: Build a standalone dql executable ### Response: def main(): """ Build a standalone dql executable """ venv_dir = tempfile.mkdtemp() try: make_virtualenv(venv_dir) print("Downloading dependencies") pip = os.path.join(venv_dir, "bin", "pip") subprocess.check_call([pip, "install", "pex"]) print("Building executable") pex = os.path.join(venv_dir, "bin", "pex") subprocess.check_call([pex, "dql", "-m", "dql:main", "-o", "dql"]) print("dql executable written to %s" % os.path.abspath("dql")) finally: shutil.rmtree(venv_dir)
def clean(): """ Clean documentation generated files. """ shutil.rmtree(BUILD_PATH, ignore_errors=True) shutil.rmtree(os.path.join(SOURCE_PATH, 'reference', 'api'), ignore_errors=True)
Clean documentation generated files.
Below is the the instruction that describes the task: ### Input: Clean documentation generated files. ### Response: def clean(): """ Clean documentation generated files. """ shutil.rmtree(BUILD_PATH, ignore_errors=True) shutil.rmtree(os.path.join(SOURCE_PATH, 'reference', 'api'), ignore_errors=True)
def run(self, result): """Run tests in suite inside of suite fixtures. """ # proxy the result for myself log.debug("suite %s (%s) run called, tests: %s", id(self), self, self._tests) #import pdb #pdb.set_trace() if self.resultProxy: result, orig = self.resultProxy(result, self), result else: result, orig = result, result try: self.setUp() except KeyboardInterrupt: raise except: self.error_context = 'setup' result.addError(self, self._exc_info()) return try: for test in self._tests: if result.shouldStop: log.debug("stopping") break # each nose.case.Test will create its own result proxy # so the cases need the original result, to avoid proxy # chains test(orig) finally: self.has_run = True try: self.tearDown() except KeyboardInterrupt: raise except: self.error_context = 'teardown' result.addError(self, self._exc_info())
Run tests in suite inside of suite fixtures.
Below is the the instruction that describes the task: ### Input: Run tests in suite inside of suite fixtures. ### Response: def run(self, result): """Run tests in suite inside of suite fixtures. """ # proxy the result for myself log.debug("suite %s (%s) run called, tests: %s", id(self), self, self._tests) #import pdb #pdb.set_trace() if self.resultProxy: result, orig = self.resultProxy(result, self), result else: result, orig = result, result try: self.setUp() except KeyboardInterrupt: raise except: self.error_context = 'setup' result.addError(self, self._exc_info()) return try: for test in self._tests: if result.shouldStop: log.debug("stopping") break # each nose.case.Test will create its own result proxy # so the cases need the original result, to avoid proxy # chains test(orig) finally: self.has_run = True try: self.tearDown() except KeyboardInterrupt: raise except: self.error_context = 'teardown' result.addError(self, self._exc_info())
def configure_app(config_path=None, project=None, default_config_path=None,
                  default_settings=None, settings_initializer=None,
                  settings_envvar=None, initializer=None, allow_extras=True,
                  config_module_name=None, runner_name=None, on_configure=None):
    """
    :param project: should represent the canonical name for the project, generally
        the same name it assigned in distutils.
    :param default_config_path: the default location for the configuration file.
    :param default_settings: default settings to load (think inheritance).
    :param settings_initializer: a callback function which should return a string
        representing the default settings template to generate.
    :param initializer: a callback function which will be executed before the command
        is executed. It is passed a dictionary of various configuration attributes.
    """
    global __configured

    project_filename = sanitize_name(project)

    if default_config_path is None:
        default_config_path = '~/%s/%s.conf.py' % (project_filename, project_filename)

    if settings_envvar is None:
        settings_envvar = project_filename.upper() + '_CONF'

    if config_module_name is None:
        config_module_name = project_filename + '_config'

    # normalize path
    if settings_envvar in os.environ:
        default_config_path = os.environ.get(settings_envvar)
    else:
        default_config_path = os.path.normpath(os.path.abspath(os.path.expanduser(default_config_path)))

    if not config_path:
        config_path = default_config_path

    config_path = os.path.expanduser(config_path)

    if not os.path.exists(config_path):
        if runner_name:
            raise ValueError("Configuration file does not exist. Use '%s init' to initialize the file." % (runner_name,))
        raise ValueError("Configuration file does not exist at %r" % (config_path,))

    os.environ['DJANGO_SETTINGS_MODULE'] = config_module_name

    def settings_callback(settings):
        if initializer is None:
            return

        try:
            initializer({
                'project': project,
                'config_path': config_path,
                'settings': settings,
            })
        except Exception:
            # XXX: Django doesn't like various errors in this path
            import sys
            import traceback
            traceback.print_exc()
            sys.exit(1)

    importer.install(
        config_module_name, config_path, default_settings,
        allow_extras=allow_extras, callback=settings_callback)

    __configured = True

    # HACK(dcramer): we need to force access of django.conf.settings to
    # ensure we don't hit any import-driven recursive behavior
    from django.conf import settings
    hasattr(settings, 'INSTALLED_APPS')

    if on_configure:
        on_configure({
            'project': project,
            'config_path': config_path,
            'settings': settings,
        })
:param project: should represent the canonical name for the project, generally the same name it assigned in distutils. :param default_config_path: the default location for the configuration file. :param default_settings: default settings to load (think inheritance). :param settings_initializer: a callback function which should return a string representing the default settings template to generate. :param initializer: a callback function which will be executed before the command is executed. It is passed a dictionary of various configuration attributes.
Below is the the instruction that describes the task: ### Input: :param project: should represent the canonical name for the project, generally the same name it assigned in distutils. :param default_config_path: the default location for the configuration file. :param default_settings: default settings to load (think inheritance). :param settings_initializer: a callback function which should return a string representing the default settings template to generate. :param initializer: a callback function which will be executed before the command is executed. It is passed a dictionary of various configuration attributes. ### Response: def configure_app(config_path=None, project=None, default_config_path=None,
                  default_settings=None, settings_initializer=None,
                  settings_envvar=None, initializer=None, allow_extras=True,
                  config_module_name=None, runner_name=None, on_configure=None):
    """
    :param project: should represent the canonical name for the project, generally
        the same name it assigned in distutils.
    :param default_config_path: the default location for the configuration file.
    :param default_settings: default settings to load (think inheritance).
    :param settings_initializer: a callback function which should return a string
        representing the default settings template to generate.
    :param initializer: a callback function which will be executed before the command
        is executed. It is passed a dictionary of various configuration attributes.
    """
    global __configured

    project_filename = sanitize_name(project)

    if default_config_path is None:
        default_config_path = '~/%s/%s.conf.py' % (project_filename, project_filename)

    if settings_envvar is None:
        settings_envvar = project_filename.upper() + '_CONF'

    if config_module_name is None:
        config_module_name = project_filename + '_config'

    # normalize path
    if settings_envvar in os.environ:
        default_config_path = os.environ.get(settings_envvar)
    else:
        default_config_path = os.path.normpath(os.path.abspath(os.path.expanduser(default_config_path)))

    if not config_path:
        config_path = default_config_path

    config_path = os.path.expanduser(config_path)

    if not os.path.exists(config_path):
        if runner_name:
            raise ValueError("Configuration file does not exist. Use '%s init' to initialize the file." % (runner_name,))
        raise ValueError("Configuration file does not exist at %r" % (config_path,))

    os.environ['DJANGO_SETTINGS_MODULE'] = config_module_name

    def settings_callback(settings):
        if initializer is None:
            return

        try:
            initializer({
                'project': project,
                'config_path': config_path,
                'settings': settings,
            })
        except Exception:
            # XXX: Django doesn't like various errors in this path
            import sys
            import traceback
            traceback.print_exc()
            sys.exit(1)

    importer.install(
        config_module_name, config_path, default_settings,
        allow_extras=allow_extras, callback=settings_callback)

    __configured = True

    # HACK(dcramer): we need to force access of django.conf.settings to
    # ensure we don't hit any import-driven recursive behavior
    from django.conf import settings
    hasattr(settings, 'INSTALLED_APPS')

    if on_configure:
        on_configure({
            'project': project,
            'config_path': config_path,
            'settings': settings,
        })
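A hedged sketch of how a project runner might invoke the function above; every name here is illustrative rather than taken from the source:

configure_app(
    project='myapp',
    default_config_path='~/.myapp/myapp.conf.py',
    default_settings='myapp.conf.defaults',
    runner_name='myapp',
)
# django.conf.settings is now populated from ~/.myapp/myapp.conf.py,
# or from the file named in $MYAPP_CONF if that variable is set.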
def as_search_action(self, *, index, action):
    """
    Return an object as represented in a bulk API operation.

    Bulk API operations have a very specific format. This function will call the standard `as_search_document` method on the object and then wrap that up in the correct format for the action specified.

    https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html

    Args:
        index: string, the name of the index in which the action is to be taken. Bulk operations are only ever carried out on a single index at a time.
        action: string ['index' | 'update' | 'delete'] - this decides how the final document is formatted.

    Returns a dictionary.

    """
    if action not in ("index", "update", "delete"):
        raise ValueError("Action must be 'index', 'update' or 'delete'.")

    document = {
        "_index": index,
        "_type": self.search_doc_type,
        "_op_type": action,
        "_id": self.pk,
    }

    if action == "index":
        document["_source"] = self.as_search_document(index=index)
    elif action == "update":
        document["doc"] = self.as_search_document(index=index)
    return document
Return an object as represented in a bulk API operation. Bulk API operations have a very specific format. This function will call the standard `as_search_document` method on the object and then wrap that up in the correct format for the action specified. https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html Args: index: string, the name of the index in which the action is to be taken. Bulk operations are only ever carried out on a single index at a time. action: string ['index' | 'update' | 'delete'] - this decides how the final document is formatted. Returns a dictionary.
Below is the instruction that describes the task:
### Input:
Return an object as represented in a bulk API operation. Bulk API operations have a very specific format. This function will call the standard `as_search_document` method on the object and then wrap that up in the correct format for the action specified. https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html Args: index: string, the name of the index in which the action is to be taken. Bulk operations are only ever carried out on a single index at a time. action: string ['index' | 'update' | 'delete'] - this decides how the final document is formatted. Returns a dictionary.
### Response:
def as_search_action(self, *, index, action):
    """
    Return an object as represented in a bulk API operation.

    Bulk API operations have a very specific format. This function will call the standard `as_search_document` method on the object and then wrap that up in the correct format for the action specified.

    https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html

    Args:
        index: string, the name of the index in which the action is to be taken. Bulk operations are only ever carried out on a single index at a time.
        action: string ['index' | 'update' | 'delete'] - this decides how the final document is formatted.

    Returns a dictionary.

    """
    if action not in ("index", "update", "delete"):
        raise ValueError("Action must be 'index', 'update' or 'delete'.")

    document = {
        "_index": index,
        "_type": self.search_doc_type,
        "_op_type": action,
        "_id": self.pk,
    }

    if action == "index":
        document["_source"] = self.as_search_document(index=index)
    elif action == "update":
        document["doc"] = self.as_search_document(index=index)
    return document
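A sketch of how these actions might be consumed. It assumes a configured elasticsearch.Elasticsearch client and an invented Django model named `Article` that mixes in `as_search_action`; neither is part of the original source:

# Sketch only: 'Article' is a made-up model using the mixin above, and
# 'client' must be a configured elasticsearch.Elasticsearch instance.
from elasticsearch.helpers import bulk

def reindex_articles(client, index="articles"):
    actions = (
        article.as_search_action(index=index, action="index")
        for article in Article.objects.iterator()
    )
    # bulk() streams the actions and returns (success_count, errors).
    return bulk(client, actions)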
def get_all_subnets(self, subnet_ids=None, filters=None):
    """
    Retrieve information about your Subnets.  You can filter results to return information only about those Subnets that match your search parameters.  Otherwise, all Subnets associated with your account are returned.

    :type subnet_ids: list
    :param subnet_ids: A list of strings with the desired Subnet IDs

    :type filters: list of tuples
    :param filters: A list of tuples containing filters.  Each tuple consists of a filter key and a filter value. Possible filter keys are:

        - *state*, the state of the Subnet (pending,available)
        - *vpcId*, the ID of the VPC the subnet is in.
        - *cidrBlock*, CIDR block of the subnet
        - *availabilityZone*, the Availability Zone the subnet is in.

    :rtype: list
    :return: A list of :class:`boto.vpc.subnet.Subnet`
    """
    params = {}
    if subnet_ids:
        self.build_list_params(params, subnet_ids, 'SubnetId')
    if filters:
        i = 1
        for filter in filters:
            params[('Filter.%d.Name' % i)] = filter[0]
            params[('Filter.%d.Value.1' % i)] = filter[1]
            i += 1
    return self.get_list('DescribeSubnets', params, [('item', Subnet)])
Retrieve information about your Subnets. You can filter results to return information only about those Subnets that match your search parameters. Otherwise, all Subnets associated with your account are returned. :type subnet_ids: list :param subnet_ids: A list of strings with the desired Subnet IDs :type filters: list of tuples :param filters: A list of tuples containing filters. Each tuple consists of a filter key and a filter value. Possible filter keys are: - *state*, the state of the Subnet (pending,available) - *vpcId*, the ID of the VPC the subnet is in. - *cidrBlock*, CIDR block of the subnet - *availabilityZone*, the Availability Zone the subnet is in. :rtype: list :return: A list of :class:`boto.vpc.subnet.Subnet`
Below is the instruction that describes the task:
### Input:
Retrieve information about your Subnets. You can filter results to return information only about those Subnets that match your search parameters. Otherwise, all Subnets associated with your account are returned. :type subnet_ids: list :param subnet_ids: A list of strings with the desired Subnet IDs :type filters: list of tuples :param filters: A list of tuples containing filters. Each tuple consists of a filter key and a filter value. Possible filter keys are: - *state*, the state of the Subnet (pending,available) - *vpcId*, the ID of the VPC the subnet is in. - *cidrBlock*, CIDR block of the subnet - *availabilityZone*, the Availability Zone the subnet is in. :rtype: list :return: A list of :class:`boto.vpc.subnet.Subnet`
### Response:
def get_all_subnets(self, subnet_ids=None, filters=None):
    """
    Retrieve information about your Subnets.  You can filter results to return information only about those Subnets that match your search parameters.  Otherwise, all Subnets associated with your account are returned.

    :type subnet_ids: list
    :param subnet_ids: A list of strings with the desired Subnet IDs

    :type filters: list of tuples
    :param filters: A list of tuples containing filters.  Each tuple consists of a filter key and a filter value. Possible filter keys are:

        - *state*, the state of the Subnet (pending,available)
        - *vpcId*, the ID of the VPC the subnet is in.
        - *cidrBlock*, CIDR block of the subnet
        - *availabilityZone*, the Availability Zone the subnet is in.

    :rtype: list
    :return: A list of :class:`boto.vpc.subnet.Subnet`
    """
    params = {}
    if subnet_ids:
        self.build_list_params(params, subnet_ids, 'SubnetId')
    if filters:
        i = 1
        for filter in filters:
            params[('Filter.%d.Name' % i)] = filter[0]
            params[('Filter.%d.Value.1' % i)] = filter[1]
            i += 1
    return self.get_list('DescribeSubnets', params, [('item', Subnet)])
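An illustrative call using the filter keys the docstring lists; the region, credentials, and zone below are assumptions, and this relies on boto (v2) being installed and configured:

# Illustrative boto (v2) usage; region and availability zone are made up.
import boto.vpc

conn = boto.vpc.connect_to_region('us-east-1')
subnets = conn.get_all_subnets(
    filters=[('state', 'available'), ('availabilityZone', 'us-east-1a')])
for subnet in subnets:
    print(subnet.id, subnet.cidr_block, subnet.state)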
def get_listing_api(self, resource):
    """
    Generates the meta descriptor for the resource listing api.
    """
    get_all_params = self.get_listing_parameters(resource)
    get_all_api = {
        'path': '/%s/' % resource.get_api_name(),
        'description': 'Operations on %s' % resource.model.__name__,
        'operations': [
            {
                'httpMethod': 'GET',
                'nickname': 'list%ss' % resource.model.__name__,
                'summary': 'Find %ss' % resource.model.__name__,
                'parameters': get_all_params,
            }
        ]
    }
    return get_all_api
Generates the meta descriptor for the resource listing api.
Below is the instruction that describes the task:
### Input:
Generates the meta descriptor for the resource listing api.
### Response:
def get_listing_api(self, resource):
    """
    Generates the meta descriptor for the resource listing api.
    """
    get_all_params = self.get_listing_parameters(resource)
    get_all_api = {
        'path': '/%s/' % resource.get_api_name(),
        'description': 'Operations on %s' % resource.model.__name__,
        'operations': [
            {
                'httpMethod': 'GET',
                'nickname': 'list%ss' % resource.model.__name__,
                'summary': 'Find %ss' % resource.model.__name__,
                'parameters': get_all_params,
            }
        ]
    }
    return get_all_api
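For reference, the returned descriptor for a hypothetical 'Book' resource would look roughly like this; the resource name is invented and the parameter list depends entirely on get_listing_parameters:

# Approximate shape of the descriptor for an invented 'Book' resource:
example_descriptor = {
    'path': '/book/',
    'description': 'Operations on Book',
    'operations': [
        {
            'httpMethod': 'GET',
            'nickname': 'listBooks',
            'summary': 'Find Books',
            'parameters': [],  # whatever get_listing_parameters(resource) returned
        }
    ]
}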
def LeaseFlowForProcessing(self, client_id, flow_id, processing_time, cursor=None): """Marks a flow as being processed on this worker and returns it.""" query = ("SELECT " + self.FLOW_DB_FIELDS + "FROM flows WHERE client_id=%s AND flow_id=%s") cursor.execute( query, [db_utils.ClientIDToInt(client_id), db_utils.FlowIDToInt(flow_id)]) response = cursor.fetchall() if not response: raise db.UnknownFlowError(client_id, flow_id) row, = response rdf_flow = self._FlowObjectFromRow(row) now = rdfvalue.RDFDatetime.Now() if rdf_flow.processing_on and rdf_flow.processing_deadline > now: raise ValueError("Flow %s on client %s is already being processed." % (client_id, flow_id)) if (rdf_flow.parent_hunt_id is not None and # TODO(user): remove the check for a legacy hunt prefix as soon as # AFF4 is gone. not rdf_flow.parent_hunt_id.startswith("H:")): query = "SELECT hunt_state FROM hunts WHERE hunt_id=%s" args = [db_utils.HuntIDToInt(rdf_flow.parent_hunt_id)] rows_found = cursor.execute(query, args) if rows_found == 1: hunt_state, = cursor.fetchone() if (hunt_state is not None and not rdf_hunt_objects.IsHuntSuitableForFlowProcessing(hunt_state)): raise db.ParentHuntIsNotRunningError(client_id, flow_id, rdf_flow.parent_hunt_id, hunt_state) update_query = ("UPDATE flows SET " "processing_on=%s, " "processing_since=FROM_UNIXTIME(%s), " "processing_deadline=FROM_UNIXTIME(%s) " "WHERE client_id=%s and flow_id=%s") processing_deadline = now + processing_time process_id_string = utils.ProcessIdString() args = [ process_id_string, mysql_utils.RDFDatetimeToTimestamp(now), mysql_utils.RDFDatetimeToTimestamp(processing_deadline), db_utils.ClientIDToInt(client_id), db_utils.FlowIDToInt(flow_id) ] cursor.execute(update_query, args) # This needs to happen after we are sure that the write has succeeded. rdf_flow.processing_on = process_id_string rdf_flow.processing_since = now rdf_flow.processing_deadline = processing_deadline return rdf_flow
Marks a flow as being processed on this worker and returns it.
Below is the instruction that describes the task:
### Input:
Marks a flow as being processed on this worker and returns it.
### Response:
def LeaseFlowForProcessing(self, client_id, flow_id, processing_time, cursor=None):
    """Marks a flow as being processed on this worker and returns it."""
    query = ("SELECT " + self.FLOW_DB_FIELDS +
             "FROM flows WHERE client_id=%s AND flow_id=%s")
    cursor.execute(
        query,
        [db_utils.ClientIDToInt(client_id),
         db_utils.FlowIDToInt(flow_id)])
    response = cursor.fetchall()
    if not response:
        raise db.UnknownFlowError(client_id, flow_id)

    row, = response
    rdf_flow = self._FlowObjectFromRow(row)

    now = rdfvalue.RDFDatetime.Now()
    if rdf_flow.processing_on and rdf_flow.processing_deadline > now:
        raise ValueError("Flow %s on client %s is already being processed." %
                         (client_id, flow_id))

    if (rdf_flow.parent_hunt_id is not None and
            # TODO(user): remove the check for a legacy hunt prefix as soon as
            # AFF4 is gone.
            not rdf_flow.parent_hunt_id.startswith("H:")):
        query = "SELECT hunt_state FROM hunts WHERE hunt_id=%s"
        args = [db_utils.HuntIDToInt(rdf_flow.parent_hunt_id)]
        rows_found = cursor.execute(query, args)
        if rows_found == 1:
            hunt_state, = cursor.fetchone()
            if (hunt_state is not None and
                    not rdf_hunt_objects.IsHuntSuitableForFlowProcessing(hunt_state)):
                raise db.ParentHuntIsNotRunningError(client_id, flow_id,
                                                     rdf_flow.parent_hunt_id,
                                                     hunt_state)

    update_query = ("UPDATE flows SET "
                    "processing_on=%s, "
                    "processing_since=FROM_UNIXTIME(%s), "
                    "processing_deadline=FROM_UNIXTIME(%s) "
                    "WHERE client_id=%s and flow_id=%s")
    processing_deadline = now + processing_time
    process_id_string = utils.ProcessIdString()

    args = [
        process_id_string,
        mysql_utils.RDFDatetimeToTimestamp(now),
        mysql_utils.RDFDatetimeToTimestamp(processing_deadline),
        db_utils.ClientIDToInt(client_id),
        db_utils.FlowIDToInt(flow_id)
    ]
    cursor.execute(update_query, args)

    # This needs to happen after we are sure that the write has succeeded.
    rdf_flow.processing_on = process_id_string
    rdf_flow.processing_since = now
    rdf_flow.processing_deadline = processing_deadline
    return rdf_flow
def __get_conn(**kwargs):
    '''
    Detects what type of dom this node is and attempts to connect to the
    correct hypervisor via libvirt.

    :param connection: libvirt connection URI, overriding defaults
    :param username: username to connect with, overriding defaults
    :param password: password to connect with, overriding defaults
    '''
    # This has only been tested on kvm and xen, it needs to be expanded to
    # support all vm layers supported by libvirt
    # Connection string works on bhyve, but auth is not tested.
    username = kwargs.get('username', None)
    password = kwargs.get('password', None)
    conn_str = kwargs.get('connection', None)
    if not conn_str:
        conn_str = __salt__['config.get']('virt.connect', None)
        if conn_str is not None:
            salt.utils.versions.warn_until(
                'Sodium',
                '\'virt.connect\' configuration property has been deprecated in favor '
                'of \'virt:connection:uri\'. \'virt.connect\' will stop being used in '
                '{version}.'
            )
        else:
            conn_str = __salt__['config.get']('libvirt:connection', None)
            if conn_str is not None:
                salt.utils.versions.warn_until(
                    'Sodium',
                    '\'libvirt.connection\' configuration property has been deprecated in favor '
                    'of \'virt:connection:uri\'. \'libvirt.connection\' will stop being used in '
                    '{version}.'
                )
        conn_str = __salt__['config.get']('virt:connection:uri', conn_str)

    hypervisor = __salt__['config.get']('libvirt:hypervisor', None)
    if hypervisor is not None:
        salt.utils.versions.warn_until(
            'Sodium',
            '\'libvirt.hypervisor\' configuration property has been deprecated. '
            'Rather use the \'virt:connection:uri\' to properly define the libvirt '
            'URI or alias of the host to connect to. \'libvirt:hypervisor\' will '
            'stop being used in {version}.'
        )

        if hypervisor == 'esxi' and conn_str is None:
            salt.utils.versions.warn_until(
                'Sodium',
                'esxi hypervisor default with no default connection URI detected, '
                'please set \'virt:connection:uri\' to \'esx\' to keep the legacy '
                'behavior. Will default to libvirt guess once \'libvirt:hypervisor\' '
                'configuration is removed in {version}.'
            )
            conn_str = 'esx'

    try:
        auth_types = [libvirt.VIR_CRED_AUTHNAME,
                      libvirt.VIR_CRED_NOECHOPROMPT,
                      libvirt.VIR_CRED_ECHOPROMPT,
                      libvirt.VIR_CRED_PASSPHRASE,
                      libvirt.VIR_CRED_EXTERNAL]
        conn = libvirt.openAuth(conn_str, [auth_types, __get_request_auth(username, password), None], 0)
    except Exception:
        raise CommandExecutionError(
            'Sorry, {0} failed to open a connection to the hypervisor '
            'software at {1}'.format(
                __grains__['fqdn'],
                conn_str
            )
        )
    return conn
Detects what type of dom this node is and attempts to connect to the correct hypervisor via libvirt. :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults
Below is the instruction that describes the task:
### Input:
Detects what type of dom this node is and attempts to connect to the correct hypervisor via libvirt. :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults
### Response:
def __get_conn(**kwargs):
    '''
    Detects what type of dom this node is and attempts to connect to the
    correct hypervisor via libvirt.

    :param connection: libvirt connection URI, overriding defaults
    :param username: username to connect with, overriding defaults
    :param password: password to connect with, overriding defaults
    '''
    # This has only been tested on kvm and xen, it needs to be expanded to
    # support all vm layers supported by libvirt
    # Connection string works on bhyve, but auth is not tested.
    username = kwargs.get('username', None)
    password = kwargs.get('password', None)
    conn_str = kwargs.get('connection', None)
    if not conn_str:
        conn_str = __salt__['config.get']('virt.connect', None)
        if conn_str is not None:
            salt.utils.versions.warn_until(
                'Sodium',
                '\'virt.connect\' configuration property has been deprecated in favor '
                'of \'virt:connection:uri\'. \'virt.connect\' will stop being used in '
                '{version}.'
            )
        else:
            conn_str = __salt__['config.get']('libvirt:connection', None)
            if conn_str is not None:
                salt.utils.versions.warn_until(
                    'Sodium',
                    '\'libvirt.connection\' configuration property has been deprecated in favor '
                    'of \'virt:connection:uri\'. \'libvirt.connection\' will stop being used in '
                    '{version}.'
                )
        conn_str = __salt__['config.get']('virt:connection:uri', conn_str)

    hypervisor = __salt__['config.get']('libvirt:hypervisor', None)
    if hypervisor is not None:
        salt.utils.versions.warn_until(
            'Sodium',
            '\'libvirt.hypervisor\' configuration property has been deprecated. '
            'Rather use the \'virt:connection:uri\' to properly define the libvirt '
            'URI or alias of the host to connect to. \'libvirt:hypervisor\' will '
            'stop being used in {version}.'
        )

        if hypervisor == 'esxi' and conn_str is None:
            salt.utils.versions.warn_until(
                'Sodium',
                'esxi hypervisor default with no default connection URI detected, '
                'please set \'virt:connection:uri\' to \'esx\' to keep the legacy '
                'behavior. Will default to libvirt guess once \'libvirt:hypervisor\' '
                'configuration is removed in {version}.'
            )
            conn_str = 'esx'

    try:
        auth_types = [libvirt.VIR_CRED_AUTHNAME,
                      libvirt.VIR_CRED_NOECHOPROMPT,
                      libvirt.VIR_CRED_ECHOPROMPT,
                      libvirt.VIR_CRED_PASSPHRASE,
                      libvirt.VIR_CRED_EXTERNAL]
        conn = libvirt.openAuth(conn_str, [auth_types, __get_request_auth(username, password), None], 0)
    except Exception:
        raise CommandExecutionError(
            'Sorry, {0} failed to open a connection to the hypervisor '
            'software at {1}'.format(
                __grains__['fqdn'],
                conn_str
            )
        )
    return conn
def parse_checkM_tables(tables): """ convert checkM genome info tables to dictionary """ g2info = {} for table in tables: for line in open(table): line = line.strip().split('\t') if line[0].startswith('Bin Id'): header = line header[8] = 'genome size (bp)' header[5] = '#SCGs' header[6] = '#SCG duplicates' continue ID, info = line[0], line info = [to_int(i) for i in info] ID = ID.replace(' ', '') g2info[ID] = {item:stat for item, stat in zip(header, info)} if g2info[ID]['genome size (bp)'] == '': g2info[ID]['genome size (bp)'] = 0 return g2info
convert checkM genome info tables to dictionary
Below is the instruction that describes the task:
### Input:
convert checkM genome info tables to dictionary
### Response:
def parse_checkM_tables(tables):
    """
    convert checkM genome info tables to dictionary
    """
    g2info = {}
    for table in tables:
        for line in open(table):
            line = line.strip().split('\t')
            if line[0].startswith('Bin Id'):
                header = line
                header[8] = 'genome size (bp)'
                header[5] = '#SCGs'
                header[6] = '#SCG duplicates'
                continue
            ID, info = line[0], line
            info = [to_int(i) for i in info]
            ID = ID.replace(' ', '')
            g2info[ID] = {item:stat for item, stat in zip(header, info)}
            if g2info[ID]['genome size (bp)'] == '':
                g2info[ID]['genome size (bp)'] = 0
    return g2info
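A short usage sketch; the file name below is a stand-in for a real CheckM output table on disk:

# Hypothetical call: 'checkm_results.tsv' stands in for an actual CheckM table.
g2info = parse_checkM_tables(['checkm_results.tsv'])
for genome, info in g2info.items():
    print(genome, info['genome size (bp)'], info['#SCGs'])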
def insertFile(self, qInserts=False):
    """
    API to insert a list of files into DBS. Up to 10 files can be inserted in one request.

    :param qInserts: True means that inserts will be queued instead of done immediately. INSERT QUEUE Manager will perform the inserts, within few minutes.
    :type qInserts: bool
    :param filesList: List of dictionaries containing the following information
    :type filesList: list of dicts
    :key logical_file_name: File to be inserted (str) (Required)
    :key is_file_valid: (optional, default = 1): (bool)
    :key block: required: /a/b/c#d (str)
    :key dataset: required: /a/b/c (str)
    :key file_type: (optional, default = EDM) one of the predefined types, (str)
    :key check_sum: (optional, default = '-1') (str)
    :key event_count: (optional, default = -1) (int)
    :key file_size: (optional, default = -1.) (float)
    :key adler32: (optional, default = '') (str)
    :key md5: (optional, default = '') (str)
    :key auto_cross_section: (optional, default = -1.) (float)
    :key file_lumi_list: (optional, default = []) [{'run_num': 123, 'lumi_section_num': 12},{}....]
    :key file_parent_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....]
    :key file_assoc_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....]
    :key file_output_config_list: (optional, default = []) [{'app_name':..., 'release_version':..., 'pset_hash':...., 'output_module_label':...},{}.....]
    """
    if qInserts in (False, 'False'):
        qInserts=False
    try:
        body = request.body.read()
        indata = cjson.decode(body)["files"]
        if not isinstance(indata, (list, dict)):
            dbsExceptionHandler("dbsException-invalid-input", "Invalid Input DataType", self.logger.exception, \
                                "insertFile expects input as list or dict")
        businput = []
        if isinstance(indata, dict):
            indata = [indata]
        indata = validateJSONInputNoCopy("files", indata)
        for f in indata:
            f.update({
                 #"dataset":f["dataset"],
                 "creation_date": f.get("creation_date", dbsUtils().getTime()),
                 "create_by" : dbsUtils().getCreateBy(),
                 "last_modification_date": f.get("last_modification_date", dbsUtils().getTime()),
                 "last_modified_by": f.get("last_modified_by", dbsUtils().getCreateBy()),
                 "file_lumi_list":f.get("file_lumi_list", []),
                 "file_parent_list":f.get("file_parent_list", []),
                 "file_assoc_list":f.get("assoc_list", []),
                 "file_output_config_list":f.get("file_output_config_list", [])})
            businput.append(f)
        self.dbsFile.insertFile(businput, qInserts)
    except cjson.DecodeError as dc:
        dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert File input", self.logger.exception, str(dc))
    except dbsException as de:
        dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
    except HTTPError as he:
        raise he
    except Exception as ex:
        sError = "DBSWriterModel/insertFile. %s\n. Exception trace: \n %s" \
            % (ex, traceback.format_exc())
        dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
API to insert a list of files into DBS. Up to 10 files can be inserted in one request. :param qInserts: True means that inserts will be queued instead of done immediately. INSERT QUEUE Manager will perform the inserts, within few minutes. :type qInserts: bool :param filesList: List of dictionaries containing the following information :type filesList: list of dicts :key logical_file_name: File to be inserted (str) (Required) :key is_file_valid: (optional, default = 1): (bool) :key block: required: /a/b/c#d (str) :key dataset: required: /a/b/c (str) :key file_type: (optional, default = EDM) one of the predefined types, (str) :key check_sum: (optional, default = '-1') (str) :key event_count: (optional, default = -1) (int) :key file_size: (optional, default = -1.) (float) :key adler32: (optional, default = '') (str) :key md5: (optional, default = '') (str) :key auto_cross_section: (optional, default = -1.) (float) :key file_lumi_list: (optional, default = []) [{'run_num': 123, 'lumi_section_num': 12},{}....] :key file_parent_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....] :key file_assoc_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....] :key file_output_config_list: (optional, default = []) [{'app_name':..., 'release_version':..., 'pset_hash':...., 'output_module_label':...},{}.....]
Below is the instruction that describes the task:
### Input:
API to insert a list of files into DBS. Up to 10 files can be inserted in one request. :param qInserts: True means that inserts will be queued instead of done immediately. INSERT QUEUE Manager will perform the inserts, within few minutes. :type qInserts: bool :param filesList: List of dictionaries containing the following information :type filesList: list of dicts :key logical_file_name: File to be inserted (str) (Required) :key is_file_valid: (optional, default = 1): (bool) :key block: required: /a/b/c#d (str) :key dataset: required: /a/b/c (str) :key file_type: (optional, default = EDM) one of the predefined types, (str) :key check_sum: (optional, default = '-1') (str) :key event_count: (optional, default = -1) (int) :key file_size: (optional, default = -1.) (float) :key adler32: (optional, default = '') (str) :key md5: (optional, default = '') (str) :key auto_cross_section: (optional, default = -1.) (float) :key file_lumi_list: (optional, default = []) [{'run_num': 123, 'lumi_section_num': 12},{}....] :key file_parent_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....] :key file_assoc_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....] :key file_output_config_list: (optional, default = []) [{'app_name':..., 'release_version':..., 'pset_hash':...., 'output_module_label':...},{}.....]
### Response:
def insertFile(self, qInserts=False):
    """
    API to insert a list of files into DBS. Up to 10 files can be inserted in one request.

    :param qInserts: True means that inserts will be queued instead of done immediately. INSERT QUEUE Manager will perform the inserts, within few minutes.
    :type qInserts: bool
    :param filesList: List of dictionaries containing the following information
    :type filesList: list of dicts
    :key logical_file_name: File to be inserted (str) (Required)
    :key is_file_valid: (optional, default = 1): (bool)
    :key block: required: /a/b/c#d (str)
    :key dataset: required: /a/b/c (str)
    :key file_type: (optional, default = EDM) one of the predefined types, (str)
    :key check_sum: (optional, default = '-1') (str)
    :key event_count: (optional, default = -1) (int)
    :key file_size: (optional, default = -1.) (float)
    :key adler32: (optional, default = '') (str)
    :key md5: (optional, default = '') (str)
    :key auto_cross_section: (optional, default = -1.) (float)
    :key file_lumi_list: (optional, default = []) [{'run_num': 123, 'lumi_section_num': 12},{}....]
    :key file_parent_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....]
    :key file_assoc_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....]
    :key file_output_config_list: (optional, default = []) [{'app_name':..., 'release_version':..., 'pset_hash':...., 'output_module_label':...},{}.....]
    """
    if qInserts in (False, 'False'):
        qInserts=False
    try:
        body = request.body.read()
        indata = cjson.decode(body)["files"]
        if not isinstance(indata, (list, dict)):
            dbsExceptionHandler("dbsException-invalid-input", "Invalid Input DataType", self.logger.exception, \
                                "insertFile expects input as list or dict")
        businput = []
        if isinstance(indata, dict):
            indata = [indata]
        indata = validateJSONInputNoCopy("files", indata)
        for f in indata:
            f.update({
                 #"dataset":f["dataset"],
                 "creation_date": f.get("creation_date", dbsUtils().getTime()),
                 "create_by" : dbsUtils().getCreateBy(),
                 "last_modification_date": f.get("last_modification_date", dbsUtils().getTime()),
                 "last_modified_by": f.get("last_modified_by", dbsUtils().getCreateBy()),
                 "file_lumi_list":f.get("file_lumi_list", []),
                 "file_parent_list":f.get("file_parent_list", []),
                 "file_assoc_list":f.get("assoc_list", []),
                 "file_output_config_list":f.get("file_output_config_list", [])})
            businput.append(f)
        self.dbsFile.insertFile(businput, qInserts)
    except cjson.DecodeError as dc:
        dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert File input", self.logger.exception, str(dc))
    except dbsException as de:
        dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
    except HTTPError as he:
        raise he
    except Exception as ex:
        sError = "DBSWriterModel/insertFile. %s\n. Exception trace: \n %s" \
            % (ex, traceback.format_exc())
        dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
def result(self, result, functionality): """ For the final result of the scan. @param result: as returned by BasePluginInternal.url_scan @param functionality: functionality as returned by BasePluginInternal._general_init """ for enumerate in result: # The host is a special header, we must not attempt to display it. if enumerate == "host" or enumerate == "cms_name": continue result_ind = result[enumerate] finds = result_ind['finds'] is_empty = result_ind['is_empty'] template_str = functionality[enumerate]['template'] template_params = { 'noun': enumerate, 'Noun': enumerate.capitalize(), 'items': finds, 'empty': is_empty, } self.echo(template(template_str, template_params))
For the final result of the scan. @param result: as returned by BasePluginInternal.url_scan @param functionality: functionality as returned by BasePluginInternal._general_init
Below is the instruction that describes the task:
### Input:
For the final result of the scan. @param result: as returned by BasePluginInternal.url_scan @param functionality: functionality as returned by BasePluginInternal._general_init
### Response:
def result(self, result, functionality):
    """
    For the final result of the scan.
    @param result: as returned by BasePluginInternal.url_scan
    @param functionality: functionality as returned by BasePluginInternal._general_init
    """
    for enumerate in result:
        # The host is a special header, we must not attempt to display it.
        if enumerate == "host" or enumerate == "cms_name":
            continue

        result_ind = result[enumerate]
        finds = result_ind['finds']
        is_empty = result_ind['is_empty']

        template_str = functionality[enumerate]['template']
        template_params = {
            'noun': enumerate,
            'Noun': enumerate.capitalize(),
            'items': finds,
            'empty': is_empty,
        }

        self.echo(template(template_str, template_params))
def marvcli_undiscard(datasets): """Undiscard DATASETS previously discarded.""" create_app() setids = parse_setids(datasets, discarded=True) dataset = Dataset.__table__ stmt = dataset.update()\ .where(dataset.c.setid.in_(setids))\ .values(discarded=False) db.session.execute(stmt) db.session.commit()
Undiscard DATASETS previously discarded.
Below is the instruction that describes the task:
### Input:
Undiscard DATASETS previously discarded.
### Response:
def marvcli_undiscard(datasets):
    """Undiscard DATASETS previously discarded."""
    create_app()

    setids = parse_setids(datasets, discarded=True)
    dataset = Dataset.__table__
    stmt = dataset.update()\
                  .where(dataset.c.setid.in_(setids))\
                  .values(discarded=False)
    db.session.execute(stmt)
    db.session.commit()
def scale(data, scale = (1.,1.,1.), interpolation = "linear"):
    """
    returns an interpolated, scaled version of data

    the output shape is scaled too.

    Parameters
    ----------
    data: ndarray
        3d input array
    scale: float, tuple
        scaling factor along each axis (x,y,z)
    interpolation: str
        either "nearest" or "linear"

    Returns
    -------
        scaled output
    """
    if not (isinstance(data, np.ndarray) and data.ndim == 3):
        raise ValueError("input data has to be a 3d array!")

    interpolation_defines = {"linear": ["-D", "SAMPLER_FILTER=CLK_FILTER_LINEAR"],
                             "nearest": ["-D", "SAMPLER_FILTER=CLK_FILTER_NEAREST"]}

    if not interpolation in interpolation_defines:
        raise KeyError(
            "interpolation = '%s' not defined, valid: %s" % (interpolation, list(interpolation_defines.keys())))

    options_types = {np.uint8: ["-D", "TYPENAME=uchar", "-D", "READ_IMAGE=read_imageui"],
                     np.uint16: ["-D", "TYPENAME=short", "-D", "READ_IMAGE=read_imageui"],
                     np.float32: ["-D", "TYPENAME=float", "-D", "READ_IMAGE=read_imagef"],
                     }

    dtype = data.dtype.type

    if not dtype in options_types:
        raise ValueError("type %s not supported! Available: %s" % (dtype, str(list(options_types.keys()))))

    if not isinstance(scale, (tuple, list, np.ndarray)):
        scale = (scale,)*3

    if len(scale) != 3:
        raise ValueError("scale = %s malformed" % scale)

    d_im = OCLImage.from_array(data)
    nshape = _scale_shape(data.shape, scale)
    res_g = OCLArray.empty(nshape, dtype)

    prog = OCLProgram(abspath("kernels/scale.cl"),
                      build_options=interpolation_defines[interpolation] + options_types[dtype])

    prog.run_kernel("scale",
                    res_g.shape[::-1], None,
                    d_im, res_g.data)

    return res_g.get()
returns an interpolated, scaled version of data

the output shape is scaled too.

Parameters
----------
data: ndarray
    3d input array
scale: float, tuple
    scaling factor along each axis (x,y,z)
interpolation: str
    either "nearest" or "linear"

Returns
-------
    scaled output
Below is the instruction that describes the task:
### Input:
returns an interpolated, scaled version of data

the output shape is scaled too.

Parameters
----------
data: ndarray
    3d input array
scale: float, tuple
    scaling factor along each axis (x,y,z)
interpolation: str
    either "nearest" or "linear"

Returns
-------
    scaled output
### Response:
def scale(data, scale = (1.,1.,1.), interpolation = "linear"):
    """
    returns an interpolated, scaled version of data

    the output shape is scaled too.

    Parameters
    ----------
    data: ndarray
        3d input array
    scale: float, tuple
        scaling factor along each axis (x,y,z)
    interpolation: str
        either "nearest" or "linear"

    Returns
    -------
        scaled output
    """
    if not (isinstance(data, np.ndarray) and data.ndim == 3):
        raise ValueError("input data has to be a 3d array!")

    interpolation_defines = {"linear": ["-D", "SAMPLER_FILTER=CLK_FILTER_LINEAR"],
                             "nearest": ["-D", "SAMPLER_FILTER=CLK_FILTER_NEAREST"]}

    if not interpolation in interpolation_defines:
        raise KeyError(
            "interpolation = '%s' not defined, valid: %s" % (interpolation, list(interpolation_defines.keys())))

    options_types = {np.uint8: ["-D", "TYPENAME=uchar", "-D", "READ_IMAGE=read_imageui"],
                     np.uint16: ["-D", "TYPENAME=short", "-D", "READ_IMAGE=read_imageui"],
                     np.float32: ["-D", "TYPENAME=float", "-D", "READ_IMAGE=read_imagef"],
                     }

    dtype = data.dtype.type

    if not dtype in options_types:
        raise ValueError("type %s not supported! Available: %s" % (dtype, str(list(options_types.keys()))))

    if not isinstance(scale, (tuple, list, np.ndarray)):
        scale = (scale,)*3

    if len(scale) != 3:
        raise ValueError("scale = %s malformed" % scale)

    d_im = OCLImage.from_array(data)
    nshape = _scale_shape(data.shape, scale)
    res_g = OCLArray.empty(nshape, dtype)

    prog = OCLProgram(abspath("kernels/scale.cl"),
                      build_options=interpolation_defines[interpolation] + options_types[dtype])

    prog.run_kernel("scale",
                    res_g.shape[::-1], None,
                    d_im, res_g.data)

    return res_g.get()
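A minimal usage sketch. It assumes an OpenCL-capable device and the OCLArray/OCLImage machinery the surrounding module imports; the array shape and scale factors are arbitrary:

# Sketch only: requires a working OpenCL device plus the package this
# function ships in (OCLImage, OCLArray, OCLProgram in scope).
import numpy as np

data = np.random.uniform(0, 1, (32, 64, 64)).astype(np.float32)
out = scale(data, scale=(2., 2., 1.), interpolation="linear")
print(data.shape, "->", out.shape)  # output shape grows per the (x, y, z) factors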
def get_message_handler(self, message_handlers):
    """
    Create a MessageHandler for the configured encoder

    :param message_handlers: a dictionary of MessageHandlers keyed by encoder
    :return: a MessageHandler
    """
    encoder = self.options.encoder
    try:
        return message_handlers[encoder]
    except KeyError:
        raise NotImplementedError('No MessageHandler defined for given encoder (%s).' % encoder)
Create a MessageHandler for the configured encoder :param message_handlers: a dictionary of MessageHandlers keyed by encoder :return: a MessageHandler
Below is the instruction that describes the task:
### Input:
Create a MessageHandler for the configured encoder :param message_handlers: a dictionary of MessageHandlers keyed by encoder :return: a MessageHandler
### Response:
def get_message_handler(self, message_handlers):
    """
    Create a MessageHandler for the configured encoder

    :param message_handlers: a dictionary of MessageHandlers keyed by encoder
    :return: a MessageHandler
    """
    encoder = self.options.encoder
    try:
        return message_handlers[encoder]
    except KeyError:
        raise NotImplementedError('No MessageHandler defined for given encoder (%s).' % encoder)
def pair(self, array, stats): """ :return (array, array_stats) if stats, else (array, None) """ if len(self.weights) > 1 and stats: statnames, statfuncs = zip(*stats) array_stats = compute_stats2(array, statfuncs, self.weights) else: array_stats = None return array, array_stats
:return (array, array_stats) if stats, else (array, None)
Below is the instruction that describes the task:
### Input:
:return (array, array_stats) if stats, else (array, None)
### Response:
def pair(self, array, stats):
    """
    :return (array, array_stats) if stats, else (array, None)
    """
    if len(self.weights) > 1 and stats:
        statnames, statfuncs = zip(*stats)
        array_stats = compute_stats2(array, statfuncs, self.weights)
    else:
        array_stats = None
    return array, array_stats
def memoize(func): """Simple caching decorator.""" cache = {} @functools.wraps(func) def wrapper(*args, **kwargs): """Caching wrapper.""" key = (args, tuple(sorted(kwargs.items()))) if key in cache: return cache[key] else: result = func(*args, **kwargs) cache[key] = result return result return wrapper
Simple caching decorator.
Below is the instruction that describes the task:
### Input:
Simple caching decorator.
### Response:
def memoize(func):
    """Simple caching decorator."""
    cache = {}

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        """Caching wrapper."""
        key = (args, tuple(sorted(kwargs.items())))
        if key in cache:
            return cache[key]
        else:
            result = func(*args, **kwargs)
            cache[key] = result
            return result

    return wrapper
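The decorator composes with recursion, which makes for a quick correctness check; note the defining module must import functools, since the decorator uses functools.wraps:

import functools  # needed by the memoize decorator above

@memoize
def fib(n):
    # Each fib(k) is computed once, then served from the cache.
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(30))  # 832040, returned quickly thanks to caching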