code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def _showItemContextMenu(self, item, point, col): """Callback for contextMenuRequested() signal. Pops up item menu, if defined""" menu = getattr(item, '_menu', None) if menu: settitle = getattr(item, '_set_menu_title', None) if settitle: settitle() # self._current_item tells callbacks what item the menu was referring to point = self.etw.mapToGlobal(point) self._current_item = item self.etw.clearSelection() self.etw.setItemSelected(item, True) menu.exec_(point) else: self._current_item = None
Callback for contextMenuRequested() signal. Pops up item menu, if defined
Below is the the instruction that describes the task: ### Input: Callback for contextMenuRequested() signal. Pops up item menu, if defined ### Response: def _showItemContextMenu(self, item, point, col): """Callback for contextMenuRequested() signal. Pops up item menu, if defined""" menu = getattr(item, '_menu', None) if menu: settitle = getattr(item, '_set_menu_title', None) if settitle: settitle() # self._current_item tells callbacks what item the menu was referring to point = self.etw.mapToGlobal(point) self._current_item = item self.etw.clearSelection() self.etw.setItemSelected(item, True) menu.exec_(point) else: self._current_item = None
def info(message, *args, **kwargs): """ write a message to stdout """ if 'end' in kwargs: end = kwargs['end'] else: end = '\n' if len(args) == 0: sys.stdout.write(message) else: sys.stdout.write(message % args) sys.stdout.write(end) sys.stdout.flush()
write a message to stdout
Below is the the instruction that describes the task: ### Input: write a message to stdout ### Response: def info(message, *args, **kwargs): """ write a message to stdout """ if 'end' in kwargs: end = kwargs['end'] else: end = '\n' if len(args) == 0: sys.stdout.write(message) else: sys.stdout.write(message % args) sys.stdout.write(end) sys.stdout.flush()
def friendships_destroy(self, user_id=None, screen_name=None): """ Allows the authenticating user to unfollow the specified user. https://dev.twitter.com/docs/api/1.1/post/friendships/destroy :param str user_id: The screen name of the user for whom to unfollow. Required if ``screen_name`` isn't given. :param str screen_name: The ID of the user for whom to unfollow. Required if ``user_id`` isn't given. :returns: A dict containing the newly unfollowed user. """ params = {} set_str_param(params, 'user_id', user_id) set_str_param(params, 'screen_name', screen_name) return self._post_api('friendships/destroy.json', params)
Allows the authenticating user to unfollow the specified user. https://dev.twitter.com/docs/api/1.1/post/friendships/destroy :param str user_id: The screen name of the user for whom to unfollow. Required if ``screen_name`` isn't given. :param str screen_name: The ID of the user for whom to unfollow. Required if ``user_id`` isn't given. :returns: A dict containing the newly unfollowed user.
Below is the the instruction that describes the task: ### Input: Allows the authenticating user to unfollow the specified user. https://dev.twitter.com/docs/api/1.1/post/friendships/destroy :param str user_id: The screen name of the user for whom to unfollow. Required if ``screen_name`` isn't given. :param str screen_name: The ID of the user for whom to unfollow. Required if ``user_id`` isn't given. :returns: A dict containing the newly unfollowed user. ### Response: def friendships_destroy(self, user_id=None, screen_name=None): """ Allows the authenticating user to unfollow the specified user. https://dev.twitter.com/docs/api/1.1/post/friendships/destroy :param str user_id: The screen name of the user for whom to unfollow. Required if ``screen_name`` isn't given. :param str screen_name: The ID of the user for whom to unfollow. Required if ``user_id`` isn't given. :returns: A dict containing the newly unfollowed user. """ params = {} set_str_param(params, 'user_id', user_id) set_str_param(params, 'screen_name', screen_name) return self._post_api('friendships/destroy.json', params)
def frame_msg_ipc(body, header=None, raw_body=False): # pylint: disable=unused-argument ''' Frame the given message with our wire protocol for IPC For IPC, we don't need to be backwards compatible, so use the more efficient "use_bin_type=True" on Python 3. ''' framed_msg = {} if header is None: header = {} framed_msg['head'] = header framed_msg['body'] = body if six.PY2: return salt.utils.msgpack.dumps(framed_msg) else: return salt.utils.msgpack.dumps(framed_msg, use_bin_type=True)
Frame the given message with our wire protocol for IPC For IPC, we don't need to be backwards compatible, so use the more efficient "use_bin_type=True" on Python 3.
Below is the the instruction that describes the task: ### Input: Frame the given message with our wire protocol for IPC For IPC, we don't need to be backwards compatible, so use the more efficient "use_bin_type=True" on Python 3. ### Response: def frame_msg_ipc(body, header=None, raw_body=False): # pylint: disable=unused-argument ''' Frame the given message with our wire protocol for IPC For IPC, we don't need to be backwards compatible, so use the more efficient "use_bin_type=True" on Python 3. ''' framed_msg = {} if header is None: header = {} framed_msg['head'] = header framed_msg['body'] = body if six.PY2: return salt.utils.msgpack.dumps(framed_msg) else: return salt.utils.msgpack.dumps(framed_msg, use_bin_type=True)
def getBetas(idxPrc, aryPrfTc, lstAllMdlInd, aryFuncChnk, aryBstIndChnk, betaSw, queOut): """Calculate voxel betas and R^2 for the best model. Parameters ---------- idxPrc : TODO (?) aryPrfTc : np.array, shape (?) Population receptive field time courses. lstAllMdlInd : list List of the indices of all models. aryFuncChnk : TODO Chunk of something(?) aryBstIndChnk : np.array, shape (?) Points for every voxel to the index of the best model betaSw : str, iterator, or np.array, shape (?) Best beta correlation coefficients found in training. queOut : TODO Queue output (?) Notes ----- This is done after fitting with cross validation, since during the fitting process, we never fit the model to the entire data. """ # get number of motion directions varNumMtnDrctns = aryPrfTc.shape[3] varNumVoxChnk = aryBstIndChnk.shape[0] # prepare array for best beta weights if type(betaSw) is sklearn.model_selection._split.KFold: aryEstimMtnCrvTrn = np.zeros((varNumVoxChnk, varNumMtnDrctns, betaSw.get_n_splits()), dtype='float32') aryEstimMtnCrvTst = np.zeros((varNumVoxChnk, varNumMtnDrctns, betaSw.get_n_splits()), dtype='float32') resTrn = np.zeros((varNumVoxChnk, betaSw.get_n_splits()), dtype='float32') resTst = np.zeros((varNumVoxChnk, betaSw.get_n_splits()), dtype='float32') aryErrorTrn = np.zeros((varNumVoxChnk), dtype='float32') aryErrorTst = np.zeros((varNumVoxChnk), dtype='float32') contrast = np.array([ [1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], ]) denomTrn = np.zeros((varNumVoxChnk, betaSw.get_n_splits(), len(contrast)), dtype='float32') denomTst = np.zeros((varNumVoxChnk, betaSw.get_n_splits(), len(contrast)), dtype='float32') elif type(betaSw) is np.ndarray and betaSw.dtype == 'bool': aryEstimMtnCrvTrn = np.zeros((varNumVoxChnk, varNumMtnDrctns, ), dtype='float32') aryEstimMtnCrvTst = np.zeros((varNumVoxChnk, varNumMtnDrctns, ), dtype='float32') resTrn = np.zeros((varNumVoxChnk), dtype='float32') resTst = np.zeros((varNumVoxChnk), dtype='float32') 
aryErrorTrn = np.zeros((varNumVoxChnk), dtype='float32') aryErrorTst = np.zeros((varNumVoxChnk), dtype='float32') contrast = np.array([ [1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], ]) denomTrn = np.zeros((varNumVoxChnk, len(contrast)), dtype='float32') denomTst = np.zeros((varNumVoxChnk, len(contrast)), dtype='float32') else: aryEstimMtnCrv = np.zeros((varNumVoxChnk, varNumMtnDrctns), dtype='float32') # prepare array for best residuals vecBstRes = np.zeros(varNumVoxChnk, dtype='float32') vecBstRes[:] = np.inf # prepare counter to check that every voxel is matched to one winner mdl vecLgcCounter = np.zeros(varNumVoxChnk, dtype='float32') # We reshape the voxel time courses, so that time goes down the column aryFuncChnk = aryFuncChnk.T # Change type to float 32: aryFuncChnk = aryFuncChnk.astype(np.float32) aryPrfTc = aryPrfTc.astype(np.float32) # Prepare status indicator if this is the first of the parallel processes: if idxPrc == 0: # We create a status indicator for the time consuming pRF model finding # algorithm. 
Number of steps of the status indicator: varStsStpSze = 20 # Number of pRF models to fit: varNumMdls = len(lstAllMdlInd) # Vector with pRF values at which to give status feedback: vecStatPrf = np.linspace(0, varNumMdls, num=(varStsStpSze+1), endpoint=True) vecStatPrf = np.ceil(vecStatPrf) vecStatPrf = vecStatPrf.astype(int) # Vector with corresponding percentage values at which to give status # feedback: vecStatPrc = np.linspace(0, 100, num=(varStsStpSze+1), endpoint=True) vecStatPrc = np.ceil(vecStatPrc) vecStatPrc = vecStatPrc.astype(int) # Counter for status indicator: varCntSts01 = 0 varCntSts02 = 0 # Loop through pRF models: for idx, mdlInd in enumerate(lstAllMdlInd): # Status indicator (only used in the first of the parallel # processes): if idxPrc == 0: # Status indicator: if varCntSts02 == vecStatPrf[varCntSts01]: # Prepare status message: strStsMsg = ('---------Progress: ' + str(vecStatPrc[varCntSts01]) + ' % --- ' + str(vecStatPrf[varCntSts01]) + ' pRF models out of ' + str(varNumMdls)) print(strStsMsg) # Only increment counter if the last value has not been # reached yet: if varCntSts01 < varStsStpSze: varCntSts01 = varCntSts01 + int(1) # check whether any model had this particular x, y, sigma combination # as its best model lgcTemp = [aryBstIndChnk == idx][0] if np.greater(np.sum(lgcTemp), 0): # get current design matrix aryDsgnTmp = aryPrfTc[mdlInd].T if betaSw is 'train': # training aryTmpPrmEst, aryTmpRes = np.linalg.lstsq( aryDsgnTmp, aryFuncChnk[:, lgcTemp])[0:2] aryEstimMtnCrv[lgcTemp, :] = aryTmpPrmEst.T vecBstRes[lgcTemp] = aryTmpRes elif type(betaSw) is np.ndarray and betaSw.dtype == 'float': # get beta weights for axis of motion tuning curves aryEstimMtnCrv[lgcTemp, :] = np.linalg.lstsq( aryDsgnTmp, aryFuncChnk[:, lgcTemp])[0].T # calculate prediction aryPredTc = np.dot(aryDsgnTmp, betaSw[lgcTemp, :].T) # Sum of squares: vecBstRes[lgcTemp] = np.sum((aryFuncChnk[:, lgcTemp] - aryPredTc) ** 2, axis=0) elif type(betaSw) is np.ndarray and 
betaSw.dtype == 'bool': # get beta weights for training betas, resTrn[lgcTemp] = np.linalg.lstsq( aryDsgnTmp[betaSw, :], aryFuncChnk[betaSw][:, lgcTemp])[0:2] aryEstimMtnCrvTrn[lgcTemp, :] = betas.T # get beta weights for validation betas, resTst[lgcTemp] = np.linalg.lstsq( aryDsgnTmp[~betaSw, :], aryFuncChnk[~betaSw][:, lgcTemp])[0:2] aryEstimMtnCrvTrn[lgcTemp, :] = betas.T # calculate CC for training aryCcTrn = np.linalg.pinv( np.dot(aryDsgnTmp[betaSw, :].T, aryDsgnTmp[betaSw, :])) aryCcTst = np.linalg.pinv( np.dot(aryDsgnTmp[~betaSw, :].T, aryDsgnTmp[~betaSw, :])) # calculate Error for training aryErrorTrn[lgcTemp] = np.var( np.subtract(aryFuncChnk[betaSw][:, lgcTemp], np.dot(aryDsgnTmp[betaSw, :], aryEstimMtnCrvTrn[lgcTemp, :].T)), axis=0) # calculate Error for test aryErrorTst[lgcTemp] = np.var( np.subtract(aryFuncChnk[~betaSw][:, lgcTemp], np.dot(aryDsgnTmp[~betaSw, :], aryEstimMtnCrvTst[lgcTemp, :].T)), axis=0) # calculate denominator for training for indContr, contr in enumerate(contrast): denomTrn[lgcTemp, indContr] = np.sqrt( aryErrorTrn[lgcTemp] * np.dot( np.dot(contr, aryCcTrn), contr.T)) denomTst[lgcTemp, indContr] = np.sqrt( aryErrorTst[lgcTemp] * np.dot( np.dot(contr, aryCcTst), contr.T)) elif type(betaSw) is sklearn.model_selection._split.KFold: for idxCV, (idxTrn, idxVal) in enumerate(betaSw.split(aryDsgnTmp)): # get beta weights for training betas, resTrn[lgcTemp, idxCV] = np.linalg.lstsq( aryDsgnTmp[idxTrn], aryFuncChnk[idxTrn][:, lgcTemp])[0:2] aryEstimMtnCrvTrn[lgcTemp, :, idxCV] = betas.T # get beta weights for validation betas, resTst[lgcTemp, idxCV] = np.linalg.lstsq( aryDsgnTmp[idxVal], aryFuncChnk[idxVal][:, lgcTemp])[0:2] aryEstimMtnCrvTst[lgcTemp, :, idxCV] = betas.T # calculate CC for training aryCcTrn = np.linalg.pinv( np.dot(aryDsgnTmp[idxTrn].T, aryDsgnTmp[idxTrn])) aryCcTst = np.linalg.pinv( np.dot(aryDsgnTmp[idxVal].T, aryDsgnTmp[idxVal])) # calculate Error for training aryErrorTrn[lgcTemp] = np.var( 
np.subtract(aryFuncChnk[idxTrn][:, lgcTemp], np.dot(aryDsgnTmp[idxTrn], aryEstimMtnCrvTrn[lgcTemp, :, idxCV].T)), axis=0) # calculate Error for test aryErrorTst[lgcTemp] = np.var( np.subtract(aryFuncChnk[idxVal][:, lgcTemp], np.dot(aryDsgnTmp[idxVal], aryEstimMtnCrvTst[lgcTemp, :, idxCV].T)), axis=0) # calculate denominator for training for indContr, contr in enumerate(contrast): denomTrn[lgcTemp, idxCV, indContr] = np.sqrt( aryErrorTrn[lgcTemp] * np.dot( np.dot(contr, aryCcTrn), contr.T)) denomTst[lgcTemp, idxCV, indContr] = np.sqrt( aryErrorTst[lgcTemp] * np.dot( np.dot(contr, aryCcTst), contr.T)) # increase logical counter to verify later that every voxel # was visited only once vecLgcCounter[lgcTemp] += 1 # Status indicator (only used in the first of the parallel # processes): if idxPrc == 0: # Increment status indicator counter: varCntSts02 = varCntSts02 + 1 # check that every voxel was visited only once strErrMsg = ('It looks like at least voxel was revisted more than once. ' + 'Check whether the R2 was calculated correctly') assert np.sum(vecLgcCounter) == len(vecLgcCounter), strErrMsg if type(betaSw) is sklearn.model_selection._split.KFold: # calculate t-values aryTvalsTrn = np.empty((varNumVoxChnk, contrast.shape[0], betaSw.get_n_splits())) aryTvalsTst = np.empty((varNumVoxChnk, contrast.shape[0], betaSw.get_n_splits())) for ind1, contr in enumerate(contrast): for ind2 in range(betaSw.get_n_splits()): aryTvalsTrn[:, ind1, ind2] = np.divide( np.dot(contr, aryEstimMtnCrvTrn[:, :, ind2].T), denomTrn[:, ind2, ind1]) aryTvalsTst[:, ind1, ind2] = np.divide( np.dot(contr, aryEstimMtnCrvTst[:, :, ind2].T), denomTst[:, ind2, ind1]) # Output list: lstOut = [idxPrc, aryEstimMtnCrvTrn, aryEstimMtnCrvTst, aryTvalsTrn, aryTvalsTst, ] queOut.put(lstOut) elif type(betaSw) is np.ndarray and betaSw.dtype == 'bool': # calculate t-values aryTvalsTrn = np.empty((varNumVoxChnk, contrast.shape[0], )) aryTvalsTst = np.empty((varNumVoxChnk, contrast.shape[0], )) for ind1, contr in 
enumerate(contrast): aryTvalsTrn[:, ind1] = np.divide( np.dot(contr, aryEstimMtnCrvTrn.T), denomTrn[:, ind1]) aryTvalsTst[:, ind1] = np.divide( np.dot(contr, aryEstimMtnCrvTst.T), denomTst[:, ind1]) # Output list: lstOut = [idxPrc, aryEstimMtnCrvTrn, aryEstimMtnCrvTst, aryTvalsTrn, aryTvalsTst, ] queOut.put(lstOut) else: # After finding the best fitting model for each voxel, we still have to # calculate the coefficient of determination (R-squared) for each voxel. We # start by calculating the total sum of squares (i.e. the deviation of the # data from the mean). The mean of each time course: vecFuncMean = np.mean(aryFuncChnk, axis=0) # Deviation from the mean for each datapoint: vecFuncDev = np.subtract(aryFuncChnk, vecFuncMean[None, :]) # Sum of squares: vecSsTot = np.sum(np.power(vecFuncDev, 2.0), axis=0) # Coefficient of determination: vecBstR2 = np.subtract(1.0, np.divide(vecBstRes, vecSsTot)) # Output list: lstOut = [idxPrc, vecBstR2, aryEstimMtnCrv] queOut.put(lstOut)
Calculate voxel betas and R^2 for the best model. Parameters ---------- idxPrc : TODO (?) aryPrfTc : np.array, shape (?) Population receptive field time courses. lstAllMdlInd : list List of the indices of all models. aryFuncChnk : TODO Chunk of something(?) aryBstIndChnk : np.array, shape (?) Points for every voxel to the index of the best model betaSw : str, iterator, or np.array, shape (?) Best beta correlation coefficients found in training. queOut : TODO Queue output (?) Notes ----- This is done after fitting with cross validation, since during the fitting process, we never fit the model to the entire data.
Below is the the instruction that describes the task: ### Input: Calculate voxel betas and R^2 for the best model. Parameters ---------- idxPrc : TODO (?) aryPrfTc : np.array, shape (?) Population receptive field time courses. lstAllMdlInd : list List of the indices of all models. aryFuncChnk : TODO Chunk of something(?) aryBstIndChnk : np.array, shape (?) Points for every voxel to the index of the best model betaSw : str, iterator, or np.array, shape (?) Best beta correlation coefficients found in training. queOut : TODO Queue output (?) Notes ----- This is done after fitting with cross validation, since during the fitting process, we never fit the model to the entire data. ### Response: def getBetas(idxPrc, aryPrfTc, lstAllMdlInd, aryFuncChnk, aryBstIndChnk, betaSw, queOut): """Calculate voxel betas and R^2 for the best model. Parameters ---------- idxPrc : TODO (?) aryPrfTc : np.array, shape (?) Population receptive field time courses. lstAllMdlInd : list List of the indices of all models. aryFuncChnk : TODO Chunk of something(?) aryBstIndChnk : np.array, shape (?) Points for every voxel to the index of the best model betaSw : str, iterator, or np.array, shape (?) Best beta correlation coefficients found in training. queOut : TODO Queue output (?) Notes ----- This is done after fitting with cross validation, since during the fitting process, we never fit the model to the entire data. 
""" # get number of motion directions varNumMtnDrctns = aryPrfTc.shape[3] varNumVoxChnk = aryBstIndChnk.shape[0] # prepare array for best beta weights if type(betaSw) is sklearn.model_selection._split.KFold: aryEstimMtnCrvTrn = np.zeros((varNumVoxChnk, varNumMtnDrctns, betaSw.get_n_splits()), dtype='float32') aryEstimMtnCrvTst = np.zeros((varNumVoxChnk, varNumMtnDrctns, betaSw.get_n_splits()), dtype='float32') resTrn = np.zeros((varNumVoxChnk, betaSw.get_n_splits()), dtype='float32') resTst = np.zeros((varNumVoxChnk, betaSw.get_n_splits()), dtype='float32') aryErrorTrn = np.zeros((varNumVoxChnk), dtype='float32') aryErrorTst = np.zeros((varNumVoxChnk), dtype='float32') contrast = np.array([ [1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], ]) denomTrn = np.zeros((varNumVoxChnk, betaSw.get_n_splits(), len(contrast)), dtype='float32') denomTst = np.zeros((varNumVoxChnk, betaSw.get_n_splits(), len(contrast)), dtype='float32') elif type(betaSw) is np.ndarray and betaSw.dtype == 'bool': aryEstimMtnCrvTrn = np.zeros((varNumVoxChnk, varNumMtnDrctns, ), dtype='float32') aryEstimMtnCrvTst = np.zeros((varNumVoxChnk, varNumMtnDrctns, ), dtype='float32') resTrn = np.zeros((varNumVoxChnk), dtype='float32') resTst = np.zeros((varNumVoxChnk), dtype='float32') aryErrorTrn = np.zeros((varNumVoxChnk), dtype='float32') aryErrorTst = np.zeros((varNumVoxChnk), dtype='float32') contrast = np.array([ [1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], ]) denomTrn = np.zeros((varNumVoxChnk, len(contrast)), dtype='float32') denomTst = np.zeros((varNumVoxChnk, len(contrast)), dtype='float32') else: aryEstimMtnCrv = np.zeros((varNumVoxChnk, varNumMtnDrctns), dtype='float32') # prepare array for best residuals vecBstRes = np.zeros(varNumVoxChnk, dtype='float32') vecBstRes[:] = np.inf # prepare counter to check that every voxel is matched to one winner mdl vecLgcCounter = np.zeros(varNumVoxChnk, dtype='float32') # We reshape the voxel time courses, so that 
time goes down the column aryFuncChnk = aryFuncChnk.T # Change type to float 32: aryFuncChnk = aryFuncChnk.astype(np.float32) aryPrfTc = aryPrfTc.astype(np.float32) # Prepare status indicator if this is the first of the parallel processes: if idxPrc == 0: # We create a status indicator for the time consuming pRF model finding # algorithm. Number of steps of the status indicator: varStsStpSze = 20 # Number of pRF models to fit: varNumMdls = len(lstAllMdlInd) # Vector with pRF values at which to give status feedback: vecStatPrf = np.linspace(0, varNumMdls, num=(varStsStpSze+1), endpoint=True) vecStatPrf = np.ceil(vecStatPrf) vecStatPrf = vecStatPrf.astype(int) # Vector with corresponding percentage values at which to give status # feedback: vecStatPrc = np.linspace(0, 100, num=(varStsStpSze+1), endpoint=True) vecStatPrc = np.ceil(vecStatPrc) vecStatPrc = vecStatPrc.astype(int) # Counter for status indicator: varCntSts01 = 0 varCntSts02 = 0 # Loop through pRF models: for idx, mdlInd in enumerate(lstAllMdlInd): # Status indicator (only used in the first of the parallel # processes): if idxPrc == 0: # Status indicator: if varCntSts02 == vecStatPrf[varCntSts01]: # Prepare status message: strStsMsg = ('---------Progress: ' + str(vecStatPrc[varCntSts01]) + ' % --- ' + str(vecStatPrf[varCntSts01]) + ' pRF models out of ' + str(varNumMdls)) print(strStsMsg) # Only increment counter if the last value has not been # reached yet: if varCntSts01 < varStsStpSze: varCntSts01 = varCntSts01 + int(1) # check whether any model had this particular x, y, sigma combination # as its best model lgcTemp = [aryBstIndChnk == idx][0] if np.greater(np.sum(lgcTemp), 0): # get current design matrix aryDsgnTmp = aryPrfTc[mdlInd].T if betaSw is 'train': # training aryTmpPrmEst, aryTmpRes = np.linalg.lstsq( aryDsgnTmp, aryFuncChnk[:, lgcTemp])[0:2] aryEstimMtnCrv[lgcTemp, :] = aryTmpPrmEst.T vecBstRes[lgcTemp] = aryTmpRes elif type(betaSw) is np.ndarray and betaSw.dtype == 'float': # get beta 
weights for axis of motion tuning curves aryEstimMtnCrv[lgcTemp, :] = np.linalg.lstsq( aryDsgnTmp, aryFuncChnk[:, lgcTemp])[0].T # calculate prediction aryPredTc = np.dot(aryDsgnTmp, betaSw[lgcTemp, :].T) # Sum of squares: vecBstRes[lgcTemp] = np.sum((aryFuncChnk[:, lgcTemp] - aryPredTc) ** 2, axis=0) elif type(betaSw) is np.ndarray and betaSw.dtype == 'bool': # get beta weights for training betas, resTrn[lgcTemp] = np.linalg.lstsq( aryDsgnTmp[betaSw, :], aryFuncChnk[betaSw][:, lgcTemp])[0:2] aryEstimMtnCrvTrn[lgcTemp, :] = betas.T # get beta weights for validation betas, resTst[lgcTemp] = np.linalg.lstsq( aryDsgnTmp[~betaSw, :], aryFuncChnk[~betaSw][:, lgcTemp])[0:2] aryEstimMtnCrvTrn[lgcTemp, :] = betas.T # calculate CC for training aryCcTrn = np.linalg.pinv( np.dot(aryDsgnTmp[betaSw, :].T, aryDsgnTmp[betaSw, :])) aryCcTst = np.linalg.pinv( np.dot(aryDsgnTmp[~betaSw, :].T, aryDsgnTmp[~betaSw, :])) # calculate Error for training aryErrorTrn[lgcTemp] = np.var( np.subtract(aryFuncChnk[betaSw][:, lgcTemp], np.dot(aryDsgnTmp[betaSw, :], aryEstimMtnCrvTrn[lgcTemp, :].T)), axis=0) # calculate Error for test aryErrorTst[lgcTemp] = np.var( np.subtract(aryFuncChnk[~betaSw][:, lgcTemp], np.dot(aryDsgnTmp[~betaSw, :], aryEstimMtnCrvTst[lgcTemp, :].T)), axis=0) # calculate denominator for training for indContr, contr in enumerate(contrast): denomTrn[lgcTemp, indContr] = np.sqrt( aryErrorTrn[lgcTemp] * np.dot( np.dot(contr, aryCcTrn), contr.T)) denomTst[lgcTemp, indContr] = np.sqrt( aryErrorTst[lgcTemp] * np.dot( np.dot(contr, aryCcTst), contr.T)) elif type(betaSw) is sklearn.model_selection._split.KFold: for idxCV, (idxTrn, idxVal) in enumerate(betaSw.split(aryDsgnTmp)): # get beta weights for training betas, resTrn[lgcTemp, idxCV] = np.linalg.lstsq( aryDsgnTmp[idxTrn], aryFuncChnk[idxTrn][:, lgcTemp])[0:2] aryEstimMtnCrvTrn[lgcTemp, :, idxCV] = betas.T # get beta weights for validation betas, resTst[lgcTemp, idxCV] = np.linalg.lstsq( aryDsgnTmp[idxVal], 
aryFuncChnk[idxVal][:, lgcTemp])[0:2] aryEstimMtnCrvTst[lgcTemp, :, idxCV] = betas.T # calculate CC for training aryCcTrn = np.linalg.pinv( np.dot(aryDsgnTmp[idxTrn].T, aryDsgnTmp[idxTrn])) aryCcTst = np.linalg.pinv( np.dot(aryDsgnTmp[idxVal].T, aryDsgnTmp[idxVal])) # calculate Error for training aryErrorTrn[lgcTemp] = np.var( np.subtract(aryFuncChnk[idxTrn][:, lgcTemp], np.dot(aryDsgnTmp[idxTrn], aryEstimMtnCrvTrn[lgcTemp, :, idxCV].T)), axis=0) # calculate Error for test aryErrorTst[lgcTemp] = np.var( np.subtract(aryFuncChnk[idxVal][:, lgcTemp], np.dot(aryDsgnTmp[idxVal], aryEstimMtnCrvTst[lgcTemp, :, idxCV].T)), axis=0) # calculate denominator for training for indContr, contr in enumerate(contrast): denomTrn[lgcTemp, idxCV, indContr] = np.sqrt( aryErrorTrn[lgcTemp] * np.dot( np.dot(contr, aryCcTrn), contr.T)) denomTst[lgcTemp, idxCV, indContr] = np.sqrt( aryErrorTst[lgcTemp] * np.dot( np.dot(contr, aryCcTst), contr.T)) # increase logical counter to verify later that every voxel # was visited only once vecLgcCounter[lgcTemp] += 1 # Status indicator (only used in the first of the parallel # processes): if idxPrc == 0: # Increment status indicator counter: varCntSts02 = varCntSts02 + 1 # check that every voxel was visited only once strErrMsg = ('It looks like at least voxel was revisted more than once. 
' + 'Check whether the R2 was calculated correctly') assert np.sum(vecLgcCounter) == len(vecLgcCounter), strErrMsg if type(betaSw) is sklearn.model_selection._split.KFold: # calculate t-values aryTvalsTrn = np.empty((varNumVoxChnk, contrast.shape[0], betaSw.get_n_splits())) aryTvalsTst = np.empty((varNumVoxChnk, contrast.shape[0], betaSw.get_n_splits())) for ind1, contr in enumerate(contrast): for ind2 in range(betaSw.get_n_splits()): aryTvalsTrn[:, ind1, ind2] = np.divide( np.dot(contr, aryEstimMtnCrvTrn[:, :, ind2].T), denomTrn[:, ind2, ind1]) aryTvalsTst[:, ind1, ind2] = np.divide( np.dot(contr, aryEstimMtnCrvTst[:, :, ind2].T), denomTst[:, ind2, ind1]) # Output list: lstOut = [idxPrc, aryEstimMtnCrvTrn, aryEstimMtnCrvTst, aryTvalsTrn, aryTvalsTst, ] queOut.put(lstOut) elif type(betaSw) is np.ndarray and betaSw.dtype == 'bool': # calculate t-values aryTvalsTrn = np.empty((varNumVoxChnk, contrast.shape[0], )) aryTvalsTst = np.empty((varNumVoxChnk, contrast.shape[0], )) for ind1, contr in enumerate(contrast): aryTvalsTrn[:, ind1] = np.divide( np.dot(contr, aryEstimMtnCrvTrn.T), denomTrn[:, ind1]) aryTvalsTst[:, ind1] = np.divide( np.dot(contr, aryEstimMtnCrvTst.T), denomTst[:, ind1]) # Output list: lstOut = [idxPrc, aryEstimMtnCrvTrn, aryEstimMtnCrvTst, aryTvalsTrn, aryTvalsTst, ] queOut.put(lstOut) else: # After finding the best fitting model for each voxel, we still have to # calculate the coefficient of determination (R-squared) for each voxel. We # start by calculating the total sum of squares (i.e. the deviation of the # data from the mean). The mean of each time course: vecFuncMean = np.mean(aryFuncChnk, axis=0) # Deviation from the mean for each datapoint: vecFuncDev = np.subtract(aryFuncChnk, vecFuncMean[None, :]) # Sum of squares: vecSsTot = np.sum(np.power(vecFuncDev, 2.0), axis=0) # Coefficient of determination: vecBstR2 = np.subtract(1.0, np.divide(vecBstRes, vecSsTot)) # Output list: lstOut = [idxPrc, vecBstR2, aryEstimMtnCrv] queOut.put(lstOut)
def parse_pkginfo(line, osarch=None): ''' A small helper to parse an rpm/repoquery command's output. Returns a pkginfo namedtuple. ''' try: name, epoch, version, release, arch, repoid, install_time = line.split('_|-') # Handle unpack errors (should never happen with the queryformat we are # using, but can't hurt to be careful). except ValueError: return None name = resolve_name(name, arch, osarch) if release: version += '-{0}'.format(release) if epoch not in ('(none)', '0'): version = ':'.join((epoch, version)) if install_time not in ('(none)', '0'): install_date = datetime.datetime.utcfromtimestamp(int(install_time)).isoformat() + "Z" install_date_time_t = int(install_time) else: install_date = None install_date_time_t = None return pkginfo(name, version, arch, repoid, install_date, install_date_time_t)
A small helper to parse an rpm/repoquery command's output. Returns a pkginfo namedtuple.
Below is the the instruction that describes the task: ### Input: A small helper to parse an rpm/repoquery command's output. Returns a pkginfo namedtuple. ### Response: def parse_pkginfo(line, osarch=None): ''' A small helper to parse an rpm/repoquery command's output. Returns a pkginfo namedtuple. ''' try: name, epoch, version, release, arch, repoid, install_time = line.split('_|-') # Handle unpack errors (should never happen with the queryformat we are # using, but can't hurt to be careful). except ValueError: return None name = resolve_name(name, arch, osarch) if release: version += '-{0}'.format(release) if epoch not in ('(none)', '0'): version = ':'.join((epoch, version)) if install_time not in ('(none)', '0'): install_date = datetime.datetime.utcfromtimestamp(int(install_time)).isoformat() + "Z" install_date_time_t = int(install_time) else: install_date = None install_date_time_t = None return pkginfo(name, version, arch, repoid, install_date, install_date_time_t)
def format_close(code: int, reason: str) -> str: """ Display a human-readable version of the close code and reason. """ if 3000 <= code < 4000: explanation = "registered" elif 4000 <= code < 5000: explanation = "private use" else: explanation = CLOSE_CODES.get(code, "unknown") result = f"code = {code} ({explanation}), " if reason: result += f"reason = {reason}" else: result += "no reason" return result
Display a human-readable version of the close code and reason.
Below is the the instruction that describes the task: ### Input: Display a human-readable version of the close code and reason. ### Response: def format_close(code: int, reason: str) -> str: """ Display a human-readable version of the close code and reason. """ if 3000 <= code < 4000: explanation = "registered" elif 4000 <= code < 5000: explanation = "private use" else: explanation = CLOSE_CODES.get(code, "unknown") result = f"code = {code} ({explanation}), " if reason: result += f"reason = {reason}" else: result += "no reason" return result
def get_context(self): """ returns the context dict for this statement :rtype: dict """ ctx = {} for clause in self.where_clauses or []: clause.update_context(ctx) return ctx
returns the context dict for this statement :rtype: dict
Below is the the instruction that describes the task: ### Input: returns the context dict for this statement :rtype: dict ### Response: def get_context(self): """ returns the context dict for this statement :rtype: dict """ ctx = {} for clause in self.where_clauses or []: clause.update_context(ctx) return ctx
def _create_all_tables(self): """ Creates all the required tables by calling the required functions. :return: """ self._create_post_table() self._create_tag_table() self._create_tag_posts_table() self._create_user_posts_table()
Creates all the required tables by calling the required functions. :return:
Below is the the instruction that describes the task: ### Input: Creates all the required tables by calling the required functions. :return: ### Response: def _create_all_tables(self): """ Creates all the required tables by calling the required functions. :return: """ self._create_post_table() self._create_tag_table() self._create_tag_posts_table() self._create_user_posts_table()
def _process_module_needs(self, modules): """Adds the module and its dependencies to the result list in dependency order.""" result = list(modules) for i, module in enumerate(modules): #It is possible that the parser couldn't find it, if so #we can't create the executable! if module in self.module.parent.modules: modneeds = self.module.parent.modules[module].needs for modn in modneeds: if modn not in result: #Since this module depends on the other, insert the other #above it in the list. result.insert(i, modn) else: x = result.index(modn) if x > i: #We need to move this module higher up in the food chain #because it is needed sooner. result.remove(modn) result.insert(i, modn) newi = result.index(modn) else: raise ValueError("Unable to find module {}.".format(module)) return result
Adds the module and its dependencies to the result list in dependency order.
Below is the the instruction that describes the task: ### Input: Adds the module and its dependencies to the result list in dependency order. ### Response: def _process_module_needs(self, modules): """Adds the module and its dependencies to the result list in dependency order.""" result = list(modules) for i, module in enumerate(modules): #It is possible that the parser couldn't find it, if so #we can't create the executable! if module in self.module.parent.modules: modneeds = self.module.parent.modules[module].needs for modn in modneeds: if modn not in result: #Since this module depends on the other, insert the other #above it in the list. result.insert(i, modn) else: x = result.index(modn) if x > i: #We need to move this module higher up in the food chain #because it is needed sooner. result.remove(modn) result.insert(i, modn) newi = result.index(modn) else: raise ValueError("Unable to find module {}.".format(module)) return result
def print_torrent(self):
    """Print a human-readable summary of this torrent to stdout."""
    details = (
        ('Title: %s', self.title),
        ('URL: %s', self.url),
        ('Category: %s', self.category),
        ('Sub-Category: %s', self.sub_category),
        ('Magnet Link: %s', self.magnet_link),
        ('Torrent Link: %s', self.torrent_link),
        ('Uploaded: %s', self.created),
        ('Comments: %d', self.comments),
        ('Has Cover Image: %s', self.has_cover),
        ('User Status: %s', self.user_status),
        ('Size: %s', self.size),
        ('User: %s', self.user),
        ('Seeders: %d', self.seeders),
        ('Leechers: %d', self.leechers),
    )
    # One line per attribute, in the original display order.
    for template, value in details:
        print(template % value)
Print the details of a torrent
Below is the the instruction that describes the task: ### Input: Print the details of a torrent ### Response: def print_torrent(self): """ Print the details of a torrent """ print('Title: %s' % self.title) print('URL: %s' % self.url) print('Category: %s' % self.category) print('Sub-Category: %s' % self.sub_category) print('Magnet Link: %s' % self.magnet_link) print('Torrent Link: %s' % self.torrent_link) print('Uploaded: %s' % self.created) print('Comments: %d' % self.comments) print('Has Cover Image: %s' % self.has_cover) print('User Status: %s' % self.user_status) print('Size: %s' % self.size) print('User: %s' % self.user) print('Seeders: %d' % self.seeders) print('Leechers: %d' % self.leechers)
def install(args: List[str]) -> None:
    """Run ``pip install`` programmatically with the given argument list.

    .. code-block:: py

        >>> install(['numpy', '--target', 'site-packages'])
        Collecting numpy
        Downloading numpy-1.13.3-cp35-cp35m-manylinux1_x86_64.whl (16.9MB)
        100% || 16.9MB 53kB/s
        Installing collected packages: numpy
        Successfully installed numpy-1.13.3

    :param args: arguments forwarded verbatim to ``pip install``.
    """
    with clean_pip_env():
        # When invoked as a pyz we must expose our own site-packages to the
        # child interpreter, since pip may not otherwise be importable there.
        child_env = os.environ.copy()
        first_sitedir = _first_sitedir_index()
        _extend_python_path(child_env, sys.path[first_sitedir:])

        command = [
            sys.executable, "-m", "pip", "--disable-pip-version-check", "install",
        ] + args
        pip_proc = subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            env=child_env,
        )
        # Stream pip's combined output line by line as it arrives.
        for raw_line in pip_proc.stdout:
            if raw_line:
                click.echo(raw_line.decode().rstrip())
        if pip_proc.wait() > 0:
            sys.exit(PIP_INSTALL_ERROR)
`pip install` as a function. Accepts a list of pip arguments. .. code-block:: py >>> install(['numpy', '--target', 'site-packages']) Collecting numpy Downloading numpy-1.13.3-cp35-cp35m-manylinux1_x86_64.whl (16.9MB) 100% || 16.9MB 53kB/s Installing collected packages: numpy Successfully installed numpy-1.13.3
Below is the the instruction that describes the task: ### Input: `pip install` as a function. Accepts a list of pip arguments. .. code-block:: py >>> install(['numpy', '--target', 'site-packages']) Collecting numpy Downloading numpy-1.13.3-cp35-cp35m-manylinux1_x86_64.whl (16.9MB) 100% || 16.9MB 53kB/s Installing collected packages: numpy Successfully installed numpy-1.13.3 ### Response: def install(args: List[str]) -> None: """`pip install` as a function. Accepts a list of pip arguments. .. code-block:: py >>> install(['numpy', '--target', 'site-packages']) Collecting numpy Downloading numpy-1.13.3-cp35-cp35m-manylinux1_x86_64.whl (16.9MB) 100% || 16.9MB 53kB/s Installing collected packages: numpy Successfully installed numpy-1.13.3 """ with clean_pip_env(): # if being invoked as a pyz, we must ensure we have access to our own # site-packages when subprocessing since there is no guarantee that pip # will be available subprocess_env = os.environ.copy() sitedir_index = _first_sitedir_index() _extend_python_path(subprocess_env, sys.path[sitedir_index:]) process = subprocess.Popen( [sys.executable, "-m", "pip", "--disable-pip-version-check", "install"] + args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=subprocess_env, ) for output in process.stdout: if output: click.echo(output.decode().rstrip()) if process.wait() > 0: sys.exit(PIP_INSTALL_ERROR)
def discover_roku():
    """ Search LAN for available Roku devices. Returns a Roku object,
    or None if no devices were discovered.

    When several devices are found, the user is prompted to pick one by
    its 1-based index.
    """
    print("Searching for Roku devices within LAN ...")
    rokus = Roku.discover()
    if not rokus:
        print("Unable to discover Roku devices. " +
              "Try again, or manually specify the IP address with " +
              "\'roku <ipaddr>\' (e.g. roku 192.168.1.130)")
        return None

    print("Found the following Roku devices:")
    for i, r in enumerate(rokus):
        dinfo = ''
        print("[" + str(i+1) + "] " + str(r.host) + ":" + str(r.port) +
              ' (' + dinfo + ')')
    print("")

    if len(rokus) == 1:
        print("Selecting Roku 1 by default")
        return rokus[0]

    print("Multiple Rokus found. Select the index of the Roku to control:")
    while True:
        try:
            query = "Select (1 to " + str(len(rokus)) + ") > "
            sel = int(input(query)) - 1
            # Reject anything outside [0, len): previously only the upper
            # bound was checked, so entering 0 or a negative number would
            # silently index from the end of the list.
            if not 0 <= sel < len(rokus):
                raise ValueError
            break
        except ValueError:
            print("Invalid selection")
    return rokus[sel]
Search LAN for available Roku devices. Returns a Roku object.
Below is the the instruction that describes the task: ### Input: Search LAN for available Roku devices. Returns a Roku object. ### Response: def discover_roku(): """ Search LAN for available Roku devices. Returns a Roku object. """ print("Searching for Roku devices within LAN ...") rokus = Roku.discover() if not rokus: print("Unable to discover Roku devices. " + "Try again, or manually specify the IP address with " + "\'roku <ipaddr>\' (e.g. roku 192.168.1.130)") return None print("Found the following Roku devices:") for i, r in enumerate(rokus): # dinfo = ' '.join(re.split(', |: ', str(r.device_info))[1:3]) dinfo = '' print("[" + str(i+1) + "] " + str(r.host) + ":" + str(r.port) + ' (' + dinfo + ')') print("") if len(rokus) == 1: print("Selecting Roku 1 by default") return rokus[0] else: print("Multiple Rokus found. Select the index of the Roku to control:") while True: try: query = "Select (1 to " + str(len(rokus)) + ") > " sel = int(input(query)) - 1 if sel >= len(rokus): raise ValueError else: break except ValueError: print("Invalid selection") return rokus[sel]
def get_registered(option_hooks=None, event_hooks=None, command_hooks=None,
                   root_access=None, task_active=True):
    """
    Returns a list of registered plugins matching filters.

    `option_hooks`
        Boolean to include or exclude plugins using option hooks.
    `event_hooks`
        Boolean to include or exclude task event plugins.
    `command_hooks`
        Boolean to include or exclude command plugins.
    `root_access`
        Boolean to include or exclude root plugins.
    `task_active`
        Set to ``False`` to not filter by task-based plugins.

    Returns list of ``Plugin`` instances.
    """
    plugins = []
    for _, item in _registered:
        plugin, type_info = item

        # filter out any task-specific plugins
        if task_active:
            if type_info.get('disabled'):
                continue
        else:
            if plugin.options or plugin.task_only:
                continue

        # None means "don't filter on this criterion" for each hook flag.
        if option_hooks is not None and option_hooks != bool(type_info.get('option')):
            continue
        if event_hooks is not None and event_hooks != bool(type_info.get('event')):
            continue
        if command_hooks is not None and command_hooks != bool(type_info.get('command')):
            continue
        if root_access is not None and root_access != plugin.needs_root:
            continue

        plugins.append(plugin)
    return plugins
Returns a generator of registered plugins matching filters. `option_hooks` Boolean to include or exclude plugins using option hooks. `event_hooks` Boolean to include or exclude task event plugins. `command_hooks` Boolean to include or exclude command plugins. `root_access` Boolean to include or exclude root plugins. `task_active` Set to ``False`` to not filter by task-based plugins. Returns list of ``Plugin`` instances.
Below is the the instruction that describes the task: ### Input: Returns a generator of registered plugins matching filters. `option_hooks` Boolean to include or exclude plugins using option hooks. `event_hooks` Boolean to include or exclude task event plugins. `command_hooks` Boolean to include or exclude command plugins. `root_access` Boolean to include or exclude root plugins. `task_active` Set to ``False`` to not filter by task-based plugins. Returns list of ``Plugin`` instances. ### Response: def get_registered(option_hooks=None, event_hooks=None, command_hooks=None, root_access=None, task_active=True): """ Returns a generator of registered plugins matching filters. `option_hooks` Boolean to include or exclude plugins using option hooks. `event_hooks` Boolean to include or exclude task event plugins. `command_hooks` Boolean to include or exclude command plugins. `root_access` Boolean to include or exclude root plugins. `task_active` Set to ``False`` to not filter by task-based plugins. Returns list of ``Plugin`` instances. """ plugins = [] for _, item in _registered: plugin, type_info = item # filter out any task-specific plugins if task_active: if type_info.get('disabled'): continue else: if plugin.options or plugin.task_only: continue if not option_hooks is None: if option_hooks != bool(type_info.get('option')): continue if not event_hooks is None: if event_hooks != bool(type_info.get('event')): continue if not command_hooks is None: if command_hooks != bool(type_info.get('command')): continue if not root_access is None: if root_access != plugin.needs_root: continue plugins.append(plugin) return plugins
def parse_definition(self, class_):
    """Parse a definition and return its value in a `class_` object.

    `class_` is the definition type to build; it is instantiated with the
    parsed name, source span (start/end lines), decorators, docstring and
    nested child definitions. The token stream is assumed to be positioned
    on the definition keyword's following NAME token.
    """
    # Remember the line the definition starts on.
    start = self.line
    self.consume(tk.NAME)
    name = self.current.value
    self.log.debug("parsing %s '%s'", class_.__name__, name)
    self.stream.move()
    if self.current.kind == tk.OP and self.current.value == '(':
        # Skip over the (possibly nested) argument list, tracking
        # parenthesis depth so inner '(' / ')' pairs don't end the scan.
        parenthesis_level = 0
        while True:
            if self.current.kind == tk.OP:
                if self.current.value == '(':
                    parenthesis_level += 1
                elif self.current.value == ')':
                    parenthesis_level -= 1
                    if parenthesis_level == 0:
                        break
            self.stream.move()
    # Advance to (or past) the colon that introduces the body.
    if self.current.kind != tk.OP or self.current.value != ':':
        self.leapfrog(tk.OP, value=":")
    else:
        self.consume(tk.OP)
    if self.current.kind in (tk.NEWLINE, tk.COMMENT):
        # Indented (multi-line) body: collect any skip-comment, then parse
        # the docstring, pending decorators and nested definitions.
        skipped_error_codes = self.parse_skip_comment()
        self.leapfrog(tk.INDENT)
        assert self.current.kind != tk.INDENT
        docstring = self.parse_docstring()
        decorators = self._accumulated_decorators
        self.log.debug("current accumulated decorators: %s", decorators)
        self._accumulated_decorators = []
        self.log.debug("parsing nested definitions.")
        children = list(self.parse_definitions(class_))
        self.log.debug("finished parsing nested definitions for '%s'", name)
        end = self.line - 1
    else:  # one-liner definition
        skipped_error_codes = ''
        docstring = self.parse_docstring()
        decorators = []  # TODO
        children = []
        end = self.line
        self.leapfrog(tk.NEWLINE)
    definition = class_(name, self.source, start, end,
                        decorators, docstring, children, None,
                        skipped_error_codes)
    # Wire each child back to its parent definition.
    for child in definition.children:
        child.parent = definition
    self.log.debug("finished parsing %s '%s'. Next token is %r",
                   class_.__name__, name, self.current)
    return definition
Parse a definition and return its value in a `class_` object.
Below is the the instruction that describes the task: ### Input: Parse a definition and return its value in a `class_` object. ### Response: def parse_definition(self, class_): """Parse a definition and return its value in a `class_` object.""" start = self.line self.consume(tk.NAME) name = self.current.value self.log.debug("parsing %s '%s'", class_.__name__, name) self.stream.move() if self.current.kind == tk.OP and self.current.value == '(': parenthesis_level = 0 while True: if self.current.kind == tk.OP: if self.current.value == '(': parenthesis_level += 1 elif self.current.value == ')': parenthesis_level -= 1 if parenthesis_level == 0: break self.stream.move() if self.current.kind != tk.OP or self.current.value != ':': self.leapfrog(tk.OP, value=":") else: self.consume(tk.OP) if self.current.kind in (tk.NEWLINE, tk.COMMENT): skipped_error_codes = self.parse_skip_comment() self.leapfrog(tk.INDENT) assert self.current.kind != tk.INDENT docstring = self.parse_docstring() decorators = self._accumulated_decorators self.log.debug("current accumulated decorators: %s", decorators) self._accumulated_decorators = [] self.log.debug("parsing nested definitions.") children = list(self.parse_definitions(class_)) self.log.debug("finished parsing nested definitions for '%s'", name) end = self.line - 1 else: # one-liner definition skipped_error_codes = '' docstring = self.parse_docstring() decorators = [] # TODO children = [] end = self.line self.leapfrog(tk.NEWLINE) definition = class_(name, self.source, start, end, decorators, docstring, children, None, skipped_error_codes) for child in definition.children: child.parent = definition self.log.debug("finished parsing %s '%s'. Next token is %r", class_.__name__, name, self.current) return definition
def _check_align(self): """Check if alignment has been specified, set default one if not """ if not hasattr(self, "_align"): self._align = ["l"]*self._row_size if not hasattr(self, "_valign"): self._valign = ["t"]*self._row_size
Check if alignment has been specified, set default one if not
Below is the the instruction that describes the task: ### Input: Check if alignment has been specified, set default one if not ### Response: def _check_align(self): """Check if alignment has been specified, set default one if not """ if not hasattr(self, "_align"): self._align = ["l"]*self._row_size if not hasattr(self, "_valign"): self._valign = ["t"]*self._row_size
def noise_normalization_from_noise_map_and_mask(noise_map, mask):
    """Sum the noise-map normalization term over the unmasked pixels of a
    masked 1D noise-map:

    [Noise_Term] = sum(log(2*pi*[Noise]**2.0))

    Parameters
    ----------
    noise_map : np.ndarray
        The masked noise-map of the observed data.
    mask : np.ndarray
        The mask applied to the noise-map, where *False* entries are
        included in the calculation.
    """
    # Select only the pixels where the mask is False (== 0).
    unmasked_noise = noise_map[np.asarray(mask) == 0]
    return np.sum(np.log(2.0 * np.pi * unmasked_noise ** 2.0))
Compute the noise-map normalization terms of a list of masked 1D noise-maps, summing the noise_map value in every pixel as: [Noise_Term] = sum(log(2*pi*[Noise]**2.0)) Parameters ---------- noise_map : np.ndarray The masked noise-map of the observed data. mask : np.ndarray The mask applied to the noise-map, where *False* entries are included in the calculation.
Below is the the instruction that describes the task: ### Input: Compute the noise-map normalization terms of a list of masked 1D noise-maps, summing the noise_map vale in every pixel as: [Noise_Term] = sum(log(2*pi*[Noise]**2.0)) Parameters ---------- noise_map : np.ndarray The masked noise-map of the observed data. mask : np.ndarray The mask applied to the noise-map, where *False* entries are included in the calculation. ### Response: def noise_normalization_from_noise_map_and_mask(noise_map, mask): """Compute the noise-map normalization terms of a list of masked 1D noise-maps, summing the noise_map vale in every pixel as: [Noise_Term] = sum(log(2*pi*[Noise]**2.0)) Parameters ---------- noise_map : np.ndarray The masked noise-map of the observed data. mask : np.ndarray The mask applied to the noise-map, where *False* entries are included in the calculation. """ return np.sum(np.log(2 * np.pi * noise_map[np.asarray(mask) == 0] ** 2.0))
def show_guidance_msg(self, unique_id, input_lines, hash_key, guidance_flag=False):
    """
    Based on the student's answer (input_lines), we grab each associated
    message if its corresponding misunderstanding's count is above the
    threshold.

    Returns a 4-tuple ``(countData, tg_id, printed_out_msgs, rationale)`` on
    the normal path, or the module-level ``EMPTY_MISUCOUNT_TGID_PRNTEDMSG``
    sentinel whenever guidance data is unavailable. ``hash_key`` and
    ``guidance_flag`` are accepted but not used by this method body.
    """
    if self.load_error:
        print(GUIDANCE_DEFAULT_MSG)
        return EMPTY_MISUCOUNT_TGID_PRNTEDMSG
    response = repr(input_lines)
    self.set_tg()
    log.info("Guidance TG is %d", self.tg_id)
    if self.tg_id == TG_ERROR_VALUE:
        # If self.tg_id == -1, there was an error when trying to access the server
        log.warning("Error when trying to access server. TG == -1")
        print(GUIDANCE_DEFAULT_MSG)
        return EMPTY_MISUCOUNT_TGID_PRNTEDMSG
    # Map the treatment group to the name of the message-selection lambda.
    lambda_string_key = self.guidance_json[
        'dictTg2Func'].get(str(self.tg_id))
    if not lambda_string_key:
        log.info("Cannot find the correct lambda in the dictionary.")
        print(GUIDANCE_DEFAULT_MSG)
        return EMPTY_MISUCOUNT_TGID_PRNTEDMSG
    log.info("Lambda Group: %s", lambda_string_key)
    lambda_info_misu = lambda_string_key_to_func.get(lambda_string_key)
    if not lambda_info_misu:
        log.info("Cannot find info misU given the lambda string key.")
        print(GUIDANCE_DEFAULT_MSG)
        return EMPTY_MISUCOUNT_TGID_PRNTEDMSG
    shorten_unique_id = assess_id_util.canonicalize(unique_id)
    # Try to get the info dictionary for this question. Maps wrong answer
    # to dictionary
    assess_dict_info = self.guidance_json[
        'dictAssessId2Info'].get(shorten_unique_id)
    if not assess_dict_info:
        log.info("shorten_unique_id %s is not in dictAssessId2Info",
                 repr(shorten_unique_id))
        print(GUIDANCE_DEFAULT_MSG)
        return EMPTY_MISUCOUNT_TGID_PRNTEDMSG
    wa_details = assess_dict_info['dictWA2DictInfo'].get(response)
    if not wa_details:
        log.info("Cannot find the wrong answer in the WA2Dict for this assesment.")
        lst_mis_u = []
    else:
        lst_mis_u = wa_details.get('lstMisU', [])
    # No list of misunderstandings for this wrong answer, default message
    if not lst_mis_u:
        log.info("Cannot find the list of misunderstandings.")
    wa_count_threshold = self.guidance_json['wrongAnsThresh']
    wa_lst_assess_num = assess_dict_info['dictWA2LstAssessNum_WA']
    msg_id_set = set()
    # Treatment groups 3 and 4 do not propagate counts from related answers.
    should_skip_propagation = self.tg_id == 3 or self.tg_id == 4
    answerDict, countData = self.get_misUdata()
    prev_responses = answerDict.get(shorten_unique_id, [])
    # Confirm that this WA has not been given before
    seen_before = response in prev_responses
    if seen_before:
        log.info("Answer has been seen before: {}".format(response))
    else:
        answerDict[shorten_unique_id] = prev_responses + [response]
        self.save_misUdata(answerDict, countData)
    # Lookup the list of assessNum and WA related to this wrong answer
    # in the question's dictWA2LstAssessNum_WA
    lst_assess_num = wa_lst_assess_num.get(response, [])
    if not lst_assess_num:
        log.info("Cannot get the lst of assess nums given this reponse.")
    log.debug("Related LST_ASSESS_NUM: %s", lst_assess_num)
    # Check if the current wrong answer is in the question's dictWA2DictInfo
    if wa_details:
        log.info("The current wrong answer (%s) is in dictWA2DictInfo", response)
        # Check in answerDict to see if the student has ever given
        # any of these wrong answers (sourced from dictWA2LstAssessNum_WA)
        num_prev_responses = 1
        for other_num, other_resp in lst_assess_num:
            # Get assess_id
            other_id = self.get_aid_from_anum(other_num)
            log.info("Checking if %s is in answerDict[%s]",
                     other_resp, repr(other_id))
            if other_resp in answerDict.get(other_id, []):
                log.debug("%s is in answerDict[%s]", other_resp, repr(other_id))
                num_prev_responses += 1
        log.info("Has given %d previous responses in lst_assess_num",
                 num_prev_responses)
        if not should_skip_propagation:
            # Increment countDict by the number of wrong answers seen
            # for each tag assoicated with this wrong answerDict
            increment = num_prev_responses
            for misu in lst_mis_u:
                log.info("Updating the count of misu: %s by %s", misu, increment)
                countData[misu] = countData.get(misu, 0) + increment
        # NOTE(review): this threshold check runs even when propagation is
        # skipped (tg 3/4) — confirm that is the intended experiment design.
        for misu in lst_mis_u:
            log.debug("Misu: %s has count %s", misu, countData.get(misu, 0))
            if countData.get(misu, 0) >= wa_count_threshold:
                msg_info = lambda_info_misu(wa_details, misu)
                if msg_info:
                    msg_id_set.add(msg_info)
    elif not should_skip_propagation:
        # Lookup the lst_mis_u of each wrong answer in the list of wrong
        # answers related to the current wrong answer (lst_assess_num),
        # using dictAssessNum2AssessId
        assess_num_to_aid = self.guidance_json['dictAssessNum2AssessId']
        log.debug("Looking up the lst_misu_u of all related WA")
        # misu -> list of wrong answers for that
        related_misu_tags_dict = {}
        for related_num, related_resp in lst_assess_num:
            related_aid = assess_num_to_aid.get(related_num)
            log.info("Getting related resp %s for AID %s",
                     repr(related_aid), related_resp)
            resp_seen_before = related_resp in answerDict.get(related_aid, [])
            if not resp_seen_before:
                continue
            # Get the lst_misu for this asssigmment
            related_info = self.guidance_json['dictAssessId2Info'].get(related_aid)
            if not related_info:
                log.info("Could not find related id: %s in info dict", related_aid)
                continue
            related_wa_info = related_info['dictWA2DictInfo'].get(related_resp)
            # NOTE(review): the guard below re-checks `related_info`; it looks
            # like it should check `related_wa_info` instead — if the response
            # is missing, `related_wa_info.get(...)` below would raise
            # AttributeError. Confirm and fix separately.
            if not related_info:
                log.info("Could not find response %s in %s info dict",
                         related_resp, related_aid)
                continue
            related_misu_list = related_wa_info.get('lstMisU', [])
            log.info("The related MISU list is %s", related_misu_list)
            for misu in related_misu_list:
                existing_resps = related_misu_tags_dict.get(misu, [])
                # Add dictWA2DictInfo to list of responses for this misunderstanding.
                related_misu_tags_dict[misu] = existing_resps + [related_wa_info]
                # Increment countDict for each tag in the set of tags for each related resp
                countData[misu] = countData.get(misu, 0) + 1
        for misu, lst_wa_info in related_misu_tags_dict.items():
            if countData.get(misu, 0) >= wa_count_threshold:
                for wa_info in lst_wa_info:
                    msg_id_set.add(lambda_info_misu(wa_info, misu))
            else:
                log.info("misu %s seen %s/%s times",
                         misu, countData.get(misu, 0), wa_count_threshold)
    self.save_misUdata(answerDict, countData)
    wa_lst_explain_responses = assess_dict_info.get('lstWrongAnsWatch', [])
    if response in wa_lst_explain_responses:
        rationale = self.prompt_with_prob(orig_response=input_lines, prob=1.0)
    else:
        rationale = self.prompt_with_prob(orig_response=input_lines)
    if len(msg_id_set) == 0:
        log.info("No messages to display.")
        print(GUIDANCE_DEFAULT_MSG)
        return (countData, self.tg_id, [], rationale)
    print("\n-- Helpful Hint --")
    printed_out_msgs = []
    for message_id in msg_id_set:
        msg = self.guidance_json['dictId2Msg'].get(str(message_id))
        if msg:
            printed_out_msgs.append(msg)
            print(msg)
            print("-"*18)
        else:
            log.info("{} did not have a message".format(message_id))
    print()
    print(GUIDANCE_DEFAULT_MSG)
    return (countData, self.tg_id, printed_out_msgs, rationale)
Based on the student's answer (input_lines), we grab each associated message if its corresponding misunderstanding's count is above the threshold
Below is the the instruction that describes the task: ### Input: Based on the student's answer (input_lines), we grab each associated message if its corresponding misunderstanding's count is above the threshold ### Response: def show_guidance_msg(self, unique_id, input_lines, hash_key, guidance_flag=False): """ Based on the student's answer (input_lines), we grab each associated message if its corresponding misunderstanding's count is above the threshold """ if self.load_error: print(GUIDANCE_DEFAULT_MSG) return EMPTY_MISUCOUNT_TGID_PRNTEDMSG response = repr(input_lines) self.set_tg() log.info("Guidance TG is %d", self.tg_id) if self.tg_id == TG_ERROR_VALUE: # If self.tg_id == -1, there was an error when trying to access the server log.warning("Error when trying to access server. TG == -1") print(GUIDANCE_DEFAULT_MSG) return EMPTY_MISUCOUNT_TGID_PRNTEDMSG lambda_string_key = self.guidance_json[ 'dictTg2Func'].get(str(self.tg_id)) if not lambda_string_key: log.info("Cannot find the correct lambda in the dictionary.") print(GUIDANCE_DEFAULT_MSG) return EMPTY_MISUCOUNT_TGID_PRNTEDMSG log.info("Lambda Group: %s", lambda_string_key) lambda_info_misu = lambda_string_key_to_func.get(lambda_string_key) if not lambda_info_misu: log.info("Cannot find info misU given the lambda string key.") print(GUIDANCE_DEFAULT_MSG) return EMPTY_MISUCOUNT_TGID_PRNTEDMSG shorten_unique_id = assess_id_util.canonicalize(unique_id) # Try to get the info dictionary for this question. 
Maps wrong answer # to dictionary assess_dict_info = self.guidance_json[ 'dictAssessId2Info'].get(shorten_unique_id) if not assess_dict_info: log.info("shorten_unique_id %s is not in dictAssessId2Info", repr(shorten_unique_id)) print(GUIDANCE_DEFAULT_MSG) return EMPTY_MISUCOUNT_TGID_PRNTEDMSG wa_details = assess_dict_info['dictWA2DictInfo'].get(response) if not wa_details: log.info("Cannot find the wrong answer in the WA2Dict for this assesment.") lst_mis_u = [] else: lst_mis_u = wa_details.get('lstMisU', []) # No list of misunderstandings for this wrong answer, default message if not lst_mis_u: log.info("Cannot find the list of misunderstandings.") wa_count_threshold = self.guidance_json['wrongAnsThresh'] wa_lst_assess_num = assess_dict_info['dictWA2LstAssessNum_WA'] msg_id_set = set() should_skip_propagation = self.tg_id == 3 or self.tg_id == 4 answerDict, countData = self.get_misUdata() prev_responses = answerDict.get(shorten_unique_id, []) # Confirm that this WA has not been given before seen_before = response in prev_responses if seen_before: log.info("Answer has been seen before: {}".format(response)) else: answerDict[shorten_unique_id] = prev_responses + [response] self.save_misUdata(answerDict, countData) # Lookup the list of assessNum and WA related to this wrong answer # in the question's dictWA2LstAssessNum_WA lst_assess_num = wa_lst_assess_num.get(response, []) if not lst_assess_num: log.info("Cannot get the lst of assess nums given this reponse.") log.debug("Related LST_ASSESS_NUM: %s", lst_assess_num) # Check if the current wrong answer is in the question's dictWA2DictInfo if wa_details: log.info("The current wrong answer (%s) is in dictWA2DictInfo", response) # Check in answerDict to see if the student has ever given # any of these wrong answers (sourced from dictWA2LstAssessNum_WA) num_prev_responses = 1 for other_num, other_resp in lst_assess_num: # Get assess_id other_id = self.get_aid_from_anum(other_num) log.info("Checking if %s is in 
answerDict[%s]", other_resp, repr(other_id)) if other_resp in answerDict.get(other_id, []): log.debug("%s is in answerDict[%s]", other_resp, repr(other_id)) num_prev_responses += 1 log.info("Has given %d previous responses in lst_assess_num", num_prev_responses) if not should_skip_propagation: # Increment countDict by the number of wrong answers seen # for each tag assoicated with this wrong answerDict increment = num_prev_responses for misu in lst_mis_u: log.info("Updating the count of misu: %s by %s", misu, increment) countData[misu] = countData.get(misu, 0) + increment for misu in lst_mis_u: log.debug("Misu: %s has count %s", misu, countData.get(misu, 0)) if countData.get(misu, 0) >= wa_count_threshold: msg_info = lambda_info_misu(wa_details, misu) if msg_info: msg_id_set.add(msg_info) elif not should_skip_propagation: # Lookup the lst_mis_u of each wrong answer in the list of wrong # answers related to the current wrong answer (lst_assess_num), # using dictAssessNum2AssessId assess_num_to_aid = self.guidance_json['dictAssessNum2AssessId'] log.debug("Looking up the lst_misu_u of all related WA") # misu -> list of wrong answers for that related_misu_tags_dict = {} for related_num, related_resp in lst_assess_num: related_aid = assess_num_to_aid.get(related_num) log.info("Getting related resp %s for AID %s", repr(related_aid), related_resp) resp_seen_before = related_resp in answerDict.get(related_aid, []) if not resp_seen_before: continue # Get the lst_misu for this asssigmment related_info = self.guidance_json['dictAssessId2Info'].get(related_aid) if not related_info: log.info("Could not find related id: %s in info dict", related_aid) continue related_wa_info = related_info['dictWA2DictInfo'].get(related_resp) if not related_info: log.info("Could not find response %s in %s info dict", related_resp, related_aid) continue related_misu_list = related_wa_info.get('lstMisU', []) log.info("The related MISU list is %s", related_misu_list) for misu in related_misu_list: 
existing_resps = related_misu_tags_dict.get(misu, []) # Add dictWA2DictInfo to list of responses for this misunderstanding. related_misu_tags_dict[misu] = existing_resps + [related_wa_info] # Increment countDict for each tag in the set of tags for each related resp countData[misu] = countData.get(misu, 0) + 1 for misu, lst_wa_info in related_misu_tags_dict.items(): if countData.get(misu, 0) >= wa_count_threshold: for wa_info in lst_wa_info: msg_id_set.add(lambda_info_misu(wa_info, misu)) else: log.info("misu %s seen %s/%s times", misu, countData.get(misu, 0), wa_count_threshold) self.save_misUdata(answerDict, countData) wa_lst_explain_responses = assess_dict_info.get('lstWrongAnsWatch', []) if response in wa_lst_explain_responses: rationale = self.prompt_with_prob(orig_response=input_lines, prob=1.0) else: rationale = self.prompt_with_prob(orig_response=input_lines) if len(msg_id_set) == 0: log.info("No messages to display.") print(GUIDANCE_DEFAULT_MSG) return (countData, self.tg_id, [], rationale) print("\n-- Helpful Hint --") printed_out_msgs = [] for message_id in msg_id_set: msg = self.guidance_json['dictId2Msg'].get(str(message_id)) if msg: printed_out_msgs.append(msg) print(msg) print("-"*18) else: log.info("{} did not have a message".format(message_id)) print() print(GUIDANCE_DEFAULT_MSG) return (countData, self.tg_id, printed_out_msgs, rationale)
def eslint_supportdir(self, task_workdir):
    """
    Returns the path where the ESLint is bootstrapped.

    :param string task_workdir: The task's working directory
    :returns: The path where ESLint is bootstrapped and whether or not it is
              configured
    :rtype: (string, bool)
    """
    support_path = os.path.join(task_workdir, 'eslint')
    # TODO(nsaechao): Should only have to check if the "eslint" dir exists in the task_workdir
    # assuming fingerprinting works as intended.

    # A user-provided setup dir counts as "configured" only when every
    # required file is present in it.
    setup_dir = self.eslint_setupdir
    if setup_dir:
        configured = all(
            os.path.exists(os.path.join(setup_dir, name))
            for name in self._eslint_required_files)
    else:
        configured = False

    if not configured:
        # No usable user configuration: reset the bootstrap dir so Pants can
        # install a pre-defined eslint version later on.
        safe_mkdir(support_path, clean=True)
    else:
        # User configuration exists: re-install only if the bootstrapped
        # copies differ from (or are missing relative to) the setup dir.
        try:
            installed = all(
                filecmp.cmp(os.path.join(setup_dir, name),
                            os.path.join(support_path, name))
                for name in self._eslint_required_files)
        except OSError:
            installed = False

        if not installed:
            self._configure_eslinter(support_path)
    return support_path, configured
Returns the path where the ESLint is bootstrapped. :param string task_workdir: The task's working directory :returns: The path where ESLint is bootstrapped and whether or not it is configured :rtype: (string, bool)
Below is the the instruction that describes the task: ### Input: Returns the path where the ESLint is bootstrapped. :param string task_workdir: The task's working directory :returns: The path where ESLint is bootstrapped and whether or not it is configured :rtype: (string, bool) ### Response: def eslint_supportdir(self, task_workdir): """ Returns the path where the ESLint is bootstrapped. :param string task_workdir: The task's working directory :returns: The path where ESLint is bootstrapped and whether or not it is configured :rtype: (string, bool) """ bootstrapped_support_path = os.path.join(task_workdir, 'eslint') # TODO(nsaechao): Should only have to check if the "eslint" dir exists in the task_workdir # assuming fingerprinting works as intended. # If the eslint_setupdir is not provided or missing required files, then # clean up the directory so that Pants can install a pre-defined eslint version later on. # Otherwise, if there is no configurations changes, rely on the cache. # If there is a config change detected, use the new configuration. if self.eslint_setupdir: configured = all(os.path.exists(os.path.join(self.eslint_setupdir, f)) for f in self._eslint_required_files) else: configured = False if not configured: safe_mkdir(bootstrapped_support_path, clean=True) else: try: installed = all(filecmp.cmp( os.path.join(self.eslint_setupdir, f), os.path.join(bootstrapped_support_path, f)) for f in self._eslint_required_files) except OSError: installed = False if not installed: self._configure_eslinter(bootstrapped_support_path) return bootstrapped_support_path, configured
def get_curie_map(url):
    """Fetch the CURIE prefix map from a SciGraph cypher/curies endpoint.

    :param url: full URL of the SciGraph curies endpoint
    :returns: dict mapping CURIE prefixes to IRI prefixes, or an empty dict
              when the endpoint does not answer with HTTP 200
    """
    response = requests.get(url)
    # Best-effort: any non-200 answer yields an empty map instead of raising,
    # matching the original behavior. (Removed the redundant triple
    # initialization/reassignment of the result dict.)
    if response.status_code == 200:
        return response.json()
    return {}
Get CURIE prefix map from SciGraph cypher/curies endpoint
Below is the the instruction that describes the task: ### Input: Get CURIE prefix map from SciGraph cypher/curies endpoint ### Response: def get_curie_map(url): """ Get CURIE prefix map from SciGraph cypher/curies endpoint """ curie_map = {} response = requests.get(url) if response.status_code == 200: curie_map = response.json() else: curie_map = {} return curie_map
def download(url, save_to_file=True, save_dir=".", filename=None, block_size=64000,
             overwrite=False, quiet=False):
    """ Download a given URL to either file or memory

    :param url: Full url (with protocol) of path to download
    :param save_to_file: boolean if it should be saved to file or not
    :param save_dir: location of saved file, default is current working dir
    :param filename: filename to save as
    :param block_size: download chunk size
    :param overwrite: overwrite file if it already exists
    :param quiet: boolean to turn off logging for function
    :return: save location (or content if not saved to file), or False when
             the target file already exists and overwrite is not set
    """
    if save_to_file:
        if not filename:
            filename = safe_filename(url.split('/')[-1])
            if not filename:
                filename = "downloaded_at_{}.file".format(time.time())
        save_location = os.path.abspath(os.path.join(save_dir, filename))
        if os.path.exists(save_location) and not overwrite:
            logger.error("File {0} already exists".format(save_location))
            return False
    else:
        save_location = "memory"

    try:
        request = urlopen(url)
    except ValueError as err:
        if not quiet and "unknown url type" in str(err):
            logger.error("Please make sure URL is formatted correctly and"
                         " starts with http:// or other protocol")
        raise err
    except Exception as err:
        if not quiet:
            logger.error("Could not download {0} - {1}".format(url, err))
        raise err

    # BUG FIX: the response object returned by urlopen was never closed,
    # leaking the underlying socket. Close it on every exit path.
    try:
        try:
            kb_size = int(request.headers["Content-Length"]) / 1024
        except Exception as err:
            if not quiet:
                logger.debug("Could not determine file size - {0}".format(err))
            file_size = "(unknown size)"
        else:
            file_size = "({0:.1f} {1})".format(*(kb_size, "KB") if kb_size < 9999
                                               else (kb_size / 1024, "MB"))

        if not quiet:
            logger.info("Downloading {0} {1} to {2}".format(url, file_size,
                                                            save_location))

        if save_to_file:
            with open(save_location, "wb") as f:
                while True:
                    buffer = request.read(block_size)
                    if not buffer:
                        break
                    f.write(buffer)
            return save_location
        else:
            return request.read()
    finally:
        request.close()
Download a given URL to either file or memory :param url: Full url (with protocol) of path to download :param save_to_file: boolean if it should be saved to file or not :param save_dir: location of saved file, default is current working dir :param filename: filename to save as :param block_size: download chunk size :param overwrite: overwrite file if it already exists :param quiet: boolean to turn off logging for function :return: save location (or content if not saved to file)
Below is the the instruction that describes the task: ### Input: Download a given URL to either file or memory :param url: Full url (with protocol) of path to download :param save_to_file: boolean if it should be saved to file or not :param save_dir: location of saved file, default is current working dir :param filename: filename to save as :param block_size: download chunk size :param overwrite: overwrite file if it already exists :param quiet: boolean to turn off logging for function :return: save location (or content if not saved to file) ### Response: def download(url, save_to_file=True, save_dir=".", filename=None, block_size=64000, overwrite=False, quiet=False): """ Download a given URL to either file or memory :param url: Full url (with protocol) of path to download :param save_to_file: boolean if it should be saved to file or not :param save_dir: location of saved file, default is current working dir :param filename: filename to save as :param block_size: download chunk size :param overwrite: overwrite file if it already exists :param quiet: boolean to turn off logging for function :return: save location (or content if not saved to file) """ if save_to_file: if not filename: filename = safe_filename(url.split('/')[-1]) if not filename: filename = "downloaded_at_{}.file".format(time.time()) save_location = os.path.abspath(os.path.join(save_dir, filename)) if os.path.exists(save_location) and not overwrite: logger.error("File {0} already exists".format(save_location)) return False else: save_location = "memory" try: request = urlopen(url) except ValueError as err: if not quiet and "unknown url type" in str(err): logger.error("Please make sure URL is formatted correctly and" " starts with http:// or other protocol") raise err except Exception as err: if not quiet: logger.error("Could not download {0} - {1}".format(url, err)) raise err try: kb_size = int(request.headers["Content-Length"]) / 1024 except Exception as err: if not quiet: logger.debug("Could not 
determine file size - {0}".format(err)) file_size = "(unknown size)" else: file_size = "({0:.1f} {1})".format(*(kb_size, "KB") if kb_size < 9999 else (kb_size / 1024, "MB")) if not quiet: logger.info("Downloading {0} {1} to {2}".format(url, file_size, save_location)) if save_to_file: with open(save_location, "wb") as f: while True: buffer = request.read(block_size) if not buffer: break f.write(buffer) return save_location else: return request.read()
def __download(self, url, loaded_schemata, options):
    """Fetch the schema document at *url* and build a schema instance.

    On a transport failure, log the error with the full traceback and
    re-raise as a generic Exception carrying a descriptive message.
    """
    try:
        document = DocumentReader(options).open(url)
        root = document.root()
        # Record where the document came from on its root node.
        root.set("url", url)
        return self.schema.instance(root, url, loaded_schemata, options)
    except TransportError:
        msg = "import schema (%s) at (%s), failed" % (self.ns[1], url)
        log.error("%s, %s", self.id, msg, exc_info=True)
        raise Exception(msg)
Download the schema.
Below is the the instruction that describes the task: ### Input: Download the schema. ### Response: def __download(self, url, loaded_schemata, options): """Download the schema.""" try: reader = DocumentReader(options) d = reader.open(url) root = d.root() root.set("url", url) return self.schema.instance(root, url, loaded_schemata, options) except TransportError: msg = "import schema (%s) at (%s), failed" % (self.ns[1], url) log.error("%s, %s", self.id, msg, exc_info=True) raise Exception(msg)
def _target_stmt(self, stmt: object) -> tuple: """ takes the target key from kwargs and processes it to aid in the generation of a model statement :param stmt: str, list, or dict that contains the model information. :return: tuple of strings one for the class statement one for the model statements """ # make sure target is a single variable extra split to account for level= option code = '' cls = '' if isinstance(stmt, str): if len(stmt.split('/')[0].split()) == 1: code += "%s" % (stmt) else: raise SyntaxError( "ERROR in code submission. TARGET can only have one variable and you submitted: %s" % stmt) elif isinstance(stmt, list): if len(stmt) == 1: code += "%s" % str(stmt[0]) else: raise SyntaxError("The target list must have exactly one member") elif isinstance(stmt, dict): try: # check there there is only one target: length = 0 try: length += len([stmt['nominal'], stmt['interval']]) except KeyError: try: length += len([stmt['nominal']]) except KeyError: try: length += len([stmt['interval']]) except KeyError: raise if length == 1: if 'interval' in stmt.keys(): if isinstance(stmt['interval'], str): code += "%s" % stmt['interval'] if isinstance(stmt['interval'], list): code += "%s" % " ".join(stmt['interval']) if 'nominal' in stmt.keys(): if isinstance(stmt['nominal'], str): code += "%s" % stmt['nominal'] cls += "%s" % stmt['nominal'] if isinstance(stmt['nominal'], list): code += "%s" % " ".join(stmt['nominal']) cls += "%s" % " ".join(stmt['nominal']) else: raise SyntaxError except SyntaxError: print("SyntaxError: TARGET can only have one variable") except KeyError: print("KeyError: Proper keys not found for TARGET dictionary: %s" % stmt.keys()) else: raise SyntaxError("TARGET is in an unknown format: %s" % str(stmt)) return (code, cls)
takes the target key from kwargs and processes it to aid in the generation of a model statement :param stmt: str, list, or dict that contains the model information. :return: tuple of strings one for the class statement one for the model statements
Below is the the instruction that describes the task: ### Input: takes the target key from kwargs and processes it to aid in the generation of a model statement :param stmt: str, list, or dict that contains the model information. :return: tuple of strings one for the class statement one for the model statements ### Response: def _target_stmt(self, stmt: object) -> tuple: """ takes the target key from kwargs and processes it to aid in the generation of a model statement :param stmt: str, list, or dict that contains the model information. :return: tuple of strings one for the class statement one for the model statements """ # make sure target is a single variable extra split to account for level= option code = '' cls = '' if isinstance(stmt, str): if len(stmt.split('/')[0].split()) == 1: code += "%s" % (stmt) else: raise SyntaxError( "ERROR in code submission. TARGET can only have one variable and you submitted: %s" % stmt) elif isinstance(stmt, list): if len(stmt) == 1: code += "%s" % str(stmt[0]) else: raise SyntaxError("The target list must have exactly one member") elif isinstance(stmt, dict): try: # check there there is only one target: length = 0 try: length += len([stmt['nominal'], stmt['interval']]) except KeyError: try: length += len([stmt['nominal']]) except KeyError: try: length += len([stmt['interval']]) except KeyError: raise if length == 1: if 'interval' in stmt.keys(): if isinstance(stmt['interval'], str): code += "%s" % stmt['interval'] if isinstance(stmt['interval'], list): code += "%s" % " ".join(stmt['interval']) if 'nominal' in stmt.keys(): if isinstance(stmt['nominal'], str): code += "%s" % stmt['nominal'] cls += "%s" % stmt['nominal'] if isinstance(stmt['nominal'], list): code += "%s" % " ".join(stmt['nominal']) cls += "%s" % " ".join(stmt['nominal']) else: raise SyntaxError except SyntaxError: print("SyntaxError: TARGET can only have one variable") except KeyError: print("KeyError: Proper keys not found for TARGET dictionary: %s" % stmt.keys()) 
else: raise SyntaxError("TARGET is in an unknown format: %s" % str(stmt)) return (code, cls)
def import_data(
        self, resource_group_name, name, files, format=None, custom_headers=None, raw=False, polling=True, **operation_config):
    """Import data into Redis cache.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param name: The name of the Redis cache.
    :type name: str
    :param files: files to import.
    :type files: list[str]
    :param format: File format.
    :type format: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns None or
     ClientRawResponse<None> if raw==True
    :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Kick off the long-running operation and keep the raw initial response
    # for the poller to track.
    initial_response = self._import_data_initial(
        resource_group_name=resource_group_name,
        name=name,
        files=files,
        format=format,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def extract_output(response):
        # The operation has no deserialized body; only wrap the raw
        # response when the caller asked for it.
        if raw:
            return ClientRawResponse(None, response)

    delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)

    # Resolve the polling strategy: default ARM polling, disabled, or a
    # caller-provided polling object.
    if polling is True:
        strategy = ARMPolling(delay, **operation_config)
    elif polling is False:
        strategy = NoPolling()
    else:
        strategy = polling

    return LROPoller(self._client, initial_response, extract_output, strategy)
Import data into Redis cache. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param name: The name of the Redis cache. :type name: str :param files: files to import. :type files: list[str] :param format: File format. :type format: str :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns None or ClientRawResponse<None> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
Below is the the instruction that describes the task: ### Input: Import data into Redis cache. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param name: The name of the Redis cache. :type name: str :param files: files to import. :type files: list[str] :param format: File format. :type format: str :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns None or ClientRawResponse<None> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` ### Response: def import_data( self, resource_group_name, name, files, format=None, custom_headers=None, raw=False, polling=True, **operation_config): """Import data into Redis cache. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param name: The name of the Redis cache. :type name: str :param files: files to import. :type files: list[str] :param format: File format. 
:type format: str :param dict custom_headers: headers that will be added to the request :param bool raw: The poller return type is ClientRawResponse, the direct response alongside the deserialized response :param polling: True for ARMPolling, False for no polling, or a polling object for personal polling strategy :return: An instance of LROPoller that returns None or ClientRawResponse<None> if raw==True :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ raw_result = self._import_data_initial( resource_group_name=resource_group_name, name=name, files=files, format=format, custom_headers=custom_headers, raw=True, **operation_config ) def get_long_running_output(response): if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response lro_delay = operation_config.get( 'long_running_operation_timeout', self.config.long_running_operation_timeout) if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) elif polling is False: polling_method = NoPolling() else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
def get_subscription_labels(self, userPk):
    """Return a list with all the labels the user is subscribed to.

    :param userPk: primary key of the user whose subscriptions to fetch
    :returns: the decoded JSON list of labels, or an empty list when the
              request yields a falsy response
    """
    response = self._request('subscriptions/' + str(userPk))
    # A falsy response (failed request) degrades to an empty list rather
    # than raising. (Removed the redundant intermediate variable.)
    return response.json() if response else []
Returns a list with all the labels the user is subscribed to
Below is the the instruction that describes the task: ### Input: Returns a list with all the labels the user is subscribed to ### Response: def get_subscription_labels(self, userPk): """Returns a list with all the labels the user is subscribed to""" r = self._request('subscriptions/' + str(userPk)) if r: s = r.json() return s return []
def parse_uri(self, uri=None):
    '''
    parses and cleans up possible uri inputs, return instance of rdflib.term.URIRef

    Args:
        uri (rdflib.term.URIRef,str): input URI

    Returns:
        rdflib.term.URIRef

    Raises:
        TypeError: if uri is neither falsy, a str, nor a URIRef
    '''
    # no uri provided, assume repository root
    if not uri:
        return rdflib.term.URIRef(self.root)
    # already an rdflib URIRef: pass through untouched
    # (isinstance replaces the fragile `type(x) ==` comparisons and also
    # accepts URIRef subclasses)
    if isinstance(uri, rdflib.term.URIRef):
        return uri
    if isinstance(uri, str):
        # assume "short" uri, expand with repo root
        if not uri.startswith('http'):
            return rdflib.term.URIRef("%s%s" % (self.root, uri))
        # else, assume full uri
        return rdflib.term.URIRef(uri)
    # unknown input
    raise TypeError('invalid URI input')
parses and cleans up possible uri inputs, return instance of rdflib.term.URIRef Args: uri (rdflib.term.URIRef,str): input URI Returns: rdflib.term.URIRef
Below is the the instruction that describes the task: ### Input: parses and cleans up possible uri inputs, return instance of rdflib.term.URIRef Args: uri (rdflib.term.URIRef,str): input URI Returns: rdflib.term.URIRef ### Response: def parse_uri(self, uri=None): ''' parses and cleans up possible uri inputs, return instance of rdflib.term.URIRef Args: uri (rdflib.term.URIRef,str): input URI Returns: rdflib.term.URIRef ''' # no uri provided, assume root if not uri: return rdflib.term.URIRef(self.root) # string uri provided elif type(uri) == str: # assume "short" uri, expand with repo root if type(uri) == str and not uri.startswith('http'): return rdflib.term.URIRef("%s%s" % (self.root, uri)) # else, assume full uri else: return rdflib.term.URIRef(uri) # already rdflib.term.URIRef elif type(uri) == rdflib.term.URIRef: return uri # unknown input else: raise TypeError('invalid URI input')
def write_summaries(self, tagged_data, experiment_name, run_name):
    """Transactionally writes the given tagged summary data to the DB.

    All tags and tensor values for one (experiment, run) pair are written
    inside a single explicit SQLite transaction, so either every row lands
    or none does.

    Args:
      tagged_data: map from tag to TagData instances.
      experiment_name: name of experiment.
      run_name: name of run.
    """
    logger.debug('Writing summaries for %s tags', len(tagged_data))
    # Connection used as context manager for auto commit/rollback on exit.
    # We still need an explicit BEGIN, because it doesn't do one on enter,
    # it waits until the first DML command - which is totally broken.
    # See: https://stackoverflow.com/a/44448465/1179226
    with self._db:
        self._db.execute('BEGIN TRANSACTION')
        # Ensure the run row exists and resolve tag names to tag ids.
        run_id = self._maybe_init_run(experiment_name, run_name)
        tag_to_metadata = {
            tag: tagdata.metadata for tag, tagdata in six.iteritems(tagged_data)
        }
        tag_to_id = self._maybe_init_tags(run_id, tag_to_metadata)

        # Flatten every (tag, step) tensor into one row tuple for a single
        # executemany call below.
        tensor_values = []
        for tag, tagdata in six.iteritems(tagged_data):
            tag_id = tag_to_id[tag]
            for step, wall_time, tensor_proto in tagdata.values:
                dtype = tensor_proto.dtype
                shape = ','.join(str(d.size) for d in tensor_proto.tensor_shape.dim)
                # Use tensor_proto.tensor_content if it's set, to skip relatively
                # expensive extraction into intermediate ndarray.
                data = self._make_blob(
                    tensor_proto.tensor_content or
                    tensor_util.make_ndarray(tensor_proto).tobytes())
                tensor_values.append((tag_id, step, wall_time, dtype, shape, data))
        # INSERT OR REPLACE: re-writing an existing (series, step) pair
        # overwrites the previous row rather than failing.
        self._db.executemany(
            """
            INSERT OR REPLACE INTO Tensors (
                series, step, computed_time, dtype, shape, data
            ) VALUES (?, ?, ?, ?, ?, ?)
            """,
            tensor_values)
Transactionally writes the given tagged summary data to the DB. Args: tagged_data: map from tag to TagData instances. experiment_name: name of experiment. run_name: name of run.
Below is the the instruction that describes the task: ### Input: Transactionally writes the given tagged summary data to the DB. Args: tagged_data: map from tag to TagData instances. experiment_name: name of experiment. run_name: name of run. ### Response: def write_summaries(self, tagged_data, experiment_name, run_name): """Transactionally writes the given tagged summary data to the DB. Args: tagged_data: map from tag to TagData instances. experiment_name: name of experiment. run_name: name of run. """ logger.debug('Writing summaries for %s tags', len(tagged_data)) # Connection used as context manager for auto commit/rollback on exit. # We still need an explicit BEGIN, because it doesn't do one on enter, # it waits until the first DML command - which is totally broken. # See: https://stackoverflow.com/a/44448465/1179226 with self._db: self._db.execute('BEGIN TRANSACTION') run_id = self._maybe_init_run(experiment_name, run_name) tag_to_metadata = { tag: tagdata.metadata for tag, tagdata in six.iteritems(tagged_data) } tag_to_id = self._maybe_init_tags(run_id, tag_to_metadata) tensor_values = [] for tag, tagdata in six.iteritems(tagged_data): tag_id = tag_to_id[tag] for step, wall_time, tensor_proto in tagdata.values: dtype = tensor_proto.dtype shape = ','.join(str(d.size) for d in tensor_proto.tensor_shape.dim) # Use tensor_proto.tensor_content if it's set, to skip relatively # expensive extraction into intermediate ndarray. data = self._make_blob( tensor_proto.tensor_content or tensor_util.make_ndarray(tensor_proto).tobytes()) tensor_values.append((tag_id, step, wall_time, dtype, shape, data)) self._db.executemany( """ INSERT OR REPLACE INTO Tensors ( series, step, computed_time, dtype, shape, data ) VALUES (?, ?, ?, ?, ?, ?) """, tensor_values)
def face_encodings(face_image, known_face_locations=None, num_jitters=1):
    """
    Given an image, return the 128-dimension face encoding for each face in the image.

    :param face_image: The image that contains one or more faces
    :param known_face_locations: Optional - the bounding boxes of each face if you already know them.
    :param num_jitters: How many times to re-sample the face when calculating encoding. Higher is more accurate, but slower (i.e. 100 is 100x slower)
    :return: A list of 128-dimensional face encodings (one for each face in the image)
    """
    # Locate facial landmarks (small model) for each detected face, then
    # compute one descriptor per landmark set.
    landmarks = _raw_face_landmarks(face_image, known_face_locations, model="small")
    encodings = []
    for landmark_set in landmarks:
        descriptor = face_encoder.compute_face_descriptor(
            face_image, landmark_set, num_jitters)
        encodings.append(np.array(descriptor))
    return encodings
Given an image, return the 128-dimension face encoding for each face in the image. :param face_image: The image that contains one or more faces :param known_face_locations: Optional - the bounding boxes of each face if you already know them. :param num_jitters: How many times to re-sample the face when calculating encoding. Higher is more accurate, but slower (i.e. 100 is 100x slower) :return: A list of 128-dimensional face encodings (one for each face in the image)
Below is the the instruction that describes the task: ### Input: Given an image, return the 128-dimension face encoding for each face in the image. :param face_image: The image that contains one or more faces :param known_face_locations: Optional - the bounding boxes of each face if you already know them. :param num_jitters: How many times to re-sample the face when calculating encoding. Higher is more accurate, but slower (i.e. 100 is 100x slower) :return: A list of 128-dimensional face encodings (one for each face in the image) ### Response: def face_encodings(face_image, known_face_locations=None, num_jitters=1): """ Given an image, return the 128-dimension face encoding for each face in the image. :param face_image: The image that contains one or more faces :param known_face_locations: Optional - the bounding boxes of each face if you already know them. :param num_jitters: How many times to re-sample the face when calculating encoding. Higher is more accurate, but slower (i.e. 100 is 100x slower) :return: A list of 128-dimensional face encodings (one for each face in the image) """ raw_landmarks = _raw_face_landmarks(face_image, known_face_locations, model="small") return [np.array(face_encoder.compute_face_descriptor(face_image, raw_landmark_set, num_jitters)) for raw_landmark_set in raw_landmarks]
def start_logging(Name, region=None, key=None, keyid=None, profile=None):
    '''
    Start logging for a trail

    Returns {started: true} if the trail was started and returns
    {started: False} if the trail was not started.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_cloudtrail.start_logging my_trail

    '''
    try:
        client = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        client.start_logging(Name=Name)
    except ClientError as err:
        # Surface the boto error in the structured result instead of raising.
        return {'started': False, 'error': __utils__['boto3.get_error'](err)}
    return {'started': True}
Start logging for a trail Returns {started: true} if the trail was started and returns {started: False} if the trail was not started. CLI Example: .. code-block:: bash salt myminion boto_cloudtrail.start_logging my_trail
Below is the the instruction that describes the task: ### Input: Start logging for a trail Returns {started: true} if the trail was started and returns {started: False} if the trail was not started. CLI Example: .. code-block:: bash salt myminion boto_cloudtrail.start_logging my_trail ### Response: def start_logging(Name, region=None, key=None, keyid=None, profile=None): ''' Start logging for a trail Returns {started: true} if the trail was started and returns {started: False} if the trail was not started. CLI Example: .. code-block:: bash salt myminion boto_cloudtrail.start_logging my_trail ''' try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.start_logging(Name=Name) return {'started': True} except ClientError as e: return {'started': False, 'error': __utils__['boto3.get_error'](e)}
def serialize_acquire_mutex(self, spec):
    """
    Serializer for :meth:`SpiffWorkflow.specs.AcquireMutex`.

    Emits an ``<acquire-mutex>`` element carrying the common task-spec
    attributes plus a ``<mutex>`` child naming the mutex.
    """
    node = etree.Element('acquire-mutex')
    self.serialize_task_spec(spec, node)
    mutex_node = SubElement(node, 'mutex')
    mutex_node.text = spec.mutex
    return node
Serializer for :meth:`SpiffWorkflow.specs.AcquireMutex`.
Below is the the instruction that describes the task: ### Input: Serializer for :meth:`SpiffWorkflow.specs.AcquireMutex`. ### Response: def serialize_acquire_mutex(self, spec): """ Serializer for :meth:`SpiffWorkflow.specs.AcquireMutex`. """ elem = etree.Element('acquire-mutex') self.serialize_task_spec(spec, elem) SubElement(elem, 'mutex').text = spec.mutex return elem
def view(self, view):
    '''
    View the wiki.
    '''
    # Template keyword bag: no pagination, edit link shown per permission.
    context = {
        'pager': '',
        'editable': self.editable(),
    }
    self.render('wiki_page/wiki_view.html',
                postinfo=view,
                kwd=context,
                userinfo=self.userinfo)
View the wiki.
Below is the instruction that describes the task: ### Input: View the wiki. ### Response: def view(self, view): ''' View the wiki. ''' kwd = { 'pager': '', 'editable': self.editable(), } self.render('wiki_page/wiki_view.html', postinfo=view, kwd=kwd, userinfo=self.userinfo)
def show_view(self):
    """
    Show :attr:`LoginForm` form.

    Marks the output as a login flow; authenticated users are upgraded,
    everyone else gets the serialized login form.
    """
    self.current.output['login_process'] = True
    if not self.current.is_auth:
        self.current.output['forms'] = LoginForm(current=self.current).serialize()
    else:
        self._do_upgrade()
Show :attr:`LoginForm` form.
Below is the instruction that describes the task: ### Input: Show :attr:`LoginForm` form. ### Response: def show_view(self): """ Show :attr:`LoginForm` form. """ self.current.output['login_process'] = True if self.current.is_auth: self._do_upgrade() else: self.current.output['forms'] = LoginForm(current=self.current).serialize()
def newton(self, start_x=None, tolerance=1.0e-6):
    """
    Optimise value of x using newton gauss

    When no starting point is given, seed the iteration with the
    analytical fitter's estimate.
    """
    initial = start_x
    if initial is None:
        initial = self._analytical_fitter.fit(self._c)
    return optimise_newton(initial, self._a, self._c, tolerance)
Optimise value of x using newton gauss
Below is the instruction that describes the task: ### Input: Optimise value of x using newton gauss ### Response: def newton(self, start_x=None, tolerance=1.0e-6): """ Optimise value of x using newton gauss """ if start_x is None: start_x = self._analytical_fitter.fit(self._c) return optimise_newton(start_x, self._a, self._c, tolerance)
def make_query(self, return_score=False):
    """Return the index of the sample to be queried and labeled and
    selection score of each sample. Read-only. No modification to the
    internal states.

    Returns
    -------
    ask_id : int
        The index of the next unlabeled sample to be queried and labeled.

    score : list of (index, score) tuple
        Selection score of unlabled entries, the larger the better.
    """
    dataset = self.dataset
    # Re-train on the current labeled pool before scoring.
    self.model.train(dataset)

    unlabeled_entry_ids, X_pool = zip(*dataset.get_unlabeled_entries())

    # NOTE(review): `dvalue` is only bound for Probabilistic/Continuous
    # models, and `score` only for methods 'lc'/'sm'/'entropy'; any other
    # combination raises NameError below — presumably guarded at
    # construction time, but confirm.
    if isinstance(self.model, ProbabilisticModel):
        dvalue = self.model.predict_proba(X_pool)
    elif isinstance(self.model, ContinuousModel):
        dvalue = self.model.predict_real(X_pool)

    if self.method == 'lc':  # least confident
        score = -np.max(dvalue, axis=1)
    elif self.method == 'sm':  # smallest margin
        if np.shape(dvalue)[1] > 2:
            # Find 2 largest decision values
            dvalue = -(np.partition(-dvalue, 2, axis=1)[:, :2])
        score = -np.abs(dvalue[:, 0] - dvalue[:, 1])
    elif self.method == 'entropy':
        # Shannon entropy of the predicted distribution per sample.
        score = np.sum(-dvalue * np.log(dvalue), axis=1)
    ask_id = np.argmax(score)

    if return_score:
        return unlabeled_entry_ids[ask_id], \
               list(zip(unlabeled_entry_ids, score))
    else:
        return unlabeled_entry_ids[ask_id]
Return the index of the sample to be queried and labeled and selection score of each sample. Read-only. No modification to the internal states. Returns ------- ask_id : int The index of the next unlabeled sample to be queried and labeled. score : list of (index, score) tuple Selection score of unlabled entries, the larger the better.
Below is the the instruction that describes the task: ### Input: Return the index of the sample to be queried and labeled and selection score of each sample. Read-only. No modification to the internal states. Returns ------- ask_id : int The index of the next unlabeled sample to be queried and labeled. score : list of (index, score) tuple Selection score of unlabled entries, the larger the better. ### Response: def make_query(self, return_score=False): """Return the index of the sample to be queried and labeled and selection score of each sample. Read-only. No modification to the internal states. Returns ------- ask_id : int The index of the next unlabeled sample to be queried and labeled. score : list of (index, score) tuple Selection score of unlabled entries, the larger the better. """ dataset = self.dataset self.model.train(dataset) unlabeled_entry_ids, X_pool = zip(*dataset.get_unlabeled_entries()) if isinstance(self.model, ProbabilisticModel): dvalue = self.model.predict_proba(X_pool) elif isinstance(self.model, ContinuousModel): dvalue = self.model.predict_real(X_pool) if self.method == 'lc': # least confident score = -np.max(dvalue, axis=1) elif self.method == 'sm': # smallest margin if np.shape(dvalue)[1] > 2: # Find 2 largest decision values dvalue = -(np.partition(-dvalue, 2, axis=1)[:, :2]) score = -np.abs(dvalue[:, 0] - dvalue[:, 1]) elif self.method == 'entropy': score = np.sum(-dvalue * np.log(dvalue), axis=1) ask_id = np.argmax(score) if return_score: return unlabeled_entry_ids[ask_id], \ list(zip(unlabeled_entry_ids, score)) else: return unlabeled_entry_ids[ask_id]
def collection_get_options(collection_name, **kwargs): ''' Get collection options Additional parameters (kwargs) may be passed, they will be proxied to http.query CLI Example: .. code-block:: bash salt '*' solrcloud.collection_get_options collection_name ''' cluster = cluster_status(**kwargs) options = { "collection.configName": cluster["collections"][collection_name]["configName"], "router.name": cluster["collections"][collection_name]["router"]["name"], "replicationFactor": int(cluster["collections"][collection_name]["replicationFactor"]), "maxShardsPerNode": int(cluster["collections"][collection_name]["maxShardsPerNode"]), "autoAddReplicas": cluster["collections"][collection_name]["autoAddReplicas"] is True } if 'rule' in cluster["collections"][collection_name]: options['rule'] = cluster["collections"][collection_name]['rule'] if 'snitch' in cluster["collections"][collection_name]: options['snitch'] = cluster["collections"][collection_name]['rule'] return options
Get collection options Additional parameters (kwargs) may be passed, they will be proxied to http.query CLI Example: .. code-block:: bash salt '*' solrcloud.collection_get_options collection_name
Below is the the instruction that describes the task: ### Input: Get collection options Additional parameters (kwargs) may be passed, they will be proxied to http.query CLI Example: .. code-block:: bash salt '*' solrcloud.collection_get_options collection_name ### Response: def collection_get_options(collection_name, **kwargs): ''' Get collection options Additional parameters (kwargs) may be passed, they will be proxied to http.query CLI Example: .. code-block:: bash salt '*' solrcloud.collection_get_options collection_name ''' cluster = cluster_status(**kwargs) options = { "collection.configName": cluster["collections"][collection_name]["configName"], "router.name": cluster["collections"][collection_name]["router"]["name"], "replicationFactor": int(cluster["collections"][collection_name]["replicationFactor"]), "maxShardsPerNode": int(cluster["collections"][collection_name]["maxShardsPerNode"]), "autoAddReplicas": cluster["collections"][collection_name]["autoAddReplicas"] is True } if 'rule' in cluster["collections"][collection_name]: options['rule'] = cluster["collections"][collection_name]['rule'] if 'snitch' in cluster["collections"][collection_name]: options['snitch'] = cluster["collections"][collection_name]['rule'] return options
async def _transmit(self): """ Transmit outbound data. """ # send FORWARD TSN if self._forward_tsn_chunk is not None: await self._send_chunk(self._forward_tsn_chunk) self._forward_tsn_chunk = None # ensure T3 is running if not self._t3_handle: self._t3_start() # limit burst size if self._fast_recovery_exit is not None: burst_size = 2 * USERDATA_MAX_LENGTH else: burst_size = 4 * USERDATA_MAX_LENGTH cwnd = min(self._flight_size + burst_size, self._cwnd) # retransmit retransmit_earliest = True for chunk in self._sent_queue: if chunk._retransmit: if self._fast_recovery_transmit: self._fast_recovery_transmit = False elif self._flight_size >= cwnd: return self._flight_size_increase(chunk) chunk._misses = 0 chunk._retransmit = False chunk._sent_count += 1 await self._send_chunk(chunk) if retransmit_earliest: # restart the T3 timer as the earliest outstanding TSN # is being retransmitted self._t3_restart() retransmit_earliest = False while self._outbound_queue and self._flight_size < cwnd: chunk = self._outbound_queue.popleft() self._sent_queue.append(chunk) self._flight_size_increase(chunk) # update counters chunk._sent_count += 1 chunk._sent_time = time.time() await self._send_chunk(chunk) if not self._t3_handle: self._t3_start()
Transmit outbound data.
Below is the the instruction that describes the task: ### Input: Transmit outbound data. ### Response: async def _transmit(self): """ Transmit outbound data. """ # send FORWARD TSN if self._forward_tsn_chunk is not None: await self._send_chunk(self._forward_tsn_chunk) self._forward_tsn_chunk = None # ensure T3 is running if not self._t3_handle: self._t3_start() # limit burst size if self._fast_recovery_exit is not None: burst_size = 2 * USERDATA_MAX_LENGTH else: burst_size = 4 * USERDATA_MAX_LENGTH cwnd = min(self._flight_size + burst_size, self._cwnd) # retransmit retransmit_earliest = True for chunk in self._sent_queue: if chunk._retransmit: if self._fast_recovery_transmit: self._fast_recovery_transmit = False elif self._flight_size >= cwnd: return self._flight_size_increase(chunk) chunk._misses = 0 chunk._retransmit = False chunk._sent_count += 1 await self._send_chunk(chunk) if retransmit_earliest: # restart the T3 timer as the earliest outstanding TSN # is being retransmitted self._t3_restart() retransmit_earliest = False while self._outbound_queue and self._flight_size < cwnd: chunk = self._outbound_queue.popleft() self._sent_queue.append(chunk) self._flight_size_increase(chunk) # update counters chunk._sent_count += 1 chunk._sent_time = time.time() await self._send_chunk(chunk) if not self._t3_handle: self._t3_start()
def _write_stop_to_stop_network_edges(net, file_name, data=True, fmt=None): """ Write out a network Parameters ---------- net: networkx.DiGraph base_name: str path to the filename (without extension) data: bool, optional whether or not to write out any edge data present fmt: str, optional If "csv" write out the network in csv format. """ if fmt is None: fmt = "edg" if fmt == "edg": if data: networkx.write_edgelist(net, file_name, data=True) else: networkx.write_edgelist(net, file_name) elif fmt == "csv": with open(file_name, 'w') as f: # writing out the header edge_iter = net.edges_iter(data=True) _, _, edg_data = next(edge_iter) edg_data_keys = list(sorted(edg_data.keys())) header = ";".join(["from_stop_I", "to_stop_I"] + edg_data_keys) f.write(header) for from_node_I, to_node_I, data in net.edges_iter(data=True): f.write("\n") values = [str(from_node_I), str(to_node_I)] data_values = [] for key in edg_data_keys: if key == "route_I_counts": route_I_counts_string = str(data[key]).replace(" ", "")[1:-1] data_values.append(route_I_counts_string) else: data_values.append(str(data[key])) all_values = values + data_values f.write(";".join(all_values))
Write out a network Parameters ---------- net: networkx.DiGraph base_name: str path to the filename (without extension) data: bool, optional whether or not to write out any edge data present fmt: str, optional If "csv" write out the network in csv format.
Below is the the instruction that describes the task: ### Input: Write out a network Parameters ---------- net: networkx.DiGraph base_name: str path to the filename (without extension) data: bool, optional whether or not to write out any edge data present fmt: str, optional If "csv" write out the network in csv format. ### Response: def _write_stop_to_stop_network_edges(net, file_name, data=True, fmt=None): """ Write out a network Parameters ---------- net: networkx.DiGraph base_name: str path to the filename (without extension) data: bool, optional whether or not to write out any edge data present fmt: str, optional If "csv" write out the network in csv format. """ if fmt is None: fmt = "edg" if fmt == "edg": if data: networkx.write_edgelist(net, file_name, data=True) else: networkx.write_edgelist(net, file_name) elif fmt == "csv": with open(file_name, 'w') as f: # writing out the header edge_iter = net.edges_iter(data=True) _, _, edg_data = next(edge_iter) edg_data_keys = list(sorted(edg_data.keys())) header = ";".join(["from_stop_I", "to_stop_I"] + edg_data_keys) f.write(header) for from_node_I, to_node_I, data in net.edges_iter(data=True): f.write("\n") values = [str(from_node_I), str(to_node_I)] data_values = [] for key in edg_data_keys: if key == "route_I_counts": route_I_counts_string = str(data[key]).replace(" ", "")[1:-1] data_values.append(route_I_counts_string) else: data_values.append(str(data[key])) all_values = values + data_values f.write(";".join(all_values))
def dfs_edges(G, start, depth_limit=1, get_only=True, get_path=False): """Deepest first search.""" depth_limit = depth_limit - 1 # creates unsigned int array (2 Byte) output_nodes = array('L') output_depth = array('I') # creates float array (4 Byte) output_weights = array('f') apath = [] if G.node.get(start) is None: # raise KeyError('Start node not found') print('Start node not found') return output_nodes, output_weights, output_depth, apath visited = set() visited.add(start) # Save the start node with its data to the stack stack = [(start, G.edges_iter(start, data=True), 1.0)] visited.add(start) while stack: if len(output_nodes) > 80100100: print("To many nodes for: {}".format(start)) del output_nodes del output_weights del output_depth output_nodes = array('L') output_depth = array('I') # creates float array (4 Byte) output_weights = array('f') gc.collect() break parent, children, weight = stack[-1] try: parent_, child, child_keys = next(children) # print "child: {}, parent_data: {}".format(child, parent_data) if child not in visited: weight = child_keys.get('weight', 1.0) * weight visited.add(child) if len(stack) >= depth_limit or weight <= 0.00001: visited.remove(child) else: stack.append((child, G.edges_iter(child, data=True), weight)) # if its not and user. if get_only and child > 100000000000: # if get_only and G.node[child].get('Type') != get_only: continue output_nodes.append(child) output_weights.append(weight) output_depth.append(len(stack)) if get_path: apath.append([step[0] for step in stack]) except StopIteration: stack.pop() visited.remove(parent) # if data.get('Type') == "Node": return output_nodes, output_weights, output_depth, apath
Deepest first search.
Below is the the instruction that describes the task: ### Input: Deepest first search. ### Response: def dfs_edges(G, start, depth_limit=1, get_only=True, get_path=False): """Deepest first search.""" depth_limit = depth_limit - 1 # creates unsigned int array (2 Byte) output_nodes = array('L') output_depth = array('I') # creates float array (4 Byte) output_weights = array('f') apath = [] if G.node.get(start) is None: # raise KeyError('Start node not found') print('Start node not found') return output_nodes, output_weights, output_depth, apath visited = set() visited.add(start) # Save the start node with its data to the stack stack = [(start, G.edges_iter(start, data=True), 1.0)] visited.add(start) while stack: if len(output_nodes) > 80100100: print("To many nodes for: {}".format(start)) del output_nodes del output_weights del output_depth output_nodes = array('L') output_depth = array('I') # creates float array (4 Byte) output_weights = array('f') gc.collect() break parent, children, weight = stack[-1] try: parent_, child, child_keys = next(children) # print "child: {}, parent_data: {}".format(child, parent_data) if child not in visited: weight = child_keys.get('weight', 1.0) * weight visited.add(child) if len(stack) >= depth_limit or weight <= 0.00001: visited.remove(child) else: stack.append((child, G.edges_iter(child, data=True), weight)) # if its not and user. if get_only and child > 100000000000: # if get_only and G.node[child].get('Type') != get_only: continue output_nodes.append(child) output_weights.append(weight) output_depth.append(len(stack)) if get_path: apath.append([step[0] for step in stack]) except StopIteration: stack.pop() visited.remove(parent) # if data.get('Type') == "Node": return output_nodes, output_weights, output_depth, apath
def prepare_request(self, method, url, body=''): """Prepare the request body and headers :returns: headers of the signed request """ headers = { 'Content-type': 'application/json', } # Note: we don't pass body to sign() since it's only for bodies that # are form-urlencoded. Similarly, we don't care about the body that # sign() returns. uri, signed_headers, signed_body = self.oauth_client.sign( url, http_method=method, headers=headers) if body: if method == 'GET': body = urllib.urlencode(body) else: body = json.dumps(body) headers.update(signed_headers) return {"headers": headers, "data": body}
Prepare the request body and headers :returns: headers of the signed request
Below is the the instruction that describes the task: ### Input: Prepare the request body and headers :returns: headers of the signed request ### Response: def prepare_request(self, method, url, body=''): """Prepare the request body and headers :returns: headers of the signed request """ headers = { 'Content-type': 'application/json', } # Note: we don't pass body to sign() since it's only for bodies that # are form-urlencoded. Similarly, we don't care about the body that # sign() returns. uri, signed_headers, signed_body = self.oauth_client.sign( url, http_method=method, headers=headers) if body: if method == 'GET': body = urllib.urlencode(body) else: body = json.dumps(body) headers.update(signed_headers) return {"headers": headers, "data": body}
def save(self, running=None): """ save or update this cached gear into the Ariane server cache :param running: the new running value. if None ignored :return: """ LOGGER.debug("InjectorCachedGear.save") ret = True if running is not None: self.running = running if self.service is None: self.service = InjectorCachedGearService.make_admin_on_demand_service(self) if self.service is not None and not self.service.is_started: self.service.start() args = {'properties': {'OPERATION': 'PUSH_GEAR_IN_CACHE', 'REMOTE_GEAR': str(self.injector_gear_2_json()).replace("'", '"'), 'CACHE_ID': InjectorCachedGearService.cache_id}} result = InjectorCachedGearService.requester.call(args).get() if result.rc != 0: err_msg = 'InjectorCachedGear.save - Problem while saving gear ( id : ' + self.id + \ 'Reason: ' + str(result.response_content) + '-' + str(result.error_message) + \ " (" + str(result.rc) + ")" LOGGER.warning(err_msg) ret = False return ret
save or update this cached gear into the Ariane server cache :param running: the new running value. if None ignored :return:
Below is the the instruction that describes the task: ### Input: save or update this cached gear into the Ariane server cache :param running: the new running value. if None ignored :return: ### Response: def save(self, running=None): """ save or update this cached gear into the Ariane server cache :param running: the new running value. if None ignored :return: """ LOGGER.debug("InjectorCachedGear.save") ret = True if running is not None: self.running = running if self.service is None: self.service = InjectorCachedGearService.make_admin_on_demand_service(self) if self.service is not None and not self.service.is_started: self.service.start() args = {'properties': {'OPERATION': 'PUSH_GEAR_IN_CACHE', 'REMOTE_GEAR': str(self.injector_gear_2_json()).replace("'", '"'), 'CACHE_ID': InjectorCachedGearService.cache_id}} result = InjectorCachedGearService.requester.call(args).get() if result.rc != 0: err_msg = 'InjectorCachedGear.save - Problem while saving gear ( id : ' + self.id + \ 'Reason: ' + str(result.response_content) + '-' + str(result.error_message) + \ " (" + str(result.rc) + ")" LOGGER.warning(err_msg) ret = False return ret
def mk_example(): """mk_example: book example for the single item lot sizing""" T = 5 _,f,c,d,h = multidict({ 1 : [3,1,5,1], 2 : [3,1,7,1], 3 : [3,3,3,1], 4 : [3,3,6,1], 5 : [3,3,4,1], }) return T,f,c,d,h
mk_example: book example for the single item lot sizing
Below is the the instruction that describes the task: ### Input: mk_example: book example for the single item lot sizing ### Response: def mk_example(): """mk_example: book example for the single item lot sizing""" T = 5 _,f,c,d,h = multidict({ 1 : [3,1,5,1], 2 : [3,1,7,1], 3 : [3,3,3,1], 4 : [3,3,6,1], 5 : [3,3,4,1], }) return T,f,c,d,h
def _proxy_conf_file(proxyfile, test): ''' Check if proxy conf exists and update ''' changes_old = [] changes_new = [] success = True if not os.path.exists(proxyfile): try: if not test: changes_new.append(_write_proxy_conf(proxyfile)) msg = 'Salt Proxy: Wrote proxy conf {0}'.format(proxyfile) else: msg = 'Salt Proxy: Update required to proxy conf {0}' \ .format(proxyfile) except (OSError, IOError) as err: success = False msg = 'Salt Proxy: Error writing proxy file {0}'.format(err) log.error(msg) changes_new.append(msg) changes_new.append(msg) log.debug(msg) else: msg = 'Salt Proxy: {0} already exists, skipping'.format(proxyfile) changes_old.append(msg) log.debug(msg) return success, changes_new, changes_old
Check if proxy conf exists and update
Below is the the instruction that describes the task: ### Input: Check if proxy conf exists and update ### Response: def _proxy_conf_file(proxyfile, test): ''' Check if proxy conf exists and update ''' changes_old = [] changes_new = [] success = True if not os.path.exists(proxyfile): try: if not test: changes_new.append(_write_proxy_conf(proxyfile)) msg = 'Salt Proxy: Wrote proxy conf {0}'.format(proxyfile) else: msg = 'Salt Proxy: Update required to proxy conf {0}' \ .format(proxyfile) except (OSError, IOError) as err: success = False msg = 'Salt Proxy: Error writing proxy file {0}'.format(err) log.error(msg) changes_new.append(msg) changes_new.append(msg) log.debug(msg) else: msg = 'Salt Proxy: {0} already exists, skipping'.format(proxyfile) changes_old.append(msg) log.debug(msg) return success, changes_new, changes_old
def _compare_areas(self, datasets=None, compare_func=max): """Get for the provided datasets. Args: datasets (iterable): Datasets whose areas will be compared. Can be either `xarray.DataArray` objects or identifiers to get the DataArrays from the current Scene. Defaults to all datasets. This can also be a series of area objects, typically AreaDefinitions. compare_func (callable): `min` or `max` or other function used to compare the dataset's areas. """ if datasets is None: datasets = list(self.values()) areas = [] for ds in datasets: if isinstance(ds, BaseDefinition): areas.append(ds) continue elif not isinstance(ds, DataArray): ds = self[ds] area = ds.attrs.get('area') areas.append(area) areas = [x for x in areas if x is not None] if not areas: raise ValueError("No dataset areas available") if not all(isinstance(x, type(areas[0])) for x in areas[1:]): raise ValueError("Can't compare areas of different types") elif isinstance(areas[0], AreaDefinition): first_pstr = areas[0].proj_str if not all(ad.proj_str == first_pstr for ad in areas[1:]): raise ValueError("Can't compare areas with different " "projections.") def key_func(ds): return 1. / ds.pixel_size_x else: def key_func(ds): return ds.shape # find the highest/lowest area among the provided return compare_func(areas, key=key_func)
Get for the provided datasets. Args: datasets (iterable): Datasets whose areas will be compared. Can be either `xarray.DataArray` objects or identifiers to get the DataArrays from the current Scene. Defaults to all datasets. This can also be a series of area objects, typically AreaDefinitions. compare_func (callable): `min` or `max` or other function used to compare the dataset's areas.
Below is the the instruction that describes the task: ### Input: Get for the provided datasets. Args: datasets (iterable): Datasets whose areas will be compared. Can be either `xarray.DataArray` objects or identifiers to get the DataArrays from the current Scene. Defaults to all datasets. This can also be a series of area objects, typically AreaDefinitions. compare_func (callable): `min` or `max` or other function used to compare the dataset's areas. ### Response: def _compare_areas(self, datasets=None, compare_func=max): """Get for the provided datasets. Args: datasets (iterable): Datasets whose areas will be compared. Can be either `xarray.DataArray` objects or identifiers to get the DataArrays from the current Scene. Defaults to all datasets. This can also be a series of area objects, typically AreaDefinitions. compare_func (callable): `min` or `max` or other function used to compare the dataset's areas. """ if datasets is None: datasets = list(self.values()) areas = [] for ds in datasets: if isinstance(ds, BaseDefinition): areas.append(ds) continue elif not isinstance(ds, DataArray): ds = self[ds] area = ds.attrs.get('area') areas.append(area) areas = [x for x in areas if x is not None] if not areas: raise ValueError("No dataset areas available") if not all(isinstance(x, type(areas[0])) for x in areas[1:]): raise ValueError("Can't compare areas of different types") elif isinstance(areas[0], AreaDefinition): first_pstr = areas[0].proj_str if not all(ad.proj_str == first_pstr for ad in areas[1:]): raise ValueError("Can't compare areas with different " "projections.") def key_func(ds): return 1. / ds.pixel_size_x else: def key_func(ds): return ds.shape # find the highest/lowest area among the provided return compare_func(areas, key=key_func)
def linked(base_dir: str, rr_id: str) -> str: """ Get, from the specified directory, the path to the tails file associated with the input revocation registry identifier, or None for no such file. :param base_dir: base directory for tails files, thereafter split by cred def id :param rr_id: rev reg id :return: (stringified) path to tails file of interest, or None for no such file. """ LOGGER.debug('Tails.linked >>> base_dir: %s, rr_id: %s', base_dir, rr_id) if not ok_rev_reg_id(rr_id): LOGGER.debug('Tails.linked <!< Bad rev reg id %s', rr_id) raise BadIdentifier('Bad rev reg id {}'.format(rr_id)) cd_id = rev_reg_id2cred_def_id(rr_id) link = join(base_dir, cd_id, rr_id) rv = join(base_dir, cd_id, readlink(link)) if islink(link) else None LOGGER.debug('Tails.linked <<< %s', rv) return rv
Get, from the specified directory, the path to the tails file associated with the input revocation registry identifier, or None for no such file. :param base_dir: base directory for tails files, thereafter split by cred def id :param rr_id: rev reg id :return: (stringified) path to tails file of interest, or None for no such file.
Below is the the instruction that describes the task: ### Input: Get, from the specified directory, the path to the tails file associated with the input revocation registry identifier, or None for no such file. :param base_dir: base directory for tails files, thereafter split by cred def id :param rr_id: rev reg id :return: (stringified) path to tails file of interest, or None for no such file. ### Response: def linked(base_dir: str, rr_id: str) -> str: """ Get, from the specified directory, the path to the tails file associated with the input revocation registry identifier, or None for no such file. :param base_dir: base directory for tails files, thereafter split by cred def id :param rr_id: rev reg id :return: (stringified) path to tails file of interest, or None for no such file. """ LOGGER.debug('Tails.linked >>> base_dir: %s, rr_id: %s', base_dir, rr_id) if not ok_rev_reg_id(rr_id): LOGGER.debug('Tails.linked <!< Bad rev reg id %s', rr_id) raise BadIdentifier('Bad rev reg id {}'.format(rr_id)) cd_id = rev_reg_id2cred_def_id(rr_id) link = join(base_dir, cd_id, rr_id) rv = join(base_dir, cd_id, readlink(link)) if islink(link) else None LOGGER.debug('Tails.linked <<< %s', rv) return rv
def _ee_decode(self, msg): """EE: Entry/exit timer report.""" return {'area': int(msg[4:5])-1, 'is_exit': msg[5:6] == '0', 'timer1': int(msg[6:9]), 'timer2': int(msg[9:12]), 'armed_status': msg[12:13]}
EE: Entry/exit timer report.
Below is the the instruction that describes the task: ### Input: EE: Entry/exit timer report. ### Response: def _ee_decode(self, msg): """EE: Entry/exit timer report.""" return {'area': int(msg[4:5])-1, 'is_exit': msg[5:6] == '0', 'timer1': int(msg[6:9]), 'timer2': int(msg[9:12]), 'armed_status': msg[12:13]}
def get_activity_objective_bank_assignment_session(self, proxy): """Gets the session for assigning activity to objective bank mappings. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.learning.ActivityObjectiveBankAssignmentSession) - an ``ActivityObjectiveBankAssignmentSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_activity_objective_bank_assignment()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_activity_objective_bank_assignment()`` is ``true``.* """ if not self.supports_activity_objective_bank_assignment(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.ActivityObjectiveBankAssignmentSession(proxy=proxy, runtime=self._runtime)
Gets the session for assigning activity to objective bank mappings. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.learning.ActivityObjectiveBankAssignmentSession) - an ``ActivityObjectiveBankAssignmentSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_activity_objective_bank_assignment()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_activity_objective_bank_assignment()`` is ``true``.*
Below is the the instruction that describes the task: ### Input: Gets the session for assigning activity to objective bank mappings. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.learning.ActivityObjectiveBankAssignmentSession) - an ``ActivityObjectiveBankAssignmentSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_activity_objective_bank_assignment()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_activity_objective_bank_assignment()`` is ``true``.* ### Response: def get_activity_objective_bank_assignment_session(self, proxy): """Gets the session for assigning activity to objective bank mappings. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.learning.ActivityObjectiveBankAssignmentSession) - an ``ActivityObjectiveBankAssignmentSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_activity_objective_bank_assignment()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_activity_objective_bank_assignment()`` is ``true``.* """ if not self.supports_activity_objective_bank_assignment(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.ActivityObjectiveBankAssignmentSession(proxy=proxy, runtime=self._runtime)
def edf_totdev(N, m, alpha): """ Equivalent degrees of freedom for Total Deviation FIXME: what is the right behavior for alpha outside 0,-1,-2? NIST SP1065 page 41, Table 7 """ alpha = int(alpha) if alpha in [0, -1, -2]: # alpha 0 WFM # alpha -1 FFM # alpha -2 RWFM NIST_SP1065_table7 = [(1.50, 0.0), (1.17, 0.22), (0.93, 0.36)] (b, c) = NIST_SP1065_table7[int(abs(alpha))] return b*(float(N)/float(m))-c else: return edf_simple(N, m, alpha)
Equivalent degrees of freedom for Total Deviation FIXME: what is the right behavior for alpha outside 0,-1,-2? NIST SP1065 page 41, Table 7
Below is the the instruction that describes the task: ### Input: Equivalent degrees of freedom for Total Deviation FIXME: what is the right behavior for alpha outside 0,-1,-2? NIST SP1065 page 41, Table 7 ### Response: def edf_totdev(N, m, alpha): """ Equivalent degrees of freedom for Total Deviation FIXME: what is the right behavior for alpha outside 0,-1,-2? NIST SP1065 page 41, Table 7 """ alpha = int(alpha) if alpha in [0, -1, -2]: # alpha 0 WFM # alpha -1 FFM # alpha -2 RWFM NIST_SP1065_table7 = [(1.50, 0.0), (1.17, 0.22), (0.93, 0.36)] (b, c) = NIST_SP1065_table7[int(abs(alpha))] return b*(float(N)/float(m))-c else: return edf_simple(N, m, alpha)
def orderAction(self, order:QA_Order): """ 委托回报 """ return self.pms[order.code][order.order_id].receive_order(order)
委托回报
Below is the the instruction that describes the task: ### Input: 委托回报 ### Response: def orderAction(self, order:QA_Order): """ 委托回报 """ return self.pms[order.code][order.order_id].receive_order(order)
def plot_ticks(ax, tick_fontsize=12, xticks=None, xticks_args=None, yticks=None, yticks_args=None, zticks=None, zticks_args=None): """Function that defines the labels options of a matplotlib plot. Args: ax: matplotlib axes tick_fontsize (int): Defines the size of the ticks' font xticks([list of ticks]): Defines the values of x ticks in the figure xticks_arg(dict): Passsed into matplotlib as xticks arguments yticks([list of ticks]): Defines the values of y ticks in the figure yticks_arg(dict): Passsed into matplotlib as yticks arguments zticks([list of ticks]): Defines the values of z ticks in the figure zticks_arg(dict): Passsed into matplotlib as zticks arguments """ if xticks is not None: ax.set_xticks(xticks) xticks_args = dict_if_none(xticks_args) ax.xaxis.set_tick_params(labelsize=tick_fontsize, **xticks_args) if yticks is not None: ax.set_yticks(yticks) yticks_args = dict_if_none(yticks_args) ax.yaxis.set_tick_params(labelsize=tick_fontsize, **yticks_args) if zticks is not None: ax.set_zticks(zticks) zticks_args = dict_if_none(zticks_args) ax.zaxis.set_tick_params(labelsize=tick_fontsize, **zticks_args)
Function that defines the labels options of a matplotlib plot. Args: ax: matplotlib axes tick_fontsize (int): Defines the size of the ticks' font xticks([list of ticks]): Defines the values of x ticks in the figure xticks_arg(dict): Passsed into matplotlib as xticks arguments yticks([list of ticks]): Defines the values of y ticks in the figure yticks_arg(dict): Passsed into matplotlib as yticks arguments zticks([list of ticks]): Defines the values of z ticks in the figure zticks_arg(dict): Passsed into matplotlib as zticks arguments
Below is the the instruction that describes the task: ### Input: Function that defines the labels options of a matplotlib plot. Args: ax: matplotlib axes tick_fontsize (int): Defines the size of the ticks' font xticks([list of ticks]): Defines the values of x ticks in the figure xticks_arg(dict): Passsed into matplotlib as xticks arguments yticks([list of ticks]): Defines the values of y ticks in the figure yticks_arg(dict): Passsed into matplotlib as yticks arguments zticks([list of ticks]): Defines the values of z ticks in the figure zticks_arg(dict): Passsed into matplotlib as zticks arguments ### Response: def plot_ticks(ax, tick_fontsize=12, xticks=None, xticks_args=None, yticks=None, yticks_args=None, zticks=None, zticks_args=None): """Function that defines the labels options of a matplotlib plot. Args: ax: matplotlib axes tick_fontsize (int): Defines the size of the ticks' font xticks([list of ticks]): Defines the values of x ticks in the figure xticks_arg(dict): Passsed into matplotlib as xticks arguments yticks([list of ticks]): Defines the values of y ticks in the figure yticks_arg(dict): Passsed into matplotlib as yticks arguments zticks([list of ticks]): Defines the values of z ticks in the figure zticks_arg(dict): Passsed into matplotlib as zticks arguments """ if xticks is not None: ax.set_xticks(xticks) xticks_args = dict_if_none(xticks_args) ax.xaxis.set_tick_params(labelsize=tick_fontsize, **xticks_args) if yticks is not None: ax.set_yticks(yticks) yticks_args = dict_if_none(yticks_args) ax.yaxis.set_tick_params(labelsize=tick_fontsize, **yticks_args) if zticks is not None: ax.set_zticks(zticks) zticks_args = dict_if_none(zticks_args) ax.zaxis.set_tick_params(labelsize=tick_fontsize, **zticks_args)
def download(self, name: str, force: bool = False ) -> bool: """ Attempts to download a given Docker image. If `force=True`, then any previously installed version of the image (described by the instructions) will be replaced by the image on DockerHub. Parameters: name: the name of the Docker image. Returns: `True` if successfully downloaded, otherwise `False`. """ try: self.__docker.images.pull(name) return True except docker.errors.NotFound: print("Failed to locate image on DockerHub: {}".format(name)) return False
Attempts to download a given Docker image. If `force=True`, then any previously installed version of the image (described by the instructions) will be replaced by the image on DockerHub. Parameters: name: the name of the Docker image. Returns: `True` if successfully downloaded, otherwise `False`.
Below is the the instruction that describes the task: ### Input: Attempts to download a given Docker image. If `force=True`, then any previously installed version of the image (described by the instructions) will be replaced by the image on DockerHub. Parameters: name: the name of the Docker image. Returns: `True` if successfully downloaded, otherwise `False`. ### Response: def download(self, name: str, force: bool = False ) -> bool: """ Attempts to download a given Docker image. If `force=True`, then any previously installed version of the image (described by the instructions) will be replaced by the image on DockerHub. Parameters: name: the name of the Docker image. Returns: `True` if successfully downloaded, otherwise `False`. """ try: self.__docker.images.pull(name) return True except docker.errors.NotFound: print("Failed to locate image on DockerHub: {}".format(name)) return False
def on_lstClassifications_itemSelectionChanged(self): """Update classification description label and unlock the Next button. .. note:: This is an automatic Qt slot executed when the field selection changes. """ self.clear_further_steps() classification = self.selected_classification() # Exit if no selection if not classification: return # Set description label self.lblDescribeClassification.setText(classification["description"]) # Enable the next button self.parent.pbnNext.setEnabled(True)
Update classification description label and unlock the Next button. .. note:: This is an automatic Qt slot executed when the field selection changes.
Below is the the instruction that describes the task: ### Input: Update classification description label and unlock the Next button. .. note:: This is an automatic Qt slot executed when the field selection changes. ### Response: def on_lstClassifications_itemSelectionChanged(self): """Update classification description label and unlock the Next button. .. note:: This is an automatic Qt slot executed when the field selection changes. """ self.clear_further_steps() classification = self.selected_classification() # Exit if no selection if not classification: return # Set description label self.lblDescribeClassification.setText(classification["description"]) # Enable the next button self.parent.pbnNext.setEnabled(True)
def _detect_database_platform(self): """ Detects and sets the database platform. Evaluates custom platform class and version in order to set the correct platform. :raises InvalidPlatformSpecified: if an invalid platform was specified for this connection. """ version = self._get_database_platform_version() if version is not None: self._platform = self._create_database_platform_for_version(version) else: self._platform = self.get_dbal_platform()
Detects and sets the database platform. Evaluates custom platform class and version in order to set the correct platform. :raises InvalidPlatformSpecified: if an invalid platform was specified for this connection.
Below is the the instruction that describes the task: ### Input: Detects and sets the database platform. Evaluates custom platform class and version in order to set the correct platform. :raises InvalidPlatformSpecified: if an invalid platform was specified for this connection. ### Response: def _detect_database_platform(self): """ Detects and sets the database platform. Evaluates custom platform class and version in order to set the correct platform. :raises InvalidPlatformSpecified: if an invalid platform was specified for this connection. """ version = self._get_database_platform_version() if version is not None: self._platform = self._create_database_platform_for_version(version) else: self._platform = self.get_dbal_platform()
def get_modifier_from_signature(self, modifier_signature): """ Return a modifier from a signature Args: modifier_name (str): signature of the modifier Returns: Modifier """ return next((m for m in self.modifiers if m.full_name == modifier_signature), None)
Return a modifier from a signature Args: modifier_name (str): signature of the modifier Returns: Modifier
Below is the the instruction that describes the task: ### Input: Return a modifier from a signature Args: modifier_name (str): signature of the modifier Returns: Modifier ### Response: def get_modifier_from_signature(self, modifier_signature): """ Return a modifier from a signature Args: modifier_name (str): signature of the modifier Returns: Modifier """ return next((m for m in self.modifiers if m.full_name == modifier_signature), None)
def find_token(request, token_type, service, **kwargs): """ The access token can be in a number of places. There are priority rules as to which one to use, abide by those: 1 If it's among the request parameters use that 2 If among the extra keyword arguments 3 Acquired by a previous run service. :param request: :param token_type: :param service: :param kwargs: :return: """ if request is not None: try: _token = request[token_type] except KeyError: pass else: del request[token_type] # Required under certain circumstances :-) not under other request.c_param[token_type] = SINGLE_OPTIONAL_STRING return _token try: return kwargs["access_token"] except KeyError: # I should pick the latest acquired token, this should be the right # order for that. _arg = service.multiple_extend_request_args( {}, kwargs['state'], ['access_token'], ['auth_response', 'token_response', 'refresh_token_response']) return _arg['access_token']
The access token can be in a number of places. There are priority rules as to which one to use, abide by those: 1 If it's among the request parameters use that 2 If among the extra keyword arguments 3 Acquired by a previous run service. :param request: :param token_type: :param service: :param kwargs: :return:
Below is the the instruction that describes the task: ### Input: The access token can be in a number of places. There are priority rules as to which one to use, abide by those: 1 If it's among the request parameters use that 2 If among the extra keyword arguments 3 Acquired by a previous run service. :param request: :param token_type: :param service: :param kwargs: :return: ### Response: def find_token(request, token_type, service, **kwargs): """ The access token can be in a number of places. There are priority rules as to which one to use, abide by those: 1 If it's among the request parameters use that 2 If among the extra keyword arguments 3 Acquired by a previous run service. :param request: :param token_type: :param service: :param kwargs: :return: """ if request is not None: try: _token = request[token_type] except KeyError: pass else: del request[token_type] # Required under certain circumstances :-) not under other request.c_param[token_type] = SINGLE_OPTIONAL_STRING return _token try: return kwargs["access_token"] except KeyError: # I should pick the latest acquired token, this should be the right # order for that. _arg = service.multiple_extend_request_args( {}, kwargs['state'], ['access_token'], ['auth_response', 'token_response', 'refresh_token_response']) return _arg['access_token']
def mul_inv(a, b): """ Modular inversion a mod b :param a: :param b: :return: """ b0 = b x0, x1 = 0, 1 if b == 1: return 1 while a > 1: q = a // b a, b = b, a % b x0, x1 = x1 - q * x0, x0 if x1 < 0: x1 += b0 return x1
Modular inversion a mod b :param a: :param b: :return:
Below is the the instruction that describes the task: ### Input: Modular inversion a mod b :param a: :param b: :return: ### Response: def mul_inv(a, b): """ Modular inversion a mod b :param a: :param b: :return: """ b0 = b x0, x1 = 0, 1 if b == 1: return 1 while a > 1: q = a // b a, b = b, a % b x0, x1 = x1 - q * x0, x0 if x1 < 0: x1 += b0 return x1
def derive_and_set_name_fields_and_slug( self, set_name_sort=True, set_slug=True ): """ Override this method from `CreatorBase` to handle additional name fields for Person creators. This method is called during `save()` """ super(PersonCreator, self).derive_and_set_name_fields_and_slug( set_name_sort=False, set_slug=False) # Collect person name fields, but only if they are not empty person_names = [ name for name in [self.name_family, self.name_given] if not is_empty(name) ] # if empty, set `name_sort` = '{name_family}, {name_given}' if these # person name values are available otherwise `name_full` if set_name_sort and is_empty(self.name_sort): if person_names: self.name_sort = ', '.join(person_names) else: self.name_sort = self.name_full # if empty, set `slug` to slugified '{name_family} {name_given}' if # these person name values are available otherwise slugified # `name_full` if set_slug and is_empty(self.slug): if person_names: self.slug = slugify(' '.join(person_names)) else: self.slug = slugify(self.name_full)
Override this method from `CreatorBase` to handle additional name fields for Person creators. This method is called during `save()`
Below is the the instruction that describes the task: ### Input: Override this method from `CreatorBase` to handle additional name fields for Person creators. This method is called during `save()` ### Response: def derive_and_set_name_fields_and_slug( self, set_name_sort=True, set_slug=True ): """ Override this method from `CreatorBase` to handle additional name fields for Person creators. This method is called during `save()` """ super(PersonCreator, self).derive_and_set_name_fields_and_slug( set_name_sort=False, set_slug=False) # Collect person name fields, but only if they are not empty person_names = [ name for name in [self.name_family, self.name_given] if not is_empty(name) ] # if empty, set `name_sort` = '{name_family}, {name_given}' if these # person name values are available otherwise `name_full` if set_name_sort and is_empty(self.name_sort): if person_names: self.name_sort = ', '.join(person_names) else: self.name_sort = self.name_full # if empty, set `slug` to slugified '{name_family} {name_given}' if # these person name values are available otherwise slugified # `name_full` if set_slug and is_empty(self.slug): if person_names: self.slug = slugify(' '.join(person_names)) else: self.slug = slugify(self.name_full)
def __compare_parameters(self, width, height, zoom, parameters): """Compare parameters for equality Checks if a cached image is existing, the the dimensions agree and finally if the properties are equal. If so, True is returned, else False, :param width: The width of the image :param height: The height of the image :param zoom: The current scale/zoom factor :param parameters: The parameters used for the image :return: True if all parameters are equal, False else """ # Deactivated caching if not global_gui_config.get_config_value('ENABLE_CACHING', True): return False # Empty cache if not self.__image: return False # Changed image size if self.__width != width or self.__height != height: return False # Current zoom greater then prepared zoom if zoom > self.__zoom * self.__zoom_multiplicator: return False # Current zoom much smaller than prepared zoom, causes high memory usage and imperfect anti-aliasing if zoom < self.__zoom / self.__zoom_multiplicator: return False # Changed drawing parameter for key in parameters: try: if key not in self.__last_parameters or self.__last_parameters[key] != parameters[key]: return False except (AttributeError, ValueError): # Some values cannot be compared and raise an exception on comparison (e.g. numpy.ndarray). In this # case, just return False and do not cache. try: # Catch at least the ndarray-case, as this could occure relatively often import numpy if isinstance(self.__last_parameters[key], numpy.ndarray): return numpy.array_equal(self.__last_parameters[key], parameters[key]) except ImportError: return False return False return True
Compare parameters for equality Checks if a cached image is existing, the the dimensions agree and finally if the properties are equal. If so, True is returned, else False, :param width: The width of the image :param height: The height of the image :param zoom: The current scale/zoom factor :param parameters: The parameters used for the image :return: True if all parameters are equal, False else
Below is the the instruction that describes the task: ### Input: Compare parameters for equality Checks if a cached image is existing, the the dimensions agree and finally if the properties are equal. If so, True is returned, else False, :param width: The width of the image :param height: The height of the image :param zoom: The current scale/zoom factor :param parameters: The parameters used for the image :return: True if all parameters are equal, False else ### Response: def __compare_parameters(self, width, height, zoom, parameters): """Compare parameters for equality Checks if a cached image is existing, the the dimensions agree and finally if the properties are equal. If so, True is returned, else False, :param width: The width of the image :param height: The height of the image :param zoom: The current scale/zoom factor :param parameters: The parameters used for the image :return: True if all parameters are equal, False else """ # Deactivated caching if not global_gui_config.get_config_value('ENABLE_CACHING', True): return False # Empty cache if not self.__image: return False # Changed image size if self.__width != width or self.__height != height: return False # Current zoom greater then prepared zoom if zoom > self.__zoom * self.__zoom_multiplicator: return False # Current zoom much smaller than prepared zoom, causes high memory usage and imperfect anti-aliasing if zoom < self.__zoom / self.__zoom_multiplicator: return False # Changed drawing parameter for key in parameters: try: if key not in self.__last_parameters or self.__last_parameters[key] != parameters[key]: return False except (AttributeError, ValueError): # Some values cannot be compared and raise an exception on comparison (e.g. numpy.ndarray). In this # case, just return False and do not cache. 
try: # Catch at least the ndarray-case, as this could occure relatively often import numpy if isinstance(self.__last_parameters[key], numpy.ndarray): return numpy.array_equal(self.__last_parameters[key], parameters[key]) except ImportError: return False return False return True
def readList(self, register, length): """Read a length number of bytes from the specified register. Results will be returned as a bytearray.""" results = self._bus.read_i2c_block_data(self._address, register, length) self._logger.debug("Read the following from register 0x%02X: %s", register, results) return results
Read a length number of bytes from the specified register. Results will be returned as a bytearray.
Below is the the instruction that describes the task: ### Input: Read a length number of bytes from the specified register. Results will be returned as a bytearray. ### Response: def readList(self, register, length): """Read a length number of bytes from the specified register. Results will be returned as a bytearray.""" results = self._bus.read_i2c_block_data(self._address, register, length) self._logger.debug("Read the following from register 0x%02X: %s", register, results) return results
def default_links_factory_with_additional(additional_links): """Generate a links generation factory with the specified additional links. :param additional_links: A dict of link names to links to be added to the returned object. :returns: A link generation factory. """ def factory(pid, **kwargs): links = default_links_factory(pid) for link in additional_links: links[link] = additional_links[link].format(pid=pid, scheme=request.scheme, host=request.host) return links return factory
Generate a links generation factory with the specified additional links. :param additional_links: A dict of link names to links to be added to the returned object. :returns: A link generation factory.
Below is the the instruction that describes the task: ### Input: Generate a links generation factory with the specified additional links. :param additional_links: A dict of link names to links to be added to the returned object. :returns: A link generation factory. ### Response: def default_links_factory_with_additional(additional_links): """Generate a links generation factory with the specified additional links. :param additional_links: A dict of link names to links to be added to the returned object. :returns: A link generation factory. """ def factory(pid, **kwargs): links = default_links_factory(pid) for link in additional_links: links[link] = additional_links[link].format(pid=pid, scheme=request.scheme, host=request.host) return links return factory
def rangize_supplement(spans,lngth): ''' spans = [(0, 3), (4, 7), (8, 10), (11, 12), (13, 16), (17, 20)] rangize_supplement(spans,24) ''' rslt = [] si = 0 ei = spans[0][0] if(si == ei): pass else: rslt.append((si,ei)) prev_ei = spans[0][1] for i in range(1,spans.__len__()): si = prev_ei ei = spans[i][0] rslt.append((si,ei)) prev_ei = spans[i][1] if(prev_ei < lngth): rslt.append((prev_ei,lngth)) else: rslt.append((prev_ei,lngth+1)) return(rslt)
spans = [(0, 3), (4, 7), (8, 10), (11, 12), (13, 16), (17, 20)] rangize_supplement(spans,24)
Below is the the instruction that describes the task: ### Input: spans = [(0, 3), (4, 7), (8, 10), (11, 12), (13, 16), (17, 20)] rangize_supplement(spans,24) ### Response: def rangize_supplement(spans,lngth): ''' spans = [(0, 3), (4, 7), (8, 10), (11, 12), (13, 16), (17, 20)] rangize_supplement(spans,24) ''' rslt = [] si = 0 ei = spans[0][0] if(si == ei): pass else: rslt.append((si,ei)) prev_ei = spans[0][1] for i in range(1,spans.__len__()): si = prev_ei ei = spans[i][0] rslt.append((si,ei)) prev_ei = spans[i][1] if(prev_ei < lngth): rslt.append((prev_ei,lngth)) else: rslt.append((prev_ei,lngth+1)) return(rslt)
def _read_password_from_pgpass( *, passfile: typing.Optional[pathlib.Path], hosts: typing.List[str], ports: typing.List[int], database: str, user: str): """Parse the pgpass file and return the matching password. :return: Password string, if found, ``None`` otherwise. """ passtab = _read_password_file(passfile) if not passtab: return None for host, port in zip(hosts, ports): if host.startswith('/'): # Unix sockets get normalized into 'localhost' host = 'localhost' for phost, pport, pdatabase, puser, ppassword in passtab: if phost != '*' and phost != host: continue if pport != '*' and pport != str(port): continue if pdatabase != '*' and pdatabase != database: continue if puser != '*' and puser != user: continue # Found a match. return ppassword return None
Parse the pgpass file and return the matching password. :return: Password string, if found, ``None`` otherwise.
Below is the the instruction that describes the task: ### Input: Parse the pgpass file and return the matching password. :return: Password string, if found, ``None`` otherwise. ### Response: def _read_password_from_pgpass( *, passfile: typing.Optional[pathlib.Path], hosts: typing.List[str], ports: typing.List[int], database: str, user: str): """Parse the pgpass file and return the matching password. :return: Password string, if found, ``None`` otherwise. """ passtab = _read_password_file(passfile) if not passtab: return None for host, port in zip(hosts, ports): if host.startswith('/'): # Unix sockets get normalized into 'localhost' host = 'localhost' for phost, pport, pdatabase, puser, ppassword in passtab: if phost != '*' and phost != host: continue if pport != '*' and pport != str(port): continue if pdatabase != '*' and pdatabase != database: continue if puser != '*' and puser != user: continue # Found a match. return ppassword return None
def main(*argv): """ main driver of program """ try: # Inputs # adminUsername = argv[0] adminPassword = argv[1] siteURL = argv[2] groupTitle = argv[3] groupTags = argv[4] description = argv[5] access = argv[6] # Logic # # Connect to the site # sh = arcrest.AGOLTokenSecurityHandler(adminUsername, adminPassword) admin = arcrest.manageorg.Administration(url=siteURL, securityHandler=sh, initialize=True) community = admin.community # Create Group # res = community.createGroup(title=groupTitle, tags=groupTags, description=description, snippet="", phone="", access=access, sortField="title", sortOrder="asc", isViewOnly=False, isInvitationOnly=False, thumbnail=None) arcpy.SetParameterAsText(7, str(res)) except arcpy.ExecuteError: line, filename, synerror = trace() arcpy.AddError("error on line: %s" % line) arcpy.AddError("error in file name: %s" % filename) arcpy.AddError("with error message: %s" % synerror) arcpy.AddError("ArcPy Error Message: %s" % arcpy.GetMessages(2)) except FunctionError, f_e: messages = f_e.args[0] arcpy.AddError("error in function: %s" % messages["function"]) arcpy.AddError("error on line: %s" % messages["line"]) arcpy.AddError("error in file name: %s" % messages["filename"]) arcpy.AddError("with error message: %s" % messages["synerror"]) arcpy.AddError("ArcPy Error Message: %s" % messages["arc"]) except: line, filename, synerror = trace() arcpy.AddError("error on line: %s" % line) arcpy.AddError("error in file name: %s" % filename) arcpy.AddError("with error message: %s" % synerror)
main driver of program
Below is the the instruction that describes the task: ### Input: main driver of program ### Response: def main(*argv): """ main driver of program """ try: # Inputs # adminUsername = argv[0] adminPassword = argv[1] siteURL = argv[2] groupTitle = argv[3] groupTags = argv[4] description = argv[5] access = argv[6] # Logic # # Connect to the site # sh = arcrest.AGOLTokenSecurityHandler(adminUsername, adminPassword) admin = arcrest.manageorg.Administration(url=siteURL, securityHandler=sh, initialize=True) community = admin.community # Create Group # res = community.createGroup(title=groupTitle, tags=groupTags, description=description, snippet="", phone="", access=access, sortField="title", sortOrder="asc", isViewOnly=False, isInvitationOnly=False, thumbnail=None) arcpy.SetParameterAsText(7, str(res)) except arcpy.ExecuteError: line, filename, synerror = trace() arcpy.AddError("error on line: %s" % line) arcpy.AddError("error in file name: %s" % filename) arcpy.AddError("with error message: %s" % synerror) arcpy.AddError("ArcPy Error Message: %s" % arcpy.GetMessages(2)) except FunctionError, f_e: messages = f_e.args[0] arcpy.AddError("error in function: %s" % messages["function"]) arcpy.AddError("error on line: %s" % messages["line"]) arcpy.AddError("error in file name: %s" % messages["filename"]) arcpy.AddError("with error message: %s" % messages["synerror"]) arcpy.AddError("ArcPy Error Message: %s" % messages["arc"]) except: line, filename, synerror = trace() arcpy.AddError("error on line: %s" % line) arcpy.AddError("error in file name: %s" % filename) arcpy.AddError("with error message: %s" % synerror)
def write(models, out=None, base=None, propertybase=None, shorteners=None, logger=logging): ''' models - input Versa models from which output is generated. Must be a sequence object, not an iterator ''' assert out is not None #Output stream required if not isinstance(models, list): models = [models] shorteners = shorteners or {} all_propertybase = [propertybase] if propertybase else [] all_propertybase.append(VERSA_BASEIRI) if any((base, propertybase, shorteners)): out.write('# @docheader\n\n* @iri:\n') if base: out.write(' * @base: {0}'.format(base)) #for k, v in shorteners: # out.write(' * @base: {0}'.format(base)) out.write('\n\n') origin_space = set() #base_out = models[0].base for m in models: origin_space.update(all_origins(m)) for o in origin_space: out.write('# {0}\n\n'.format(o)) for o_, r, t, a in m.match(o): abbr_r = abbreviate(r, all_propertybase) value_format(t) out.write('* {0}: {1}\n'.format(abbr_r, value_format(t))) for k, v in a.items(): abbr_k = abbreviate(k, all_propertybase) out.write(' * {0}: {1}\n'.format(k, value_format(v))) out.write('\n') return
models - input Versa models from which output is generated. Must be a sequence object, not an iterator
Below is the the instruction that describes the task: ### Input: models - input Versa models from which output is generated. Must be a sequence object, not an iterator ### Response: def write(models, out=None, base=None, propertybase=None, shorteners=None, logger=logging): ''' models - input Versa models from which output is generated. Must be a sequence object, not an iterator ''' assert out is not None #Output stream required if not isinstance(models, list): models = [models] shorteners = shorteners or {} all_propertybase = [propertybase] if propertybase else [] all_propertybase.append(VERSA_BASEIRI) if any((base, propertybase, shorteners)): out.write('# @docheader\n\n* @iri:\n') if base: out.write(' * @base: {0}'.format(base)) #for k, v in shorteners: # out.write(' * @base: {0}'.format(base)) out.write('\n\n') origin_space = set() #base_out = models[0].base for m in models: origin_space.update(all_origins(m)) for o in origin_space: out.write('# {0}\n\n'.format(o)) for o_, r, t, a in m.match(o): abbr_r = abbreviate(r, all_propertybase) value_format(t) out.write('* {0}: {1}\n'.format(abbr_r, value_format(t))) for k, v in a.items(): abbr_k = abbreviate(k, all_propertybase) out.write(' * {0}: {1}\n'.format(k, value_format(v))) out.write('\n') return
def day_to_month(timeperiod): """:param timeperiod: as string in YYYYMMDD00 format :return string in YYYYMM0000 format""" t = datetime.strptime(timeperiod, SYNERGY_DAILY_PATTERN) return t.strftime(SYNERGY_MONTHLY_PATTERN)
:param timeperiod: as string in YYYYMMDD00 format :return string in YYYYMM0000 format
Below is the the instruction that describes the task: ### Input: :param timeperiod: as string in YYYYMMDD00 format :return string in YYYYMM0000 format ### Response: def day_to_month(timeperiod): """:param timeperiod: as string in YYYYMMDD00 format :return string in YYYYMM0000 format""" t = datetime.strptime(timeperiod, SYNERGY_DAILY_PATTERN) return t.strftime(SYNERGY_MONTHLY_PATTERN)
def get_initial_states(self, input_var, init_state=None): """ :type input_var: T.var :rtype: dict """ initial_states = {} for state in self.state_names: if state != "state" or not init_state: if self._input_type == 'sequence' and input_var.ndim == 2: init_state = T.alloc(np.cast[env.FLOATX](0.), self.hidden_size) else: init_state = T.alloc(np.cast[env.FLOATX](0.), input_var.shape[0], self.hidden_size) initial_states[state] = init_state return initial_states
:type input_var: T.var :rtype: dict
Below is the the instruction that describes the task: ### Input: :type input_var: T.var :rtype: dict ### Response: def get_initial_states(self, input_var, init_state=None): """ :type input_var: T.var :rtype: dict """ initial_states = {} for state in self.state_names: if state != "state" or not init_state: if self._input_type == 'sequence' and input_var.ndim == 2: init_state = T.alloc(np.cast[env.FLOATX](0.), self.hidden_size) else: init_state = T.alloc(np.cast[env.FLOATX](0.), input_var.shape[0], self.hidden_size) initial_states[state] = init_state return initial_states
def download_loci(self): """ Uses a multi-threaded approach to download allele files """ # Setup the multiprocessing pool. pool = multiprocessing.Pool(processes=self.threads) # Map the list of loci URLs to the download method pool.map(self.download_threads, self.loci_url) pool.close() pool.join()
Uses a multi-threaded approach to download allele files
Below is the the instruction that describes the task: ### Input: Uses a multi-threaded approach to download allele files ### Response: def download_loci(self): """ Uses a multi-threaded approach to download allele files """ # Setup the multiprocessing pool. pool = multiprocessing.Pool(processes=self.threads) # Map the list of loci URLs to the download method pool.map(self.download_threads, self.loci_url) pool.close() pool.join()
def revert_to(self): ''' method to revert resource to this version by issuing PATCH Args: None Returns: None: sends PATCH request, and refreshes parent resource ''' # send patch response = self.resource.repo.api.http_request('PATCH', self.uri) # if response 204 if response.status_code == 204: logger.debug('reverting to previous version of resource, %s' % self.uri) # refresh current resource handle self._current_resource.refresh() else: raise Exception('HTTP %s, could not revert to resource version, %s' % (response.status_code, self.uri))
method to revert resource to this version by issuing PATCH Args: None Returns: None: sends PATCH request, and refreshes parent resource
Below is the the instruction that describes the task: ### Input: method to revert resource to this version by issuing PATCH Args: None Returns: None: sends PATCH request, and refreshes parent resource ### Response: def revert_to(self): ''' method to revert resource to this version by issuing PATCH Args: None Returns: None: sends PATCH request, and refreshes parent resource ''' # send patch response = self.resource.repo.api.http_request('PATCH', self.uri) # if response 204 if response.status_code == 204: logger.debug('reverting to previous version of resource, %s' % self.uri) # refresh current resource handle self._current_resource.refresh() else: raise Exception('HTTP %s, could not revert to resource version, %s' % (response.status_code, self.uri))
def run_main(workdir, config_file=None, fc_dir=None, run_info_yaml=None, parallel=None, workflow=None): """Run variant analysis, handling command line options. """ # Set environment to standard to use periods for decimals and avoid localization os.environ["LC_ALL"] = "C" os.environ["LC"] = "C" os.environ["LANG"] = "C" workdir = utils.safe_makedir(os.path.abspath(workdir)) os.chdir(workdir) config, config_file = config_utils.load_system_config(config_file, workdir) if config.get("log_dir", None) is None: config["log_dir"] = os.path.join(workdir, DEFAULT_LOG_DIR) if parallel["type"] in ["local", "clusterk"]: _setup_resources() _run_toplevel(config, config_file, workdir, parallel, fc_dir, run_info_yaml) elif parallel["type"] == "ipython": assert parallel["scheduler"] is not None, "IPython parallel requires a specified scheduler (-s)" if parallel["scheduler"] != "sge": assert parallel["queue"] is not None, "IPython parallel requires a specified queue (-q)" elif not parallel["queue"]: parallel["queue"] = "" _run_toplevel(config, config_file, workdir, parallel, fc_dir, run_info_yaml) else: raise ValueError("Unexpected type of parallel run: %s" % parallel["type"])
Run variant analysis, handling command line options.
Below is the the instruction that describes the task: ### Input: Run variant analysis, handling command line options. ### Response: def run_main(workdir, config_file=None, fc_dir=None, run_info_yaml=None, parallel=None, workflow=None): """Run variant analysis, handling command line options. """ # Set environment to standard to use periods for decimals and avoid localization os.environ["LC_ALL"] = "C" os.environ["LC"] = "C" os.environ["LANG"] = "C" workdir = utils.safe_makedir(os.path.abspath(workdir)) os.chdir(workdir) config, config_file = config_utils.load_system_config(config_file, workdir) if config.get("log_dir", None) is None: config["log_dir"] = os.path.join(workdir, DEFAULT_LOG_DIR) if parallel["type"] in ["local", "clusterk"]: _setup_resources() _run_toplevel(config, config_file, workdir, parallel, fc_dir, run_info_yaml) elif parallel["type"] == "ipython": assert parallel["scheduler"] is not None, "IPython parallel requires a specified scheduler (-s)" if parallel["scheduler"] != "sge": assert parallel["queue"] is not None, "IPython parallel requires a specified queue (-q)" elif not parallel["queue"]: parallel["queue"] = "" _run_toplevel(config, config_file, workdir, parallel, fc_dir, run_info_yaml) else: raise ValueError("Unexpected type of parallel run: %s" % parallel["type"])
def _build_url_rewriter(cls, session: AppSession): '''Build URL rewriter if needed.''' if session.args.escaped_fragment or session.args.strip_session_id: return session.factory.new( 'URLRewriter', hash_fragment=session.args.escaped_fragment, session_id=session.args.strip_session_id )
Build URL rewriter if needed.
Below is the the instruction that describes the task: ### Input: Build URL rewriter if needed. ### Response: def _build_url_rewriter(cls, session: AppSession): '''Build URL rewriter if needed.''' if session.args.escaped_fragment or session.args.strip_session_id: return session.factory.new( 'URLRewriter', hash_fragment=session.args.escaped_fragment, session_id=session.args.strip_session_id )
def cummax(self, axis=None, skipna=True, *args, **kwargs): """Perform a cumulative maximum across the DataFrame. Args: axis (int): The axis to take maximum on. skipna (bool): True to skip NA values, false otherwise. Returns: The cumulative maximum of the DataFrame. """ axis = self._get_axis_number(axis) if axis is not None else 0 if axis: self._validate_dtypes() return self.__constructor__( query_compiler=self._query_compiler.cummax( axis=axis, skipna=skipna, **kwargs ) )
Perform a cumulative maximum across the DataFrame. Args: axis (int): The axis to take maximum on. skipna (bool): True to skip NA values, false otherwise. Returns: The cumulative maximum of the DataFrame.
Below is the the instruction that describes the task: ### Input: Perform a cumulative maximum across the DataFrame. Args: axis (int): The axis to take maximum on. skipna (bool): True to skip NA values, false otherwise. Returns: The cumulative maximum of the DataFrame. ### Response: def cummax(self, axis=None, skipna=True, *args, **kwargs): """Perform a cumulative maximum across the DataFrame. Args: axis (int): The axis to take maximum on. skipna (bool): True to skip NA values, false otherwise. Returns: The cumulative maximum of the DataFrame. """ axis = self._get_axis_number(axis) if axis is not None else 0 if axis: self._validate_dtypes() return self.__constructor__( query_compiler=self._query_compiler.cummax( axis=axis, skipna=skipna, **kwargs ) )
def get_versioned_viewname(self, viewname, request): """ Prefix viewname with full namespace bananas:vX.Y: """ namespace = request.resolver_match.namespace if namespace: viewname = "{}:{}".format(namespace, viewname) return viewname
Prefix viewname with full namespace bananas:vX.Y:
Below is the the instruction that describes the task: ### Input: Prefix viewname with full namespace bananas:vX.Y: ### Response: def get_versioned_viewname(self, viewname, request): """ Prefix viewname with full namespace bananas:vX.Y: """ namespace = request.resolver_match.namespace if namespace: viewname = "{}:{}".format(namespace, viewname) return viewname
def clients(self, protocol=None, groups=None): """Returns a list of :py:class:`.Client` for the specific query by the user. Keyword Parameters: protocol Ignored. groups The groups (types) to which the clients belong either from ('Genuine', 'Impostor') Note that 'eval' is an alias for 'Genuine'. If no groups are specified, then both clients are impostors are listed. Returns: A list containing all the clients which have the given properties. """ groups = self.__group_replace_eval_by_genuine__(groups) groups = self.check_parameters_for_validity(groups, "group", self.client_types()) # List of the clients q = self.query(Client) if groups: q = q.filter(Client.stype.in_(groups)) q = q.order_by(Client.id) return list(q)
Returns a list of :py:class:`.Client` for the specific query by the user. Keyword Parameters: protocol Ignored. groups The groups (types) to which the clients belong either from ('Genuine', 'Impostor') Note that 'eval' is an alias for 'Genuine'. If no groups are specified, then both clients are impostors are listed. Returns: A list containing all the clients which have the given properties.
Below is the the instruction that describes the task: ### Input: Returns a list of :py:class:`.Client` for the specific query by the user. Keyword Parameters: protocol Ignored. groups The groups (types) to which the clients belong either from ('Genuine', 'Impostor') Note that 'eval' is an alias for 'Genuine'. If no groups are specified, then both clients are impostors are listed. Returns: A list containing all the clients which have the given properties. ### Response: def clients(self, protocol=None, groups=None): """Returns a list of :py:class:`.Client` for the specific query by the user. Keyword Parameters: protocol Ignored. groups The groups (types) to which the clients belong either from ('Genuine', 'Impostor') Note that 'eval' is an alias for 'Genuine'. If no groups are specified, then both clients are impostors are listed. Returns: A list containing all the clients which have the given properties. """ groups = self.__group_replace_eval_by_genuine__(groups) groups = self.check_parameters_for_validity(groups, "group", self.client_types()) # List of the clients q = self.query(Client) if groups: q = q.filter(Client.stype.in_(groups)) q = q.order_by(Client.id) return list(q)
def run(self, *args, **kwargs): """ Connect and run bot in event loop. """ self.eventloop.run_until_complete(self.connect(*args, **kwargs)) try: self.eventloop.run_forever() finally: self.eventloop.stop()
Connect and run bot in event loop.
Below is the the instruction that describes the task: ### Input: Connect and run bot in event loop. ### Response: def run(self, *args, **kwargs): """ Connect and run bot in event loop. """ self.eventloop.run_until_complete(self.connect(*args, **kwargs)) try: self.eventloop.run_forever() finally: self.eventloop.stop()
def put(self, key, value): '''Stores the object named by `key`. Follows links.''' # if value is a link, don't follow links if self._link_for_value(value): super(SymlinkDatastore, self).put(key, value) return # if `key` points to a symlink, need to follow it. current_value = super(SymlinkDatastore, self).get(key) link_key = self._link_for_value(current_value) if link_key: self.put(link_key, value) # self.put: could be another link. else: super(SymlinkDatastore, self).put(key, value)
Stores the object named by `key`. Follows links.
Below is the the instruction that describes the task: ### Input: Stores the object named by `key`. Follows links. ### Response: def put(self, key, value): '''Stores the object named by `key`. Follows links.''' # if value is a link, don't follow links if self._link_for_value(value): super(SymlinkDatastore, self).put(key, value) return # if `key` points to a symlink, need to follow it. current_value = super(SymlinkDatastore, self).get(key) link_key = self._link_for_value(current_value) if link_key: self.put(link_key, value) # self.put: could be another link. else: super(SymlinkDatastore, self).put(key, value)
def on_touch_down(self, touch): """Check for collisions and select an appropriate entity.""" if hasattr(self, '_lasttouch') and self._lasttouch == touch: return if not self.collide_point(*touch.pos): return touch.push() touch.apply_transform_2d(self.to_local) if self.app.selection: if self.app.selection.collide_point(*touch.pos): Logger.debug("Board: hit selection") touch.grab(self.app.selection) pawns = list(self.pawns_at(*touch.pos)) if pawns: Logger.debug("Board: hit {} pawns".format(len(pawns))) self.selection_candidates = pawns if self.app.selection in self.selection_candidates: self.selection_candidates.remove(self.app.selection) touch.pop() return True spots = list(self.spots_at(*touch.pos)) if spots: Logger.debug("Board: hit {} spots".format(len(spots))) self.selection_candidates = spots if self.adding_portal: self.origspot = self.selection_candidates.pop(0) self.protodest = Dummy( name='protodest', pos=touch.pos, size=(0, 0) ) self.add_widget(self.protodest) self.protodest.on_touch_down(touch) self.protoportal = self.proto_arrow_cls( origin=self.origspot, destination=self.protodest ) self.add_widget(self.protoportal) if self.reciprocal_portal: self.protoportal2 = self.proto_arrow_cls( destination=self.origspot, origin=self.protodest ) self.add_widget(self.protoportal2) touch.pop() return True arrows = list(self.arrows_at(*touch.pos)) if arrows: Logger.debug("Board: hit {} arrows".format(len(arrows))) self.selection_candidates = arrows if self.app.selection in self.selection_candidates: self.selection_candidates.remove(self.app.selection) touch.pop() return True touch.pop()
Check for collisions and select an appropriate entity.
Below is the the instruction that describes the task: ### Input: Check for collisions and select an appropriate entity. ### Response: def on_touch_down(self, touch): """Check for collisions and select an appropriate entity.""" if hasattr(self, '_lasttouch') and self._lasttouch == touch: return if not self.collide_point(*touch.pos): return touch.push() touch.apply_transform_2d(self.to_local) if self.app.selection: if self.app.selection.collide_point(*touch.pos): Logger.debug("Board: hit selection") touch.grab(self.app.selection) pawns = list(self.pawns_at(*touch.pos)) if pawns: Logger.debug("Board: hit {} pawns".format(len(pawns))) self.selection_candidates = pawns if self.app.selection in self.selection_candidates: self.selection_candidates.remove(self.app.selection) touch.pop() return True spots = list(self.spots_at(*touch.pos)) if spots: Logger.debug("Board: hit {} spots".format(len(spots))) self.selection_candidates = spots if self.adding_portal: self.origspot = self.selection_candidates.pop(0) self.protodest = Dummy( name='protodest', pos=touch.pos, size=(0, 0) ) self.add_widget(self.protodest) self.protodest.on_touch_down(touch) self.protoportal = self.proto_arrow_cls( origin=self.origspot, destination=self.protodest ) self.add_widget(self.protoportal) if self.reciprocal_portal: self.protoportal2 = self.proto_arrow_cls( destination=self.origspot, origin=self.protodest ) self.add_widget(self.protoportal2) touch.pop() return True arrows = list(self.arrows_at(*touch.pos)) if arrows: Logger.debug("Board: hit {} arrows".format(len(arrows))) self.selection_candidates = arrows if self.app.selection in self.selection_candidates: self.selection_candidates.remove(self.app.selection) touch.pop() return True touch.pop()
def start(self): """ instanciate request session with authent :return: """ LOGGER.debug("rest.Driver.start") self.session = requests.Session() self.session.auth = (self.user, self.password)
instanciate request session with authent :return:
Below is the the instruction that describes the task: ### Input: instanciate request session with authent :return: ### Response: def start(self): """ instanciate request session with authent :return: """ LOGGER.debug("rest.Driver.start") self.session = requests.Session() self.session.auth = (self.user, self.password)
def get_comment_lookup_session(self): """Gets the ``OsidSession`` associated with the comment lookup service. return: (osid.commenting.CommentLookupSession) - a ``CommentLookupSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_comment_lookup()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_comment_lookup()`` is ``true``.* """ if not self.supports_comment_lookup(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.CommentLookupSession(runtime=self._runtime)
Gets the ``OsidSession`` associated with the comment lookup service. return: (osid.commenting.CommentLookupSession) - a ``CommentLookupSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_comment_lookup()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_comment_lookup()`` is ``true``.*
Below is the the instruction that describes the task: ### Input: Gets the ``OsidSession`` associated with the comment lookup service. return: (osid.commenting.CommentLookupSession) - a ``CommentLookupSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_comment_lookup()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_comment_lookup()`` is ``true``.* ### Response: def get_comment_lookup_session(self): """Gets the ``OsidSession`` associated with the comment lookup service. return: (osid.commenting.CommentLookupSession) - a ``CommentLookupSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_comment_lookup()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_comment_lookup()`` is ``true``.* """ if not self.supports_comment_lookup(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.CommentLookupSession(runtime=self._runtime)
def unstack(self, unstacker_func, fill_value): """Return a blockmanager with all blocks unstacked. Parameters ---------- unstacker_func : callable A (partially-applied) ``pd.core.reshape._Unstacker`` class. fill_value : Any fill_value for newly introduced missing values. Returns ------- unstacked : BlockManager """ n_rows = self.shape[-1] dummy = unstacker_func(np.empty((0, 0)), value_columns=self.items) new_columns = dummy.get_new_columns() new_index = dummy.get_new_index() new_blocks = [] columns_mask = [] for blk in self.blocks: blocks, mask = blk._unstack( partial(unstacker_func, value_columns=self.items[blk.mgr_locs.indexer]), new_columns, n_rows, fill_value ) new_blocks.extend(blocks) columns_mask.extend(mask) new_columns = new_columns[columns_mask] bm = BlockManager(new_blocks, [new_columns, new_index]) return bm
Return a blockmanager with all blocks unstacked. Parameters ---------- unstacker_func : callable A (partially-applied) ``pd.core.reshape._Unstacker`` class. fill_value : Any fill_value for newly introduced missing values. Returns ------- unstacked : BlockManager
Below is the the instruction that describes the task: ### Input: Return a blockmanager with all blocks unstacked. Parameters ---------- unstacker_func : callable A (partially-applied) ``pd.core.reshape._Unstacker`` class. fill_value : Any fill_value for newly introduced missing values. Returns ------- unstacked : BlockManager ### Response: def unstack(self, unstacker_func, fill_value): """Return a blockmanager with all blocks unstacked. Parameters ---------- unstacker_func : callable A (partially-applied) ``pd.core.reshape._Unstacker`` class. fill_value : Any fill_value for newly introduced missing values. Returns ------- unstacked : BlockManager """ n_rows = self.shape[-1] dummy = unstacker_func(np.empty((0, 0)), value_columns=self.items) new_columns = dummy.get_new_columns() new_index = dummy.get_new_index() new_blocks = [] columns_mask = [] for blk in self.blocks: blocks, mask = blk._unstack( partial(unstacker_func, value_columns=self.items[blk.mgr_locs.indexer]), new_columns, n_rows, fill_value ) new_blocks.extend(blocks) columns_mask.extend(mask) new_columns = new_columns[columns_mask] bm = BlockManager(new_blocks, [new_columns, new_index]) return bm
def intersects_segment(self, seg): """ Returns True if any segmentlist in self intersects the segment, otherwise returns False. """ return any(value.intersects_segment(seg) for value in self.itervalues())
Returns True if any segmentlist in self intersects the segment, otherwise returns False.
Below is the the instruction that describes the task: ### Input: Returns True if any segmentlist in self intersects the segment, otherwise returns False. ### Response: def intersects_segment(self, seg): """ Returns True if any segmentlist in self intersects the segment, otherwise returns False. """ return any(value.intersects_segment(seg) for value in self.itervalues())
def calculate_concat_output_shapes(operator): ''' Allowed input/output patterns are 1. [N_1, C, H, W], ..., [N_n, C, H, W] ---> [N_1 + ... + N_n, C, H, W] 2. [N, C_1, H, W], ..., [N, C_n, H, W] ---> [N, C_1 + ... + C_n, H, W] ''' check_input_and_output_numbers(operator, input_count_range=[1, None], output_count_range=[1, 1]) output_shape = copy.deepcopy(operator.inputs[0].type.shape) dims = [] for variable in operator.inputs: if variable.type.shape[0] != 'None' and variable.type.shape[0] != output_shape[0]: raise RuntimeError('Only dimensions along C-axis can be different') if variable.type.shape[2] != 'None' and variable.type.shape[2] != output_shape[2]: raise RuntimeError('Only dimensions along C-axis can be different') if variable.type.shape[3] != 'None' and variable.type.shape[3] != output_shape[3]: raise RuntimeError('Only dimensions along C-axis can be different') dims.append(variable.type.shape[1]) output_shape[1] = 'None' if 'None' in dims else sum(dims) operator.outputs[0].type.shape = output_shape
Allowed input/output patterns are 1. [N_1, C, H, W], ..., [N_n, C, H, W] ---> [N_1 + ... + N_n, C, H, W] 2. [N, C_1, H, W], ..., [N, C_n, H, W] ---> [N, C_1 + ... + C_n, H, W]
Below is the the instruction that describes the task: ### Input: Allowed input/output patterns are 1. [N_1, C, H, W], ..., [N_n, C, H, W] ---> [N_1 + ... + N_n, C, H, W] 2. [N, C_1, H, W], ..., [N, C_n, H, W] ---> [N, C_1 + ... + C_n, H, W] ### Response: def calculate_concat_output_shapes(operator): ''' Allowed input/output patterns are 1. [N_1, C, H, W], ..., [N_n, C, H, W] ---> [N_1 + ... + N_n, C, H, W] 2. [N, C_1, H, W], ..., [N, C_n, H, W] ---> [N, C_1 + ... + C_n, H, W] ''' check_input_and_output_numbers(operator, input_count_range=[1, None], output_count_range=[1, 1]) output_shape = copy.deepcopy(operator.inputs[0].type.shape) dims = [] for variable in operator.inputs: if variable.type.shape[0] != 'None' and variable.type.shape[0] != output_shape[0]: raise RuntimeError('Only dimensions along C-axis can be different') if variable.type.shape[2] != 'None' and variable.type.shape[2] != output_shape[2]: raise RuntimeError('Only dimensions along C-axis can be different') if variable.type.shape[3] != 'None' and variable.type.shape[3] != output_shape[3]: raise RuntimeError('Only dimensions along C-axis can be different') dims.append(variable.type.shape[1]) output_shape[1] = 'None' if 'None' in dims else sum(dims) operator.outputs[0].type.shape = output_shape
def clearkml(self): '''Clear the kmls from the map''' #go through all the current layers and remove them for layer in self.curlayers: self.mpstate.map.remove_object(layer) for layer in self.curtextlayers: self.mpstate.map.remove_object(layer) self.allayers = [] self.curlayers = [] self.alltextlayers = [] self.curtextlayers = [] self.menu_needs_refreshing = True
Clear the kmls from the map
Below is the the instruction that describes the task: ### Input: Clear the kmls from the map ### Response: def clearkml(self): '''Clear the kmls from the map''' #go through all the current layers and remove them for layer in self.curlayers: self.mpstate.map.remove_object(layer) for layer in self.curtextlayers: self.mpstate.map.remove_object(layer) self.allayers = [] self.curlayers = [] self.alltextlayers = [] self.curtextlayers = [] self.menu_needs_refreshing = True
def _outputs(self): """List of layers containing outputs from the IF. :returns: A list of vector layers. :rtype: list """ layers = OrderedDict() layers[layer_purpose_exposure_summary['key']] = ( self._exposure_summary) layers[layer_purpose_aggregate_hazard_impacted['key']] = ( self._aggregate_hazard_impacted) layers[layer_purpose_aggregation_summary['key']] = ( self._aggregation_summary) layers[layer_purpose_analysis_impacted['key']] = ( self._analysis_impacted) layers[layer_purpose_exposure_summary_table['key']] = ( self._exposure_summary_table) layers[layer_purpose_profiling['key']] = self._profiling_table # Extra layers produced by pre-processing layers.update(self._preprocessors_layers) for expected_purpose, layer in list(layers.items()): if layer: purpose = layer.keywords.get('layer_purpose') if purpose != expected_purpose: # ET 18/11/16 # I'm disabling this check. If an exception is raised in # the IF, this exception might be raised and will hide the # other one. # raise Exception('Wrong layer purpose : %s != %s' % ( # purpose, expected_purpose)) pass # Remove layers which are not set. layers = [layer for layer in list(layers.values()) if layer] return layers
List of layers containing outputs from the IF. :returns: A list of vector layers. :rtype: list
Below is the the instruction that describes the task: ### Input: List of layers containing outputs from the IF. :returns: A list of vector layers. :rtype: list ### Response: def _outputs(self): """List of layers containing outputs from the IF. :returns: A list of vector layers. :rtype: list """ layers = OrderedDict() layers[layer_purpose_exposure_summary['key']] = ( self._exposure_summary) layers[layer_purpose_aggregate_hazard_impacted['key']] = ( self._aggregate_hazard_impacted) layers[layer_purpose_aggregation_summary['key']] = ( self._aggregation_summary) layers[layer_purpose_analysis_impacted['key']] = ( self._analysis_impacted) layers[layer_purpose_exposure_summary_table['key']] = ( self._exposure_summary_table) layers[layer_purpose_profiling['key']] = self._profiling_table # Extra layers produced by pre-processing layers.update(self._preprocessors_layers) for expected_purpose, layer in list(layers.items()): if layer: purpose = layer.keywords.get('layer_purpose') if purpose != expected_purpose: # ET 18/11/16 # I'm disabling this check. If an exception is raised in # the IF, this exception might be raised and will hide the # other one. # raise Exception('Wrong layer purpose : %s != %s' % ( # purpose, expected_purpose)) pass # Remove layers which are not set. layers = [layer for layer in list(layers.values()) if layer] return layers
def nltides_gw_phase_difference(f, f0, amplitude, n, m1, m2): """Calculate the gravitational-wave phase shift bwtween f and f_coalescence = infinity due to non-linear tides. To compute the phase shift between e.g. f_low and f_isco, call this function twice and compute the difference. Parameters ---------- f: float or numpy.array Frequency from which to compute phase f0: float or numpy.array Frequency that NL effects switch on amplitude: float or numpy.array Amplitude of effect n: float or numpy.array Growth dependence of effect m1: float or numpy.array Mass of component 1 m2: float or numpy.array Mass of component 2 Returns ------- delta_phi: float or numpy.array Phase in radians """ f, f0, amplitude, n, m1, m2, input_is_array = ensurearray( f, f0, amplitude, n, m1, m2) delta_phi = numpy.zeros(m1.shape) f_ref, _, phi_of_f_factor = nltides_coefs(amplitude, n, m1, m2) mask = f <= f0 delta_phi[mask] = - phi_of_f_factor[mask] * (f0[mask]/f_ref)**(n[mask]-3.) mask = f > f0 delta_phi[mask] = - phi_of_f_factor[mask] * (f[mask]/f_ref)**(n[mask]-3.) return formatreturn(delta_phi, input_is_array)
Calculate the gravitational-wave phase shift bwtween f and f_coalescence = infinity due to non-linear tides. To compute the phase shift between e.g. f_low and f_isco, call this function twice and compute the difference. Parameters ---------- f: float or numpy.array Frequency from which to compute phase f0: float or numpy.array Frequency that NL effects switch on amplitude: float or numpy.array Amplitude of effect n: float or numpy.array Growth dependence of effect m1: float or numpy.array Mass of component 1 m2: float or numpy.array Mass of component 2 Returns ------- delta_phi: float or numpy.array Phase in radians
Below is the the instruction that describes the task: ### Input: Calculate the gravitational-wave phase shift bwtween f and f_coalescence = infinity due to non-linear tides. To compute the phase shift between e.g. f_low and f_isco, call this function twice and compute the difference. Parameters ---------- f: float or numpy.array Frequency from which to compute phase f0: float or numpy.array Frequency that NL effects switch on amplitude: float or numpy.array Amplitude of effect n: float or numpy.array Growth dependence of effect m1: float or numpy.array Mass of component 1 m2: float or numpy.array Mass of component 2 Returns ------- delta_phi: float or numpy.array Phase in radians ### Response: def nltides_gw_phase_difference(f, f0, amplitude, n, m1, m2): """Calculate the gravitational-wave phase shift bwtween f and f_coalescence = infinity due to non-linear tides. To compute the phase shift between e.g. f_low and f_isco, call this function twice and compute the difference. Parameters ---------- f: float or numpy.array Frequency from which to compute phase f0: float or numpy.array Frequency that NL effects switch on amplitude: float or numpy.array Amplitude of effect n: float or numpy.array Growth dependence of effect m1: float or numpy.array Mass of component 1 m2: float or numpy.array Mass of component 2 Returns ------- delta_phi: float or numpy.array Phase in radians """ f, f0, amplitude, n, m1, m2, input_is_array = ensurearray( f, f0, amplitude, n, m1, m2) delta_phi = numpy.zeros(m1.shape) f_ref, _, phi_of_f_factor = nltides_coefs(amplitude, n, m1, m2) mask = f <= f0 delta_phi[mask] = - phi_of_f_factor[mask] * (f0[mask]/f_ref)**(n[mask]-3.) mask = f > f0 delta_phi[mask] = - phi_of_f_factor[mask] * (f[mask]/f_ref)**(n[mask]-3.) return formatreturn(delta_phi, input_is_array)
def _build_arg_list(self, **kwargs): """Build list of arguments from the dict; keys must be valid gromacs flags.""" arglist = [] for flag, value in kwargs.items(): # XXX: check flag against allowed values flag = str(flag) if flag.startswith('_'): flag = flag[1:] # python-illegal keywords are '_'-quoted if not flag.startswith('-'): flag = '-' + flag # now flag is guaranteed to start with '-' if value is True: arglist.append(flag) # simple command line flag elif value is False: if flag.startswith('-no'): # negate a negated flag ('noX=False' --> X=True --> -X ... but who uses that?) arglist.append('-' + flag[3:]) else: arglist.append('-no' + flag[1:]) # gromacs switches booleans by prefixing 'no' elif value is None: pass # ignore flag = None else: try: arglist.extend([flag] + value) # option with value list except TypeError: arglist.extend([flag, value]) # option with single value return list(map(str, arglist))
Build list of arguments from the dict; keys must be valid gromacs flags.
Below is the the instruction that describes the task: ### Input: Build list of arguments from the dict; keys must be valid gromacs flags. ### Response: def _build_arg_list(self, **kwargs): """Build list of arguments from the dict; keys must be valid gromacs flags.""" arglist = [] for flag, value in kwargs.items(): # XXX: check flag against allowed values flag = str(flag) if flag.startswith('_'): flag = flag[1:] # python-illegal keywords are '_'-quoted if not flag.startswith('-'): flag = '-' + flag # now flag is guaranteed to start with '-' if value is True: arglist.append(flag) # simple command line flag elif value is False: if flag.startswith('-no'): # negate a negated flag ('noX=False' --> X=True --> -X ... but who uses that?) arglist.append('-' + flag[3:]) else: arglist.append('-no' + flag[1:]) # gromacs switches booleans by prefixing 'no' elif value is None: pass # ignore flag = None else: try: arglist.extend([flag] + value) # option with value list except TypeError: arglist.extend([flag, value]) # option with single value return list(map(str, arglist))
def angle(array_of_xyzs): """ Calculates angle between three coordinate points (I could not find a package that does this but if one exists that would probably be better). Used for Angle constraints. """ ab = array_of_xyzs[0] - array_of_xyzs[1] cb = array_of_xyzs[2] - array_of_xyzs[1] return np.arccos((np.dot(ab,cb)) / (np.sqrt(ab[0]**2 + ab[1]**2 \ + ab[2]**2) * np.sqrt(cb[0]**2 + cb[1]**2 + cb[2]**2)))
Calculates angle between three coordinate points (I could not find a package that does this but if one exists that would probably be better). Used for Angle constraints.
Below is the the instruction that describes the task: ### Input: Calculates angle between three coordinate points (I could not find a package that does this but if one exists that would probably be better). Used for Angle constraints. ### Response: def angle(array_of_xyzs): """ Calculates angle between three coordinate points (I could not find a package that does this but if one exists that would probably be better). Used for Angle constraints. """ ab = array_of_xyzs[0] - array_of_xyzs[1] cb = array_of_xyzs[2] - array_of_xyzs[1] return np.arccos((np.dot(ab,cb)) / (np.sqrt(ab[0]**2 + ab[1]**2 \ + ab[2]**2) * np.sqrt(cb[0]**2 + cb[1]**2 + cb[2]**2)))
def restore_review_history_for_affected_objects(portal): """Applies the review history for objects that are bound to new senaite_* workflows """ logger.info("Restoring review_history ...") query = dict(portal_type=NEW_SENAITE_WORKFLOW_BINDINGS) brains = api.search(query, UID_CATALOG) total = len(brains) done = 0 for num, brain in enumerate(brains): if num % 100 == 0: logger.info("Restoring review_history: {}/{}" .format(num, total)) review_history = api.get_review_history(brain, rev=False) if review_history: # Nothing to do. The object already has the review history set continue # Object without review history. Set the review_history manually restore_review_history_for(brain) done += 1 if done % 1000 == 0: commit_transaction(portal) logger.info("Restoring review history: {} processed [DONE]".format(done))
Applies the review history for objects that are bound to new senaite_* workflows
Below is the the instruction that describes the task: ### Input: Applies the review history for objects that are bound to new senaite_* workflows ### Response: def restore_review_history_for_affected_objects(portal): """Applies the review history for objects that are bound to new senaite_* workflows """ logger.info("Restoring review_history ...") query = dict(portal_type=NEW_SENAITE_WORKFLOW_BINDINGS) brains = api.search(query, UID_CATALOG) total = len(brains) done = 0 for num, brain in enumerate(brains): if num % 100 == 0: logger.info("Restoring review_history: {}/{}" .format(num, total)) review_history = api.get_review_history(brain, rev=False) if review_history: # Nothing to do. The object already has the review history set continue # Object without review history. Set the review_history manually restore_review_history_for(brain) done += 1 if done % 1000 == 0: commit_transaction(portal) logger.info("Restoring review history: {} processed [DONE]".format(done))
def create_signed_tx(network, spendables, payables, wifs=[], fee="standard", lock_time=0, version=1, **kwargs): """ This convenience function calls :func:`create_tx` and :func:`sign_tx` in turn. Read the documentation for those functions for information on the parameters. Usage:: >>> spendables = spendables_for_address("1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH") >>> wifs = ["KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qYjgd9M7rFU73sVHnoWn"] >>> payables = ["1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP"] >>> tx = create_signed_tx(network, spendables, payables, wifs=wifs, fee=0) This will move all available reported funds from 1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH to 1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP, with no transaction fees (which means it might take a while to confirm, possibly never). """ tx = create_tx(network, spendables, payables, fee=fee, lock_time=lock_time, version=version) sign_tx(network, tx, wifs=wifs, **kwargs) for idx, tx_out in enumerate(tx.txs_in): if not tx.is_solution_ok(idx): raise SecretExponentMissing("failed to sign spendable for %s" % tx.unspents[idx].address()) return tx
This convenience function calls :func:`create_tx` and :func:`sign_tx` in turn. Read the documentation for those functions for information on the parameters. Usage:: >>> spendables = spendables_for_address("1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH") >>> wifs = ["KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qYjgd9M7rFU73sVHnoWn"] >>> payables = ["1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP"] >>> tx = create_signed_tx(network, spendables, payables, wifs=wifs, fee=0) This will move all available reported funds from 1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH to 1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP, with no transaction fees (which means it might take a while to confirm, possibly never).
Below is the the instruction that describes the task: ### Input: This convenience function calls :func:`create_tx` and :func:`sign_tx` in turn. Read the documentation for those functions for information on the parameters. Usage:: >>> spendables = spendables_for_address("1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH") >>> wifs = ["KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qYjgd9M7rFU73sVHnoWn"] >>> payables = ["1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP"] >>> tx = create_signed_tx(network, spendables, payables, wifs=wifs, fee=0) This will move all available reported funds from 1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH to 1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP, with no transaction fees (which means it might take a while to confirm, possibly never). ### Response: def create_signed_tx(network, spendables, payables, wifs=[], fee="standard", lock_time=0, version=1, **kwargs): """ This convenience function calls :func:`create_tx` and :func:`sign_tx` in turn. Read the documentation for those functions for information on the parameters. Usage:: >>> spendables = spendables_for_address("1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH") >>> wifs = ["KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qYjgd9M7rFU73sVHnoWn"] >>> payables = ["1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP"] >>> tx = create_signed_tx(network, spendables, payables, wifs=wifs, fee=0) This will move all available reported funds from 1BgGZ9tcN4rm9KBzDn7KprQz87SZ26SAMH to 1cMh228HTCiwS8ZsaakH8A8wze1JR5ZsP, with no transaction fees (which means it might take a while to confirm, possibly never). """ tx = create_tx(network, spendables, payables, fee=fee, lock_time=lock_time, version=version) sign_tx(network, tx, wifs=wifs, **kwargs) for idx, tx_out in enumerate(tx.txs_in): if not tx.is_solution_ok(idx): raise SecretExponentMissing("failed to sign spendable for %s" % tx.unspents[idx].address()) return tx
def pretty_descriptor(self):
    """
    assemble a long member name from access flags, type, argument
    types, exceptions as applicable
    """
    parts = [" ".join(self.pretty_access_flags())]

    ret_type = self.pretty_type()
    name = self.get_name()
    if name == "<init>":
        # constructors are rendered with no return type, even though
        # their descriptor's return type is V
        ret_type = None
    parts.append(ret_type)

    if self.is_method:
        # glue the name and the argument list together with no space
        name = "%s(%s)" % (name, ",".join(self.pretty_arg_types()))
    parts.append(name)

    exceptions = ",".join(self.pretty_exceptions())
    if exceptions:
        # prepend the throws keyword when there is anything to throw
        parts.append("throws " + exceptions)

    # drop empty pieces (e.g. no flags, constructor return type)
    return " ".join(piece for piece in parts if piece)
assemble a long member name from access flags, type, argument types, exceptions as applicable
Below is the the instruction that describes the task: ### Input: assemble a long member name from access flags, type, argument types, exceptions as applicable ### Response: def pretty_descriptor(self): """ assemble a long member name from access flags, type, argument types, exceptions as applicable """ f = " ".join(self.pretty_access_flags()) p = self.pretty_type() n = self.get_name() t = ",".join(self.pretty_exceptions()) if n == "<init>": # we pretend that there's no return type, even though it's # V for constructors p = None if self.is_method: # stick the name and args together so there's no space n = "%s(%s)" % (n, ",".join(self.pretty_arg_types())) if t: # assemble any throws as necessary t = "throws " + t return " ".join(z for z in (f, p, n, t) if z)
def first(self, x):
    """Returns first found index of given value (or list of values)."""
    def _index_of(value):
        # look the stringified value up in self.all; None when absent
        try:
            return self.all.index(str(value))
        except ValueError:
            return None

    if not _is_collection(x):
        return _index_of(x)
    # x is a collection: return the index of the first member found
    for candidate in x:
        idx = _index_of(candidate)
        if idx is not None:
            return idx
    return None
Returns first found index of given value (or list of values).
Below is the the instruction that describes the task: ### Input: Returns first found index of given value (or list of values). ### Response: def first(self, x): """Returns first found index of given value (or list of values).""" def _find(x): try: return self.all.index(str(x)) except ValueError: return None if _is_collection(x): for item in x: found = _find(item) if found is not None: return found return None else: return _find(x)
def is_empty(self):
    '''
    Return `True` if form is valid and contains an empty lookup.
    '''
    # Short-circuit on validity first, then require that no lookup of
    # any kind is present.
    return (self.is_valid()
            and not (self.simple_lookups
                     or self.complex_conditions
                     or self.extra_conditions))
Return `True` if form is valid and contains an empty lookup.
Below is the the instruction that describes the task: ### Input: Return `True` if form is valid and contains an empty lookup. ### Response: def is_empty(self): ''' Return `True` if form is valid and contains an empty lookup. ''' return (self.is_valid() and not self.simple_lookups and not self.complex_conditions and not self.extra_conditions)
def sorted_enums(self) -> List[Tuple[str, int]]:
    """Return list of enum items sorted by value."""
    def by_value(item):
        # sort key: the integer value of the (name, value) pair
        return item[1]
    return sorted(self.enum.items(), key=by_value)
Return list of enum items sorted by value.
Below is the the instruction that describes the task: ### Input: Return list of enum items sorted by value. ### Response: def sorted_enums(self) -> List[Tuple[str, int]]: """Return list of enum items sorted by value.""" return sorted(self.enum.items(), key=lambda x: x[1])
def leaveChat(self, chat_id):
    """
    See: https://core.telegram.org/bots/api#leavechat
    """
    # Collect this call's arguments by name via locals(); _strip
    # presumably removes entries that must not be sent to the API
    # (such as ``self``) — confirm against _strip's definition.
    # NOTE: this must remain the first statement — any local created
    # before it would leak into the request parameters.
    p = _strip(locals())
    return self._api_request('leaveChat', _rectify(p))
See: https://core.telegram.org/bots/api#leavechat
Below is the the instruction that describes the task: ### Input: See: https://core.telegram.org/bots/api#leavechat ### Response: def leaveChat(self, chat_id): """ See: https://core.telegram.org/bots/api#leavechat """ p = _strip(locals()) return self._api_request('leaveChat', _rectify(p))
def _freq_parser(self, freq):
    """Parse a frequency string into a :class:`datetime.timedelta`.

    Valid keywords

    "days", "day", "d",
    "hours", "hour", "h",
    "minutes", "minute", "min", "m",
    "seconds", "second", "sec", "s",
    "weeks", "week", "w",

    For example ``"7days"``, ``"12 hours"``, ``"30min"``, ``"2w"``.

    :param freq: string of the form ``"<integer><unit keyword>"``
        (case-insensitive; whitespace around the number is ignored).
    :return: the equivalent :class:`datetime.timedelta`.
    :raises ValueError: if the string uses an unknown keyword or the
        numeric part is not an integer.
    """
    freq = freq.lower().strip()

    # Suffix keyword -> timedelta keyword argument.  Suffixes are
    # matched longest-first so that e.g. "2weeks" is parsed as weeks
    # rather than matching the bare "s" of seconds.  (The previous
    # implementation checked seconds before weeks, so plural week
    # strings like "2weeks" stripped the trailing "s", left "2week",
    # and raised ValueError.)
    suffix_to_unit = [
        ("days", "days"), ("day", "days"), ("d", "days"),
        ("hours", "hours"), ("hour", "hours"), ("h", "hours"),
        ("minutes", "minutes"), ("minute", "minutes"),
        ("min", "minutes"), ("m", "minutes"),
        ("seconds", "seconds"), ("second", "seconds"),
        ("sec", "seconds"), ("s", "seconds"),
        ("weeks", "weeks"), ("week", "weeks"), ("w", "weeks"),
    ]
    error_message = "'%s' is invalid, use one of %s" % (
        freq, [suffix for suffix, _ in suffix_to_unit])

    for suffix, unit in sorted(suffix_to_unit,
                               key=lambda pair: len(pair[0]),
                               reverse=True):
        if not freq.endswith(suffix):
            continue
        try:
            # int() tolerates surrounding whitespace, e.g. "12 hours".
            value = int(freq[:-len(suffix)])
        except ValueError:
            # The numeric part is not an integer; a shorter suffix
            # cannot fix that, so report the input as invalid.
            break
        # timedelta accepts all of these as keyword args, including
        # weeks (timedelta(weeks=2) == timedelta(days=14)).
        return timedelta(**{unit: value})

    raise ValueError(error_message)
Parse timedelta. Valid keywords "days", "day", "d", "hours", "hour", "h", "minutes", "minute", "min", "m", "seconds", "second", "sec", "s", "weeks", "week", "w",
Below is the the instruction that describes the task: ### Input: Parse timedelta. Valid keywords "days", "day", "d", "hours", "hour", "h", "minutes", "minute", "min", "m", "seconds", "second", "sec", "s", "weeks", "week", "w", ### Response: def _freq_parser(self, freq): """Parse timedelta. Valid keywords "days", "day", "d", "hours", "hour", "h", "minutes", "minute", "min", "m", "seconds", "second", "sec", "s", "weeks", "week", "w", """ freq = freq.lower().strip() valid_keywords = [ "days", "day", "d", "hours", "hour", "h", "minutes", "minute", "min", "m", "seconds", "second", "sec", "s", "weeks", "week", "w", ] error_message = "'%s' is invalid, use one of %s" % ( freq, valid_keywords) try: # day for surfix in ["days", "day", "d"]: if freq.endswith(surfix): freq = freq.replace(surfix, "") return timedelta(days=int(freq)) # hour for surfix in ["hours", "hour", "h"]: if freq.endswith(surfix): freq = freq.replace(surfix, "") return timedelta(hours=int(freq)) # minute for surfix in ["minutes", "minute", "min", "m"]: if freq.endswith(surfix): freq = freq.replace(surfix, "") return timedelta(minutes=int(freq)) # second for surfix in ["seconds", "second", "sec", "s"]: if freq.endswith(surfix): freq = freq.replace(surfix, "") return timedelta(seconds=int(freq)) # week for surfix in ["weeks", "week", "w"]: if freq.endswith(surfix): freq = freq.replace(surfix, "") return timedelta(days=int(freq) * 7) except: pass raise ValueError(error_message)
def process_exception(self, request, exception):
    """Catches internal Horizon exception classes such as NotAuthorized,
    NotFound and Http302 and handles them gracefully.

    Returns an HttpResponse (redirect or 401) for the handled cases,
    raises Http404 for NotFound, and falls through (implicitly
    returning None) for anything else so Django's normal exception
    handling takes over.
    """
    if isinstance(exception, (exceptions.NotAuthorized,
                              exceptions.NotAuthenticated)):
        auth_url = settings.LOGIN_URL
        next_url = None
        # prevent submitting forms after login and
        # use http referer
        if request.method in ("POST", "PUT"):
            referrer = request.META.get('HTTP_REFERER')
            # only trust the referer if it points at this host
            if referrer and is_safe_url(referrer, request.get_host()):
                next_url = referrer
        if not next_url:
            next_url = iri_to_uri(request.get_full_path())
        # avoid a redirect loop: don't append ?next=<login page>
        if next_url != auth_url:
            field_name = REDIRECT_FIELD_NAME
        else:
            field_name = None
        login_url = request.build_absolute_uri(auth_url)
        response = redirect_to_login(next_url, login_url=login_url,
                                     redirect_field_name=field_name)
        if isinstance(exception, exceptions.NotAuthorized):
            logout_reason = _("Unauthorized. Please try logging in again.")
            utils.add_logout_reason(request, response, logout_reason)
            # delete messages, created in get_data() method
            # since we are going to redirect user to the login page
            response.delete_cookie('messages')

        if request.is_ajax():
            # AJAX callers can't follow a 302 to the login page; hand
            # back a 401 with the redirect target in a custom header so
            # the client-side code can navigate there itself.
            response_401 = http.HttpResponse(status=401)
            response_401['X-Horizon-Location'] = response['location']
            return response_401

        return response

    # If an internal "NotFound" error gets this far, return a real 404.
    if isinstance(exception, exceptions.NotFound):
        raise http.Http404(exception)

    if isinstance(exception, exceptions.Http302):
        # TODO(gabriel): Find a way to display an appropriate message to
        # the user *on* the login form...
        return shortcuts.redirect(exception.location)
Catches internal Horizon exception classes such as NotAuthorized, NotFound and Http302 and handles them gracefully.
Below is the the instruction that describes the task: ### Input: Catches internal Horizon exception classes such as NotAuthorized, NotFound and Http302 and handles them gracefully. ### Response: def process_exception(self, request, exception): """Catches internal Horizon exception classes such as NotAuthorized, NotFound and Http302 and handles them gracefully. """ if isinstance(exception, (exceptions.NotAuthorized, exceptions.NotAuthenticated)): auth_url = settings.LOGIN_URL next_url = None # prevent submiting forms after login and # use http referer if request.method in ("POST", "PUT"): referrer = request.META.get('HTTP_REFERER') if referrer and is_safe_url(referrer, request.get_host()): next_url = referrer if not next_url: next_url = iri_to_uri(request.get_full_path()) if next_url != auth_url: field_name = REDIRECT_FIELD_NAME else: field_name = None login_url = request.build_absolute_uri(auth_url) response = redirect_to_login(next_url, login_url=login_url, redirect_field_name=field_name) if isinstance(exception, exceptions.NotAuthorized): logout_reason = _("Unauthorized. Please try logging in again.") utils.add_logout_reason(request, response, logout_reason) # delete messages, created in get_data() method # since we are going to redirect user to the login page response.delete_cookie('messages') if request.is_ajax(): response_401 = http.HttpResponse(status=401) response_401['X-Horizon-Location'] = response['location'] return response_401 return response # If an internal "NotFound" error gets this far, return a real 404. if isinstance(exception, exceptions.NotFound): raise http.Http404(exception) if isinstance(exception, exceptions.Http302): # TODO(gabriel): Find a way to display an appropriate message to # the user *on* the login form... return shortcuts.redirect(exception.location)
def _read_config(config_location):
    """
    Read configuration for logging from a json file.

    Merges the read dictionary to LOGGING_CONFIG.

    :param config_location: Location of file.
    :return: nothing.
    """
    global LOGGING_CONFIG
    with open(config_location, "r") as config_loc:
        cfg_file = json.load(config_loc)
        # only the optional "logging" section of the file is consumed
        if "logging" in cfg_file:
            log_dict = cfg_file.get("logging")
            # the JSON schema for the "logging" section ships next to
            # this module as logging_schema.json
            with open(os.path.abspath(os.path.join(__file__, os.path.pardir,
                                                   'logging_schema.json'))) as schema_file:
                logging_schema = json.load(schema_file)
            # raises jsonschema.ValidationError on a malformed section
            jsonschema.validate(log_dict, logging_schema)
            # jsonmerge.merge(base, head): values read from the file
            # (head) override the defaults already in LOGGING_CONFIG
            # (base)
            merged = jsonmerge.merge(LOGGING_CONFIG, log_dict)
            LOGGING_CONFIG = merged
Read configuration for logging from a json file. Merges the read dictionary to LOGGING_CONFIG. :param config_location: Location of file. :return: nothing.
Below is the the instruction that describes the task: ### Input: Read configuration for logging from a json file. Merges the read dictionary to LOGGING_CONFIG. :param config_location: Location of file. :return: nothing. ### Response: def _read_config(config_location): """ Read configuration for logging from a json file. Merges the read dictionary to LOGGING_CONFIG. :param config_location: Location of file. :return: nothing. """ global LOGGING_CONFIG with open(config_location, "r") as config_loc: cfg_file = json.load(config_loc) if "logging" in cfg_file: log_dict = cfg_file.get("logging") with open(os.path.abspath(os.path.join(__file__, os.path.pardir, 'logging_schema.json'))) as schema_file: logging_schema = json.load(schema_file) jsonschema.validate(log_dict, logging_schema) merged = jsonmerge.merge(LOGGING_CONFIG, log_dict) LOGGING_CONFIG = merged
def evaluate_version_bump(current_version: str, force: str = None) -> Optional[str]:
    """
    Reads git log since last release to find out if should be a major, minor or patch release.

    :param current_version: A string with the current version number.
    :param force: A string with the bump level that should be forced.
    :return: A string with either major, minor or patch if there should be a release.
             If no release is necessary None will be returned.
    """
    debug('evaluate_version_bump("{}", "{}")'.format(current_version, force))
    if force:
        # an explicitly forced level short-circuits any log analysis
        return force
    bump = None

    changes = []       # bump levels harvested from parsable commit messages
    commit_count = 0   # commits inspected since the last release

    for _hash, commit_message in get_commit_log('v{0}'.format(current_version)):
        if (current_version in commit_message and
                config.get('semantic_release', 'version_source') == 'commit'):
            # in "commit" version-source mode the commit containing the
            # current version number marks the previous release: stop here
            debug('found {} in "{}. breaking loop'.format(current_version, commit_message))
            break

        try:
            message = current_commit_parser()(commit_message)
            changes.append(message[0])
        except UnknownCommitMessageStyleError as err:
            # commits that don't follow the configured message convention
            # contribute no bump level (they still count in commit_count)
            debug('ignored', err)
            pass

        commit_count += 1

    if changes:
        # the highest level seen across all commits decides the bump;
        # presumably LEVELS maps the parser's level to
        # 'major'/'minor'/'patch' — confirm against LEVELS' definition
        level = max(changes)
        if level in LEVELS:
            bump = LEVELS[level]

    if config.getboolean('semantic_release', 'patch_without_tag') and commit_count:
        # NOTE(review): with patch_without_tag enabled this overwrites any
        # previously computed bump (even a major one) with 'patch' whenever
        # at least one commit exists — confirm this is intended
        bump = 'patch'
    return bump
Reads git log since last release to find out if should be a major, minor or patch release. :param current_version: A string with the current version number. :param force: A string with the bump level that should be forced. :return: A string with either major, minor or patch if there should be a release. If no release is necessary None will be returned.
Below is the the instruction that describes the task: ### Input: Reads git log since last release to find out if should be a major, minor or patch release. :param current_version: A string with the current version number. :param force: A string with the bump level that should be forced. :return: A string with either major, minor or patch if there should be a release. If no release is necessary None will be returned. ### Response: def evaluate_version_bump(current_version: str, force: str = None) -> Optional[str]: """ Reads git log since last release to find out if should be a major, minor or patch release. :param current_version: A string with the current version number. :param force: A string with the bump level that should be forced. :return: A string with either major, minor or patch if there should be a release. If no release is necessary None will be returned. """ debug('evaluate_version_bump("{}", "{}")'.format(current_version, force)) if force: return force bump = None changes = [] commit_count = 0 for _hash, commit_message in get_commit_log('v{0}'.format(current_version)): if (current_version in commit_message and config.get('semantic_release', 'version_source') == 'commit'): debug('found {} in "{}. breaking loop'.format(current_version, commit_message)) break try: message = current_commit_parser()(commit_message) changes.append(message[0]) except UnknownCommitMessageStyleError as err: debug('ignored', err) pass commit_count += 1 if changes: level = max(changes) if level in LEVELS: bump = LEVELS[level] if config.getboolean('semantic_release', 'patch_without_tag') and commit_count: bump = 'patch' return bump
def tweet(ctx, message):
    """Sends a tweet directly to your timeline"""
    # refuse anything over twitter's length limit up front
    if not valid_tweet(message):
        click.echo("Message is too long for twitter.")
        click.echo("Message:" + message)
        ctx.exit(2)
    if ctx.obj['DRYRUN']:
        click.echo("Tweet not sent due to dry-run mode.")
    else:
        ctx.obj['TWEEPY_API'].update_status(message)
Sends a tweet directly to your timeline
Below is the the instruction that describes the task: ### Input: Sends a tweet directly to your timeline ### Response: def tweet(ctx, message): """Sends a tweet directly to your timeline""" if not valid_tweet(message): click.echo("Message is too long for twitter.") click.echo("Message:" + message) ctx.exit(2) if not ctx.obj['DRYRUN']: ctx.obj['TWEEPY_API'].update_status(message) else: click.echo("Tweet not sent due to dry-run mode.")
def write(self, data): """! @brief Write bytes into the connection.""" # If nobody is connected, act like all data was written anyway. if self.connected is None: return 0 data = to_bytes_safe(data) size = len(data) remaining = size while remaining: count = self._abstract_socket.write(data) remaining -= count if remaining: data = data[count:] return size
! @brief Write bytes into the connection.
Below is the the instruction that describes the task: ### Input: ! @brief Write bytes into the connection. ### Response: def write(self, data): """! @brief Write bytes into the connection.""" # If nobody is connected, act like all data was written anyway. if self.connected is None: return 0 data = to_bytes_safe(data) size = len(data) remaining = size while remaining: count = self._abstract_socket.write(data) remaining -= count if remaining: data = data[count:] return size
def find(self, upload_id, **kwargs):
    """
    Finds an upload by ID.

    :param upload_id: identifier of the upload to fetch.
    :param kwargs: extra options forwarded to the underlying lookup.
    :return: the matching upload, as returned by the parent proxy.
    """
    # Force file_upload=True (uploads are always fetched through the
    # file-upload variant of the lookup) while still forwarding any
    # caller-supplied options.  The previous implementation accepted
    # **kwargs but silently discarded them.
    kwargs['file_upload'] = True
    return super(UploadsProxy, self).find(upload_id, **kwargs)
Finds an upload by ID.
Below is the the instruction that describes the task: ### Input: Finds an upload by ID. ### Response: def find(self, upload_id, **kwargs): """ Finds an upload by ID. """ return super(UploadsProxy, self).find(upload_id, file_upload=True)
def __squid_to_guid(self, squid):
    '''
    Squished GUID (SQUID) to GUID.

    A SQUID is a Squished/Compressed version of a GUID to use up less
    space in the registry.

    Args:
        squid (str): Squished GUID.

    Returns:
        str: the GUID if a valid SQUID provided.
    '''
    if not squid:
        return ''
    match = self.__squid_pattern.match(squid)
    if match is None:
        # not a valid SQUID: return an empty string, as before
        return ''
    # Each captured SQUID field is the reversed text of the
    # corresponding GUID field, so un-squish by reversing every group.
    fields = [group[::-1] for group in match.groups()]
    # Layout: {g1-g2-g3-g4g5-g6..g11}
    return '{%s-%s-%s-%s%s-%s}' % (
        fields[0], fields[1], fields[2],
        fields[3], fields[4],
        ''.join(fields[5:11]),
    )
Squished GUID (SQUID) to GUID. A SQUID is a Squished/Compressed version of a GUID to use up less space in the registry. Args: squid (str): Squished GUID. Returns: str: the GUID if a valid SQUID provided.
Below is the the instruction that describes the task: ### Input: Squished GUID (SQUID) to GUID. A SQUID is a Squished/Compressed version of a GUID to use up less space in the registry. Args: squid (str): Squished GUID. Returns: str: the GUID if a valid SQUID provided. ### Response: def __squid_to_guid(self, squid): ''' Squished GUID (SQUID) to GUID. A SQUID is a Squished/Compressed version of a GUID to use up less space in the registry. Args: squid (str): Squished GUID. Returns: str: the GUID if a valid SQUID provided. ''' if not squid: return '' squid_match = self.__squid_pattern.match(squid) guid = '' if squid_match is not None: guid = '{' +\ squid_match.group(1)[::-1]+'-' +\ squid_match.group(2)[::-1]+'-' +\ squid_match.group(3)[::-1]+'-' +\ squid_match.group(4)[::-1]+squid_match.group(5)[::-1] + '-' for index in range(6, 12): guid += squid_match.group(index)[::-1] guid += '}' return guid