body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
febe45e759afd912319d24252aef2955773733766e747abcf7fafc2f46ed8524
def has_selection(self): ' Returns True if the SEL tag is found in the Console widget ' return bool(self.tag_ranges(SEL))
Returns True if the SEL tag is found in the Console widget
Troop/src/interface/console.py
has_selection
mathigatti/EP
1
python
def has_selection(self): ' ' return bool(self.tag_ranges(SEL))
def has_selection(self): ' ' return bool(self.tag_ranges(SEL))<|docstring|>Returns True if the SEL tag is found in the Console widget<|endoftext|>
ea034edca06b85494493526153960fd882a40ff991351460b7cef047042d15ef
def mouse_press_right(self, event): ' Displays popup menu' self.popup.show(event) return 'break'
Displays popup menu
Troop/src/interface/console.py
mouse_press_right
mathigatti/EP
1
python
def mouse_press_right(self, event): ' ' self.popup.show(event) return 'break'
def mouse_press_right(self, event): ' ' self.popup.show(event) return 'break'<|docstring|>Displays popup menu<|endoftext|>
2e00694029e28922377f461b61b586499fd2bc117710da4709f6396e660708cf
def newer(source, target): "\n Return true if 'source' exists and is more recently modified than\n 'target', or if 'source' exists and 'target' doesn't. Return false if\n both exist and 'target' is the same age or younger than 'source'.\n " if (not os.path.exists(source)): raise ValueError(("file '%s' does not exist" % os.path.abspath(source))) if (not os.path.exists(target)): return 1 mtime1 = os.stat(source)[ST_MTIME] mtime2 = os.stat(target)[ST_MTIME] return (mtime1 > mtime2)
Return true if 'source' exists and is more recently modified than 'target', or if 'source' exists and 'target' doesn't. Return false if both exist and 'target' is the same age or younger than 'source'.
scipy/special/utils/makenpz.py
newer
bamford/scipy
1
python
def newer(source, target): "\n Return true if 'source' exists and is more recently modified than\n 'target', or if 'source' exists and 'target' doesn't. Return false if\n both exist and 'target' is the same age or younger than 'source'.\n " if (not os.path.exists(source)): raise ValueError(("file '%s' does not exist" % os.path.abspath(source))) if (not os.path.exists(target)): return 1 mtime1 = os.stat(source)[ST_MTIME] mtime2 = os.stat(target)[ST_MTIME] return (mtime1 > mtime2)
def newer(source, target): "\n Return true if 'source' exists and is more recently modified than\n 'target', or if 'source' exists and 'target' doesn't. Return false if\n both exist and 'target' is the same age or younger than 'source'.\n " if (not os.path.exists(source)): raise ValueError(("file '%s' does not exist" % os.path.abspath(source))) if (not os.path.exists(target)): return 1 mtime1 = os.stat(source)[ST_MTIME] mtime2 = os.stat(target)[ST_MTIME] return (mtime1 > mtime2)<|docstring|>Return true if 'source' exists and is more recently modified than 'target', or if 'source' exists and 'target' doesn't. Return false if both exist and 'target' is the same age or younger than 'source'.<|endoftext|>
7eaa299ae8f50bafbbbac0f0dbef692c7879a5427540bb5d0c7032c939ad95a1
def getRegisterContext(self): '\n return hexadecimal dump of registers as expected by GDB\n ' logging.debug('GDB getting register context') resp = '' reg_num_list = map((lambda reg: reg.reg_num), self._register_list) vals = self._context.readCoreRegistersRaw(reg_num_list) for (reg, regValue) in zip(self._register_list, vals): resp += conversion.u32beToHex8le(regValue) logging.debug('GDB reg: %s = 0x%X', reg.name, regValue) return resp
return hexadecimal dump of registers as expected by GDB
pyOCD/gdbserver/context_facade.py
getRegisterContext
orenc17/pyOCD
1
python
def getRegisterContext(self): '\n \n ' logging.debug('GDB getting register context') resp = reg_num_list = map((lambda reg: reg.reg_num), self._register_list) vals = self._context.readCoreRegistersRaw(reg_num_list) for (reg, regValue) in zip(self._register_list, vals): resp += conversion.u32beToHex8le(regValue) logging.debug('GDB reg: %s = 0x%X', reg.name, regValue) return resp
def getRegisterContext(self): '\n \n ' logging.debug('GDB getting register context') resp = reg_num_list = map((lambda reg: reg.reg_num), self._register_list) vals = self._context.readCoreRegistersRaw(reg_num_list) for (reg, regValue) in zip(self._register_list, vals): resp += conversion.u32beToHex8le(regValue) logging.debug('GDB reg: %s = 0x%X', reg.name, regValue) return resp<|docstring|>return hexadecimal dump of registers as expected by GDB<|endoftext|>
d573cae537851ef1fe8a208e1eaa5752908586927f564e1265aa35f8d210f078
def setRegisterContext(self, data): '\n Set registers from GDB hexadecimal string.\n ' logging.debug('GDB setting register context') reg_num_list = [] reg_data_list = [] for reg in self._register_list: regValue = conversion.hex8leToU32be(data) reg_num_list.append(reg.reg_num) reg_data_list.append(regValue) logging.debug('GDB reg: %s = 0x%X', reg.name, regValue) data = data[8:] self._context.writeCoreRegistersRaw(reg_num_list, reg_data_list)
Set registers from GDB hexadecimal string.
pyOCD/gdbserver/context_facade.py
setRegisterContext
orenc17/pyOCD
1
python
def setRegisterContext(self, data): '\n \n ' logging.debug('GDB setting register context') reg_num_list = [] reg_data_list = [] for reg in self._register_list: regValue = conversion.hex8leToU32be(data) reg_num_list.append(reg.reg_num) reg_data_list.append(regValue) logging.debug('GDB reg: %s = 0x%X', reg.name, regValue) data = data[8:] self._context.writeCoreRegistersRaw(reg_num_list, reg_data_list)
def setRegisterContext(self, data): '\n \n ' logging.debug('GDB setting register context') reg_num_list = [] reg_data_list = [] for reg in self._register_list: regValue = conversion.hex8leToU32be(data) reg_num_list.append(reg.reg_num) reg_data_list.append(regValue) logging.debug('GDB reg: %s = 0x%X', reg.name, regValue) data = data[8:] self._context.writeCoreRegistersRaw(reg_num_list, reg_data_list)<|docstring|>Set registers from GDB hexadecimal string.<|endoftext|>
e2e5c30f4e75c8fa630d3eeb16e953cbf6c0e2d9fa88e7d5ad8994b59641f13d
def setRegister(self, reg, data): '\n Set single register from GDB hexadecimal string.\n reg parameter is the index of register in targetXML sent to GDB.\n ' if (reg < 0): return elif (reg < len(self._register_list)): regName = self._register_list[reg].name value = conversion.hex8leToU32be(data) logging.debug('GDB: write reg %s: 0x%X', regName, value) self._context.writeCoreRegisterRaw(regName, value)
Set single register from GDB hexadecimal string. reg parameter is the index of register in targetXML sent to GDB.
pyOCD/gdbserver/context_facade.py
setRegister
orenc17/pyOCD
1
python
def setRegister(self, reg, data): '\n Set single register from GDB hexadecimal string.\n reg parameter is the index of register in targetXML sent to GDB.\n ' if (reg < 0): return elif (reg < len(self._register_list)): regName = self._register_list[reg].name value = conversion.hex8leToU32be(data) logging.debug('GDB: write reg %s: 0x%X', regName, value) self._context.writeCoreRegisterRaw(regName, value)
def setRegister(self, reg, data): '\n Set single register from GDB hexadecimal string.\n reg parameter is the index of register in targetXML sent to GDB.\n ' if (reg < 0): return elif (reg < len(self._register_list)): regName = self._register_list[reg].name value = conversion.hex8leToU32be(data) logging.debug('GDB: write reg %s: 0x%X', regName, value) self._context.writeCoreRegisterRaw(regName, value)<|docstring|>Set single register from GDB hexadecimal string. reg parameter is the index of register in targetXML sent to GDB.<|endoftext|>
4c0b7e506ee452dafd5218bf96730be728f6436cf609fe4b82b8312ea5b7659a
def getTResponse(self, forceSignal=None): '\n Returns a GDB T response string. This includes:\n The signal encountered.\n The current value of the important registers (sp, lr, pc).\n ' if (forceSignal is not None): response = ('T' + conversion.byteToHex2(forceSignal)) else: response = ('T' + conversion.byteToHex2(self.getSignalValue())) response += self.getRegIndexValuePairs([7, 13, 14, 15]) return response
Returns a GDB T response string. This includes: The signal encountered. The current value of the important registers (sp, lr, pc).
pyOCD/gdbserver/context_facade.py
getTResponse
orenc17/pyOCD
1
python
def getTResponse(self, forceSignal=None): '\n Returns a GDB T response string. This includes:\n The signal encountered.\n The current value of the important registers (sp, lr, pc).\n ' if (forceSignal is not None): response = ('T' + conversion.byteToHex2(forceSignal)) else: response = ('T' + conversion.byteToHex2(self.getSignalValue())) response += self.getRegIndexValuePairs([7, 13, 14, 15]) return response
def getTResponse(self, forceSignal=None): '\n Returns a GDB T response string. This includes:\n The signal encountered.\n The current value of the important registers (sp, lr, pc).\n ' if (forceSignal is not None): response = ('T' + conversion.byteToHex2(forceSignal)) else: response = ('T' + conversion.byteToHex2(self.getSignalValue())) response += self.getRegIndexValuePairs([7, 13, 14, 15]) return response<|docstring|>Returns a GDB T response string. This includes: The signal encountered. The current value of the important registers (sp, lr, pc).<|endoftext|>
47c473d29f0775f64e5988dba8724c141c693ee611a5874a65df87f0a2b77e7b
def getRegIndexValuePairs(self, regIndexList): '\n Returns a string like NN:MMMMMMMM;NN:MMMMMMMM;...\n for the T response string. NN is the index of the\n register to follow MMMMMMMM is the value of the register.\n ' str = '' regList = self._context.readCoreRegistersRaw(regIndexList) for (regIndex, reg) in zip(regIndexList, regList): str += (((conversion.byteToHex2(regIndex) + ':') + conversion.u32beToHex8le(reg)) + ';') return str
Returns a string like NN:MMMMMMMM;NN:MMMMMMMM;... for the T response string. NN is the index of the register to follow MMMMMMMM is the value of the register.
pyOCD/gdbserver/context_facade.py
getRegIndexValuePairs
orenc17/pyOCD
1
python
def getRegIndexValuePairs(self, regIndexList): '\n Returns a string like NN:MMMMMMMM;NN:MMMMMMMM;...\n for the T response string. NN is the index of the\n register to follow MMMMMMMM is the value of the register.\n ' str = regList = self._context.readCoreRegistersRaw(regIndexList) for (regIndex, reg) in zip(regIndexList, regList): str += (((conversion.byteToHex2(regIndex) + ':') + conversion.u32beToHex8le(reg)) + ';') return str
def getRegIndexValuePairs(self, regIndexList): '\n Returns a string like NN:MMMMMMMM;NN:MMMMMMMM;...\n for the T response string. NN is the index of the\n register to follow MMMMMMMM is the value of the register.\n ' str = regList = self._context.readCoreRegistersRaw(regIndexList) for (regIndex, reg) in zip(regIndexList, regList): str += (((conversion.byteToHex2(regIndex) + ':') + conversion.u32beToHex8le(reg)) + ';') return str<|docstring|>Returns a string like NN:MMMMMMMM;NN:MMMMMMMM;... for the T response string. NN is the index of the register to follow MMMMMMMM is the value of the register.<|endoftext|>
c47c2e25753f5e0098c6d3bb31013e383d63ee6818dfe86b29fd8d6282fc215d
def merge_model(self, training, models_to_add: list): '\n Will update this fields from training fields. If is necessary to add a new exercise, it will be added on\n models_to_add\n\n :param training: the training with the new fields\n :param models_to_add: list that will be updated with new exercise\n ' super().merge_model(training, models_to_add) if training.exercises: for exercise in training.exercises: exercise.training = self self.workout_plan_id = (training.workout_plan.id if training.workout_plan else None) self.start_date = training.start_date self.end_date = training.end_date merge_lists(self.exercises, training.exercises, models_to_add)
Will update this fields from training fields. If is necessary to add a new exercise, it will be added on models_to_add :param training: the training with the new fields :param models_to_add: list that will be updated with new exercise
workout_plan_server/adapters/mysql/models/training_model.py
merge_model
vitorsm/workout-plan-server
0
python
def merge_model(self, training, models_to_add: list): '\n Will update this fields from training fields. If is necessary to add a new exercise, it will be added on\n models_to_add\n\n :param training: the training with the new fields\n :param models_to_add: list that will be updated with new exercise\n ' super().merge_model(training, models_to_add) if training.exercises: for exercise in training.exercises: exercise.training = self self.workout_plan_id = (training.workout_plan.id if training.workout_plan else None) self.start_date = training.start_date self.end_date = training.end_date merge_lists(self.exercises, training.exercises, models_to_add)
def merge_model(self, training, models_to_add: list): '\n Will update this fields from training fields. If is necessary to add a new exercise, it will be added on\n models_to_add\n\n :param training: the training with the new fields\n :param models_to_add: list that will be updated with new exercise\n ' super().merge_model(training, models_to_add) if training.exercises: for exercise in training.exercises: exercise.training = self self.workout_plan_id = (training.workout_plan.id if training.workout_plan else None) self.start_date = training.start_date self.end_date = training.end_date merge_lists(self.exercises, training.exercises, models_to_add)<|docstring|>Will update this fields from training fields. If is necessary to add a new exercise, it will be added on models_to_add :param training: the training with the new fields :param models_to_add: list that will be updated with new exercise<|endoftext|>
e32f51e1298c459c7ef3cec2ca82c4ca848b6acff4c70a58c3bab0ea93c729b8
@jit(nopython=True) def crossCorr(t1, t2, binsize, nbins): ' \n\t\tFast crossCorr \n\t' nt1 = len(t1) nt2 = len(t2) if ((np.floor((nbins / 2)) * 2) == nbins): nbins = (nbins + 1) m = ((- binsize) * ((nbins + 1) / 2)) B = np.zeros(nbins) for j in range(nbins): B[j] = (m + (j * binsize)) w = ((nbins / 2) * binsize) C = np.zeros(nbins) i2 = 1 for i1 in range(nt1): lbound = (t1[i1] - w) while ((i2 < nt2) and (t2[i2] < lbound)): i2 = (i2 + 1) while ((i2 > 1) and (t2[(i2 - 1)] > lbound)): i2 = (i2 - 1) rbound = lbound l = i2 for j in range(nbins): k = 0 rbound = (rbound + binsize) while ((l < nt2) and (t2[l] < rbound)): l = (l + 1) k = (k + 1) C[j] += k C = (C / ((nt1 * binsize) / 1000)) return C
Fast crossCorr
python/functions.py
crossCorr
gviejo/ColdPlay
0
python
@jit(nopython=True) def crossCorr(t1, t2, binsize, nbins): ' \n\t\t \n\t' nt1 = len(t1) nt2 = len(t2) if ((np.floor((nbins / 2)) * 2) == nbins): nbins = (nbins + 1) m = ((- binsize) * ((nbins + 1) / 2)) B = np.zeros(nbins) for j in range(nbins): B[j] = (m + (j * binsize)) w = ((nbins / 2) * binsize) C = np.zeros(nbins) i2 = 1 for i1 in range(nt1): lbound = (t1[i1] - w) while ((i2 < nt2) and (t2[i2] < lbound)): i2 = (i2 + 1) while ((i2 > 1) and (t2[(i2 - 1)] > lbound)): i2 = (i2 - 1) rbound = lbound l = i2 for j in range(nbins): k = 0 rbound = (rbound + binsize) while ((l < nt2) and (t2[l] < rbound)): l = (l + 1) k = (k + 1) C[j] += k C = (C / ((nt1 * binsize) / 1000)) return C
@jit(nopython=True) def crossCorr(t1, t2, binsize, nbins): ' \n\t\t \n\t' nt1 = len(t1) nt2 = len(t2) if ((np.floor((nbins / 2)) * 2) == nbins): nbins = (nbins + 1) m = ((- binsize) * ((nbins + 1) / 2)) B = np.zeros(nbins) for j in range(nbins): B[j] = (m + (j * binsize)) w = ((nbins / 2) * binsize) C = np.zeros(nbins) i2 = 1 for i1 in range(nt1): lbound = (t1[i1] - w) while ((i2 < nt2) and (t2[i2] < lbound)): i2 = (i2 + 1) while ((i2 > 1) and (t2[(i2 - 1)] > lbound)): i2 = (i2 - 1) rbound = lbound l = i2 for j in range(nbins): k = 0 rbound = (rbound + binsize) while ((l < nt2) and (t2[l] < rbound)): l = (l + 1) k = (k + 1) C[j] += k C = (C / ((nt1 * binsize) / 1000)) return C<|docstring|>Fast crossCorr<|endoftext|>
881e163586c8b3357fe7309fe525ec216a004832ecdb75e778d433307df73fbd
def crossCorr2(t1, t2, binsize, nbins): '\n\t\tSlow crossCorr\n\t' window = (np.arange(((- binsize) * (nbins / 2)), ((binsize * (nbins / 2)) + (2 * binsize)), binsize) - (binsize / 2.0)) allcount = np.zeros((nbins + 1)) for e in t1: mwind = (window + e) mwind = np.array((([(- 1.0)] + list(mwind)) + [(np.max([t1.max(), t2.max()]) + binsize)])) index = np.digitize(t2, mwind) count = np.array([np.sum((index == i)) for i in range(2, (mwind.shape[0] - 1))]) allcount += np.array(count) allcount = (allcount / ((float(len(t1)) * binsize) / 1000)) return allcount
Slow crossCorr
python/functions.py
crossCorr2
gviejo/ColdPlay
0
python
def crossCorr2(t1, t2, binsize, nbins): '\n\t\t\n\t' window = (np.arange(((- binsize) * (nbins / 2)), ((binsize * (nbins / 2)) + (2 * binsize)), binsize) - (binsize / 2.0)) allcount = np.zeros((nbins + 1)) for e in t1: mwind = (window + e) mwind = np.array((([(- 1.0)] + list(mwind)) + [(np.max([t1.max(), t2.max()]) + binsize)])) index = np.digitize(t2, mwind) count = np.array([np.sum((index == i)) for i in range(2, (mwind.shape[0] - 1))]) allcount += np.array(count) allcount = (allcount / ((float(len(t1)) * binsize) / 1000)) return allcount
def crossCorr2(t1, t2, binsize, nbins): '\n\t\t\n\t' window = (np.arange(((- binsize) * (nbins / 2)), ((binsize * (nbins / 2)) + (2 * binsize)), binsize) - (binsize / 2.0)) allcount = np.zeros((nbins + 1)) for e in t1: mwind = (window + e) mwind = np.array((([(- 1.0)] + list(mwind)) + [(np.max([t1.max(), t2.max()]) + binsize)])) index = np.digitize(t2, mwind) count = np.array([np.sum((index == i)) for i in range(2, (mwind.shape[0] - 1))]) allcount += np.array(count) allcount = (allcount / ((float(len(t1)) * binsize) / 1000)) return allcount<|docstring|>Slow crossCorr<|endoftext|>
c052155119dbec84d736655db8acb586c374a88127cebc68f2c1b4a3d8501988
def findHDCells(tuning_curves, z=50, p=0.0001, m=1): '\n\t\tPeak firing rate larger than 1\n\t\tand Rayleigh test p<0.001 & z > 100\n\t' cond1 = (tuning_curves.max() > m) from pycircstat.tests import rayleigh stat = pd.DataFrame(index=tuning_curves.columns, columns=['pval', 'z']) for k in tuning_curves: stat.loc[k] = rayleigh(tuning_curves[k].index.values, tuning_curves[k].values) cond2 = np.logical_and((stat['pval'] < p), (stat['z'] > z)) tokeep = stat.index.values[np.where(np.logical_and(cond1, cond2))[0]] return (tokeep, stat)
Peak firing rate larger than 1 and Rayleigh test p<0.001 & z > 100
python/functions.py
findHDCells
gviejo/ColdPlay
0
python
def findHDCells(tuning_curves, z=50, p=0.0001, m=1): '\n\t\tPeak firing rate larger than 1\n\t\tand Rayleigh test p<0.001 & z > 100\n\t' cond1 = (tuning_curves.max() > m) from pycircstat.tests import rayleigh stat = pd.DataFrame(index=tuning_curves.columns, columns=['pval', 'z']) for k in tuning_curves: stat.loc[k] = rayleigh(tuning_curves[k].index.values, tuning_curves[k].values) cond2 = np.logical_and((stat['pval'] < p), (stat['z'] > z)) tokeep = stat.index.values[np.where(np.logical_and(cond1, cond2))[0]] return (tokeep, stat)
def findHDCells(tuning_curves, z=50, p=0.0001, m=1): '\n\t\tPeak firing rate larger than 1\n\t\tand Rayleigh test p<0.001 & z > 100\n\t' cond1 = (tuning_curves.max() > m) from pycircstat.tests import rayleigh stat = pd.DataFrame(index=tuning_curves.columns, columns=['pval', 'z']) for k in tuning_curves: stat.loc[k] = rayleigh(tuning_curves[k].index.values, tuning_curves[k].values) cond2 = np.logical_and((stat['pval'] < p), (stat['z'] > z)) tokeep = stat.index.values[np.where(np.logical_and(cond1, cond2))[0]] return (tokeep, stat)<|docstring|>Peak firing rate larger than 1 and Rayleigh test p<0.001 & z > 100<|endoftext|>
0721d8d119c82449f0d22576ce222f77bc021d8688bb814698fc1348edb7766d
def decodeHD(tuning_curves, spikes, ep, bin_size=200, px=None): '\n\t\tSee : Zhang, 1998, Interpreting Neuronal Population Activity by Reconstruction: Unified Framework With Application to Hippocampal Place Cells\n\t\ttuning_curves: pd.DataFrame with angular position as index and columns as neuron\n\t\tspikes : dictionnary of spike times\n\t\tep : nts.IntervalSet, the epochs for decoding\n\t\tbin_size : in ms (default:200ms)\n\t\tpx : Occupancy. If None, px is uniform\n\t' if (len(ep) == 1): bins = np.arange(ep.as_units('ms').start.iloc[0], ep.as_units('ms').end.iloc[(- 1)], bin_size) else: print('TODO') sys.exit() order = tuning_curves.columns.values w = scipy.signal.gaussian(51, 2) spike_counts = pd.DataFrame(index=(bins[0:(- 1)] + (np.diff(bins) / 2)), columns=order) for n in spike_counts: spks = spikes[n].restrict(ep).as_units('ms').index.values tmp = np.histogram(spks, bins) spike_counts[n] = np.convolve(tmp[0], w, mode='same') tcurves_array = tuning_curves.values spike_counts_array = spike_counts.values proba_angle = np.zeros((spike_counts.shape[0], tuning_curves.shape[0])) part1 = np.exp(((- (bin_size / 1000)) * tcurves_array.sum(1))) if (px is not None): part2 = px else: part2 = np.ones(tuning_curves.shape[0]) for i in range(len(proba_angle)): part3 = np.prod((tcurves_array ** spike_counts_array[i]), 1) p = ((part1 * part2) * part3) proba_angle[i] = (p / p.sum()) proba_angle = pd.DataFrame(index=spike_counts.index.values, columns=tuning_curves.index.values, data=proba_angle) proba_angle = proba_angle.astype('float') decoded = nts.Tsd(t=proba_angle.index.values, d=proba_angle.idxmax(1).values, time_units='ms') return (decoded, proba_angle, spike_counts)
See : Zhang, 1998, Interpreting Neuronal Population Activity by Reconstruction: Unified Framework With Application to Hippocampal Place Cells tuning_curves: pd.DataFrame with angular position as index and columns as neuron spikes : dictionnary of spike times ep : nts.IntervalSet, the epochs for decoding bin_size : in ms (default:200ms) px : Occupancy. If None, px is uniform
python/functions.py
decodeHD
gviejo/ColdPlay
0
python
def decodeHD(tuning_curves, spikes, ep, bin_size=200, px=None): '\n\t\tSee : Zhang, 1998, Interpreting Neuronal Population Activity by Reconstruction: Unified Framework With Application to Hippocampal Place Cells\n\t\ttuning_curves: pd.DataFrame with angular position as index and columns as neuron\n\t\tspikes : dictionnary of spike times\n\t\tep : nts.IntervalSet, the epochs for decoding\n\t\tbin_size : in ms (default:200ms)\n\t\tpx : Occupancy. If None, px is uniform\n\t' if (len(ep) == 1): bins = np.arange(ep.as_units('ms').start.iloc[0], ep.as_units('ms').end.iloc[(- 1)], bin_size) else: print('TODO') sys.exit() order = tuning_curves.columns.values w = scipy.signal.gaussian(51, 2) spike_counts = pd.DataFrame(index=(bins[0:(- 1)] + (np.diff(bins) / 2)), columns=order) for n in spike_counts: spks = spikes[n].restrict(ep).as_units('ms').index.values tmp = np.histogram(spks, bins) spike_counts[n] = np.convolve(tmp[0], w, mode='same') tcurves_array = tuning_curves.values spike_counts_array = spike_counts.values proba_angle = np.zeros((spike_counts.shape[0], tuning_curves.shape[0])) part1 = np.exp(((- (bin_size / 1000)) * tcurves_array.sum(1))) if (px is not None): part2 = px else: part2 = np.ones(tuning_curves.shape[0]) for i in range(len(proba_angle)): part3 = np.prod((tcurves_array ** spike_counts_array[i]), 1) p = ((part1 * part2) * part3) proba_angle[i] = (p / p.sum()) proba_angle = pd.DataFrame(index=spike_counts.index.values, columns=tuning_curves.index.values, data=proba_angle) proba_angle = proba_angle.astype('float') decoded = nts.Tsd(t=proba_angle.index.values, d=proba_angle.idxmax(1).values, time_units='ms') return (decoded, proba_angle, spike_counts)
def decodeHD(tuning_curves, spikes, ep, bin_size=200, px=None): '\n\t\tSee : Zhang, 1998, Interpreting Neuronal Population Activity by Reconstruction: Unified Framework With Application to Hippocampal Place Cells\n\t\ttuning_curves: pd.DataFrame with angular position as index and columns as neuron\n\t\tspikes : dictionnary of spike times\n\t\tep : nts.IntervalSet, the epochs for decoding\n\t\tbin_size : in ms (default:200ms)\n\t\tpx : Occupancy. If None, px is uniform\n\t' if (len(ep) == 1): bins = np.arange(ep.as_units('ms').start.iloc[0], ep.as_units('ms').end.iloc[(- 1)], bin_size) else: print('TODO') sys.exit() order = tuning_curves.columns.values w = scipy.signal.gaussian(51, 2) spike_counts = pd.DataFrame(index=(bins[0:(- 1)] + (np.diff(bins) / 2)), columns=order) for n in spike_counts: spks = spikes[n].restrict(ep).as_units('ms').index.values tmp = np.histogram(spks, bins) spike_counts[n] = np.convolve(tmp[0], w, mode='same') tcurves_array = tuning_curves.values spike_counts_array = spike_counts.values proba_angle = np.zeros((spike_counts.shape[0], tuning_curves.shape[0])) part1 = np.exp(((- (bin_size / 1000)) * tcurves_array.sum(1))) if (px is not None): part2 = px else: part2 = np.ones(tuning_curves.shape[0]) for i in range(len(proba_angle)): part3 = np.prod((tcurves_array ** spike_counts_array[i]), 1) p = ((part1 * part2) * part3) proba_angle[i] = (p / p.sum()) proba_angle = pd.DataFrame(index=spike_counts.index.values, columns=tuning_curves.index.values, data=proba_angle) proba_angle = proba_angle.astype('float') decoded = nts.Tsd(t=proba_angle.index.values, d=proba_angle.idxmax(1).values, time_units='ms') return (decoded, proba_angle, spike_counts)<|docstring|>See : Zhang, 1998, Interpreting Neuronal Population Activity by Reconstruction: Unified Framework With Application to Hippocampal Place Cells tuning_curves: pd.DataFrame with angular position as index and columns as neuron spikes : dictionnary of spike times ep : nts.IntervalSet, the epochs for 
decoding bin_size : in ms (default:200ms) px : Occupancy. If None, px is uniform<|endoftext|>
4c0c02c8609e8650d1d8ebc17332cd69b70cef8dd62840a31e0214191abf499f
def centerTuningCurves(tcurve): '\n\tcenter tuning curves by peak\n\t' peak = pd.Series(index=tcurve.columns, data=np.array([circmean(tcurve.index.values, tcurve[i].values) for i in tcurve.columns])) new_tcurve = [] for p in tcurve.columns: x = (tcurve[p].index.values - tcurve[p].index[tcurve[p].index.get_loc(peak[p], method='nearest')]) x[(x < (- np.pi))] += (2 * np.pi) x[(x > np.pi)] -= (2 * np.pi) tmp = pd.Series(index=x, data=tcurve[p].values).sort_index() new_tcurve.append(tmp.values) new_tcurve = pd.DataFrame(index=np.linspace((- np.pi), np.pi, (tcurve.shape[0] + 1))[0:(- 1)], data=np.array(new_tcurve).T, columns=tcurve.columns) return new_tcurve
center tuning curves by peak
python/functions.py
centerTuningCurves
gviejo/ColdPlay
0
python
def centerTuningCurves(tcurve): '\n\t\n\t' peak = pd.Series(index=tcurve.columns, data=np.array([circmean(tcurve.index.values, tcurve[i].values) for i in tcurve.columns])) new_tcurve = [] for p in tcurve.columns: x = (tcurve[p].index.values - tcurve[p].index[tcurve[p].index.get_loc(peak[p], method='nearest')]) x[(x < (- np.pi))] += (2 * np.pi) x[(x > np.pi)] -= (2 * np.pi) tmp = pd.Series(index=x, data=tcurve[p].values).sort_index() new_tcurve.append(tmp.values) new_tcurve = pd.DataFrame(index=np.linspace((- np.pi), np.pi, (tcurve.shape[0] + 1))[0:(- 1)], data=np.array(new_tcurve).T, columns=tcurve.columns) return new_tcurve
def centerTuningCurves(tcurve): '\n\t\n\t' peak = pd.Series(index=tcurve.columns, data=np.array([circmean(tcurve.index.values, tcurve[i].values) for i in tcurve.columns])) new_tcurve = [] for p in tcurve.columns: x = (tcurve[p].index.values - tcurve[p].index[tcurve[p].index.get_loc(peak[p], method='nearest')]) x[(x < (- np.pi))] += (2 * np.pi) x[(x > np.pi)] -= (2 * np.pi) tmp = pd.Series(index=x, data=tcurve[p].values).sort_index() new_tcurve.append(tmp.values) new_tcurve = pd.DataFrame(index=np.linspace((- np.pi), np.pi, (tcurve.shape[0] + 1))[0:(- 1)], data=np.array(new_tcurve).T, columns=tcurve.columns) return new_tcurve<|docstring|>center tuning curves by peak<|endoftext|>
47d7ddfad57a6c4d97f892f53f1fdf3bad88d4516c4ce082e46ef1315fb3b2b5
def offsetTuningCurves(tcurve, diffs): '\n\toffseting tuning curves synced by diff\n\t' new_tcurve = [] for p in tcurve.columns: x = (tcurve[p].index.values - tcurve[p].index[tcurve[p].index.get_loc(diffs[p], method='nearest')]) x[(x < (- np.pi))] += (2 * np.pi) x[(x > np.pi)] -= (2 * np.pi) tmp = pd.Series(index=x, data=tcurve[p].values).sort_index() new_tcurve.append(tmp.values) new_tcurve = pd.DataFrame(index=np.linspace((- np.pi), np.pi, (tcurve.shape[0] + 1))[0:(- 1)], data=np.array(new_tcurve).T, columns=tcurve.columns) return new_tcurve
offseting tuning curves synced by diff
python/functions.py
offsetTuningCurves
gviejo/ColdPlay
0
python
def offsetTuningCurves(tcurve, diffs): '\n\t\n\t' new_tcurve = [] for p in tcurve.columns: x = (tcurve[p].index.values - tcurve[p].index[tcurve[p].index.get_loc(diffs[p], method='nearest')]) x[(x < (- np.pi))] += (2 * np.pi) x[(x > np.pi)] -= (2 * np.pi) tmp = pd.Series(index=x, data=tcurve[p].values).sort_index() new_tcurve.append(tmp.values) new_tcurve = pd.DataFrame(index=np.linspace((- np.pi), np.pi, (tcurve.shape[0] + 1))[0:(- 1)], data=np.array(new_tcurve).T, columns=tcurve.columns) return new_tcurve
def offsetTuningCurves(tcurve, diffs): '\n\t\n\t' new_tcurve = [] for p in tcurve.columns: x = (tcurve[p].index.values - tcurve[p].index[tcurve[p].index.get_loc(diffs[p], method='nearest')]) x[(x < (- np.pi))] += (2 * np.pi) x[(x > np.pi)] -= (2 * np.pi) tmp = pd.Series(index=x, data=tcurve[p].values).sort_index() new_tcurve.append(tmp.values) new_tcurve = pd.DataFrame(index=np.linspace((- np.pi), np.pi, (tcurve.shape[0] + 1))[0:(- 1)], data=np.array(new_tcurve).T, columns=tcurve.columns) return new_tcurve<|docstring|>offseting tuning curves synced by diff<|endoftext|>
bd54abdfe10bf569574bf9061545dbf6974fdd89ed15dfeaad244218c84a8116
def getPeaksandTroughs(lfp, min_points): '\t \n\t\tAt 250Hz (1250/5), 2 troughs cannont be closer than 20 (min_points) points (if theta reaches 12Hz);\t\t\n\t' import neuroseries as nts import scipy.signal if isinstance(lfp, nts.time_series.Tsd): troughs = nts.Tsd(lfp.as_series().iloc[scipy.signal.argrelmin(lfp.values, order=min_points)[0]], time_units='us') peaks = nts.Tsd(lfp.as_series().iloc[scipy.signal.argrelmax(lfp.values, order=min_points)[0]], time_units='us') tmp = nts.Tsd(troughs.realign(peaks, align='next').as_series().drop_duplicates('first')) peaks = peaks[tmp.index] tmp = nts.Tsd(peaks.realign(troughs, align='prev').as_series().drop_duplicates('first')) troughs = troughs[tmp.index] return (peaks, troughs) elif isinstance(lfp, nts.time_series.TsdFrame): peaks = nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape)) troughs = nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape)) for i in lfp.keys(): (peaks[i], troughs[i]) = getPeaksandTroughs(lfp[i], min_points) return (peaks, troughs)
At 250Hz (1250/5), 2 troughs cannont be closer than 20 (min_points) points (if theta reaches 12Hz);
python/functions.py
getPeaksandTroughs
gviejo/ColdPlay
0
python
def getPeaksandTroughs(lfp, min_points): '\t \n\t\t\t\t\n\t' import neuroseries as nts import scipy.signal if isinstance(lfp, nts.time_series.Tsd): troughs = nts.Tsd(lfp.as_series().iloc[scipy.signal.argrelmin(lfp.values, order=min_points)[0]], time_units='us') peaks = nts.Tsd(lfp.as_series().iloc[scipy.signal.argrelmax(lfp.values, order=min_points)[0]], time_units='us') tmp = nts.Tsd(troughs.realign(peaks, align='next').as_series().drop_duplicates('first')) peaks = peaks[tmp.index] tmp = nts.Tsd(peaks.realign(troughs, align='prev').as_series().drop_duplicates('first')) troughs = troughs[tmp.index] return (peaks, troughs) elif isinstance(lfp, nts.time_series.TsdFrame): peaks = nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape)) troughs = nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape)) for i in lfp.keys(): (peaks[i], troughs[i]) = getPeaksandTroughs(lfp[i], min_points) return (peaks, troughs)
def getPeaksandTroughs(lfp, min_points): '\t \n\t\t\t\t\n\t' import neuroseries as nts import scipy.signal if isinstance(lfp, nts.time_series.Tsd): troughs = nts.Tsd(lfp.as_series().iloc[scipy.signal.argrelmin(lfp.values, order=min_points)[0]], time_units='us') peaks = nts.Tsd(lfp.as_series().iloc[scipy.signal.argrelmax(lfp.values, order=min_points)[0]], time_units='us') tmp = nts.Tsd(troughs.realign(peaks, align='next').as_series().drop_duplicates('first')) peaks = peaks[tmp.index] tmp = nts.Tsd(peaks.realign(troughs, align='prev').as_series().drop_duplicates('first')) troughs = troughs[tmp.index] return (peaks, troughs) elif isinstance(lfp, nts.time_series.TsdFrame): peaks = nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape)) troughs = nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape)) for i in lfp.keys(): (peaks[i], troughs[i]) = getPeaksandTroughs(lfp[i], min_points) return (peaks, troughs)<|docstring|>At 250Hz (1250/5), 2 troughs cannont be closer than 20 (min_points) points (if theta reaches 12Hz);<|endoftext|>
13606d90dcd479bfef47b4ad38e1a055012d2be819837099afd27aa490c73268
def getPhase(lfp, fmin, fmax, nbins, fsamp, power=False): ' Continuous Wavelets Transform\n\t\treturn phase of lfp in a Tsd array\n\t' import neuroseries as nts from Wavelets import MyMorlet as Morlet if isinstance(lfp, nts.time_series.TsdFrame): allphase = nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape)) allpwr = nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape)) for i in lfp.keys(): (allphase[i], allpwr[i]) = getPhase(lfp[i], fmin, fmax, nbins, fsamp, power=True) if power: return (allphase, allpwr) else: return allphase elif isinstance(lfp, nts.time_series.Tsd): cw = Morlet(lfp.values, fmin, fmax, nbins, fsamp) cwt = cw.getdata() cwt = np.flip(cwt, axis=0) wave = (np.abs(cwt) ** 2.0) phases = np.arctan2(np.imag(cwt), np.real(cwt)).transpose() cwt = None index = np.argmax(wave, 0) phase = np.zeros(len(index)) for i in range(len(index)): phase[i] = phases[(i, index[i])] phases = None if power: pwrs = cw.getpower() pwr = np.zeros(len(index)) for i in range(len(index)): pwr[i] = pwrs[(index[i], i)] return (nts.Tsd(lfp.index.values, phase), nts.Tsd(lfp.index.values, pwr)) else: return nts.Tsd(lfp.index.values, phase)
Continuous Wavelets Transform return phase of lfp in a Tsd array
python/functions.py
getPhase
gviejo/ColdPlay
0
python
def getPhase(lfp, fmin, fmax, nbins, fsamp, power=False): ' Continuous Wavelets Transform\n\t\treturn phase of lfp in a Tsd array\n\t' import neuroseries as nts from Wavelets import MyMorlet as Morlet if isinstance(lfp, nts.time_series.TsdFrame): allphase = nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape)) allpwr = nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape)) for i in lfp.keys(): (allphase[i], allpwr[i]) = getPhase(lfp[i], fmin, fmax, nbins, fsamp, power=True) if power: return (allphase, allpwr) else: return allphase elif isinstance(lfp, nts.time_series.Tsd): cw = Morlet(lfp.values, fmin, fmax, nbins, fsamp) cwt = cw.getdata() cwt = np.flip(cwt, axis=0) wave = (np.abs(cwt) ** 2.0) phases = np.arctan2(np.imag(cwt), np.real(cwt)).transpose() cwt = None index = np.argmax(wave, 0) phase = np.zeros(len(index)) for i in range(len(index)): phase[i] = phases[(i, index[i])] phases = None if power: pwrs = cw.getpower() pwr = np.zeros(len(index)) for i in range(len(index)): pwr[i] = pwrs[(index[i], i)] return (nts.Tsd(lfp.index.values, phase), nts.Tsd(lfp.index.values, pwr)) else: return nts.Tsd(lfp.index.values, phase)
def getPhase(lfp, fmin, fmax, nbins, fsamp, power=False): ' Continuous Wavelets Transform\n\t\treturn phase of lfp in a Tsd array\n\t' import neuroseries as nts from Wavelets import MyMorlet as Morlet if isinstance(lfp, nts.time_series.TsdFrame): allphase = nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape)) allpwr = nts.TsdFrame(lfp.index.values, np.zeros(lfp.shape)) for i in lfp.keys(): (allphase[i], allpwr[i]) = getPhase(lfp[i], fmin, fmax, nbins, fsamp, power=True) if power: return (allphase, allpwr) else: return allphase elif isinstance(lfp, nts.time_series.Tsd): cw = Morlet(lfp.values, fmin, fmax, nbins, fsamp) cwt = cw.getdata() cwt = np.flip(cwt, axis=0) wave = (np.abs(cwt) ** 2.0) phases = np.arctan2(np.imag(cwt), np.real(cwt)).transpose() cwt = None index = np.argmax(wave, 0) phase = np.zeros(len(index)) for i in range(len(index)): phase[i] = phases[(i, index[i])] phases = None if power: pwrs = cw.getpower() pwr = np.zeros(len(index)) for i in range(len(index)): pwr[i] = pwrs[(index[i], i)] return (nts.Tsd(lfp.index.values, phase), nts.Tsd(lfp.index.values, pwr)) else: return nts.Tsd(lfp.index.values, phase)<|docstring|>Continuous Wavelets Transform return phase of lfp in a Tsd array<|endoftext|>
f5894a33a29e86cf083d838e447f10c852df7f768e75811e010788a25d744a01
def window_sumsquare(window, n_frames, hop_length=200, win_length=800, n_fft=800, dtype=np.float32, norm=None): '\n # from librosa 0.6\n Compute the sum-square envelope of a window function at a given hop length.\n\n This is used to estimate modulation effects induced by windowing\n observations in short-time fourier transforms.\n\n Parameters\n ----------\n window : string, tuple, number, callable, or list-like\n Window specification, as in `get_window`\n\n n_frames : int > 0\n The number of analysis frames\n\n hop_length : int > 0\n The number of samples to advance between frames\n\n win_length : [optional]\n The length of the window function. By default, this matches `n_fft`.\n\n n_fft : int > 0\n The length of each analysis frame.\n\n dtype : np.dtype\n The data type of the output\n\n Returns\n -------\n wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`\n The sum-squared envelope of the window function\n ' if (win_length is None): win_length = n_fft n = (n_fft + (hop_length * (n_frames - 1))) x = np.zeros(n, dtype=dtype) win_sq = get_window(window, win_length, fftbins=True) win_sq = (librosa_util.normalize(win_sq, norm=norm) ** 2) win_sq = librosa_util.pad_center(win_sq, n_fft) for i in range(n_frames): sample = (i * hop_length) x[sample:min(n, (sample + n_fft))] += win_sq[:max(0, min(n_fft, (n - sample)))] return x
# from librosa 0.6 Compute the sum-square envelope of a window function at a given hop length. This is used to estimate modulation effects induced by windowing observations in short-time fourier transforms. Parameters ---------- window : string, tuple, number, callable, or list-like Window specification, as in `get_window` n_frames : int > 0 The number of analysis frames hop_length : int > 0 The number of samples to advance between frames win_length : [optional] The length of the window function. By default, this matches `n_fft`. n_fft : int > 0 The length of each analysis frame. dtype : np.dtype The data type of the output Returns ------- wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))` The sum-squared envelope of the window function
uberduck_ml_dev/utils/utils.py
window_sumsquare
Cris140/uberduck-ml-dev
167
python
def window_sumsquare(window, n_frames, hop_length=200, win_length=800, n_fft=800, dtype=np.float32, norm=None): '\n # from librosa 0.6\n Compute the sum-square envelope of a window function at a given hop length.\n\n This is used to estimate modulation effects induced by windowing\n observations in short-time fourier transforms.\n\n Parameters\n ----------\n window : string, tuple, number, callable, or list-like\n Window specification, as in `get_window`\n\n n_frames : int > 0\n The number of analysis frames\n\n hop_length : int > 0\n The number of samples to advance between frames\n\n win_length : [optional]\n The length of the window function. By default, this matches `n_fft`.\n\n n_fft : int > 0\n The length of each analysis frame.\n\n dtype : np.dtype\n The data type of the output\n\n Returns\n -------\n wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`\n The sum-squared envelope of the window function\n ' if (win_length is None): win_length = n_fft n = (n_fft + (hop_length * (n_frames - 1))) x = np.zeros(n, dtype=dtype) win_sq = get_window(window, win_length, fftbins=True) win_sq = (librosa_util.normalize(win_sq, norm=norm) ** 2) win_sq = librosa_util.pad_center(win_sq, n_fft) for i in range(n_frames): sample = (i * hop_length) x[sample:min(n, (sample + n_fft))] += win_sq[:max(0, min(n_fft, (n - sample)))] return x
def window_sumsquare(window, n_frames, hop_length=200, win_length=800, n_fft=800, dtype=np.float32, norm=None): '\n # from librosa 0.6\n Compute the sum-square envelope of a window function at a given hop length.\n\n This is used to estimate modulation effects induced by windowing\n observations in short-time fourier transforms.\n\n Parameters\n ----------\n window : string, tuple, number, callable, or list-like\n Window specification, as in `get_window`\n\n n_frames : int > 0\n The number of analysis frames\n\n hop_length : int > 0\n The number of samples to advance between frames\n\n win_length : [optional]\n The length of the window function. By default, this matches `n_fft`.\n\n n_fft : int > 0\n The length of each analysis frame.\n\n dtype : np.dtype\n The data type of the output\n\n Returns\n -------\n wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`\n The sum-squared envelope of the window function\n ' if (win_length is None): win_length = n_fft n = (n_fft + (hop_length * (n_frames - 1))) x = np.zeros(n, dtype=dtype) win_sq = get_window(window, win_length, fftbins=True) win_sq = (librosa_util.normalize(win_sq, norm=norm) ** 2) win_sq = librosa_util.pad_center(win_sq, n_fft) for i in range(n_frames): sample = (i * hop_length) x[sample:min(n, (sample + n_fft))] += win_sq[:max(0, min(n_fft, (n - sample)))] return x<|docstring|># from librosa 0.6 Compute the sum-square envelope of a window function at a given hop length. This is used to estimate modulation effects induced by windowing observations in short-time fourier transforms. Parameters ---------- window : string, tuple, number, callable, or list-like Window specification, as in `get_window` n_frames : int > 0 The number of analysis frames hop_length : int > 0 The number of samples to advance between frames win_length : [optional] The length of the window function. By default, this matches `n_fft`. n_fft : int > 0 The length of each analysis frame. 
dtype : np.dtype The data type of the output Returns ------- wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))` The sum-squared envelope of the window function<|endoftext|>
2b3ec346caa42a07c83b9e18c0896681b179ef23c47a4e4c5ed42f3373d585da
def griffin_lim(magnitudes, stft_fn, n_iters=30): '\n PARAMS\n ------\n magnitudes: spectrogram magnitudes\n stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods\n ' angles = np.angle(np.exp(((2j * np.pi) * np.random.rand(*magnitudes.size())))) angles = angles.astype(np.float32) angles = torch.autograd.Variable(torch.from_numpy(angles)) signal = stft_fn.inverse(magnitudes, angles).squeeze(1) for i in range(n_iters): (_, angles) = stft_fn.transform(signal) signal = stft_fn.inverse(magnitudes, angles).squeeze(1) return signal
PARAMS ------ magnitudes: spectrogram magnitudes stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods
uberduck_ml_dev/utils/utils.py
griffin_lim
Cris140/uberduck-ml-dev
167
python
def griffin_lim(magnitudes, stft_fn, n_iters=30): '\n PARAMS\n ------\n magnitudes: spectrogram magnitudes\n stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods\n ' angles = np.angle(np.exp(((2j * np.pi) * np.random.rand(*magnitudes.size())))) angles = angles.astype(np.float32) angles = torch.autograd.Variable(torch.from_numpy(angles)) signal = stft_fn.inverse(magnitudes, angles).squeeze(1) for i in range(n_iters): (_, angles) = stft_fn.transform(signal) signal = stft_fn.inverse(magnitudes, angles).squeeze(1) return signal
def griffin_lim(magnitudes, stft_fn, n_iters=30): '\n PARAMS\n ------\n magnitudes: spectrogram magnitudes\n stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods\n ' angles = np.angle(np.exp(((2j * np.pi) * np.random.rand(*magnitudes.size())))) angles = angles.astype(np.float32) angles = torch.autograd.Variable(torch.from_numpy(angles)) signal = stft_fn.inverse(magnitudes, angles).squeeze(1) for i in range(n_iters): (_, angles) = stft_fn.transform(signal) signal = stft_fn.inverse(magnitudes, angles).squeeze(1) return signal<|docstring|>PARAMS ------ magnitudes: spectrogram magnitudes stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods<|endoftext|>
4d03ce31d2336c0c6159582e262b0a9f809ad01c71b07630d4090e5174374e3c
def dynamic_range_compression(x, C=1, clip_val=1e-05): '\n PARAMS\n ------\n C: compression factor\n ' return torch.log((torch.clamp(x, min=clip_val) * C))
PARAMS ------ C: compression factor
uberduck_ml_dev/utils/utils.py
dynamic_range_compression
Cris140/uberduck-ml-dev
167
python
def dynamic_range_compression(x, C=1, clip_val=1e-05): '\n PARAMS\n ------\n C: compression factor\n ' return torch.log((torch.clamp(x, min=clip_val) * C))
def dynamic_range_compression(x, C=1, clip_val=1e-05): '\n PARAMS\n ------\n C: compression factor\n ' return torch.log((torch.clamp(x, min=clip_val) * C))<|docstring|>PARAMS ------ C: compression factor<|endoftext|>
22b22b9c1ff123db5e96e9d8ac124d543df25750bb9beea6983e9210b57ba398
def dynamic_range_decompression(x, C=1): '\n PARAMS\n ------\n C: compression factor used to compress\n ' return (torch.exp(x) / C)
PARAMS ------ C: compression factor used to compress
uberduck_ml_dev/utils/utils.py
dynamic_range_decompression
Cris140/uberduck-ml-dev
167
python
def dynamic_range_decompression(x, C=1): '\n PARAMS\n ------\n C: compression factor used to compress\n ' return (torch.exp(x) / C)
def dynamic_range_decompression(x, C=1): '\n PARAMS\n ------\n C: compression factor used to compress\n ' return (torch.exp(x) / C)<|docstring|>PARAMS ------ C: compression factor used to compress<|endoftext|>
4946cc4acc3a35bb9d0b518da3058f76eed4f476486b90828d3ebaaf578d6aa2
def get_mask_from_lengths(lengths: torch.Tensor, max_len: int=0): 'Return a mask matrix. Unmasked entires are true.' if (max_len == 0): max_len = int(torch.max(lengths).item()) ids = torch.arange(0, max_len, device=lengths.device, dtype=torch.long) mask = (ids < lengths.unsqueeze(1)).bool() return mask
Return a mask matrix. Unmasked entires are true.
uberduck_ml_dev/utils/utils.py
get_mask_from_lengths
Cris140/uberduck-ml-dev
167
python
def get_mask_from_lengths(lengths: torch.Tensor, max_len: int=0): if (max_len == 0): max_len = int(torch.max(lengths).item()) ids = torch.arange(0, max_len, device=lengths.device, dtype=torch.long) mask = (ids < lengths.unsqueeze(1)).bool() return mask
def get_mask_from_lengths(lengths: torch.Tensor, max_len: int=0): if (max_len == 0): max_len = int(torch.max(lengths).item()) ids = torch.arange(0, max_len, device=lengths.device, dtype=torch.long) mask = (ids < lengths.unsqueeze(1)).bool() return mask<|docstring|>Return a mask matrix. Unmasked entires are true.<|endoftext|>
ef7664adc09d98e00b52ff3e22732bc83df9c4e558cc300e59b071863cf776c1
def convert_pad_shape(pad_shape): 'Reverse, then flatten a list of lists.' l = pad_shape[::(- 1)] pad_shape = [item for sublist in l for item in sublist] return pad_shape
Reverse, then flatten a list of lists.
uberduck_ml_dev/utils/utils.py
convert_pad_shape
Cris140/uberduck-ml-dev
167
python
def convert_pad_shape(pad_shape): l = pad_shape[::(- 1)] pad_shape = [item for sublist in l for item in sublist] return pad_shape
def convert_pad_shape(pad_shape): l = pad_shape[::(- 1)] pad_shape = [item for sublist in l for item in sublist] return pad_shape<|docstring|>Reverse, then flatten a list of lists.<|endoftext|>
da70f01d105bad5a56e34ae30b6e180d32a1f6639b5518803fa9cfb7f442db18
def sequence_mask(length, max_length=None): 'The same as get_mask_from_lengths' if (max_length is None): max_length = length.max() x = torch.arange(max_length, dtype=length.dtype, device=length.device) return (x.unsqueeze(0) < length.unsqueeze(1))
The same as get_mask_from_lengths
uberduck_ml_dev/utils/utils.py
sequence_mask
Cris140/uberduck-ml-dev
167
python
def sequence_mask(length, max_length=None): if (max_length is None): max_length = length.max() x = torch.arange(max_length, dtype=length.dtype, device=length.device) return (x.unsqueeze(0) < length.unsqueeze(1))
def sequence_mask(length, max_length=None): if (max_length is None): max_length = length.max() x = torch.arange(max_length, dtype=length.dtype, device=length.device) return (x.unsqueeze(0) < length.unsqueeze(1))<|docstring|>The same as get_mask_from_lengths<|endoftext|>
bb415066c5c77df91c5194a18c9af0c7f08f443c3baf64abc8465586ace6df75
def generate_path(duration, mask): '\n duration: [b, 1, t_x]\n mask: [b, 1, t_y, t_x]\n ' device = duration.device (b, _, t_y, t_x) = mask.shape cum_duration = torch.cumsum(duration, (- 1)) cum_duration_flat = cum_duration.view((b * t_x)) path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) path = path.view(b, t_x, t_y) path = (path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[(:, :(- 1))]) path = (path.unsqueeze(1).transpose(2, 3) * mask) return path
duration: [b, 1, t_x] mask: [b, 1, t_y, t_x]
uberduck_ml_dev/utils/utils.py
generate_path
Cris140/uberduck-ml-dev
167
python
def generate_path(duration, mask): '\n duration: [b, 1, t_x]\n mask: [b, 1, t_y, t_x]\n ' device = duration.device (b, _, t_y, t_x) = mask.shape cum_duration = torch.cumsum(duration, (- 1)) cum_duration_flat = cum_duration.view((b * t_x)) path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) path = path.view(b, t_x, t_y) path = (path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[(:, :(- 1))]) path = (path.unsqueeze(1).transpose(2, 3) * mask) return path
def generate_path(duration, mask): '\n duration: [b, 1, t_x]\n mask: [b, 1, t_y, t_x]\n ' device = duration.device (b, _, t_y, t_x) = mask.shape cum_duration = torch.cumsum(duration, (- 1)) cum_duration_flat = cum_duration.view((b * t_x)) path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) path = path.view(b, t_x, t_y) path = (path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[(:, :(- 1))]) path = (path.unsqueeze(1).transpose(2, 3) * mask) return path<|docstring|>duration: [b, 1, t_x] mask: [b, 1, t_y, t_x]<|endoftext|>
654f258f5f960eb270d40135683c04e35d5bd7dc89dabb8a36729f921507b809
def draw(self, surface): 'draw all sprites onto a surface in z order (lowest z first)' spritedict = self.spritedict items = sorted(spritedict.items(), key=(lambda a: a[0].z)) surface_blit = surface.blit dirty = self.lostsprites self.lostsprites = [] dirty_append = dirty.append for (s, r) in items: newrect = surface_blit(s.image, s.rect) surface_blit(s.image, s.rect) if (r != 0): dirty_append(newrect.union(r)) else: dirty_append(newrect) spritedict[s] = newrect return dirty
draw all sprites onto a surface in z order (lowest z first)
giftrun-0.1/gamesys.py
draw
olemb/giftrun
4
python
def draw(self, surface): spritedict = self.spritedict items = sorted(spritedict.items(), key=(lambda a: a[0].z)) surface_blit = surface.blit dirty = self.lostsprites self.lostsprites = [] dirty_append = dirty.append for (s, r) in items: newrect = surface_blit(s.image, s.rect) surface_blit(s.image, s.rect) if (r != 0): dirty_append(newrect.union(r)) else: dirty_append(newrect) spritedict[s] = newrect return dirty
def draw(self, surface): spritedict = self.spritedict items = sorted(spritedict.items(), key=(lambda a: a[0].z)) surface_blit = surface.blit dirty = self.lostsprites self.lostsprites = [] dirty_append = dirty.append for (s, r) in items: newrect = surface_blit(s.image, s.rect) surface_blit(s.image, s.rect) if (r != 0): dirty_append(newrect.union(r)) else: dirty_append(newrect) spritedict[s] = newrect return dirty<|docstring|>draw all sprites onto a surface in z order (lowest z first)<|endoftext|>
d9487edf2795b57803767fa64d2afc91386d7b9c767ce7856de2dc0bc528bd3c
def init(self): 'Called at init time. Put initialization code for the game here.'
Called at init time. Put initialization code for the game here.
giftrun-0.1/gamesys.py
init
olemb/giftrun
4
python
def init(self):
def init(self): <|docstring|>Called at init time. Put initialization code for the game here.<|endoftext|>
078ba0fb831e041d6c63e77033887b644d740b05c62c55bea7d758abeea291bd
def update(self, events): 'Handles events and updates the objects. Called every frame.' pass
Handles events and updates the objects. Called every frame.
giftrun-0.1/gamesys.py
update
olemb/giftrun
4
python
def update(self, events): pass
def update(self, events): pass<|docstring|>Handles events and updates the objects. Called every frame.<|endoftext|>
a009c36b602dd9bb5c22ab746fa8046f2e63c6c8726c65a04ad53a27865f2227
def test_statement_find_by_account(session): 'Assert that the statement settings by id works.' bcol_account = factory_premium_payment_account() bcol_account.save() payment = factory_payment() payment.save() i = factory_invoice(payment_account=bcol_account) i.save() factory_invoice_reference(i.id).save() settings_model = factory_statement_settings(payment_account_id=bcol_account.id, frequency=StatementFrequency.DAILY.value) statement_model = factory_statement(payment_account_id=bcol_account.id, frequency=StatementFrequency.DAILY.value, statement_settings_id=settings_model.id) factory_statement_invoices(statement_id=statement_model.id, invoice_id=i.id) payment_account = PaymentAccount.find_by_id(bcol_account.id) statements = StatementService.find_by_account_id(payment_account.auth_account_id, page=1, limit=10) assert (statements is not None) assert (statements.get('total') == 1)
Assert that the statement settings by id works.
pay-api/tests/unit/services/test_statement.py
test_statement_find_by_account
thorwolpert/sbc-pay
4
python
def test_statement_find_by_account(session): bcol_account = factory_premium_payment_account() bcol_account.save() payment = factory_payment() payment.save() i = factory_invoice(payment_account=bcol_account) i.save() factory_invoice_reference(i.id).save() settings_model = factory_statement_settings(payment_account_id=bcol_account.id, frequency=StatementFrequency.DAILY.value) statement_model = factory_statement(payment_account_id=bcol_account.id, frequency=StatementFrequency.DAILY.value, statement_settings_id=settings_model.id) factory_statement_invoices(statement_id=statement_model.id, invoice_id=i.id) payment_account = PaymentAccount.find_by_id(bcol_account.id) statements = StatementService.find_by_account_id(payment_account.auth_account_id, page=1, limit=10) assert (statements is not None) assert (statements.get('total') == 1)
def test_statement_find_by_account(session): bcol_account = factory_premium_payment_account() bcol_account.save() payment = factory_payment() payment.save() i = factory_invoice(payment_account=bcol_account) i.save() factory_invoice_reference(i.id).save() settings_model = factory_statement_settings(payment_account_id=bcol_account.id, frequency=StatementFrequency.DAILY.value) statement_model = factory_statement(payment_account_id=bcol_account.id, frequency=StatementFrequency.DAILY.value, statement_settings_id=settings_model.id) factory_statement_invoices(statement_id=statement_model.id, invoice_id=i.id) payment_account = PaymentAccount.find_by_id(bcol_account.id) statements = StatementService.find_by_account_id(payment_account.auth_account_id, page=1, limit=10) assert (statements is not None) assert (statements.get('total') == 1)<|docstring|>Assert that the statement settings by id works.<|endoftext|>
a5d6d0e7926a2831701fa02123a5ea6349af0ba21a82771111191b30da21f4b7
def test_get_statement_report(session): 'Assert that the get statement report works.' bcol_account = factory_premium_payment_account() bcol_account.save() payment = factory_payment() payment.save() i = factory_invoice(payment_account=bcol_account) i.save() factory_invoice_reference(i.id).save() factory_payment_line_item(invoice_id=i.id, fee_schedule_id=1).save() settings_model = factory_statement_settings(payment_account_id=bcol_account.id, frequency=StatementFrequency.DAILY.value) statement_model = factory_statement(payment_account_id=bcol_account.id, frequency=StatementFrequency.DAILY.value, statement_settings_id=settings_model.id) factory_statement_invoices(statement_id=statement_model.id, invoice_id=i.id) payment_account = PaymentAccount.find_by_id(bcol_account.id) statements = StatementService.find_by_account_id(payment_account.auth_account_id, page=1, limit=10) assert (statements is not None) (report_response, report_name) = StatementService.get_statement_report(statement_id=statement_model.id, content_type='application/pdf', auth=get_auth_premium_user()) assert (report_response is not None)
Assert that the get statement report works.
pay-api/tests/unit/services/test_statement.py
test_get_statement_report
thorwolpert/sbc-pay
4
python
def test_get_statement_report(session): bcol_account = factory_premium_payment_account() bcol_account.save() payment = factory_payment() payment.save() i = factory_invoice(payment_account=bcol_account) i.save() factory_invoice_reference(i.id).save() factory_payment_line_item(invoice_id=i.id, fee_schedule_id=1).save() settings_model = factory_statement_settings(payment_account_id=bcol_account.id, frequency=StatementFrequency.DAILY.value) statement_model = factory_statement(payment_account_id=bcol_account.id, frequency=StatementFrequency.DAILY.value, statement_settings_id=settings_model.id) factory_statement_invoices(statement_id=statement_model.id, invoice_id=i.id) payment_account = PaymentAccount.find_by_id(bcol_account.id) statements = StatementService.find_by_account_id(payment_account.auth_account_id, page=1, limit=10) assert (statements is not None) (report_response, report_name) = StatementService.get_statement_report(statement_id=statement_model.id, content_type='application/pdf', auth=get_auth_premium_user()) assert (report_response is not None)
def test_get_statement_report(session): bcol_account = factory_premium_payment_account() bcol_account.save() payment = factory_payment() payment.save() i = factory_invoice(payment_account=bcol_account) i.save() factory_invoice_reference(i.id).save() factory_payment_line_item(invoice_id=i.id, fee_schedule_id=1).save() settings_model = factory_statement_settings(payment_account_id=bcol_account.id, frequency=StatementFrequency.DAILY.value) statement_model = factory_statement(payment_account_id=bcol_account.id, frequency=StatementFrequency.DAILY.value, statement_settings_id=settings_model.id) factory_statement_invoices(statement_id=statement_model.id, invoice_id=i.id) payment_account = PaymentAccount.find_by_id(bcol_account.id) statements = StatementService.find_by_account_id(payment_account.auth_account_id, page=1, limit=10) assert (statements is not None) (report_response, report_name) = StatementService.get_statement_report(statement_id=statement_model.id, content_type='application/pdf', auth=get_auth_premium_user()) assert (report_response is not None)<|docstring|>Assert that the get statement report works.<|endoftext|>
1689205c764bd515dba3ee977588e7e51fa728dbf9ebd0786de35fef4bdbbe4e
def test_get_statement_report_for_empty_invoices(session): 'Assert that the get statement report works for statement with no invoices.' bcol_account = factory_premium_payment_account() bcol_account.save() payment = factory_payment() payment.save() i = factory_invoice(payment_account=bcol_account) i.save() factory_invoice_reference(i.id).save() factory_payment_line_item(invoice_id=i.id, fee_schedule_id=1).save() settings_model = factory_statement_settings(payment_account_id=bcol_account.id, frequency=StatementFrequency.DAILY.value) statement_model = factory_statement(payment_account_id=bcol_account.id, frequency=StatementFrequency.DAILY.value, statement_settings_id=settings_model.id) payment_account = PaymentAccount.find_by_id(bcol_account.id) statements = StatementService.find_by_account_id(payment_account.auth_account_id, page=1, limit=10) assert (statements is not None) (report_response, report_name) = StatementService.get_statement_report(statement_id=statement_model.id, content_type='application/pdf', auth=get_auth_premium_user()) assert (report_response is not None)
Assert that the get statement report works for statement with no invoices.
pay-api/tests/unit/services/test_statement.py
test_get_statement_report_for_empty_invoices
thorwolpert/sbc-pay
4
python
def test_get_statement_report_for_empty_invoices(session): bcol_account = factory_premium_payment_account() bcol_account.save() payment = factory_payment() payment.save() i = factory_invoice(payment_account=bcol_account) i.save() factory_invoice_reference(i.id).save() factory_payment_line_item(invoice_id=i.id, fee_schedule_id=1).save() settings_model = factory_statement_settings(payment_account_id=bcol_account.id, frequency=StatementFrequency.DAILY.value) statement_model = factory_statement(payment_account_id=bcol_account.id, frequency=StatementFrequency.DAILY.value, statement_settings_id=settings_model.id) payment_account = PaymentAccount.find_by_id(bcol_account.id) statements = StatementService.find_by_account_id(payment_account.auth_account_id, page=1, limit=10) assert (statements is not None) (report_response, report_name) = StatementService.get_statement_report(statement_id=statement_model.id, content_type='application/pdf', auth=get_auth_premium_user()) assert (report_response is not None)
def test_get_statement_report_for_empty_invoices(session): bcol_account = factory_premium_payment_account() bcol_account.save() payment = factory_payment() payment.save() i = factory_invoice(payment_account=bcol_account) i.save() factory_invoice_reference(i.id).save() factory_payment_line_item(invoice_id=i.id, fee_schedule_id=1).save() settings_model = factory_statement_settings(payment_account_id=bcol_account.id, frequency=StatementFrequency.DAILY.value) statement_model = factory_statement(payment_account_id=bcol_account.id, frequency=StatementFrequency.DAILY.value, statement_settings_id=settings_model.id) payment_account = PaymentAccount.find_by_id(bcol_account.id) statements = StatementService.find_by_account_id(payment_account.auth_account_id, page=1, limit=10) assert (statements is not None) (report_response, report_name) = StatementService.get_statement_report(statement_id=statement_model.id, content_type='application/pdf', auth=get_auth_premium_user()) assert (report_response is not None)<|docstring|>Assert that the get statement report works for statement with no invoices.<|endoftext|>
8fe8067819857cc003b8c83b959c57e930525d4ee1ba4d089c8c54b6ea836562
def test_get_weekly_statement_report(session): 'Assert that the get statement report works.' bcol_account = factory_premium_payment_account() bcol_account.save() payment = factory_payment() payment.save() i = factory_invoice(payment_account=bcol_account) i.save() factory_invoice_reference(i.id).save() factory_payment_line_item(invoice_id=i.id, fee_schedule_id=1).save() settings_model = factory_statement_settings(payment_account_id=bcol_account.id, frequency=StatementFrequency.WEEKLY.value) statement_model = factory_statement(payment_account_id=bcol_account.id, frequency=StatementFrequency.WEEKLY.value, statement_settings_id=settings_model.id) factory_statement_invoices(statement_id=statement_model.id, invoice_id=i.id) payment_account = PaymentAccount.find_by_id(bcol_account.id) statements = StatementService.find_by_account_id(payment_account.auth_account_id, page=1, limit=10) assert (statements is not None) (report_response, report_name) = StatementService.get_statement_report(statement_id=statement_model.id, content_type='application/pdf', auth=get_auth_premium_user()) assert (report_response is not None)
Assert that the get statement report works.
pay-api/tests/unit/services/test_statement.py
test_get_weekly_statement_report
thorwolpert/sbc-pay
4
python
def test_get_weekly_statement_report(session): bcol_account = factory_premium_payment_account() bcol_account.save() payment = factory_payment() payment.save() i = factory_invoice(payment_account=bcol_account) i.save() factory_invoice_reference(i.id).save() factory_payment_line_item(invoice_id=i.id, fee_schedule_id=1).save() settings_model = factory_statement_settings(payment_account_id=bcol_account.id, frequency=StatementFrequency.WEEKLY.value) statement_model = factory_statement(payment_account_id=bcol_account.id, frequency=StatementFrequency.WEEKLY.value, statement_settings_id=settings_model.id) factory_statement_invoices(statement_id=statement_model.id, invoice_id=i.id) payment_account = PaymentAccount.find_by_id(bcol_account.id) statements = StatementService.find_by_account_id(payment_account.auth_account_id, page=1, limit=10) assert (statements is not None) (report_response, report_name) = StatementService.get_statement_report(statement_id=statement_model.id, content_type='application/pdf', auth=get_auth_premium_user()) assert (report_response is not None)
def test_get_weekly_statement_report(session): bcol_account = factory_premium_payment_account() bcol_account.save() payment = factory_payment() payment.save() i = factory_invoice(payment_account=bcol_account) i.save() factory_invoice_reference(i.id).save() factory_payment_line_item(invoice_id=i.id, fee_schedule_id=1).save() settings_model = factory_statement_settings(payment_account_id=bcol_account.id, frequency=StatementFrequency.WEEKLY.value) statement_model = factory_statement(payment_account_id=bcol_account.id, frequency=StatementFrequency.WEEKLY.value, statement_settings_id=settings_model.id) factory_statement_invoices(statement_id=statement_model.id, invoice_id=i.id) payment_account = PaymentAccount.find_by_id(bcol_account.id) statements = StatementService.find_by_account_id(payment_account.auth_account_id, page=1, limit=10) assert (statements is not None) (report_response, report_name) = StatementService.get_statement_report(statement_id=statement_model.id, content_type='application/pdf', auth=get_auth_premium_user()) assert (report_response is not None)<|docstring|>Assert that the get statement report works.<|endoftext|>
60cd1b430f12cee1e94262372524af674ed97bca0ef56a406c555fe4b386eaa9
@property def incenter(self): '\n The intersection of angle bisectors, Point.\n\n ' n = len(self.vertices) return (sum([(s * p) for (s, p) in zip(self.sides, self.vertices)]) / n)
The intersection of angle bisectors, Point.
src/geometry3/polygon.py
incenter
JnyJny/Geometry3
0
python
@property def incenter(self): '\n \n\n ' n = len(self.vertices) return (sum([(s * p) for (s, p) in zip(self.sides, self.vertices)]) / n)
@property def incenter(self): '\n \n\n ' n = len(self.vertices) return (sum([(s * p) for (s, p) in zip(self.sides, self.vertices)]) / n)<|docstring|>The intersection of angle bisectors, Point.<|endoftext|>
08afb3a977e75d5406819bd1ce80b8a9e9350837e93cb76bc442a494c887f476
def search_sysout(captured, find_me): 'Search capsys message for find_me, return message' for msg in captured.out.split('/n'): if (find_me in msg): return msg return ''
Search capsys message for find_me, return message
tests/unit_tests/test_set_performance_config.py
search_sysout
flywheel-apps/bids-mriqc
0
python
def search_sysout(captured, find_me): for msg in captured.out.split('/n'): if (find_me in msg): return msg return
def search_sysout(captured, find_me): for msg in captured.out.split('/n'): if (find_me in msg): return msg return <|docstring|>Search capsys message for find_me, return message<|endoftext|>
947eceb854f39e7d43069c271267731fb1d0d1d80acfc753f861d8e2f05a8b63
def search_stdout_contains(captured, find_me, contains_me): 'Search stdout message for find_me, return true if it contains contains_me' for msg in captured.out.split('/n'): if (find_me in msg): print(f"Found '{find_me}' in '{msg}'") if (contains_me in msg): print(f"Found '{contains_me}' in '{msg}'") return True return False
Search stdout message for find_me, return true if it contains contains_me
tests/unit_tests/test_set_performance_config.py
search_stdout_contains
flywheel-apps/bids-mriqc
0
python
def search_stdout_contains(captured, find_me, contains_me): for msg in captured.out.split('/n'): if (find_me in msg): print(f"Found '{find_me}' in '{msg}'") if (contains_me in msg): print(f"Found '{contains_me}' in '{msg}'") return True return False
def search_stdout_contains(captured, find_me, contains_me): for msg in captured.out.split('/n'): if (find_me in msg): print(f"Found '{find_me}' in '{msg}'") if (contains_me in msg): print(f"Found '{contains_me}' in '{msg}'") return True return False<|docstring|>Search stdout message for find_me, return true if it contains contains_me<|endoftext|>
fabb47d62c472441a5550953f6da4c222fef965b89fcd8a3ffd8a361fcbb73a0
def gpio_handler(): ' Thread to handle buttons connected to GPIO pins. ' all_buttons = {} dpad_bits = DpadBits() def gpio_pressed(button): ' Called when button connected to GPIO pin is pressed/closed ' print('pressed', button.pin) if (button.pin in all_buttons): ns_button = all_buttons[button.pin] if (ns_button < 128): Gamepad.press(ns_button) else: Gamepad.dPad(dpad_bits.set_bit((255 - ns_button))) else: print('Invalid button') def gpio_released(button): ' Called when button connected to GPIO pin is released/opened ' print('released', button.pin) if (button.pin in all_buttons): ns_button = all_buttons[button.pin] if (ns_button < 128): Gamepad.release(ns_button) else: Gamepad.dPad(dpad_bits.clear_bit((255 - ns_button))) else: print('Invalid button') gpio_ns_map = ({'gpio_number': 4, 'ns_button': NSButton.LEFT_THROTTLE}, {'gpio_number': 17, 'ns_button': NSButton.LEFT_TRIGGER}, {'gpio_number': 27, 'ns_button': NSButton.MINUS}, {'gpio_number': 22, 'ns_button': NSButton.CAPTURE}, {'gpio_number': 5, 'ns_button': 255}, {'gpio_number': 6, 'ns_button': 254}, {'gpio_number': 13, 'ns_button': 253}, {'gpio_number': 19, 'ns_button': 252}, {'gpio_number': 26, 'ns_button': NSButton.LEFT_STICK}, {'gpio_number': 23, 'ns_button': NSButton.RIGHT_THROTTLE}, {'gpio_number': 24, 'ns_button': NSButton.RIGHT_TRIGGER}, {'gpio_number': 25, 'ns_button': NSButton.PLUS}, {'gpio_number': 8, 'ns_button': NSButton.HOME}, {'gpio_number': 7, 'ns_button': NSButton.A}, {'gpio_number': 12, 'ns_button': NSButton.B}, {'gpio_number': 16, 'ns_button': NSButton.X}, {'gpio_number': 20, 'ns_button': NSButton.Y}, {'gpio_number': 21, 'ns_button': NSButton.RIGHT_STICK}) for element in gpio_ns_map: element['button'] = Button(element['gpio_number']) all_buttons[element['button'].pin] = element['ns_button'] element['button'].when_pressed = gpio_pressed element['button'].when_released = gpio_released signal.pause()
Thread to handle buttons connected to GPIO pins.
NSGamepad/Code/gamepad_ns_gpio.py
gpio_handler
gdsports/RaspberryPi-Joystick
70
python
def gpio_handler(): ' ' all_buttons = {} dpad_bits = DpadBits() def gpio_pressed(button): ' Called when button connected to GPIO pin is pressed/closed ' print('pressed', button.pin) if (button.pin in all_buttons): ns_button = all_buttons[button.pin] if (ns_button < 128): Gamepad.press(ns_button) else: Gamepad.dPad(dpad_bits.set_bit((255 - ns_button))) else: print('Invalid button') def gpio_released(button): ' Called when button connected to GPIO pin is released/opened ' print('released', button.pin) if (button.pin in all_buttons): ns_button = all_buttons[button.pin] if (ns_button < 128): Gamepad.release(ns_button) else: Gamepad.dPad(dpad_bits.clear_bit((255 - ns_button))) else: print('Invalid button') gpio_ns_map = ({'gpio_number': 4, 'ns_button': NSButton.LEFT_THROTTLE}, {'gpio_number': 17, 'ns_button': NSButton.LEFT_TRIGGER}, {'gpio_number': 27, 'ns_button': NSButton.MINUS}, {'gpio_number': 22, 'ns_button': NSButton.CAPTURE}, {'gpio_number': 5, 'ns_button': 255}, {'gpio_number': 6, 'ns_button': 254}, {'gpio_number': 13, 'ns_button': 253}, {'gpio_number': 19, 'ns_button': 252}, {'gpio_number': 26, 'ns_button': NSButton.LEFT_STICK}, {'gpio_number': 23, 'ns_button': NSButton.RIGHT_THROTTLE}, {'gpio_number': 24, 'ns_button': NSButton.RIGHT_TRIGGER}, {'gpio_number': 25, 'ns_button': NSButton.PLUS}, {'gpio_number': 8, 'ns_button': NSButton.HOME}, {'gpio_number': 7, 'ns_button': NSButton.A}, {'gpio_number': 12, 'ns_button': NSButton.B}, {'gpio_number': 16, 'ns_button': NSButton.X}, {'gpio_number': 20, 'ns_button': NSButton.Y}, {'gpio_number': 21, 'ns_button': NSButton.RIGHT_STICK}) for element in gpio_ns_map: element['button'] = Button(element['gpio_number']) all_buttons[element['button'].pin] = element['ns_button'] element['button'].when_pressed = gpio_pressed element['button'].when_released = gpio_released signal.pause()
def gpio_handler(): ' ' all_buttons = {} dpad_bits = DpadBits() def gpio_pressed(button): ' Called when button connected to GPIO pin is pressed/closed ' print('pressed', button.pin) if (button.pin in all_buttons): ns_button = all_buttons[button.pin] if (ns_button < 128): Gamepad.press(ns_button) else: Gamepad.dPad(dpad_bits.set_bit((255 - ns_button))) else: print('Invalid button') def gpio_released(button): ' Called when button connected to GPIO pin is released/opened ' print('released', button.pin) if (button.pin in all_buttons): ns_button = all_buttons[button.pin] if (ns_button < 128): Gamepad.release(ns_button) else: Gamepad.dPad(dpad_bits.clear_bit((255 - ns_button))) else: print('Invalid button') gpio_ns_map = ({'gpio_number': 4, 'ns_button': NSButton.LEFT_THROTTLE}, {'gpio_number': 17, 'ns_button': NSButton.LEFT_TRIGGER}, {'gpio_number': 27, 'ns_button': NSButton.MINUS}, {'gpio_number': 22, 'ns_button': NSButton.CAPTURE}, {'gpio_number': 5, 'ns_button': 255}, {'gpio_number': 6, 'ns_button': 254}, {'gpio_number': 13, 'ns_button': 253}, {'gpio_number': 19, 'ns_button': 252}, {'gpio_number': 26, 'ns_button': NSButton.LEFT_STICK}, {'gpio_number': 23, 'ns_button': NSButton.RIGHT_THROTTLE}, {'gpio_number': 24, 'ns_button': NSButton.RIGHT_TRIGGER}, {'gpio_number': 25, 'ns_button': NSButton.PLUS}, {'gpio_number': 8, 'ns_button': NSButton.HOME}, {'gpio_number': 7, 'ns_button': NSButton.A}, {'gpio_number': 12, 'ns_button': NSButton.B}, {'gpio_number': 16, 'ns_button': NSButton.X}, {'gpio_number': 20, 'ns_button': NSButton.Y}, {'gpio_number': 21, 'ns_button': NSButton.RIGHT_STICK}) for element in gpio_ns_map: element['button'] = Button(element['gpio_number']) all_buttons[element['button'].pin] = element['ns_button'] element['button'].when_pressed = gpio_pressed element['button'].when_released = gpio_released signal.pause()<|docstring|>Thread to handle buttons connected to GPIO pins.<|endoftext|>
3e05bf599a3b8cf887ff3e6b27df5589416b778b3284320f9b6f504d4364f801
def main(): ' main program ' threading.Thread(target=gpio_handler, args=(), daemon=True).start() Gamepad.begin('/dev/hidg0') while True: ' Read from keyboard and mouse input using evdev? ' pass
main program
NSGamepad/Code/gamepad_ns_gpio.py
main
gdsports/RaspberryPi-Joystick
70
python
def main(): ' ' threading.Thread(target=gpio_handler, args=(), daemon=True).start() Gamepad.begin('/dev/hidg0') while True: ' Read from keyboard and mouse input using evdev? ' pass
def main(): ' ' threading.Thread(target=gpio_handler, args=(), daemon=True).start() Gamepad.begin('/dev/hidg0') while True: ' Read from keyboard and mouse input using evdev? ' pass<|docstring|>main program<|endoftext|>
01a75d51c4b26415e83346814a97d4fdf618cdacbf3b0438a2c390f563793270
def set_bit(self, bit_num): ' Set bit in direction pad bit map. Update NSGadget direction pad. ' self.dpad_bits |= (1 << bit_num) return BUTTONS_MAP_DPAD[self.dpad_bits]
Set bit in direction pad bit map. Update NSGadget direction pad.
NSGamepad/Code/gamepad_ns_gpio.py
set_bit
gdsports/RaspberryPi-Joystick
70
python
def set_bit(self, bit_num): ' ' self.dpad_bits |= (1 << bit_num) return BUTTONS_MAP_DPAD[self.dpad_bits]
def set_bit(self, bit_num): ' ' self.dpad_bits |= (1 << bit_num) return BUTTONS_MAP_DPAD[self.dpad_bits]<|docstring|>Set bit in direction pad bit map. Update NSGadget direction pad.<|endoftext|>
5cfed180cf71a408e49266a517104f7ed9dde5c51f3090408242bc923d1d5f7c
def clear_bit(self, bit_num): ' Clear bit in direction pad bit map. Update NSGadget direction pad. ' self.dpad_bits &= (~ (1 << bit_num)) return BUTTONS_MAP_DPAD[self.dpad_bits]
Clear bit in direction pad bit map. Update NSGadget direction pad.
NSGamepad/Code/gamepad_ns_gpio.py
clear_bit
gdsports/RaspberryPi-Joystick
70
python
def clear_bit(self, bit_num): ' ' self.dpad_bits &= (~ (1 << bit_num)) return BUTTONS_MAP_DPAD[self.dpad_bits]
def clear_bit(self, bit_num): ' ' self.dpad_bits &= (~ (1 << bit_num)) return BUTTONS_MAP_DPAD[self.dpad_bits]<|docstring|>Clear bit in direction pad bit map. Update NSGadget direction pad.<|endoftext|>
284ca9416c9e5d54f6d4a1c4803ebd1d30b33d45f25b7b160e9b92e270b6b94c
def gpio_pressed(button): ' Called when button connected to GPIO pin is pressed/closed ' print('pressed', button.pin) if (button.pin in all_buttons): ns_button = all_buttons[button.pin] if (ns_button < 128): Gamepad.press(ns_button) else: Gamepad.dPad(dpad_bits.set_bit((255 - ns_button))) else: print('Invalid button')
Called when button connected to GPIO pin is pressed/closed
NSGamepad/Code/gamepad_ns_gpio.py
gpio_pressed
gdsports/RaspberryPi-Joystick
70
python
def gpio_pressed(button): ' ' print('pressed', button.pin) if (button.pin in all_buttons): ns_button = all_buttons[button.pin] if (ns_button < 128): Gamepad.press(ns_button) else: Gamepad.dPad(dpad_bits.set_bit((255 - ns_button))) else: print('Invalid button')
def gpio_pressed(button): ' ' print('pressed', button.pin) if (button.pin in all_buttons): ns_button = all_buttons[button.pin] if (ns_button < 128): Gamepad.press(ns_button) else: Gamepad.dPad(dpad_bits.set_bit((255 - ns_button))) else: print('Invalid button')<|docstring|>Called when button connected to GPIO pin is pressed/closed<|endoftext|>
41c7e2d268116c16586cb5e3b70846edc9b6169fcefae0a294dbc3c8ef11923b
def gpio_released(button): ' Called when button connected to GPIO pin is released/opened ' print('released', button.pin) if (button.pin in all_buttons): ns_button = all_buttons[button.pin] if (ns_button < 128): Gamepad.release(ns_button) else: Gamepad.dPad(dpad_bits.clear_bit((255 - ns_button))) else: print('Invalid button')
Called when button connected to GPIO pin is released/opened
NSGamepad/Code/gamepad_ns_gpio.py
gpio_released
gdsports/RaspberryPi-Joystick
70
python
def gpio_released(button): ' ' print('released', button.pin) if (button.pin in all_buttons): ns_button = all_buttons[button.pin] if (ns_button < 128): Gamepad.release(ns_button) else: Gamepad.dPad(dpad_bits.clear_bit((255 - ns_button))) else: print('Invalid button')
def gpio_released(button): ' ' print('released', button.pin) if (button.pin in all_buttons): ns_button = all_buttons[button.pin] if (ns_button < 128): Gamepad.release(ns_button) else: Gamepad.dPad(dpad_bits.clear_bit((255 - ns_button))) else: print('Invalid button')<|docstring|>Called when button connected to GPIO pin is released/opened<|endoftext|>
d019913a2d22038d1df68932b2b61088ac5522c50cfbe6ea427f92baa6620dfb
def error(self, message: str) -> None: ' Raises an SnakeParseException with the given message.' raise SnakeParseException(message)
Raises an SnakeParseException with the given message.
src/snakeparse/api.py
error
nh13/snakeparse
47
python
def error(self, message: str) -> None: ' ' raise SnakeParseException(message)
def error(self, message: str) -> None: ' ' raise SnakeParseException(message)<|docstring|>Raises an SnakeParseException with the given message.<|endoftext|>
fcd681592981994a1c295d09227451ebf7308efd9c427ee56c672389dcdef986
@abstractmethod def parse_args(self, args: List[str]) -> Any: 'Parses the command line arguments.'
Parses the command line arguments.
src/snakeparse/api.py
parse_args
nh13/snakeparse
47
python
@abstractmethod def parse_args(self, args: List[str]) -> Any:
@abstractmethod def parse_args(self, args: List[str]) -> Any: <|docstring|>Parses the command line arguments.<|endoftext|>
b76cb9320ee3c6ab551aa1d3661952a7053d231029678e8c10f72d92753bc029
@abstractmethod def parse_args_file(self, args_file: Path) -> Any: 'Parses command line arguments from an arguments file'
Parses command line arguments from an arguments file
src/snakeparse/api.py
parse_args_file
nh13/snakeparse
47
python
@abstractmethod def parse_args_file(self, args_file: Path) -> Any:
@abstractmethod def parse_args_file(self, args_file: Path) -> Any: <|docstring|>Parses command line arguments from an arguments file<|endoftext|>
ba9e5499b80760f22443e37f1b93040cdff1f7b440075dbdcd909f021f83c7e1
def parse_config(self, config: dict) -> Any: 'Parses arguments from a Snakemake config object. It is assumed the\n arguments are contained in an arguments file, whose path is stored in\n the config with key ``SnakeParse.ARGUMENT_FILE_NAME_KEY``.' args_file = config[SnakeParse.ARGUMENT_FILE_NAME_KEY] if (args_file is not None): args_file = Path(config[SnakeParse.ARGUMENT_FILE_NAME_KEY]) return self.parse_args_file(args_file=args_file) else: try: return self.parse_args(['']) except SnakeParseException: return argparse.Namespace()
Parses arguments from a Snakemake config object. It is assumed the arguments are contained in an arguments file, whose path is stored in the config with key ``SnakeParse.ARGUMENT_FILE_NAME_KEY``.
src/snakeparse/api.py
parse_config
nh13/snakeparse
47
python
def parse_config(self, config: dict) -> Any: 'Parses arguments from a Snakemake config object. It is assumed the\n arguments are contained in an arguments file, whose path is stored in\n the config with key ``SnakeParse.ARGUMENT_FILE_NAME_KEY``.' args_file = config[SnakeParse.ARGUMENT_FILE_NAME_KEY] if (args_file is not None): args_file = Path(config[SnakeParse.ARGUMENT_FILE_NAME_KEY]) return self.parse_args_file(args_file=args_file) else: try: return self.parse_args([]) except SnakeParseException: return argparse.Namespace()
def parse_config(self, config: dict) -> Any: 'Parses arguments from a Snakemake config object. It is assumed the\n arguments are contained in an arguments file, whose path is stored in\n the config with key ``SnakeParse.ARGUMENT_FILE_NAME_KEY``.' args_file = config[SnakeParse.ARGUMENT_FILE_NAME_KEY] if (args_file is not None): args_file = Path(config[SnakeParse.ARGUMENT_FILE_NAME_KEY]) return self.parse_args_file(args_file=args_file) else: try: return self.parse_args([]) except SnakeParseException: return argparse.Namespace()<|docstring|>Parses arguments from a Snakemake config object. It is assumed the arguments are contained in an arguments file, whose path is stored in the config with key ``SnakeParse.ARGUMENT_FILE_NAME_KEY``.<|endoftext|>
3af58ec9c714f8d7ca51209e5b41792104bb1ad906fa59a2e9be63e635e641f2
@abstractmethod def print_help(self, file: Optional[IO[str]]=None) -> None: 'Prints the help message'
Prints the help message
src/snakeparse/api.py
print_help
nh13/snakeparse
47
python
@abstractmethod def print_help(self, file: Optional[IO[str]]=None) -> None:
@abstractmethod def print_help(self, file: Optional[IO[str]]=None) -> None: <|docstring|>Prints the help message<|endoftext|>
cff24f17a0d9e82bec98f8992108fb54d67e4c508ef635451919e053a4c461c5
@property def group(self) -> Optional[str]: 'The name of the workflow group to which this group belongs.' return self._group
The name of the workflow group to which this group belongs.
src/snakeparse/api.py
group
nh13/snakeparse
47
python
@property def group(self) -> Optional[str]: return self._group
@property def group(self) -> Optional[str]: return self._group<|docstring|>The name of the workflow group to which this group belongs.<|endoftext|>
9a042c8966465dd95e1b15250ccbf5ad80be3b2d806f4336147922fdc8909616
@property def description(self) -> Optional[str]: 'A short description of the workflow, used when listing the\n workflows.\n ' return self._description
A short description of the workflow, used when listing the workflows.
src/snakeparse/api.py
description
nh13/snakeparse
47
python
@property def description(self) -> Optional[str]: 'A short description of the workflow, used when listing the\n workflows.\n ' return self._description
@property def description(self) -> Optional[str]: 'A short description of the workflow, used when listing the\n workflows.\n ' return self._description<|docstring|>A short description of the workflow, used when listing the workflows.<|endoftext|>
099c127e2b4ff7dab90c9a33c576cd35b1b665ed96d295b399c385c5f40b5c94
def parse_args(self, args: List[str]) -> Any: 'Parses the command line arguments.' return self.parser.parse_args(args=args)
Parses the command line arguments.
src/snakeparse/api.py
parse_args
nh13/snakeparse
47
python
def parse_args(self, args: List[str]) -> Any: return self.parser.parse_args(args=args)
def parse_args(self, args: List[str]) -> Any: return self.parser.parse_args(args=args)<|docstring|>Parses the command line arguments.<|endoftext|>
fe1465269975df9e8e6c0198e3bf47b42f3973b0d6596d5c6b46a445323a67f5
def parse_args_file(self, args_file: Path) -> Any: 'Parses command line arguments from an arguments file' return self.parse_args(args=[('@' + str(args_file))])
Parses command line arguments from an arguments file
src/snakeparse/api.py
parse_args_file
nh13/snakeparse
47
python
def parse_args_file(self, args_file: Path) -> Any: return self.parse_args(args=[('@' + str(args_file))])
def parse_args_file(self, args_file: Path) -> Any: return self.parse_args(args=[('@' + str(args_file))])<|docstring|>Parses command line arguments from an arguments file<|endoftext|>
e8a6980d8b9e2825e57a2ca07dc73a7eb7ebfa3755591754e8c31556067541f8
def print_help(self, file: Optional[IO[str]]=None) -> None: 'Prints the help message' self.parser.print_help(suppress=False, file=file)
Prints the help message
src/snakeparse/api.py
print_help
nh13/snakeparse
47
python
def print_help(self, file: Optional[IO[str]]=None) -> None: self.parser.print_help(suppress=False, file=file)
def print_help(self, file: Optional[IO[str]]=None) -> None: self.parser.print_help(suppress=False, file=file)<|docstring|>Prints the help message<|endoftext|>
e0b78f6f386b80d792595db83ef5f7d98fe853985da0d5cb5394d392245c9997
def add_workflow(self, workflow: SnakeParseWorkflow) -> 'SnakeParseWorkflow': 'Adds the workflow to the list of workflows. A workflow with the same\n name should not exist.' if (workflow.name in self.workflows): raise SnakeParseException(f"Multiple workflows with name '{workflow.name}'.") self.workflows[workflow.name] = workflow return workflow
Adds the workflow to the list of workflows. A workflow with the same name should not exist.
src/snakeparse/api.py
add_workflow
nh13/snakeparse
47
python
def add_workflow(self, workflow: SnakeParseWorkflow) -> 'SnakeParseWorkflow': 'Adds the workflow to the list of workflows. A workflow with the same\n name should not exist.' if (workflow.name in self.workflows): raise SnakeParseException(f"Multiple workflows with name '{workflow.name}'.") self.workflows[workflow.name] = workflow return workflow
def add_workflow(self, workflow: SnakeParseWorkflow) -> 'SnakeParseWorkflow': 'Adds the workflow to the list of workflows. A workflow with the same\n name should not exist.' if (workflow.name in self.workflows): raise SnakeParseException(f"Multiple workflows with name '{workflow.name}'.") self.workflows[workflow.name] = workflow return workflow<|docstring|>Adds the workflow to the list of workflows. A workflow with the same name should not exist.<|endoftext|>
eaa8a6b7caef094ed5d8a09731d05ebc5b28af442f1398685724c2afc07dc744
def add_snakefile(self, snakefile: Path) -> 'SnakeParseWorkflow': 'Adds a new workflow with the given snakefile. A workflow with the\n same name should not exist.' name = snakefile.with_suffix('').name if (self.name_transform is not None): name = self.name_transform(name) snakefile = snakefile group = (snakefile.parent.name if self.parent_dir_is_group_name else None) description = None if (name in self.workflows): raise SnakeParseException(f"Multiple workflows with name '{name}'.") workflow = SnakeParseWorkflow(name=name, snakefile=snakefile, group=group, description=description) return self.add_workflow(workflow=workflow)
Adds a new workflow with the given snakefile. A workflow with the same name should not exist.
src/snakeparse/api.py
add_snakefile
nh13/snakeparse
47
python
def add_snakefile(self, snakefile: Path) -> 'SnakeParseWorkflow': 'Adds a new workflow with the given snakefile. A workflow with the\n same name should not exist.' name = snakefile.with_suffix().name if (self.name_transform is not None): name = self.name_transform(name) snakefile = snakefile group = (snakefile.parent.name if self.parent_dir_is_group_name else None) description = None if (name in self.workflows): raise SnakeParseException(f"Multiple workflows with name '{name}'.") workflow = SnakeParseWorkflow(name=name, snakefile=snakefile, group=group, description=description) return self.add_workflow(workflow=workflow)
def add_snakefile(self, snakefile: Path) -> 'SnakeParseWorkflow': 'Adds a new workflow with the given snakefile. A workflow with the\n same name should not exist.' name = snakefile.with_suffix().name if (self.name_transform is not None): name = self.name_transform(name) snakefile = snakefile group = (snakefile.parent.name if self.parent_dir_is_group_name else None) description = None if (name in self.workflows): raise SnakeParseException(f"Multiple workflows with name '{name}'.") workflow = SnakeParseWorkflow(name=name, snakefile=snakefile, group=group, description=description) return self.add_workflow(workflow=workflow)<|docstring|>Adds a new workflow with the given snakefile. A workflow with the same name should not exist.<|endoftext|>
8671695cba3c12769fb376e087fcc03a58477eed45b933bfc226244d13560bcf
def add_group(self, name: str, description: str, strict: bool=True) -> 'SnakeParseConfig': 'Adds a new group with the given name and description. If strict is\n ``True``, then no group with the same name should already exist. ' if (strict and (name in self.groups)): raise SnakeParseException(f"Group '{name}' already defined") self.groups[name] = description return self
Adds a new group with the given name and description. If strict is ``True``, then no group with the same name should already exist.
src/snakeparse/api.py
add_group
nh13/snakeparse
47
python
def add_group(self, name: str, description: str, strict: bool=True) -> 'SnakeParseConfig': 'Adds a new group with the given name and description. If strict is\n ``True``, then no group with the same name should already exist. ' if (strict and (name in self.groups)): raise SnakeParseException(f"Group '{name}' already defined") self.groups[name] = description return self
def add_group(self, name: str, description: str, strict: bool=True) -> 'SnakeParseConfig': 'Adds a new group with the given name and description. If strict is\n ``True``, then no group with the same name should already exist. ' if (strict and (name in self.groups)): raise SnakeParseException(f"Group '{name}' already defined") self.groups[name] = description return self<|docstring|>Adds a new group with the given name and description. If strict is ``True``, then no group with the same name should already exist.<|endoftext|>
001a88e781b5acf3dd827918a7886d2a5073cc7f86123e8fa2558d144e2138fa
@staticmethod def name_transfrom_from(key: str) -> Callable[([str], str)]: "Returns the built-in method to format the workflow's name. Should be\n either 'snake_to_camel' or 'camel_to_snake' for converting from Snake case\n to Camel case, or vice versa.\n " if (key is None): return None elif (key == 'snake_to_camel'): return SnakeParseConfig._snake_to_camel elif (key == 'camel_to_snake'): return SnakeParseConfig._camel_to_snake else: raise SnakeParseException(f"Unknown 'name_transform': {key}")
Returns the built-in method to format the workflow's name. Should be either 'snake_to_camel' or 'camel_to_snake' for converting from Snake case to Camel case, or vice versa.
src/snakeparse/api.py
name_transfrom_from
nh13/snakeparse
47
python
@staticmethod def name_transfrom_from(key: str) -> Callable[([str], str)]: "Returns the built-in method to format the workflow's name. Should be\n either 'snake_to_camel' or 'camel_to_snake' for converting from Snake case\n to Camel case, or vice versa.\n " if (key is None): return None elif (key == 'snake_to_camel'): return SnakeParseConfig._snake_to_camel elif (key == 'camel_to_snake'): return SnakeParseConfig._camel_to_snake else: raise SnakeParseException(f"Unknown 'name_transform': {key}")
@staticmethod def name_transfrom_from(key: str) -> Callable[([str], str)]: "Returns the built-in method to format the workflow's name. Should be\n either 'snake_to_camel' or 'camel_to_snake' for converting from Snake case\n to Camel case, or vice versa.\n " if (key is None): return None elif (key == 'snake_to_camel'): return SnakeParseConfig._snake_to_camel elif (key == 'camel_to_snake'): return SnakeParseConfig._camel_to_snake else: raise SnakeParseException(f"Unknown 'name_transform': {key}")<|docstring|>Returns the built-in method to format the workflow's name. Should be either 'snake_to_camel' or 'camel_to_snake' for converting from Snake case to Camel case, or vice versa.<|endoftext|>
2b240dd16d41a4bf4b3069ecd0f4cae10a2441f1126989a0759868fc969fc7a3
@staticmethod def _snake_to_camel(snake_str: str) -> str: 'Converts a string in Snake case to Camel case.' return ''.join([s.title() for s in snake_str.split('_')])
Converts a string in Snake case to Camel case.
src/snakeparse/api.py
_snake_to_camel
nh13/snakeparse
47
python
@staticmethod def _snake_to_camel(snake_str: str) -> str: return .join([s.title() for s in snake_str.split('_')])
@staticmethod def _snake_to_camel(snake_str: str) -> str: return .join([s.title() for s in snake_str.split('_')])<|docstring|>Converts a string in Snake case to Camel case.<|endoftext|>
82bc97b63800ba1a4ac0d1e510734f1f8d66f4891ce41a47640f66e88ca37d91
@staticmethod def _camel_to_snake(camel_str: str) -> str: 'Converts a string in Camel case to Snake case.' if (not camel_str): return camel_str first_char = camel_str[0].lower() return (first_char + ''.join([(('_' + c.lower()) if c.isupper() else c) for c in camel_str[1:]]))
Converts a string in Camel case to Snake case.
src/snakeparse/api.py
_camel_to_snake
nh13/snakeparse
47
python
@staticmethod def _camel_to_snake(camel_str: str) -> str: if (not camel_str): return camel_str first_char = camel_str[0].lower() return (first_char + .join([(('_' + c.lower()) if c.isupper() else c) for c in camel_str[1:]]))
@staticmethod def _camel_to_snake(camel_str: str) -> str: if (not camel_str): return camel_str first_char = camel_str[0].lower() return (first_char + .join([(('_' + c.lower()) if c.isupper() else c) for c in camel_str[1:]]))<|docstring|>Converts a string in Camel case to Snake case.<|endoftext|>
18f7d1aa8df698119e54742666f57c33a274f1b3a36d86c3f208b6d4a17260ee
@staticmethod def parser_from(workflow: 'SnakeParseWorkflow') -> SnakeParser: 'Builds the SnakeParser for the given workflow' parent_module_name = str(workflow.snakefile.resolve().parent) sys.path.insert(0, parent_module_name) exec_exception = None from snakemake.workflow import Workflow snakefile = str(workflow.snakefile) global config config = dict([(SnakeParse.ARGUMENT_FILE_NAME_KEY, None)]) globals_copy = dict(globals()) globals_copy['config'] = dict([(SnakeParse.ARGUMENT_FILE_NAME_KEY, None)]) globals_copy['workflow'] = Workflow(snakefile=snakefile) (code, linemap, rulecount) = snakemake_parser.parse(snakefile) code = compile(code, snakefile, 'exec') try: exec(code, globals_copy) except Exception as e: exec_exception = SnakeParseException(f'Could not compile {snakefile}', e) def classes_predicate(obj: Any) -> bool: return (inspect.isclass(obj) and (not inspect.isabstract(obj)) and (SnakeParser in inspect.getmro(obj))) def methods_predicate(key: str, obj: Any) -> bool: return ((key == 'snakeparser') and inspect.isfunction(obj)) classes = [obj for (key, obj) in globals_copy.items() if classes_predicate(obj)] methods = [obj for (key, obj) in globals_copy.items() if methods_predicate(key, obj)] if ((len(classes) + len(methods)) == 0): raise SnakeParseException(f'Could not find either a concrete subclass of SnakeParser or a method named "snakeparser" in {workflow.snakefile}', exec_exception) elif ((len(classes) + len(methods)) > 1): raise SnakeParseException(f'Found {len(classes)} concrete subclasses of SnakeParser and {len(methods)} methods named snakeparser in {workflow.snakefile}', exec_exception) elif ((len(classes) == 1) and (len(methods) == 0)): parser_class = classes[0] if issubclass(parser_class, SnakeArgumentParser): return parser_class(usage=argparse.SUPPRESS) else: return parser_class() else: assert ((len(classes) == 0) and (len(methods) == 1)), f'Bug: {len(classes)} != 0 and {len(methods)} != 1' parser_method = methods[0] parser = parser_method() if 
issubclass(parser.__class__, SnakeArgumentParser): return parser_method(usage=argparse.SUPPRESS) else: return parser
Builds the SnakeParser for the given workflow
src/snakeparse/api.py
parser_from
nh13/snakeparse
47
python
@staticmethod def parser_from(workflow: 'SnakeParseWorkflow') -> SnakeParser: parent_module_name = str(workflow.snakefile.resolve().parent) sys.path.insert(0, parent_module_name) exec_exception = None from snakemake.workflow import Workflow snakefile = str(workflow.snakefile) global config config = dict([(SnakeParse.ARGUMENT_FILE_NAME_KEY, None)]) globals_copy = dict(globals()) globals_copy['config'] = dict([(SnakeParse.ARGUMENT_FILE_NAME_KEY, None)]) globals_copy['workflow'] = Workflow(snakefile=snakefile) (code, linemap, rulecount) = snakemake_parser.parse(snakefile) code = compile(code, snakefile, 'exec') try: exec(code, globals_copy) except Exception as e: exec_exception = SnakeParseException(f'Could not compile {snakefile}', e) def classes_predicate(obj: Any) -> bool: return (inspect.isclass(obj) and (not inspect.isabstract(obj)) and (SnakeParser in inspect.getmro(obj))) def methods_predicate(key: str, obj: Any) -> bool: return ((key == 'snakeparser') and inspect.isfunction(obj)) classes = [obj for (key, obj) in globals_copy.items() if classes_predicate(obj)] methods = [obj for (key, obj) in globals_copy.items() if methods_predicate(key, obj)] if ((len(classes) + len(methods)) == 0): raise SnakeParseException(f'Could not find either a concrete subclass of SnakeParser or a method named "snakeparser" in {workflow.snakefile}', exec_exception) elif ((len(classes) + len(methods)) > 1): raise SnakeParseException(f'Found {len(classes)} concrete subclasses of SnakeParser and {len(methods)} methods named snakeparser in {workflow.snakefile}', exec_exception) elif ((len(classes) == 1) and (len(methods) == 0)): parser_class = classes[0] if issubclass(parser_class, SnakeArgumentParser): return parser_class(usage=argparse.SUPPRESS) else: return parser_class() else: assert ((len(classes) == 0) and (len(methods) == 1)), f'Bug: {len(classes)} != 0 and {len(methods)} != 1' parser_method = methods[0] parser = parser_method() if issubclass(parser.__class__, SnakeArgumentParser): 
return parser_method(usage=argparse.SUPPRESS) else: return parser
@staticmethod def parser_from(workflow: 'SnakeParseWorkflow') -> SnakeParser: parent_module_name = str(workflow.snakefile.resolve().parent) sys.path.insert(0, parent_module_name) exec_exception = None from snakemake.workflow import Workflow snakefile = str(workflow.snakefile) global config config = dict([(SnakeParse.ARGUMENT_FILE_NAME_KEY, None)]) globals_copy = dict(globals()) globals_copy['config'] = dict([(SnakeParse.ARGUMENT_FILE_NAME_KEY, None)]) globals_copy['workflow'] = Workflow(snakefile=snakefile) (code, linemap, rulecount) = snakemake_parser.parse(snakefile) code = compile(code, snakefile, 'exec') try: exec(code, globals_copy) except Exception as e: exec_exception = SnakeParseException(f'Could not compile {snakefile}', e) def classes_predicate(obj: Any) -> bool: return (inspect.isclass(obj) and (not inspect.isabstract(obj)) and (SnakeParser in inspect.getmro(obj))) def methods_predicate(key: str, obj: Any) -> bool: return ((key == 'snakeparser') and inspect.isfunction(obj)) classes = [obj for (key, obj) in globals_copy.items() if classes_predicate(obj)] methods = [obj for (key, obj) in globals_copy.items() if methods_predicate(key, obj)] if ((len(classes) + len(methods)) == 0): raise SnakeParseException(f'Could not find either a concrete subclass of SnakeParser or a method named "snakeparser" in {workflow.snakefile}', exec_exception) elif ((len(classes) + len(methods)) > 1): raise SnakeParseException(f'Found {len(classes)} concrete subclasses of SnakeParser and {len(methods)} methods named snakeparser in {workflow.snakefile}', exec_exception) elif ((len(classes) == 1) and (len(methods) == 0)): parser_class = classes[0] if issubclass(parser_class, SnakeArgumentParser): return parser_class(usage=argparse.SUPPRESS) else: return parser_class() else: assert ((len(classes) == 0) and (len(methods) == 1)), f'Bug: {len(classes)} != 0 and {len(methods)} != 1' parser_method = methods[0] parser = parser_method() if issubclass(parser.__class__, SnakeArgumentParser): 
return parser_method(usage=argparse.SUPPRESS) else: return parser<|docstring|>Builds the SnakeParser for the given workflow<|endoftext|>
b6dde4b09ac38818415161e85bbfaf8c2d0eda07e3c8b9666610acd8eef48be8
@staticmethod def config_parser(usage: str=argparse.SUPPRESS) -> argparse.ArgumentParser: 'Returns an :class:`~argparse.ArgumentParser` for the configuration options' class _ConfigParser(_ArgumentParser): def exit(self, status: int=0, message: str=None) -> None: raise SnakeParseException(message) parser = _ConfigParser(usage=usage, allow_abbrev=False) parser.add_argument('--config', help='The path to the snakeparse configuration file (can be JSON, YAML, or HOCON).', type=Path) parser.add_argument('--snakefile-globs', help='Optionally, or more glob strings specifying where SnakeMake (snakefile) files can be found', nargs='*', default=[]) parser.add_argument('--prog', help='The name of the tool-chain to use ont the command-line', default='snakeparse') parser.add_argument('--snakemake', help='The path to the snakemake executable, otherwise it should be on the system path', type=Path) parser.add_argument('--name-transform', help='Transform the name of the workflow from Snake case to Camel case("snake_to_camel") or vice versa ("camel_to_snake")', default='snake_to_camel') parser.add_argument('--parent-dir-is-group-name', help='In the last resort if no group name is found, use the name of the parent directory of the snakefile as the group name', type=bool, default=False) parser.add_argument('--extra-help', help='Produce help with extra debugging information', type=bool, default=False) return parser
Returns an :class:`~argparse.ArgumentParser` for the configuration options
src/snakeparse/api.py
config_parser
nh13/snakeparse
47
python
@staticmethod def config_parser(usage: str=argparse.SUPPRESS) -> argparse.ArgumentParser: class _ConfigParser(_ArgumentParser): def exit(self, status: int=0, message: str=None) -> None: raise SnakeParseException(message) parser = _ConfigParser(usage=usage, allow_abbrev=False) parser.add_argument('--config', help='The path to the snakeparse configuration file (can be JSON, YAML, or HOCON).', type=Path) parser.add_argument('--snakefile-globs', help='Optionally, or more glob strings specifying where SnakeMake (snakefile) files can be found', nargs='*', default=[]) parser.add_argument('--prog', help='The name of the tool-chain to use ont the command-line', default='snakeparse') parser.add_argument('--snakemake', help='The path to the snakemake executable, otherwise it should be on the system path', type=Path) parser.add_argument('--name-transform', help='Transform the name of the workflow from Snake case to Camel case("snake_to_camel") or vice versa ("camel_to_snake")', default='snake_to_camel') parser.add_argument('--parent-dir-is-group-name', help='In the last resort if no group name is found, use the name of the parent directory of the snakefile as the group name', type=bool, default=False) parser.add_argument('--extra-help', help='Produce help with extra debugging information', type=bool, default=False) return parser
@staticmethod def config_parser(usage: str=argparse.SUPPRESS) -> argparse.ArgumentParser: class _ConfigParser(_ArgumentParser): def exit(self, status: int=0, message: str=None) -> None: raise SnakeParseException(message) parser = _ConfigParser(usage=usage, allow_abbrev=False) parser.add_argument('--config', help='The path to the snakeparse configuration file (can be JSON, YAML, or HOCON).', type=Path) parser.add_argument('--snakefile-globs', help='Optionally, or more glob strings specifying where SnakeMake (snakefile) files can be found', nargs='*', default=[]) parser.add_argument('--prog', help='The name of the tool-chain to use ont the command-line', default='snakeparse') parser.add_argument('--snakemake', help='The path to the snakemake executable, otherwise it should be on the system path', type=Path) parser.add_argument('--name-transform', help='Transform the name of the workflow from Snake case to Camel case("snake_to_camel") or vice versa ("camel_to_snake")', default='snake_to_camel') parser.add_argument('--parent-dir-is-group-name', help='In the last resort if no group name is found, use the name of the parent directory of the snakefile as the group name', type=bool, default=False) parser.add_argument('--extra-help', help='Produce help with extra debugging information', type=bool, default=False) return parser<|docstring|>Returns an :class:`~argparse.ArgumentParser` for the configuration options<|endoftext|>
c7f419f871a3a6754d6794bcd28920f9d91a7a75808e196a941aef98bdadaf80
def run(self) -> None: 'Execute the Snakemake workflow' snakemake = (self.config.snakemake if self.config.snakemake else 'snakemake') retcode = subprocess.call(([str(snakemake)] + self.snakemake_args)) self.snakeparse_args_file.unlink() sys.exit(retcode)
Execute the Snakemake workflow
src/snakeparse/api.py
run
nh13/snakeparse
47
python
def run(self) -> None: snakemake = (self.config.snakemake if self.config.snakemake else 'snakemake') retcode = subprocess.call(([str(snakemake)] + self.snakemake_args)) self.snakeparse_args_file.unlink() sys.exit(retcode)
def run(self) -> None: snakemake = (self.config.snakemake if self.config.snakemake else 'snakemake') retcode = subprocess.call(([str(snakemake)] + self.snakemake_args)) self.snakeparse_args_file.unlink() sys.exit(retcode)<|docstring|>Execute the Snakemake workflow<|endoftext|>
00570cfc7db567d8bc3eba7497d6bac2113990d279e8a817c68350bd1b1c57b7
def _parse_workflow_args(self, workflow: 'SnakeParseWorkflow', args_file: Path) -> None: 'Dynamically loads the module containing the workflow parser and\n attempts to parse the arguments in the given arguments file.\n\n The module must have a single concrete class implementing SnakeParser.\n ' parser = self.config.parser_from(workflow=workflow) try: parser.parse_args_file(args_file=args_file) except SnakeParseException as e: self._print_workflow_help(workflow=workflow, parser=parser, message=str(e)) except SystemExit: self._print_workflow_help(workflow=workflow, parser=parser, message=None)
Dynamically loads the module containing the workflow parser and attempts to parse the arguments in the given arguments file. The module must have a single concrete class implementing SnakeParser.
src/snakeparse/api.py
_parse_workflow_args
nh13/snakeparse
47
python
def _parse_workflow_args(self, workflow: 'SnakeParseWorkflow', args_file: Path) -> None: 'Dynamically loads the module containing the workflow parser and\n attempts to parse the arguments in the given arguments file.\n\n The module must have a single concrete class implementing SnakeParser.\n ' parser = self.config.parser_from(workflow=workflow) try: parser.parse_args_file(args_file=args_file) except SnakeParseException as e: self._print_workflow_help(workflow=workflow, parser=parser, message=str(e)) except SystemExit: self._print_workflow_help(workflow=workflow, parser=parser, message=None)
def _parse_workflow_args(self, workflow: 'SnakeParseWorkflow', args_file: Path) -> None: 'Dynamically loads the module containing the workflow parser and\n attempts to parse the arguments in the given arguments file.\n\n The module must have a single concrete class implementing SnakeParser.\n ' parser = self.config.parser_from(workflow=workflow) try: parser.parse_args_file(args_file=args_file) except SnakeParseException as e: self._print_workflow_help(workflow=workflow, parser=parser, message=str(e)) except SystemExit: self._print_workflow_help(workflow=workflow, parser=parser, message=None)<|docstring|>Dynamically loads the module containing the workflow parser and attempts to parse the arguments in the given arguments file. The module must have a single concrete class implementing SnakeParser.<|endoftext|>
81572d71f550bcf624c055299056e16372cab9db7810d61fdf5de0d3ad74a128
def _print_workflow_help(self, workflow: 'SnakeParseWorkflow', parser: 'SnakeParser', message: Optional[str]=None) -> None: 'Prints the help message with all available workflows and the workflow\n specific help.' self._usage(exit=False) self.file.write(f''' {workflow.name} Arguments: ''') self._print_line() self.file.write('\n') parser.print_help(file=self.file) if message: self.file.write(f''' error: {message}''') self.file.write('\n') sys.exit(2)
Prints the help message with all available workflows and the workflow specific help.
src/snakeparse/api.py
_print_workflow_help
nh13/snakeparse
47
python
def _print_workflow_help(self, workflow: 'SnakeParseWorkflow', parser: 'SnakeParser', message: Optional[str]=None) -> None: 'Prints the help message with all available workflows and the workflow\n specific help.' self._usage(exit=False) self.file.write(f' {workflow.name} Arguments: ') self._print_line() self.file.write('\n') parser.print_help(file=self.file) if message: self.file.write(f' error: {message}') self.file.write('\n') sys.exit(2)
def _print_workflow_help(self, workflow: 'SnakeParseWorkflow', parser: 'SnakeParser', message: Optional[str]=None) -> None: 'Prints the help message with all available workflows and the workflow\n specific help.' self._usage(exit=False) self.file.write(f' {workflow.name} Arguments: ') self._print_line() self.file.write('\n') parser.print_help(file=self.file) if message: self.file.write(f' error: {message}') self.file.write('\n') sys.exit(2)<|docstring|>Prints the help message with all available workflows and the workflow specific help.<|endoftext|>
3a1412927a166dc69ad58e6a9c6b8e70db769fcf5b954a8f36f4cea9c065d45e
@staticmethod def _parse_known_args(parser: argparse.ArgumentParser, args: Sequence[str]) -> Tuple[(int, argparse.Namespace)]: 'Parses the args with the given parsers until an unknown argument is\n encountered.' if (not args): (namespace, remaining) = parser.parse_known_args(args=args) return (1, namespace) namespace = argparse.Namespace() end = 1 while (end <= len(args)): try: (namespace, remaining) = parser.parse_known_args(args=args[:end]) if remaining: return ((end - 1), namespace) except SnakeParseException: pass end += 1 return (end, namespace)
Parses the args with the given parsers until an unknown argument is encountered.
src/snakeparse/api.py
_parse_known_args
nh13/snakeparse
47
python
@staticmethod def _parse_known_args(parser: argparse.ArgumentParser, args: Sequence[str]) -> Tuple[(int, argparse.Namespace)]: 'Parses the args with the given parsers until an unknown argument is\n encountered.' if (not args): (namespace, remaining) = parser.parse_known_args(args=args) return (1, namespace) namespace = argparse.Namespace() end = 1 while (end <= len(args)): try: (namespace, remaining) = parser.parse_known_args(args=args[:end]) if remaining: return ((end - 1), namespace) except SnakeParseException: pass end += 1 return (end, namespace)
@staticmethod def _parse_known_args(parser: argparse.ArgumentParser, args: Sequence[str]) -> Tuple[(int, argparse.Namespace)]: 'Parses the args with the given parsers until an unknown argument is\n encountered.' if (not args): (namespace, remaining) = parser.parse_known_args(args=args) return (1, namespace) namespace = argparse.Namespace() end = 1 while (end <= len(args)): try: (namespace, remaining) = parser.parse_known_args(args=args[:end]) if remaining: return ((end - 1), namespace) except SnakeParseException: pass end += 1 return (end, namespace)<|docstring|>Parses the args with the given parsers until an unknown argument is encountered.<|endoftext|>
6177a944de600c869f96e966eacd8403f85b7c0fea94c56b31000d9b709cbe71
@staticmethod def usage_short(prog: str='snakeparse', workflow_name: str=None) -> str: 'A one line usage to display at the top of any usage or help message.' if (workflow_name is None): workflow_name = '[workflow name]' return f'{prog} [snakeparse options] [snakemake options] {workflow_name} [workflow options]'.lstrip(' ')
A one line usage to display at the top of any usage or help message.
src/snakeparse/api.py
usage_short
nh13/snakeparse
47
python
@staticmethod def usage_short(prog: str='snakeparse', workflow_name: str=None) -> str: if (workflow_name is None): workflow_name = '[workflow name]' return f'{prog} [snakeparse options] [snakemake options] {workflow_name} [workflow options]'.lstrip(' ')
@staticmethod def usage_short(prog: str='snakeparse', workflow_name: str=None) -> str: if (workflow_name is None): workflow_name = '[workflow name]' return f'{prog} [snakeparse options] [snakemake options] {workflow_name} [workflow options]'.lstrip(' ')<|docstring|>A one line usage to display at the top of any usage or help message.<|endoftext|>
da6939b99b3a1d2aaa3f28bc2f5b88c94d827afa5fd8c44e6215e14679352a84
def _usage(self, message: Optional[str]=None, exit: bool=True) -> None: 'The long usage that lists all the available workflows.' terminal_size = shutil.get_terminal_size(fallback=(80, 24)) group_name_columns = 38 group_description_columns = (terminal_size.columns - group_name_columns) workflow_name_columns = (group_name_columns - 3) workflow_description_columns = (group_description_columns - 1) self.file.write((('Usage: ' + self._usage_short()) + '\n')) self.file.write(f'''Version: {__version__} ''') if self._config_usage: self.file.write('\n') SnakeParseConfig.config_parser().print_help(file=self.file, suppress=False) if self.config.workflows: self.file.write('\nAvailable Workflows:\n') self._print_line() groups = OrderedDict(self.config.groups) for wf in self.config.workflows.values(): if (wf.group not in groups): groups[wf.group] = None for (group, desc) in groups.items(): name = (('Worfklows' if (group is None) else group) + ':') desc = ('' if (desc is None) else desc) self.file.write(f'''{name:<{group_name_columns}}{desc:<{group_description_columns}} ''') for wf in self.config.workflows.values(): if (wf.group != group): continue desc = (str(wf.snakefile) if (wf.description is None) else wf.description) self.file.write(f''' {wf.name:<{workflow_name_columns}}{desc:<{workflow_description_columns}} ''') if self.debug: self.file.write(f''' snakefile: {wf.snakefile} ''') self._print_line() else: self.file.write('\nNo workflows configured.\n') self._print_line() if (message is not None): self.file.write(f''' {message} ''') if exit: sys.exit(2)
The long usage that lists all the available workflows.
src/snakeparse/api.py
_usage
nh13/snakeparse
47
python
def _usage(self, message: Optional[str]=None, exit: bool=True) -> None: terminal_size = shutil.get_terminal_size(fallback=(80, 24)) group_name_columns = 38 group_description_columns = (terminal_size.columns - group_name_columns) workflow_name_columns = (group_name_columns - 3) workflow_description_columns = (group_description_columns - 1) self.file.write((('Usage: ' + self._usage_short()) + '\n')) self.file.write(f'Version: {__version__} ') if self._config_usage: self.file.write('\n') SnakeParseConfig.config_parser().print_help(file=self.file, suppress=False) if self.config.workflows: self.file.write('\nAvailable Workflows:\n') self._print_line() groups = OrderedDict(self.config.groups) for wf in self.config.workflows.values(): if (wf.group not in groups): groups[wf.group] = None for (group, desc) in groups.items(): name = (('Worfklows' if (group is None) else group) + ':') desc = ( if (desc is None) else desc) self.file.write(f'{name:<{group_name_columns}}{desc:<{group_description_columns}} ') for wf in self.config.workflows.values(): if (wf.group != group): continue desc = (str(wf.snakefile) if (wf.description is None) else wf.description) self.file.write(f' {wf.name:<{workflow_name_columns}}{desc:<{workflow_description_columns}} ') if self.debug: self.file.write(f' snakefile: {wf.snakefile} ') self._print_line() else: self.file.write('\nNo workflows configured.\n') self._print_line() if (message is not None): self.file.write(f' {message} ') if exit: sys.exit(2)
def _usage(self, message: Optional[str]=None, exit: bool=True) -> None: terminal_size = shutil.get_terminal_size(fallback=(80, 24)) group_name_columns = 38 group_description_columns = (terminal_size.columns - group_name_columns) workflow_name_columns = (group_name_columns - 3) workflow_description_columns = (group_description_columns - 1) self.file.write((('Usage: ' + self._usage_short()) + '\n')) self.file.write(f'Version: {__version__} ') if self._config_usage: self.file.write('\n') SnakeParseConfig.config_parser().print_help(file=self.file, suppress=False) if self.config.workflows: self.file.write('\nAvailable Workflows:\n') self._print_line() groups = OrderedDict(self.config.groups) for wf in self.config.workflows.values(): if (wf.group not in groups): groups[wf.group] = None for (group, desc) in groups.items(): name = (('Worfklows' if (group is None) else group) + ':') desc = ( if (desc is None) else desc) self.file.write(f'{name:<{group_name_columns}}{desc:<{group_description_columns}} ') for wf in self.config.workflows.values(): if (wf.group != group): continue desc = (str(wf.snakefile) if (wf.description is None) else wf.description) self.file.write(f' {wf.name:<{workflow_name_columns}}{desc:<{workflow_description_columns}} ') if self.debug: self.file.write(f' snakefile: {wf.snakefile} ') self._print_line() else: self.file.write('\nNo workflows configured.\n') self._print_line() if (message is not None): self.file.write(f' {message} ') if exit: sys.exit(2)<|docstring|>The long usage that lists all the available workflows.<|endoftext|>
c19045eb9832add32f16ed9f7321b2cfa6e60f6131e1bbbe5fc940c180c0c280
def makesocket(path): 'Create a socket file, return True successfully, fail to return False.' if (not os.path.exists(path)): try: sock = socket.socket(socket.AF_UNIX) sock.bind(path) except Exception as e: return False return True return False
Create a socket file; return True on success, False on failure.
py/makesocket.py
makesocket
rgb-24bit/scripts
0
python
def makesocket(path): if (not os.path.exists(path)): try: sock = socket.socket(socket.AF_UNIX) sock.bind(path) except Exception as e: return False return True return False
def makesocket(path): if (not os.path.exists(path)): try: sock = socket.socket(socket.AF_UNIX) sock.bind(path) except Exception as e: return False return True return False<|docstring|>Create a socket file, return True successfully, fail to return False.<|endoftext|>
02ce0855768f2e857952c5def51151dd0e0a0468e836c5e2bb2a48c3e6685075
def parse_args(): 'Command line arguments parsing.' parser = argparse.ArgumentParser(prog='makesocket', description=DESCRIPTION) parser.add_argument('-v', '--version', action='version', version=('%(prog)s ' + VERSION)) parser.add_argument('-p', '--path', action='store', dest='path', type=str, default=None, help='Specify the path to the socket file to be created') return parser.parse_args()
Command line arguments parsing.
py/makesocket.py
parse_args
rgb-24bit/scripts
0
python
def parse_args(): parser = argparse.ArgumentParser(prog='makesocket', description=DESCRIPTION) parser.add_argument('-v', '--version', action='version', version=('%(prog)s ' + VERSION)) parser.add_argument('-p', '--path', action='store', dest='path', type=str, default=None, help='Specify the path to the socket file to be created') return parser.parse_args()
def parse_args(): parser = argparse.ArgumentParser(prog='makesocket', description=DESCRIPTION) parser.add_argument('-v', '--version', action='version', version=('%(prog)s ' + VERSION)) parser.add_argument('-p', '--path', action='store', dest='path', type=str, default=None, help='Specify the path to the socket file to be created') return parser.parse_args()<|docstring|>Command line arguments parsing.<|endoftext|>
9a83a98d88e3ad73b669582ed998a4a9ced0e5a044acdf35fb059b34c4156204
@staticmethod def _to_state_space(tau, dt=0.05): '\n Args:\n tau (float): time constant\n dt (float): discrte time\n Returns:\n A (numpy.ndarray): discrete A matrix \n B (numpy.ndarray): discrete B matrix \n ' Ac = np.array([[((- 1.0) / tau), 0.0, 0.0, 0.0], [0.0, ((- 1.0) / tau), 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]) Bc = np.array([[(1.0 / tau), 0.0], [0.0, (1.0 / tau)], [0.0, 0.0], [0.0, 0.0]]) A = scipy.linalg.expm((dt * Ac)) B = np.zeros_like(Bc) for m in range(Bc.shape[0]): for n in range(Bc.shape[1]): integrate_fn = (lambda tau: np.matmul(scipy.linalg.expm((Ac * tau)), Bc)[(m, n)]) sol = integrate.quad(integrate_fn, 0, dt) B[(m, n)] = sol[0] return (A, B)
Args: tau (float): time constant dt (float): discrete time Returns: A (numpy.ndarray): discrete A matrix B (numpy.ndarray): discrete B matrix
PythonLinearNonlinearControl/envs/first_order_lag.py
_to_state_space
Geonhee-LEE/PythonLinearNonlinearControl
425
python
@staticmethod def _to_state_space(tau, dt=0.05): '\n Args:\n tau (float): time constant\n dt (float): discrte time\n Returns:\n A (numpy.ndarray): discrete A matrix \n B (numpy.ndarray): discrete B matrix \n ' Ac = np.array([[((- 1.0) / tau), 0.0, 0.0, 0.0], [0.0, ((- 1.0) / tau), 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]) Bc = np.array([[(1.0 / tau), 0.0], [0.0, (1.0 / tau)], [0.0, 0.0], [0.0, 0.0]]) A = scipy.linalg.expm((dt * Ac)) B = np.zeros_like(Bc) for m in range(Bc.shape[0]): for n in range(Bc.shape[1]): integrate_fn = (lambda tau: np.matmul(scipy.linalg.expm((Ac * tau)), Bc)[(m, n)]) sol = integrate.quad(integrate_fn, 0, dt) B[(m, n)] = sol[0] return (A, B)
@staticmethod def _to_state_space(tau, dt=0.05): '\n Args:\n tau (float): time constant\n dt (float): discrte time\n Returns:\n A (numpy.ndarray): discrete A matrix \n B (numpy.ndarray): discrete B matrix \n ' Ac = np.array([[((- 1.0) / tau), 0.0, 0.0, 0.0], [0.0, ((- 1.0) / tau), 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]) Bc = np.array([[(1.0 / tau), 0.0], [0.0, (1.0 / tau)], [0.0, 0.0], [0.0, 0.0]]) A = scipy.linalg.expm((dt * Ac)) B = np.zeros_like(Bc) for m in range(Bc.shape[0]): for n in range(Bc.shape[1]): integrate_fn = (lambda tau: np.matmul(scipy.linalg.expm((Ac * tau)), Bc)[(m, n)]) sol = integrate.quad(integrate_fn, 0, dt) B[(m, n)] = sol[0] return (A, B)<|docstring|>Args: tau (float): time constant dt (float): discrte time Returns: A (numpy.ndarray): discrete A matrix B (numpy.ndarray): discrete B matrix<|endoftext|>
c9aacfda65f300cb3cd90067c2b0693390f3e237755ba46b0488898dce9ac5a4
def reset(self, init_x=None): ' reset state\n Returns:\n init_x (numpy.ndarray): initial state, shape(state_size, ) \n info (dict): information\n ' self.step_count = 0 self.curr_x = np.zeros(self.config['state_size']) if (init_x is not None): self.curr_x = init_x self.g_x = np.array([0.0, 0, (- 2.0), 3.0]) self.history_x = [] self.history_g_x = [] return (self.curr_x, {'goal_state': self.g_x})
reset state Returns: init_x (numpy.ndarray): initial state, shape(state_size, ) info (dict): information
PythonLinearNonlinearControl/envs/first_order_lag.py
reset
Geonhee-LEE/PythonLinearNonlinearControl
425
python
def reset(self, init_x=None): ' reset state\n Returns:\n init_x (numpy.ndarray): initial state, shape(state_size, ) \n info (dict): information\n ' self.step_count = 0 self.curr_x = np.zeros(self.config['state_size']) if (init_x is not None): self.curr_x = init_x self.g_x = np.array([0.0, 0, (- 2.0), 3.0]) self.history_x = [] self.history_g_x = [] return (self.curr_x, {'goal_state': self.g_x})
def reset(self, init_x=None): ' reset state\n Returns:\n init_x (numpy.ndarray): initial state, shape(state_size, ) \n info (dict): information\n ' self.step_count = 0 self.curr_x = np.zeros(self.config['state_size']) if (init_x is not None): self.curr_x = init_x self.g_x = np.array([0.0, 0, (- 2.0), 3.0]) self.history_x = [] self.history_g_x = [] return (self.curr_x, {'goal_state': self.g_x})<|docstring|>reset state Returns: init_x (numpy.ndarray): initial state, shape(state_size, ) info (dict): information<|endoftext|>
dd0ca5b44f6e12f8d78e98e1329d457ff60cc66ab40e327c8eb1631fe5b1243a
def step(self, u): '\n Args:\n u (numpy.ndarray) : input, shape(input_size, )\n Returns:\n next_x (numpy.ndarray): next state, shape(state_size, ) \n cost (float): costs\n done (bool): end the simulation or not\n info (dict): information \n ' u = np.clip(u, self.config['input_lower_bound'], self.config['input_upper_bound']) next_x = (np.matmul(self.A, self.curr_x[(:, np.newaxis)]) + np.matmul(self.B, u[(:, np.newaxis)])) cost = 0 cost = np.sum((u ** 2)) cost += np.sum(((self.curr_x - self.g_x) ** 2)) self.history_x.append(next_x.flatten()) self.history_g_x.append(self.g_x.flatten()) self.curr_x = next_x.flatten() self.step_count += 1 return (next_x.flatten(), cost, (self.step_count > self.config['max_step']), {'goal_state': self.g_x})
Args: u (numpy.ndarray) : input, shape(input_size, ) Returns: next_x (numpy.ndarray): next state, shape(state_size, ) cost (float): costs done (bool): end the simulation or not info (dict): information
PythonLinearNonlinearControl/envs/first_order_lag.py
step
Geonhee-LEE/PythonLinearNonlinearControl
425
python
def step(self, u): '\n Args:\n u (numpy.ndarray) : input, shape(input_size, )\n Returns:\n next_x (numpy.ndarray): next state, shape(state_size, ) \n cost (float): costs\n done (bool): end the simulation or not\n info (dict): information \n ' u = np.clip(u, self.config['input_lower_bound'], self.config['input_upper_bound']) next_x = (np.matmul(self.A, self.curr_x[(:, np.newaxis)]) + np.matmul(self.B, u[(:, np.newaxis)])) cost = 0 cost = np.sum((u ** 2)) cost += np.sum(((self.curr_x - self.g_x) ** 2)) self.history_x.append(next_x.flatten()) self.history_g_x.append(self.g_x.flatten()) self.curr_x = next_x.flatten() self.step_count += 1 return (next_x.flatten(), cost, (self.step_count > self.config['max_step']), {'goal_state': self.g_x})
def step(self, u): '\n Args:\n u (numpy.ndarray) : input, shape(input_size, )\n Returns:\n next_x (numpy.ndarray): next state, shape(state_size, ) \n cost (float): costs\n done (bool): end the simulation or not\n info (dict): information \n ' u = np.clip(u, self.config['input_lower_bound'], self.config['input_upper_bound']) next_x = (np.matmul(self.A, self.curr_x[(:, np.newaxis)]) + np.matmul(self.B, u[(:, np.newaxis)])) cost = 0 cost = np.sum((u ** 2)) cost += np.sum(((self.curr_x - self.g_x) ** 2)) self.history_x.append(next_x.flatten()) self.history_g_x.append(self.g_x.flatten()) self.curr_x = next_x.flatten() self.step_count += 1 return (next_x.flatten(), cost, (self.step_count > self.config['max_step']), {'goal_state': self.g_x})<|docstring|>Args: u (numpy.ndarray) : input, shape(input_size, ) Returns: next_x (numpy.ndarray): next state, shape(state_size, ) cost (float): costs done (bool): end the simulation or not info (dict): information<|endoftext|>
4f8a2f3ddab0ef5970ec6577cffb93b72f11aeb0c64264d55cf46db7efc281f7
def dsa_view(redirect_name=None): 'Decorate djangos-social-auth views. Will check and retrieve backend\n or return HttpResponseServerError if backend is not found.\n\n redirect_name parameter is used to build redirect URL used by backend.\n ' def dec(func): @wraps(func) def wrapper(request, backend, *args, **kwargs): if redirect_name: redirect = reverse(redirect_name, args=(backend,)) else: redirect = request.path request.social_auth_backend = get_backend(backend, request, redirect) if (request.social_auth_backend is None): raise WrongBackend(backend) return func(request, request.social_auth_backend, *args, **kwargs) return wrapper return dec
Decorate djangos-social-auth views. Will check and retrieve backend or return HttpResponseServerError if backend is not found. redirect_name parameter is used to build redirect URL used by backend.
src/social_auth/decorators.py
dsa_view
legalosLOTR/sentry
4
python
def dsa_view(redirect_name=None): 'Decorate djangos-social-auth views. Will check and retrieve backend\n or return HttpResponseServerError if backend is not found.\n\n redirect_name parameter is used to build redirect URL used by backend.\n ' def dec(func): @wraps(func) def wrapper(request, backend, *args, **kwargs): if redirect_name: redirect = reverse(redirect_name, args=(backend,)) else: redirect = request.path request.social_auth_backend = get_backend(backend, request, redirect) if (request.social_auth_backend is None): raise WrongBackend(backend) return func(request, request.social_auth_backend, *args, **kwargs) return wrapper return dec
def dsa_view(redirect_name=None): 'Decorate djangos-social-auth views. Will check and retrieve backend\n or return HttpResponseServerError if backend is not found.\n\n redirect_name parameter is used to build redirect URL used by backend.\n ' def dec(func): @wraps(func) def wrapper(request, backend, *args, **kwargs): if redirect_name: redirect = reverse(redirect_name, args=(backend,)) else: redirect = request.path request.social_auth_backend = get_backend(backend, request, redirect) if (request.social_auth_backend is None): raise WrongBackend(backend) return func(request, request.social_auth_backend, *args, **kwargs) return wrapper return dec<|docstring|>Decorate djangos-social-auth views. Will check and retrieve backend or return HttpResponseServerError if backend is not found. redirect_name parameter is used to build redirect URL used by backend.<|endoftext|>
3565921a6660ed7d652393a4f67ac283ef1ef5e4db75d01564f8ec17759e5ec3
@persist_csv(target=(BASE_RESULTS_DIR + '/vte_whitened_scores.csv'), enabled=True, out_transform=(lambda x: x[2])) @persist_json(target=(BASE_RESULTS_DIR + '/vte_whitened_params.json'), enabled=True, out_transform=(lambda x: x[1])) @persist_pickle(target=(BASE_RESULTS_DIR + '/vte_whitened.pickle'), enabled=True, out_transform=(lambda x: x[0])) def fit_pca_whitened_classifiers(cv=5, n_jobs=(- 1), verbose=False, report=False, random_seed=None): 'Fit classifiers to [non-]undersampled PCA-whitened input data.\n \n .. note:: Spits a lot of ``liblinear`` convergence warnings.\n\n We start with the top 7 columns by univariate ROC AUC for the VTE data.\n We perform a whitening PCA transform of the data and then fit classifiers\n with balanced class weights. Formerly oversampling of the minority class\n was done with the use of a :class:`sklearn.model_selection.PredefinedSplit`\n to prevent the oversampled data from leaking into the validation sets\n during the grid search (all oversampled data appended to end of training\n set and now allowed to be part of validation sets), but the improvement was\n not as much as one would have hoped (actually worse). So we ended up going\n back to just using balanced class weights.\n\n Use 5-fold (by default) cross-validation to choose the best parameters,\n refit on best, evaluate accuracy, precision, recall, ROC AUC.\n\n Note that we need a scaler before doing PCA. 
Use F1 score to pick model.\n\n :param cv: Number of CV splits to make when doing grid search.\n :type cv: int, optional\n :param n_jobs: Number of jobs to run in parallel when grid searching.\n Defaults to ``-1`` to distribute load to all threads.\n :type n_jobs: int, optional\n :param verbose: Verbosity of the\n :class:`~sklearn.model_selection.GridSearchCV` during searching/fitting.\n :type verbose: bool, optional\n :param report: If ``True``, print to stdout a report on model scores.\n :type report: bool, optional\n :param random_seed: A int seed to pass for multiple calls to this function\n to be reproducible. Leave ``None`` for stochastic behavior.\n :type random_state: int, optional\n :rtype: tuple\n ' if (cv < 3): raise ValueError('cv folds must be 3 or more') best_cols = list(pd.read_csv((BASE_RESULTS_DIR + '/vte_selected_cols.csv'), index_col=0).index) (X_train, X_test, y_train, y_test) = vte_slp_factory(data_transform=replace_hdl_tot_chol_with_ratio, inputs=best_cols, targets=VTE_OUTPUT_COLS, dropna=True, random_state=random_seed) scaler = StandardScaler() scaler.fit(X_train) (X_train, X_test) = (scaler.transform(X_train), scaler.transform(X_test)) pca = PCA(whiten=True, random_state=random_seed) pca.fit(X_train) (X_train, X_test) = (pca.transform(X_train), pca.transform(X_test)) base_names = ('l2_logistic', 'l2_linsvc', 'bagged_l2_logistic', 'bagged_l2_linsvc', 'rbf_svc', 'xgboost', 'random_forest') lrc_l2_grid = dict(penalty=['l2'], C=[1], fit_intercept=[True], max_iter=[100], class_weight=['balanced']) lsvc_l2_grid = dict(penalty=['l2'], loss=['hinge', 'squared_hinge'], dual=[True], random_state=[random_seed], C=[1, 5, 10], fit_intercept=[True], class_weight=['balanced']) bag_lrc_l2_grid = dict(base_estimator=[LogisticRegression(fit_intercept=True, class_weight='balanced')], n_estimators=[100, 200, 400], random_state=[random_seed]) bag_lsvc_l2_grid = dict(base_estimator=[LinearSVC(loss='hinge', fit_intercept=True, class_weight='balanced', 
random_state=random_seed)], n_estimators=[100, 200, 400], random_state=[random_seed]) rbf_svc_grid = dict(C=[0.1, 1, 5], kernel=['rbf'], gamma=['scale', 'auto'], class_weight=['balanced']) neg_pos_ratio = ((y_train == 0).sum() / (y_train == 1).sum()) xgb_grid = dict(max_depth=[3], n_estimators=[400, 600, 800], learning_rate=[0.1], booster=['gbtree'], subsample=[0.5], reg_lambda=[0.1, 1], random_state=[random_seed], scale_pos_weight=[neg_pos_ratio]) rf_grid = dict(max_depth=[6, 12, 24], n_estimators=[100, 200, 400], criterion=['entropy'], random_state=[random_seed], class_weight=['balanced']) base_models = (LogisticRegression(), LinearSVC(), BaggingClassifier(), BaggingClassifier(), SVC(), XGBClassifier(), RandomForestClassifier()) base_names = ('l2_logistic', 'l2_linsvc', 'bagged_l2_logistic', 'bagged_l2_linsvc', 'rbf_svc', 'xgboost', 'random_forest') param_grids = (lrc_l2_grid, lsvc_l2_grid, bag_lrc_l2_grid, bag_lsvc_l2_grid, rbf_svc_grid, xgb_grid, rf_grid) mdata = {} mparams = {} mscores = pd.DataFrame(index=base_names, columns=['accuracy', 'precision', 'recall', 'roc_auc']) for (base_name, base_model, param_grid) in zip(base_names, base_models, param_grids): model = GridSearchCV(base_model, param_grid, scoring='f1', cv=cv, n_jobs=n_jobs, verbose=int(verbose)) model.fit(X_train, y_train) mdata[base_name] = model params = model.best_estimator_.get_params() for name in params.keys(): if hasattr(params[name], 'get_params'): params[name] = params[name].get_params() mparams[base_name] = params y_pred = model.predict(X_test) if hasattr(model, 'decision_function'): y_pred_scores = model.decision_function(X_test) elif hasattr(model, 'predict_proba'): y_pred_scores = model.predict_proba(X_test)[(:, 1)] else: print(f"warning: {model.__class__.__name__} can't compute ROC AUC score; does not have decision_function or predict_proba", file=sys.stderr) y_pred_scores = None mscores.loc[(base_name, :)] = (accuracy_score(y_test, y_pred), precision_score(y_test, y_pred), 
recall_score(y_test, y_pred), (np.nan if (y_pred_scores is None) else roc_auc_score(y_test, y_pred_scores))) if report: print('---- classifier quality metrics ', end='') print(('-' * 48), end='\n\n') print(mscores) return (mdata, mparams, mscores)
Fit classifiers to [non-]undersampled PCA-whitened input data. .. note:: Spits a lot of ``liblinear`` convergence warnings. We start with the top 7 columns by univariate ROC AUC for the VTE data. We perform a whitening PCA transform of the data and then fit classifiers with balanced class weights. Formerly oversampling of the minority class was done with the use of a :class:`sklearn.model_selection.PredefinedSplit` to prevent the oversampled data from leaking into the validation sets during the grid search (all oversampled data appended to end of training set and now allowed to be part of validation sets), but the improvement was not as much as one would have hoped (actually worse). So we ended up going back to just using balanced class weights. Use 5-fold (by default) cross-validation to choose the best parameters, refit on best, evaluate accuracy, precision, recall, ROC AUC. Note that we need a scaler before doing PCA. Use F1 score to pick model. :param cv: Number of CV splits to make when doing grid search. :type cv: int, optional :param n_jobs: Number of jobs to run in parallel when grid searching. Defaults to ``-1`` to distribute load to all threads. :type n_jobs: int, optional :param verbose: Verbosity of the :class:`~sklearn.model_selection.GridSearchCV` during searching/fitting. :type verbose: bool, optional :param report: If ``True``, print to stdout a report on model scores. :type report: bool, optional :param random_seed: A int seed to pass for multiple calls to this function to be reproducible. Leave ``None`` for stochastic behavior. :type random_state: int, optional :rtype: tuple
mtml/modeling/vte/mixed_models.py
fit_pca_whitened_classifiers
crb479/mcdevitt-trauma-ml
5
python
@persist_csv(target=(BASE_RESULTS_DIR + '/vte_whitened_scores.csv'), enabled=True, out_transform=(lambda x: x[2])) @persist_json(target=(BASE_RESULTS_DIR + '/vte_whitened_params.json'), enabled=True, out_transform=(lambda x: x[1])) @persist_pickle(target=(BASE_RESULTS_DIR + '/vte_whitened.pickle'), enabled=True, out_transform=(lambda x: x[0])) def fit_pca_whitened_classifiers(cv=5, n_jobs=(- 1), verbose=False, report=False, random_seed=None): 'Fit classifiers to [non-]undersampled PCA-whitened input data.\n \n .. note:: Spits a lot of ``liblinear`` convergence warnings.\n\n We start with the top 7 columns by univariate ROC AUC for the VTE data.\n We perform a whitening PCA transform of the data and then fit classifiers\n with balanced class weights. Formerly oversampling of the minority class\n was done with the use of a :class:`sklearn.model_selection.PredefinedSplit`\n to prevent the oversampled data from leaking into the validation sets\n during the grid search (all oversampled data appended to end of training\n set and now allowed to be part of validation sets), but the improvement was\n not as much as one would have hoped (actually worse). So we ended up going\n back to just using balanced class weights.\n\n Use 5-fold (by default) cross-validation to choose the best parameters,\n refit on best, evaluate accuracy, precision, recall, ROC AUC.\n\n Note that we need a scaler before doing PCA. 
Use F1 score to pick model.\n\n :param cv: Number of CV splits to make when doing grid search.\n :type cv: int, optional\n :param n_jobs: Number of jobs to run in parallel when grid searching.\n Defaults to ``-1`` to distribute load to all threads.\n :type n_jobs: int, optional\n :param verbose: Verbosity of the\n :class:`~sklearn.model_selection.GridSearchCV` during searching/fitting.\n :type verbose: bool, optional\n :param report: If ``True``, print to stdout a report on model scores.\n :type report: bool, optional\n :param random_seed: A int seed to pass for multiple calls to this function\n to be reproducible. Leave ``None`` for stochastic behavior.\n :type random_state: int, optional\n :rtype: tuple\n ' if (cv < 3): raise ValueError('cv folds must be 3 or more') best_cols = list(pd.read_csv((BASE_RESULTS_DIR + '/vte_selected_cols.csv'), index_col=0).index) (X_train, X_test, y_train, y_test) = vte_slp_factory(data_transform=replace_hdl_tot_chol_with_ratio, inputs=best_cols, targets=VTE_OUTPUT_COLS, dropna=True, random_state=random_seed) scaler = StandardScaler() scaler.fit(X_train) (X_train, X_test) = (scaler.transform(X_train), scaler.transform(X_test)) pca = PCA(whiten=True, random_state=random_seed) pca.fit(X_train) (X_train, X_test) = (pca.transform(X_train), pca.transform(X_test)) base_names = ('l2_logistic', 'l2_linsvc', 'bagged_l2_logistic', 'bagged_l2_linsvc', 'rbf_svc', 'xgboost', 'random_forest') lrc_l2_grid = dict(penalty=['l2'], C=[1], fit_intercept=[True], max_iter=[100], class_weight=['balanced']) lsvc_l2_grid = dict(penalty=['l2'], loss=['hinge', 'squared_hinge'], dual=[True], random_state=[random_seed], C=[1, 5, 10], fit_intercept=[True], class_weight=['balanced']) bag_lrc_l2_grid = dict(base_estimator=[LogisticRegression(fit_intercept=True, class_weight='balanced')], n_estimators=[100, 200, 400], random_state=[random_seed]) bag_lsvc_l2_grid = dict(base_estimator=[LinearSVC(loss='hinge', fit_intercept=True, class_weight='balanced', 
random_state=random_seed)], n_estimators=[100, 200, 400], random_state=[random_seed]) rbf_svc_grid = dict(C=[0.1, 1, 5], kernel=['rbf'], gamma=['scale', 'auto'], class_weight=['balanced']) neg_pos_ratio = ((y_train == 0).sum() / (y_train == 1).sum()) xgb_grid = dict(max_depth=[3], n_estimators=[400, 600, 800], learning_rate=[0.1], booster=['gbtree'], subsample=[0.5], reg_lambda=[0.1, 1], random_state=[random_seed], scale_pos_weight=[neg_pos_ratio]) rf_grid = dict(max_depth=[6, 12, 24], n_estimators=[100, 200, 400], criterion=['entropy'], random_state=[random_seed], class_weight=['balanced']) base_models = (LogisticRegression(), LinearSVC(), BaggingClassifier(), BaggingClassifier(), SVC(), XGBClassifier(), RandomForestClassifier()) base_names = ('l2_logistic', 'l2_linsvc', 'bagged_l2_logistic', 'bagged_l2_linsvc', 'rbf_svc', 'xgboost', 'random_forest') param_grids = (lrc_l2_grid, lsvc_l2_grid, bag_lrc_l2_grid, bag_lsvc_l2_grid, rbf_svc_grid, xgb_grid, rf_grid) mdata = {} mparams = {} mscores = pd.DataFrame(index=base_names, columns=['accuracy', 'precision', 'recall', 'roc_auc']) for (base_name, base_model, param_grid) in zip(base_names, base_models, param_grids): model = GridSearchCV(base_model, param_grid, scoring='f1', cv=cv, n_jobs=n_jobs, verbose=int(verbose)) model.fit(X_train, y_train) mdata[base_name] = model params = model.best_estimator_.get_params() for name in params.keys(): if hasattr(params[name], 'get_params'): params[name] = params[name].get_params() mparams[base_name] = params y_pred = model.predict(X_test) if hasattr(model, 'decision_function'): y_pred_scores = model.decision_function(X_test) elif hasattr(model, 'predict_proba'): y_pred_scores = model.predict_proba(X_test)[(:, 1)] else: print(f"warning: {model.__class__.__name__} can't compute ROC AUC score; does not have decision_function or predict_proba", file=sys.stderr) y_pred_scores = None mscores.loc[(base_name, :)] = (accuracy_score(y_test, y_pred), precision_score(y_test, y_pred), 
recall_score(y_test, y_pred), (np.nan if (y_pred_scores is None) else roc_auc_score(y_test, y_pred_scores))) if report: print('---- classifier quality metrics ', end=) print(('-' * 48), end='\n\n') print(mscores) return (mdata, mparams, mscores)
@persist_csv(target=(BASE_RESULTS_DIR + '/vte_whitened_scores.csv'), enabled=True, out_transform=(lambda x: x[2])) @persist_json(target=(BASE_RESULTS_DIR + '/vte_whitened_params.json'), enabled=True, out_transform=(lambda x: x[1])) @persist_pickle(target=(BASE_RESULTS_DIR + '/vte_whitened.pickle'), enabled=True, out_transform=(lambda x: x[0])) def fit_pca_whitened_classifiers(cv=5, n_jobs=(- 1), verbose=False, report=False, random_seed=None): 'Fit classifiers to [non-]undersampled PCA-whitened input data.\n \n .. note:: Spits a lot of ``liblinear`` convergence warnings.\n\n We start with the top 7 columns by univariate ROC AUC for the VTE data.\n We perform a whitening PCA transform of the data and then fit classifiers\n with balanced class weights. Formerly oversampling of the minority class\n was done with the use of a :class:`sklearn.model_selection.PredefinedSplit`\n to prevent the oversampled data from leaking into the validation sets\n during the grid search (all oversampled data appended to end of training\n set and now allowed to be part of validation sets), but the improvement was\n not as much as one would have hoped (actually worse). So we ended up going\n back to just using balanced class weights.\n\n Use 5-fold (by default) cross-validation to choose the best parameters,\n refit on best, evaluate accuracy, precision, recall, ROC AUC.\n\n Note that we need a scaler before doing PCA. 
Use F1 score to pick model.\n\n :param cv: Number of CV splits to make when doing grid search.\n :type cv: int, optional\n :param n_jobs: Number of jobs to run in parallel when grid searching.\n Defaults to ``-1`` to distribute load to all threads.\n :type n_jobs: int, optional\n :param verbose: Verbosity of the\n :class:`~sklearn.model_selection.GridSearchCV` during searching/fitting.\n :type verbose: bool, optional\n :param report: If ``True``, print to stdout a report on model scores.\n :type report: bool, optional\n :param random_seed: A int seed to pass for multiple calls to this function\n to be reproducible. Leave ``None`` for stochastic behavior.\n :type random_state: int, optional\n :rtype: tuple\n ' if (cv < 3): raise ValueError('cv folds must be 3 or more') best_cols = list(pd.read_csv((BASE_RESULTS_DIR + '/vte_selected_cols.csv'), index_col=0).index) (X_train, X_test, y_train, y_test) = vte_slp_factory(data_transform=replace_hdl_tot_chol_with_ratio, inputs=best_cols, targets=VTE_OUTPUT_COLS, dropna=True, random_state=random_seed) scaler = StandardScaler() scaler.fit(X_train) (X_train, X_test) = (scaler.transform(X_train), scaler.transform(X_test)) pca = PCA(whiten=True, random_state=random_seed) pca.fit(X_train) (X_train, X_test) = (pca.transform(X_train), pca.transform(X_test)) base_names = ('l2_logistic', 'l2_linsvc', 'bagged_l2_logistic', 'bagged_l2_linsvc', 'rbf_svc', 'xgboost', 'random_forest') lrc_l2_grid = dict(penalty=['l2'], C=[1], fit_intercept=[True], max_iter=[100], class_weight=['balanced']) lsvc_l2_grid = dict(penalty=['l2'], loss=['hinge', 'squared_hinge'], dual=[True], random_state=[random_seed], C=[1, 5, 10], fit_intercept=[True], class_weight=['balanced']) bag_lrc_l2_grid = dict(base_estimator=[LogisticRegression(fit_intercept=True, class_weight='balanced')], n_estimators=[100, 200, 400], random_state=[random_seed]) bag_lsvc_l2_grid = dict(base_estimator=[LinearSVC(loss='hinge', fit_intercept=True, class_weight='balanced', 
random_state=random_seed)], n_estimators=[100, 200, 400], random_state=[random_seed]) rbf_svc_grid = dict(C=[0.1, 1, 5], kernel=['rbf'], gamma=['scale', 'auto'], class_weight=['balanced']) neg_pos_ratio = ((y_train == 0).sum() / (y_train == 1).sum()) xgb_grid = dict(max_depth=[3], n_estimators=[400, 600, 800], learning_rate=[0.1], booster=['gbtree'], subsample=[0.5], reg_lambda=[0.1, 1], random_state=[random_seed], scale_pos_weight=[neg_pos_ratio]) rf_grid = dict(max_depth=[6, 12, 24], n_estimators=[100, 200, 400], criterion=['entropy'], random_state=[random_seed], class_weight=['balanced']) base_models = (LogisticRegression(), LinearSVC(), BaggingClassifier(), BaggingClassifier(), SVC(), XGBClassifier(), RandomForestClassifier()) base_names = ('l2_logistic', 'l2_linsvc', 'bagged_l2_logistic', 'bagged_l2_linsvc', 'rbf_svc', 'xgboost', 'random_forest') param_grids = (lrc_l2_grid, lsvc_l2_grid, bag_lrc_l2_grid, bag_lsvc_l2_grid, rbf_svc_grid, xgb_grid, rf_grid) mdata = {} mparams = {} mscores = pd.DataFrame(index=base_names, columns=['accuracy', 'precision', 'recall', 'roc_auc']) for (base_name, base_model, param_grid) in zip(base_names, base_models, param_grids): model = GridSearchCV(base_model, param_grid, scoring='f1', cv=cv, n_jobs=n_jobs, verbose=int(verbose)) model.fit(X_train, y_train) mdata[base_name] = model params = model.best_estimator_.get_params() for name in params.keys(): if hasattr(params[name], 'get_params'): params[name] = params[name].get_params() mparams[base_name] = params y_pred = model.predict(X_test) if hasattr(model, 'decision_function'): y_pred_scores = model.decision_function(X_test) elif hasattr(model, 'predict_proba'): y_pred_scores = model.predict_proba(X_test)[(:, 1)] else: print(f"warning: {model.__class__.__name__} can't compute ROC AUC score; does not have decision_function or predict_proba", file=sys.stderr) y_pred_scores = None mscores.loc[(base_name, :)] = (accuracy_score(y_test, y_pred), precision_score(y_test, y_pred), 
recall_score(y_test, y_pred), (np.nan if (y_pred_scores is None) else roc_auc_score(y_test, y_pred_scores))) if report: print('---- classifier quality metrics ', end=) print(('-' * 48), end='\n\n') print(mscores) return (mdata, mparams, mscores)<|docstring|>Fit classifiers to [non-]undersampled PCA-whitened input data. .. note:: Spits a lot of ``liblinear`` convergence warnings. We start with the top 7 columns by univariate ROC AUC for the VTE data. We perform a whitening PCA transform of the data and then fit classifiers with balanced class weights. Formerly oversampling of the minority class was done with the use of a :class:`sklearn.model_selection.PredefinedSplit` to prevent the oversampled data from leaking into the validation sets during the grid search (all oversampled data appended to end of training set and now allowed to be part of validation sets), but the improvement was not as much as one would have hoped (actually worse). So we ended up going back to just using balanced class weights. Use 5-fold (by default) cross-validation to choose the best parameters, refit on best, evaluate accuracy, precision, recall, ROC AUC. Note that we need a scaler before doing PCA. Use F1 score to pick model. :param cv: Number of CV splits to make when doing grid search. :type cv: int, optional :param n_jobs: Number of jobs to run in parallel when grid searching. Defaults to ``-1`` to distribute load to all threads. :type n_jobs: int, optional :param verbose: Verbosity of the :class:`~sklearn.model_selection.GridSearchCV` during searching/fitting. :type verbose: bool, optional :param report: If ``True``, print to stdout a report on model scores. :type report: bool, optional :param random_seed: A int seed to pass for multiple calls to this function to be reproducible. Leave ``None`` for stochastic behavior. :type random_state: int, optional :rtype: tuple<|endoftext|>
38617f9a6eee4b86b18d236ee43e8b8e1edc4668034232b8458464a22c729d71
@mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') def test_ip_association_and_allocation_of_other_project(self, net_get, fixed_get): 'Makes sure that we cannot deallocaate or disassociate\n a public ip of other project.\n ' net_get.return_value = dict(test_network.fake_network, **networks[1]) context1 = context.RequestContext('user', 'project1') context2 = context.RequestContext('user', 'project2') float_ip = db.floating_ip_create(context1.elevated(), {'address': '1.2.3.4', 'project_id': context1.project_id}) float_addr = float_ip['address'] instance = db.instance_create(context1, {'project_id': 'project1'}) fix_addr = db.fixed_ip_associate_pool(context1.elevated(), 1, instance['uuid']).address fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, address=fix_addr, instance_uuid=instance.uuid, network=dict(test_network.fake_network, **networks[1])) self.assertRaises(exception.NotAuthorized, self.network.associate_floating_ip, context2, float_addr, fix_addr) self.assertRaises(exception.NotAuthorized, self.network.deallocate_floating_ip, context2, float_addr) self.network.associate_floating_ip(context1, float_addr, fix_addr) self.assertRaises(exception.NotAuthorized, self.network.disassociate_floating_ip, context2, float_addr) self.network.disassociate_floating_ip(context1, float_addr) self.network.deallocate_floating_ip(context1, float_addr) self.network.deallocate_fixed_ip(context1, fix_addr, 'fake') db.floating_ip_destroy(context1.elevated(), float_addr) db.fixed_ip_disassociate(context1.elevated(), fix_addr)
Makes sure that we cannot deallocaate or disassociate a public ip of other project.
nova/tests/network/test_manager.py
test_ip_association_and_allocation_of_other_project
bopopescu/nova-week
7
python
@mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') def test_ip_association_and_allocation_of_other_project(self, net_get, fixed_get): 'Makes sure that we cannot deallocaate or disassociate\n a public ip of other project.\n ' net_get.return_value = dict(test_network.fake_network, **networks[1]) context1 = context.RequestContext('user', 'project1') context2 = context.RequestContext('user', 'project2') float_ip = db.floating_ip_create(context1.elevated(), {'address': '1.2.3.4', 'project_id': context1.project_id}) float_addr = float_ip['address'] instance = db.instance_create(context1, {'project_id': 'project1'}) fix_addr = db.fixed_ip_associate_pool(context1.elevated(), 1, instance['uuid']).address fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, address=fix_addr, instance_uuid=instance.uuid, network=dict(test_network.fake_network, **networks[1])) self.assertRaises(exception.NotAuthorized, self.network.associate_floating_ip, context2, float_addr, fix_addr) self.assertRaises(exception.NotAuthorized, self.network.deallocate_floating_ip, context2, float_addr) self.network.associate_floating_ip(context1, float_addr, fix_addr) self.assertRaises(exception.NotAuthorized, self.network.disassociate_floating_ip, context2, float_addr) self.network.disassociate_floating_ip(context1, float_addr) self.network.deallocate_floating_ip(context1, float_addr) self.network.deallocate_fixed_ip(context1, fix_addr, 'fake') db.floating_ip_destroy(context1.elevated(), float_addr) db.fixed_ip_disassociate(context1.elevated(), fix_addr)
@mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') def test_ip_association_and_allocation_of_other_project(self, net_get, fixed_get): 'Makes sure that we cannot deallocaate or disassociate\n a public ip of other project.\n ' net_get.return_value = dict(test_network.fake_network, **networks[1]) context1 = context.RequestContext('user', 'project1') context2 = context.RequestContext('user', 'project2') float_ip = db.floating_ip_create(context1.elevated(), {'address': '1.2.3.4', 'project_id': context1.project_id}) float_addr = float_ip['address'] instance = db.instance_create(context1, {'project_id': 'project1'}) fix_addr = db.fixed_ip_associate_pool(context1.elevated(), 1, instance['uuid']).address fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, address=fix_addr, instance_uuid=instance.uuid, network=dict(test_network.fake_network, **networks[1])) self.assertRaises(exception.NotAuthorized, self.network.associate_floating_ip, context2, float_addr, fix_addr) self.assertRaises(exception.NotAuthorized, self.network.deallocate_floating_ip, context2, float_addr) self.network.associate_floating_ip(context1, float_addr, fix_addr) self.assertRaises(exception.NotAuthorized, self.network.disassociate_floating_ip, context2, float_addr) self.network.disassociate_floating_ip(context1, float_addr) self.network.deallocate_floating_ip(context1, float_addr) self.network.deallocate_fixed_ip(context1, fix_addr, 'fake') db.floating_ip_destroy(context1.elevated(), float_addr) db.fixed_ip_disassociate(context1.elevated(), fix_addr)<|docstring|>Makes sure that we cannot deallocaate or disassociate a public ip of other project.<|endoftext|>
747115fb527bb14ad0fe18b1f368a55b975378fa86e01a1d89ad6645294abf3c
@mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') @mock.patch('nova.db.fixed_ip_update') def test_deallocate_fixed(self, fixed_update, net_get, fixed_get): "Verify that release is called properly.\n\n Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't return\n " net_get.return_value = dict(test_network.fake_network, **networks[1]) def vif_get(_context, _vif_id): return vifs[0] self.stubs.Set(db, 'virtual_interface_get', vif_get) context1 = context.RequestContext('user', 'project1') instance = db.instance_create(context1, {'project_id': 'project1'}) elevated = context1.elevated() fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid']) fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, address=fix_addr.address, instance_uuid=instance.uuid, allocated=True, virtual_interface_id=3, network=dict(test_network.fake_network, **networks[1])) self.flags(force_dhcp_release=True) self.mox.StubOutWithMock(linux_net, 'release_dhcp') linux_net.release_dhcp(networks[1]['bridge'], fix_addr.address, 'DE:AD:BE:EF:00:00') self.mox.ReplayAll() self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake') fixed_update.assert_called_once_with(context1, fix_addr.address, {'allocated': False, 'virtual_interface_id': None})
Verify that release is called properly. Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't return
nova/tests/network/test_manager.py
test_deallocate_fixed
bopopescu/nova-week
7
python
@mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') @mock.patch('nova.db.fixed_ip_update') def test_deallocate_fixed(self, fixed_update, net_get, fixed_get): "Verify that release is called properly.\n\n Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't return\n " net_get.return_value = dict(test_network.fake_network, **networks[1]) def vif_get(_context, _vif_id): return vifs[0] self.stubs.Set(db, 'virtual_interface_get', vif_get) context1 = context.RequestContext('user', 'project1') instance = db.instance_create(context1, {'project_id': 'project1'}) elevated = context1.elevated() fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid']) fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, address=fix_addr.address, instance_uuid=instance.uuid, allocated=True, virtual_interface_id=3, network=dict(test_network.fake_network, **networks[1])) self.flags(force_dhcp_release=True) self.mox.StubOutWithMock(linux_net, 'release_dhcp') linux_net.release_dhcp(networks[1]['bridge'], fix_addr.address, 'DE:AD:BE:EF:00:00') self.mox.ReplayAll() self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake') fixed_update.assert_called_once_with(context1, fix_addr.address, {'allocated': False, 'virtual_interface_id': None})
@mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') @mock.patch('nova.db.fixed_ip_update') def test_deallocate_fixed(self, fixed_update, net_get, fixed_get): "Verify that release is called properly.\n\n Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't return\n " net_get.return_value = dict(test_network.fake_network, **networks[1]) def vif_get(_context, _vif_id): return vifs[0] self.stubs.Set(db, 'virtual_interface_get', vif_get) context1 = context.RequestContext('user', 'project1') instance = db.instance_create(context1, {'project_id': 'project1'}) elevated = context1.elevated() fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid']) fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, address=fix_addr.address, instance_uuid=instance.uuid, allocated=True, virtual_interface_id=3, network=dict(test_network.fake_network, **networks[1])) self.flags(force_dhcp_release=True) self.mox.StubOutWithMock(linux_net, 'release_dhcp') linux_net.release_dhcp(networks[1]['bridge'], fix_addr.address, 'DE:AD:BE:EF:00:00') self.mox.ReplayAll() self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake') fixed_update.assert_called_once_with(context1, fix_addr.address, {'allocated': False, 'virtual_interface_id': None})<|docstring|>Verify that release is called properly. Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't return<|endoftext|>
1924425acdb0030490527a738a9dcdc717e63b014764a8a89b4bc9107566aad4
@mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') @mock.patch('nova.db.fixed_ip_update') def test_deallocate_fixed_no_vif(self, fixed_update, net_get, fixed_get): "Verify that deallocate doesn't raise when no vif is returned.\n\n Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't return\n " net_get.return_value = dict(test_network.fake_network, **networks[1]) def vif_get(_context, _vif_id): return None self.stubs.Set(db, 'virtual_interface_get', vif_get) context1 = context.RequestContext('user', 'project1') instance = db.instance_create(context1, {'project_id': 'project1'}) elevated = context1.elevated() fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid']) fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, address=fix_addr.address, allocated=True, virtual_interface_id=3, instance_uuid=instance.uuid, network=dict(test_network.fake_network, **networks[1])) self.flags(force_dhcp_release=True) fixed_update.return_value = fixed_get.return_value self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake') fixed_update.assert_called_once_with(context1, fix_addr.address, {'allocated': False, 'virtual_interface_id': None})
Verify that deallocate doesn't raise when no vif is returned. Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't return
nova/tests/network/test_manager.py
test_deallocate_fixed_no_vif
bopopescu/nova-week
7
python
@mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') @mock.patch('nova.db.fixed_ip_update') def test_deallocate_fixed_no_vif(self, fixed_update, net_get, fixed_get): "Verify that deallocate doesn't raise when no vif is returned.\n\n Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't return\n " net_get.return_value = dict(test_network.fake_network, **networks[1]) def vif_get(_context, _vif_id): return None self.stubs.Set(db, 'virtual_interface_get', vif_get) context1 = context.RequestContext('user', 'project1') instance = db.instance_create(context1, {'project_id': 'project1'}) elevated = context1.elevated() fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid']) fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, address=fix_addr.address, allocated=True, virtual_interface_id=3, instance_uuid=instance.uuid, network=dict(test_network.fake_network, **networks[1])) self.flags(force_dhcp_release=True) fixed_update.return_value = fixed_get.return_value self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake') fixed_update.assert_called_once_with(context1, fix_addr.address, {'allocated': False, 'virtual_interface_id': None})
@mock.patch('nova.db.fixed_ip_get_by_address') @mock.patch('nova.db.network_get') @mock.patch('nova.db.fixed_ip_update') def test_deallocate_fixed_no_vif(self, fixed_update, net_get, fixed_get): "Verify that deallocate doesn't raise when no vif is returned.\n\n Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't return\n " net_get.return_value = dict(test_network.fake_network, **networks[1]) def vif_get(_context, _vif_id): return None self.stubs.Set(db, 'virtual_interface_get', vif_get) context1 = context.RequestContext('user', 'project1') instance = db.instance_create(context1, {'project_id': 'project1'}) elevated = context1.elevated() fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid']) fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip, address=fix_addr.address, allocated=True, virtual_interface_id=3, instance_uuid=instance.uuid, network=dict(test_network.fake_network, **networks[1])) self.flags(force_dhcp_release=True) fixed_update.return_value = fixed_get.return_value self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake') fixed_update.assert_called_once_with(context1, fix_addr.address, {'allocated': False, 'virtual_interface_id': None})<|docstring|>Verify that deallocate doesn't raise when no vif is returned. Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't return<|endoftext|>
e99f7105142e20e19114e1160a89472e6d0d01bb55ebace4521c7dc9c47ffb35
def test_flatdhcpmanager_dynamic_fixed_range(self): 'Test FlatDHCPManager NAT rules for fixed_range.' self.network = network_manager.FlatDHCPManager(host=HOST) self.network.db = db self._test_init_host_dynamic_fixed_range(self.network)
Test FlatDHCPManager NAT rules for fixed_range.
nova/tests/network/test_manager.py
test_flatdhcpmanager_dynamic_fixed_range
bopopescu/nova-week
7
python
def test_flatdhcpmanager_dynamic_fixed_range(self): self.network = network_manager.FlatDHCPManager(host=HOST) self.network.db = db self._test_init_host_dynamic_fixed_range(self.network)
def test_flatdhcpmanager_dynamic_fixed_range(self): self.network = network_manager.FlatDHCPManager(host=HOST) self.network.db = db self._test_init_host_dynamic_fixed_range(self.network)<|docstring|>Test FlatDHCPManager NAT rules for fixed_range.<|endoftext|>
12bd51bb56b82b3004c52980e6fb3ba79ba1e973deebcf6d88dfd78386a2dc2f
def test_vlanmanager_dynamic_fixed_range(self): 'Test VlanManager NAT rules for fixed_range.' self.network = network_manager.VlanManager(host=HOST) self.network.db = db self._test_init_host_dynamic_fixed_range(self.network)
Test VlanManager NAT rules for fixed_range.
nova/tests/network/test_manager.py
test_vlanmanager_dynamic_fixed_range
bopopescu/nova-week
7
python
def test_vlanmanager_dynamic_fixed_range(self): self.network = network_manager.VlanManager(host=HOST) self.network.db = db self._test_init_host_dynamic_fixed_range(self.network)
def test_vlanmanager_dynamic_fixed_range(self): self.network = network_manager.VlanManager(host=HOST) self.network.db = db self._test_init_host_dynamic_fixed_range(self.network)<|docstring|>Test VlanManager NAT rules for fixed_range.<|endoftext|>
cbc8057a4d136b19f7e9e5fb7585aac986feae634aa960a69737e2ee9523c2fd
def test_rpc_allocate(self): "Test to verify bug 855030 doesn't resurface.\n\n Mekes sure _rpc_allocate_fixed_ip returns a value so the call\n returns properly and the greenpool completes.\n " address = '10.10.10.10' def fake_allocate(*args, **kwargs): return address def fake_network_get(*args, **kwargs): return test_network.fake_network self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate) self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get) rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context, 'fake_instance', 'fake_network') self.assertEqual(rval, address)
Test to verify bug 855030 doesn't resurface. Mekes sure _rpc_allocate_fixed_ip returns a value so the call returns properly and the greenpool completes.
nova/tests/network/test_manager.py
test_rpc_allocate
bopopescu/nova-week
7
python
def test_rpc_allocate(self): "Test to verify bug 855030 doesn't resurface.\n\n Mekes sure _rpc_allocate_fixed_ip returns a value so the call\n returns properly and the greenpool completes.\n " address = '10.10.10.10' def fake_allocate(*args, **kwargs): return address def fake_network_get(*args, **kwargs): return test_network.fake_network self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate) self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get) rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context, 'fake_instance', 'fake_network') self.assertEqual(rval, address)
def test_rpc_allocate(self): "Test to verify bug 855030 doesn't resurface.\n\n Mekes sure _rpc_allocate_fixed_ip returns a value so the call\n returns properly and the greenpool completes.\n " address = '10.10.10.10' def fake_allocate(*args, **kwargs): return address def fake_network_get(*args, **kwargs): return test_network.fake_network self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate) self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get) rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context, 'fake_instance', 'fake_network') self.assertEqual(rval, address)<|docstring|>Test to verify bug 855030 doesn't resurface. Mekes sure _rpc_allocate_fixed_ip returns a value so the call returns properly and the greenpool completes.<|endoftext|>
a2ed9b791a6561dcddb274dae2b02daf68511af016a03d1aae3fa458bdd8d26b
def authenticate(): 'Sends a 401 response that enables basic auth' return Response('Unauthorized', 401, {'WWW-Authenticate': 'Basic realm="Login Required"'})
Sends a 401 response that enables basic auth
gosecure_app.py
authenticate
nkrios/goSecure
790
python
def authenticate(): return Response('Unauthorized', 401, {'WWW-Authenticate': 'Basic realm="Login Required"'})
def authenticate(): return Response('Unauthorized', 401, {'WWW-Authenticate': 'Basic realm="Login Required"'})<|docstring|>Sends a 401 response that enables basic auth<|endoftext|>
f7a3eb9b2cd4676e04444e5ea98f2d6de40566cfb2b1d91caafffbc64e359d2f
def _initialize(self, resource=None, id_query=False, reset=False, **keywargs): 'Opens an I/O session to the instrument.' super(agilentBase8340, self)._initialize(resource, id_query, reset, **keywargs) if (not self._driver_operation_simulate): self._clear() if (id_query and (not self._driver_operation_simulate)): id = self.identity.instrument_model id_check = self._instrument_id id_short = id[:len(id_check)] if (id_short != id_check): raise Exception('Instrument ID mismatch, expecting %s, got %s', id_check, id_short) if reset: self.utility_reset()
Opens an I/O session to the instrument.
ivi/agilent/agilentBase8340.py
_initialize
sacherjj/python-ivi
161
python
def _initialize(self, resource=None, id_query=False, reset=False, **keywargs): super(agilentBase8340, self)._initialize(resource, id_query, reset, **keywargs) if (not self._driver_operation_simulate): self._clear() if (id_query and (not self._driver_operation_simulate)): id = self.identity.instrument_model id_check = self._instrument_id id_short = id[:len(id_check)] if (id_short != id_check): raise Exception('Instrument ID mismatch, expecting %s, got %s', id_check, id_short) if reset: self.utility_reset()
def _initialize(self, resource=None, id_query=False, reset=False, **keywargs): super(agilentBase8340, self)._initialize(resource, id_query, reset, **keywargs) if (not self._driver_operation_simulate): self._clear() if (id_query and (not self._driver_operation_simulate)): id = self.identity.instrument_model id_check = self._instrument_id id_short = id[:len(id_check)] if (id_short != id_check): raise Exception('Instrument ID mismatch, expecting %s, got %s', id_check, id_short) if reset: self.utility_reset()<|docstring|>Opens an I/O session to the instrument.<|endoftext|>
a49021676acda828a4da445733070097209d65e1619c14bb544ab947eed09d37
def __init__(self, command, option=None, pattern='EOS', timeout_second=10): '* Get communication with unix process using pexpect module.' self.command = command self.timeout_second = timeout_second self.pattern = pattern self.option = option self.launch_process(command)
* Get communication with unix process using pexpect module.
JapaneseTokenizer/common/sever_handler.py
__init__
fumankaitori/JapaneseTokenizers
134
python
def __init__(self, command, option=None, pattern='EOS', timeout_second=10): self.command = command self.timeout_second = timeout_second self.pattern = pattern self.option = option self.launch_process(command)
def __init__(self, command, option=None, pattern='EOS', timeout_second=10): self.command = command self.timeout_second = timeout_second self.pattern = pattern self.option = option self.launch_process(command)<|docstring|>* Get communication with unix process using pexpect module.<|endoftext|>
cc7ca6c6af49b98950e4ad26b4af9548d21be8ec8daa044928b6da0b823ed7d8
def launch_process(self, command): '* What you can do\n - It starts process and keep it.\n ' if (not (self.option is None)): command_plus_option = ((self.command + ' ') + self.option) else: command_plus_option = self.command if six.PY3: if (shutil.which(command) is None): raise Exception('No command at {}'.format(command)) else: self.process_analyzer = pexpect.spawnu(command_plus_option) self.process_id = self.process_analyzer.pid else: doc_command_string = "echo '' | {}".format(command) command_check = os.system(doc_command_string) if (not (command_check == 0)): raise Exception('No command at {}'.format(command)) else: self.process_analyzer = pexpect.spawnu(command_plus_option) self.process_id = self.process_analyzer.pid
* What you can do - It starts process and keep it.
JapaneseTokenizer/common/sever_handler.py
launch_process
fumankaitori/JapaneseTokenizers
134
python
def launch_process(self, command): '* What you can do\n - It starts process and keep it.\n ' if (not (self.option is None)): command_plus_option = ((self.command + ' ') + self.option) else: command_plus_option = self.command if six.PY3: if (shutil.which(command) is None): raise Exception('No command at {}'.format(command)) else: self.process_analyzer = pexpect.spawnu(command_plus_option) self.process_id = self.process_analyzer.pid else: doc_command_string = "echo | {}".format(command) command_check = os.system(doc_command_string) if (not (command_check == 0)): raise Exception('No command at {}'.format(command)) else: self.process_analyzer = pexpect.spawnu(command_plus_option) self.process_id = self.process_analyzer.pid
def launch_process(self, command): '* What you can do\n - It starts process and keep it.\n ' if (not (self.option is None)): command_plus_option = ((self.command + ' ') + self.option) else: command_plus_option = self.command if six.PY3: if (shutil.which(command) is None): raise Exception('No command at {}'.format(command)) else: self.process_analyzer = pexpect.spawnu(command_plus_option) self.process_id = self.process_analyzer.pid else: doc_command_string = "echo | {}".format(command) command_check = os.system(doc_command_string) if (not (command_check == 0)): raise Exception('No command at {}'.format(command)) else: self.process_analyzer = pexpect.spawnu(command_plus_option) self.process_id = self.process_analyzer.pid<|docstring|>* What you can do - It starts process and keep it.<|endoftext|>
b8beef1ec02da132e2e6aac8c496ac1a3407e7ed279d2ef08fc35d57760fefce
def stop_process(self): "* What you can do\n - You're able to stop the process which this instance has now.\n " if hasattr(self, 'process_analyzer'): self.process_analyzer.kill(sig=9) else: pass return True
* What you can do - You're able to stop the process which this instance has now.
JapaneseTokenizer/common/sever_handler.py
stop_process
fumankaitori/JapaneseTokenizers
134
python
def stop_process(self): "* What you can do\n - You're able to stop the process which this instance has now.\n " if hasattr(self, 'process_analyzer'): self.process_analyzer.kill(sig=9) else: pass return True
def stop_process(self): "* What you can do\n - You're able to stop the process which this instance has now.\n " if hasattr(self, 'process_analyzer'): self.process_analyzer.kill(sig=9) else: pass return True<|docstring|>* What you can do - You're able to stop the process which this instance has now.<|endoftext|>
20d0968cad3dc9989bc9563899eed7a638f809594feb14103080f0559a0550a9
def __query(self, input_string): '* What you can do\n - It takes the result of Juman++\n - This function monitors time which takes for getting the result.\n ' signal.signal(signal.SIGALRM, self.__notify_handler) signal.alarm(self.timeout_second) self.process_analyzer.sendline(input_string) buffer = '' while True: line_string = self.process_analyzer.readline() if (line_string.strip() == input_string): 'Skip if process returns the same input string' continue elif (line_string.strip() == self.pattern): buffer += line_string signal.alarm(0) return buffer else: buffer += line_string
* What you can do - It takes the result of Juman++ - This function monitors time which takes for getting the result.
JapaneseTokenizer/common/sever_handler.py
__query
fumankaitori/JapaneseTokenizers
134
python
def __query(self, input_string): '* What you can do\n - It takes the result of Juman++\n - This function monitors time which takes for getting the result.\n ' signal.signal(signal.SIGALRM, self.__notify_handler) signal.alarm(self.timeout_second) self.process_analyzer.sendline(input_string) buffer = while True: line_string = self.process_analyzer.readline() if (line_string.strip() == input_string): 'Skip if process returns the same input string' continue elif (line_string.strip() == self.pattern): buffer += line_string signal.alarm(0) return buffer else: buffer += line_string
def __query(self, input_string): '* What you can do\n - It takes the result of Juman++\n - This function monitors time which takes for getting the result.\n ' signal.signal(signal.SIGALRM, self.__notify_handler) signal.alarm(self.timeout_second) self.process_analyzer.sendline(input_string) buffer = while True: line_string = self.process_analyzer.readline() if (line_string.strip() == input_string): 'Skip if process returns the same input string' continue elif (line_string.strip() == self.pattern): buffer += line_string signal.alarm(0) return buffer else: buffer += line_string<|docstring|>* What you can do - It takes the result of Juman++ - This function monitors time which takes for getting the result.<|endoftext|>
8bea8e77fc144aa140d2e1f5e79ded0ec2cacd6aaa738e8eb0d7bbc4dd056220
def __init__(self, ax, s='Text', x=0, y=0, strings=None, text_id=None, **text_kwargs): '\n\t\t:param ax:\n\t\t:param s:\n\t\t:param x:\n\t\t:param y:\n\t\t:param strings: list of strings, used for animating text\n\t\t:param text_kwargs:\n\t\t' super().__init__(x=x, y=y, **text_kwargs) self.id = text_id self.strings = strings self.c = 0 ax.add_artist(self)
:param ax: :param s: :param x: :param y: :param strings: list of strings, used for animating text :param text_kwargs:
viseng/annotations.py
__init__
OllieBoyne/vis-eng
0
python
def __init__(self, ax, s='Text', x=0, y=0, strings=None, text_id=None, **text_kwargs): '\n\t\t:param ax:\n\t\t:param s:\n\t\t:param x:\n\t\t:param y:\n\t\t:param strings: list of strings, used for animating text\n\t\t:param text_kwargs:\n\t\t' super().__init__(x=x, y=y, **text_kwargs) self.id = text_id self.strings = strings self.c = 0 ax.add_artist(self)
def __init__(self, ax, s='Text', x=0, y=0, strings=None, text_id=None, **text_kwargs): '\n\t\t:param ax:\n\t\t:param s:\n\t\t:param x:\n\t\t:param y:\n\t\t:param strings: list of strings, used for animating text\n\t\t:param text_kwargs:\n\t\t' super().__init__(x=x, y=y, **text_kwargs) self.id = text_id self.strings = strings self.c = 0 ax.add_artist(self)<|docstring|>:param ax: :param s: :param x: :param y: :param strings: list of strings, used for animating text :param text_kwargs:<|endoftext|>
a23d4a7a0df80d500faca29c0395903c843e81c81e0fb7b86f79b0de58fccd7a
def __len__(self): 'Length if animated, in number of provided strings' return len(self.strings)
Length if animated, in number of provided strings
viseng/annotations.py
__len__
OllieBoyne/vis-eng
0
python
def __len__(self): return len(self.strings)
def __len__(self): return len(self.strings)<|docstring|>Length if animated, in number of provided strings<|endoftext|>
6100d50c0962d6981b5304b602a9fc5fc16877aeb41c58d2eef4514d42e94fd6
def __init__(self): 'construction ' mediaBase.MediaBase.__init__(self) self.pre = (self.prefix + 'querypipe') self.pipeline_name = self.pre self.client = media_client.MediaClient(media_config.config)
construction
test/media/qa_test/test_query_pipeline.py
__init__
yunfan/bce-sdk-python
22
python
def __init__(self): ' ' mediaBase.MediaBase.__init__(self) self.pre = (self.prefix + 'querypipe') self.pipeline_name = self.pre self.client = media_client.MediaClient(media_config.config)
def __init__(self): ' ' mediaBase.MediaBase.__init__(self) self.pre = (self.prefix + 'querypipe') self.pipeline_name = self.pre self.client = media_client.MediaClient(media_config.config)<|docstring|>construction<|endoftext|>
4afca31964e9129fe380fc601c0a13a27736f3e8a7847cf9c280fba4d7b688b4
def setUp(self): 'create env' ret = self.client.create_pipeline(self.pipeline_name, self.sourceBucket, self.targetBucket) nose.tools.assert_is_not_none(ret)
create env
test/media/qa_test/test_query_pipeline.py
setUp
yunfan/bce-sdk-python
22
python
def setUp(self): ret = self.client.create_pipeline(self.pipeline_name, self.sourceBucket, self.targetBucket) nose.tools.assert_is_not_none(ret)
def setUp(self): ret = self.client.create_pipeline(self.pipeline_name, self.sourceBucket, self.targetBucket) nose.tools.assert_is_not_none(ret)<|docstring|>create env<|endoftext|>
cb26f65e96eb90f829b468e04f34f8081e4a0ae11a4cc89bbd037d8c5adab65f
def tearDown(self): 'clear env' result = self.client.list_pipelines() for each_val in result.pipelines: pipeline_name = each_val.pipeline_name if pipeline_name.startswith(self.pre): resp = self.client.delete_pipeline(pipeline_name) nose.tools.assert_is_not_none(resp)
clear env
test/media/qa_test/test_query_pipeline.py
tearDown
yunfan/bce-sdk-python
22
python
def tearDown(self): result = self.client.list_pipelines() for each_val in result.pipelines: pipeline_name = each_val.pipeline_name if pipeline_name.startswith(self.pre): resp = self.client.delete_pipeline(pipeline_name) nose.tools.assert_is_not_none(resp)
def tearDown(self): result = self.client.list_pipelines() for each_val in result.pipelines: pipeline_name = each_val.pipeline_name if pipeline_name.startswith(self.pre): resp = self.client.delete_pipeline(pipeline_name) nose.tools.assert_is_not_none(resp)<|docstring|>clear env<|endoftext|>
50839e72d3eab8a82b03ab5d0074dad4f1ffdc3fb9471b6094a4972dbcab823d
def test_query_pipeline_exsit(self): 'query exsit pipeline' resp = self.client.get_pipeline(self.pipeline_name) nose.tools.assert_is_not_none(resp) nose.tools.assert_equal(resp.state, 'ACTIVE') nose.tools.assert_equal(resp.pipeline_name, self.pipeline_name)
query exsit pipeline
test/media/qa_test/test_query_pipeline.py
test_query_pipeline_exsit
yunfan/bce-sdk-python
22
python
def test_query_pipeline_exsit(self): resp = self.client.get_pipeline(self.pipeline_name) nose.tools.assert_is_not_none(resp) nose.tools.assert_equal(resp.state, 'ACTIVE') nose.tools.assert_equal(resp.pipeline_name, self.pipeline_name)
def test_query_pipeline_exsit(self): resp = self.client.get_pipeline(self.pipeline_name) nose.tools.assert_is_not_none(resp) nose.tools.assert_equal(resp.state, 'ACTIVE') nose.tools.assert_equal(resp.pipeline_name, self.pipeline_name)<|docstring|>query exsit pipeline<|endoftext|>
84aaa5cf5124e7438bbcf6d09bd09bdbd7998dee7057be3658d8cb1ef8e3e35b
def test_query_pipeline_is_deleted(self): 'query deleted pipeline' resp = self.client.delete_pipeline(self.pipeline_name) nose.tools.assert_is_not_none(resp) resp = self.client.get_pipeline(self.pipeline_name) nose.tools.assert_equal(resp.state, 'INACTIVE')
query deleted pipeline
test/media/qa_test/test_query_pipeline.py
test_query_pipeline_is_deleted
yunfan/bce-sdk-python
22
python
def test_query_pipeline_is_deleted(self): resp = self.client.delete_pipeline(self.pipeline_name) nose.tools.assert_is_not_none(resp) resp = self.client.get_pipeline(self.pipeline_name) nose.tools.assert_equal(resp.state, 'INACTIVE')
def test_query_pipeline_is_deleted(self): resp = self.client.delete_pipeline(self.pipeline_name) nose.tools.assert_is_not_none(resp) resp = self.client.get_pipeline(self.pipeline_name) nose.tools.assert_equal(resp.state, 'INACTIVE')<|docstring|>query deleted pipeline<|endoftext|>
c88ff1a1d4d89d399cc1a877d0df2e9b94477cfe3f6e1f553f46d27a50d16773
def test_query_pipeline_is_name_empty(self): 'pipeline name is empty' with nose.tools.assert_raises_regexp(BceClientError, "pipeline_name can't be empty string"): resp = self.client.get_pipeline('')
pipeline name is empty
test/media/qa_test/test_query_pipeline.py
test_query_pipeline_is_name_empty
yunfan/bce-sdk-python
22
python
def test_query_pipeline_is_name_empty(self): with nose.tools.assert_raises_regexp(BceClientError, "pipeline_name can't be empty string"): resp = self.client.get_pipeline()
def test_query_pipeline_is_name_empty(self): with nose.tools.assert_raises_regexp(BceClientError, "pipeline_name can't be empty string"): resp = self.client.get_pipeline()<|docstring|>pipeline name is empty<|endoftext|>
5242f74bff3d20d8a6bca609353a3d3bedf56bb6f2d1d0f588414250a547d6ae
def test_query_pipeline_is_name_none(self): 'pipeline name is none' with nose.tools.assert_raises_regexp(ValueError, 'arg "pipeline_name" should not be None'): self.client.get_pipeline(None)
pipeline name is none
test/media/qa_test/test_query_pipeline.py
test_query_pipeline_is_name_none
yunfan/bce-sdk-python
22
python
def test_query_pipeline_is_name_none(self): with nose.tools.assert_raises_regexp(ValueError, 'arg "pipeline_name" should not be None'): self.client.get_pipeline(None)
def test_query_pipeline_is_name_none(self): with nose.tools.assert_raises_regexp(ValueError, 'arg "pipeline_name" should not be None'): self.client.get_pipeline(None)<|docstring|>pipeline name is none<|endoftext|>
4935a36ae65882d45fc4218eec87f2b71818e609b5de9809be8e271479c4e46d
def test_query_pipeline_not_exist(self): 'pipeline name is not exist' pipeline_name = 'not_exist_pipeline' try: self.client.get_pipeline(pipeline_name) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('The requested pipeline does not exist') else: assert (True == False)
pipeline name is not exist
test/media/qa_test/test_query_pipeline.py
test_query_pipeline_not_exist
yunfan/bce-sdk-python
22
python
def test_query_pipeline_not_exist(self): pipeline_name = 'not_exist_pipeline' try: self.client.get_pipeline(pipeline_name) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('The requested pipeline does not exist') else: assert (True == False)
def test_query_pipeline_not_exist(self): pipeline_name = 'not_exist_pipeline' try: self.client.get_pipeline(pipeline_name) except BceHttpClientError as e: if isinstance(e.last_error, BceServerError): assert e.last_error.message.startswith('The requested pipeline does not exist') else: assert (True == False)<|docstring|>pipeline name is not exist<|endoftext|>
5a03ac2979fb029c6c30b869cbd9a3a9e0789ae126d1c6c71e39aad4a749f497
def drink_search(searchterm): '\n Returns the results from a search on the LCBO website\n :param searchterm: (string) Search term/key words\n :return: A results object containing a list of the search results.\n ' searchterm = re.sub(' ', '%20', searchterm) url = 'https://www.lcbo.com/webapp/wcs/stores/servlet/SearchDisplay?storeId=10203&searchTerm={}'.format(searchterm) try: res = {'result': []} req = Request(url, headers={'User-Agent': 'Mozilla/5.0'}) webpage = urlopen(req).read() soup = BeautifulSoup(webpage, 'lxml') product_ref = soup.find('div', class_='product_listing_container') product_names = product_ref.findAll('div', class_='product_name') product_prices = product_ref.findAll('div', class_='product_price') for product in product_names: try: product_name = product.find('a').contents[0] product_link = product.find('a').attrs.get('href', None) except: print('drink data not available') continue data = {'name': product_name, 'link': product_link} res['result'].append(data) for i in range(len(product_prices)): try: price = product_prices[i].find('span').contents[0] except Exception as e: print('price data not available') print(e) continue res['result'][i]['price'] = price.strip('\t\n') except Exception as e: print('failed to get page') print(e) res = None return res
Returns the results from a search on the LCBO website :param searchterm: (string) Search term/key words :return: A results object containing a list of the search results.
drinkSearch.py
drink_search
raunakb007/LCBgo
0
python
def drink_search(searchterm): '\n Returns the results from a search on the LCBO website\n :param searchterm: (string) Search term/key words\n :return: A results object containing a list of the search results.\n ' searchterm = re.sub(' ', '%20', searchterm) url = 'https://www.lcbo.com/webapp/wcs/stores/servlet/SearchDisplay?storeId=10203&searchTerm={}'.format(searchterm) try: res = {'result': []} req = Request(url, headers={'User-Agent': 'Mozilla/5.0'}) webpage = urlopen(req).read() soup = BeautifulSoup(webpage, 'lxml') product_ref = soup.find('div', class_='product_listing_container') product_names = product_ref.findAll('div', class_='product_name') product_prices = product_ref.findAll('div', class_='product_price') for product in product_names: try: product_name = product.find('a').contents[0] product_link = product.find('a').attrs.get('href', None) except: print('drink data not available') continue data = {'name': product_name, 'link': product_link} res['result'].append(data) for i in range(len(product_prices)): try: price = product_prices[i].find('span').contents[0] except Exception as e: print('price data not available') print(e) continue res['result'][i]['price'] = price.strip('\t\n') except Exception as e: print('failed to get page') print(e) res = None return res
def drink_search(searchterm): '\n Returns the results from a search on the LCBO website\n :param searchterm: (string) Search term/key words\n :return: A results object containing a list of the search results.\n ' searchterm = re.sub(' ', '%20', searchterm) url = 'https://www.lcbo.com/webapp/wcs/stores/servlet/SearchDisplay?storeId=10203&searchTerm={}'.format(searchterm) try: res = {'result': []} req = Request(url, headers={'User-Agent': 'Mozilla/5.0'}) webpage = urlopen(req).read() soup = BeautifulSoup(webpage, 'lxml') product_ref = soup.find('div', class_='product_listing_container') product_names = product_ref.findAll('div', class_='product_name') product_prices = product_ref.findAll('div', class_='product_price') for product in product_names: try: product_name = product.find('a').contents[0] product_link = product.find('a').attrs.get('href', None) except: print('drink data not available') continue data = {'name': product_name, 'link': product_link} res['result'].append(data) for i in range(len(product_prices)): try: price = product_prices[i].find('span').contents[0] except Exception as e: print('price data not available') print(e) continue res['result'][i]['price'] = price.strip('\t\n') except Exception as e: print('failed to get page') print(e) res = None return res<|docstring|>Returns the results from a search on the LCBO website :param searchterm: (string) Search term/key words :return: A results object containing a list of the search results.<|endoftext|>
e6b2c582e0b2d5498c15766fcf1f7bb3e801230862959da988d65ae00d66fc1e
def quick_inspect_amplitudes(N: np.array, E: np.array, C: np.array, s: int=1, logx: bool=False, logy: bool=False, loglog: bool=False, cmap: str='viridis', **kwargs) -> None: "A utility function to quickly visualise the distribution\n of the WA p-p amplitudes on the N and E components. \n\n Args:\n N (np.array): The p-p WA amplitudes in mm measured on the North component.\n E (np.array): The p-p WA amplitudes in mm measured on the North component.\n C (np.array): [description]\n s (int, optional): [description]. Defaults to 1.\n logx (bool, optional): [description]. Defaults to False.\n logy (bool, optional): [description]. Defaults to False.\n loglog (bool, optional): [description]. Defaults to False.\n cmap (str, optional): [description]. Defaults to 'viridis'.\n " s_scale = s diff = (N - E) s = np.std(diff) weights = (np.ones_like(diff) / float(len(diff))) (fig, axes) = plt.subplots(1, 2, figsize=((7 * 2.5), 6)) h = axes[0].scatter(N, E, c=C, cmap=cmap, **kwargs) oneone = [(np.min([N.min(), E.min()]) * 1.1), (np.max([N.max(), E.max()]) * 1.1)] axes[0].plot(oneone, oneone, 'k--', label='1:1') axes[0].plot(oneone, (oneone + (s * s_scale)), 'r--', label=f'+/- {s_scale}s: {(s_scale * s):.2f}') axes[0].plot(oneone, (oneone - (s * s_scale)), 'r--') xlabel = 'log10 p-p E-W [mm]' ylabel = 'log10 p-p N-S [mm]' axes[0].set_xlabel(xlabel) axes[0].set_ylabel(ylabel) axes[0].legend() fig.colorbar(h, ax=axes[0]) out = axes[1].hist(diff, bins=30, weights=weights, edgecolor='black', color='blue') axes[1].vlines((s * s_scale), 0, out[0].max(), color='r', linestyles='dashed', label=f'+/- {s_scale}s: {(s_scale * s):.2f}') axes[1].vlines(((- s) * s_scale), 0, out[0].max(), color='r', linestyles='dashed') axes[1].legend() axes[1].set_xlabel('log10 p-p differences')
A utility function to quickly visualise the distribution of the WA p-p amplitudes on the N and E components. Args: N (np.array): The p-p WA amplitudes in mm measured on the North component. E (np.array): The p-p WA amplitudes in mm measured on the North component. C (np.array): [description] s (int, optional): [description]. Defaults to 1. logx (bool, optional): [description]. Defaults to False. logy (bool, optional): [description]. Defaults to False. loglog (bool, optional): [description]. Defaults to False. cmap (str, optional): [description]. Defaults to 'viridis'.
catops/catops/plotting.py
quick_inspect_amplitudes
uofuseismo/YPMLRecalibration
0
python
def quick_inspect_amplitudes(N: np.array, E: np.array, C: np.array, s: int=1, logx: bool=False, logy: bool=False, loglog: bool=False, cmap: str='viridis', **kwargs) -> None: "A utility function to quickly visualise the distribution\n of the WA p-p amplitudes on the N and E components. \n\n Args:\n N (np.array): The p-p WA amplitudes in mm measured on the North component.\n E (np.array): The p-p WA amplitudes in mm measured on the North component.\n C (np.array): [description]\n s (int, optional): [description]. Defaults to 1.\n logx (bool, optional): [description]. Defaults to False.\n logy (bool, optional): [description]. Defaults to False.\n loglog (bool, optional): [description]. Defaults to False.\n cmap (str, optional): [description]. Defaults to 'viridis'.\n " s_scale = s diff = (N - E) s = np.std(diff) weights = (np.ones_like(diff) / float(len(diff))) (fig, axes) = plt.subplots(1, 2, figsize=((7 * 2.5), 6)) h = axes[0].scatter(N, E, c=C, cmap=cmap, **kwargs) oneone = [(np.min([N.min(), E.min()]) * 1.1), (np.max([N.max(), E.max()]) * 1.1)] axes[0].plot(oneone, oneone, 'k--', label='1:1') axes[0].plot(oneone, (oneone + (s * s_scale)), 'r--', label=f'+/- {s_scale}s: {(s_scale * s):.2f}') axes[0].plot(oneone, (oneone - (s * s_scale)), 'r--') xlabel = 'log10 p-p E-W [mm]' ylabel = 'log10 p-p N-S [mm]' axes[0].set_xlabel(xlabel) axes[0].set_ylabel(ylabel) axes[0].legend() fig.colorbar(h, ax=axes[0]) out = axes[1].hist(diff, bins=30, weights=weights, edgecolor='black', color='blue') axes[1].vlines((s * s_scale), 0, out[0].max(), color='r', linestyles='dashed', label=f'+/- {s_scale}s: {(s_scale * s):.2f}') axes[1].vlines(((- s) * s_scale), 0, out[0].max(), color='r', linestyles='dashed') axes[1].legend() axes[1].set_xlabel('log10 p-p differences')
def quick_inspect_amplitudes(N: np.array, E: np.array, C: np.array, s: int=1, logx: bool=False, logy: bool=False, loglog: bool=False, cmap: str='viridis', **kwargs) -> None: "A utility function to quickly visualise the distribution\n of the WA p-p amplitudes on the N and E components. \n\n Args:\n N (np.array): The p-p WA amplitudes in mm measured on the North component.\n E (np.array): The p-p WA amplitudes in mm measured on the North component.\n C (np.array): [description]\n s (int, optional): [description]. Defaults to 1.\n logx (bool, optional): [description]. Defaults to False.\n logy (bool, optional): [description]. Defaults to False.\n loglog (bool, optional): [description]. Defaults to False.\n cmap (str, optional): [description]. Defaults to 'viridis'.\n " s_scale = s diff = (N - E) s = np.std(diff) weights = (np.ones_like(diff) / float(len(diff))) (fig, axes) = plt.subplots(1, 2, figsize=((7 * 2.5), 6)) h = axes[0].scatter(N, E, c=C, cmap=cmap, **kwargs) oneone = [(np.min([N.min(), E.min()]) * 1.1), (np.max([N.max(), E.max()]) * 1.1)] axes[0].plot(oneone, oneone, 'k--', label='1:1') axes[0].plot(oneone, (oneone + (s * s_scale)), 'r--', label=f'+/- {s_scale}s: {(s_scale * s):.2f}') axes[0].plot(oneone, (oneone - (s * s_scale)), 'r--') xlabel = 'log10 p-p E-W [mm]' ylabel = 'log10 p-p N-S [mm]' axes[0].set_xlabel(xlabel) axes[0].set_ylabel(ylabel) axes[0].legend() fig.colorbar(h, ax=axes[0]) out = axes[1].hist(diff, bins=30, weights=weights, edgecolor='black', color='blue') axes[1].vlines((s * s_scale), 0, out[0].max(), color='r', linestyles='dashed', label=f'+/- {s_scale}s: {(s_scale * s):.2f}') axes[1].vlines(((- s) * s_scale), 0, out[0].max(), color='r', linestyles='dashed') axes[1].legend() axes[1].set_xlabel('log10 p-p differences')<|docstring|>A utility function to quickly visualise the distribution of the WA p-p amplitudes on the N and E components. Args: N (np.array): The p-p WA amplitudes in mm measured on the North component. 
E (np.array): The p-p WA amplitudes in mm measured on the North component. C (np.array): [description] s (int, optional): [description]. Defaults to 1. logx (bool, optional): [description]. Defaults to False. logy (bool, optional): [description]. Defaults to False. loglog (bool, optional): [description]. Defaults to False. cmap (str, optional): [description]. Defaults to 'viridis'.<|endoftext|>
c7a9a7dc8bddbbe8775fa2c143821e77eebd3b6ee30d4327155fcd54e2ccdf78
def magnitude_distance_plot(M: np.array, Dist: np.array, Dep: np.array, A: np.array) -> None: 'Plot the event magnitude (M) versus distance relationship,\n with the relevent side distributions. Bonus, also plots\n the depth distribution of causitive events, which is also of \n interest.\n\n Args:\n M (np.array): The event magnitudes.\n Dist (np.array): The source reciever distance (Rhyp or Repi, km).\n Dep (np.array): The focal depths of the events (km).\n A (np.array): The half p-p WA horizontal amplitude (mm).\n ' hkwargs = dict(bottom=0.0, color='.8', edgecolor='k', rwidth=0.8, weights=(np.zeros_like(Dist) + (1.0 / len(Dist)))) hdepkwargs = dict(bottom=0.0, color='.8', edgecolor='k', rwidth=0.8, weights=(np.zeros_like(Dep) + (1.0 / len(Dep)))) fac = 2.6 fig = plt.figure(constrained_layout=False, figsize=((7 * fac), (3 * fac))) gs1 = fig.add_gridspec(nrows=3, ncols=3, left=0.05, right=0.48, wspace=0.2, hspace=0.05) ax1 = fig.add_subplot(gs1[(:1, 0:(- 1))]) ax2 = fig.add_subplot(gs1[(1:, :(- 1))]) ax3 = fig.add_subplot(gs1[(1:, (- 1):)]) ax4 = fig.add_subplot(gs1[(0, (- 1))]) ax1.xaxis.set_visible(False) ax1.set_ylabel('Frac. in bin') ax1.set_xlim(np.log10([1, 200])) ax2.set_xscale('log') ax2.set_ylabel('Cat. Mag.') ax2.set_xlabel('$R_{hyp}$ [km]') ax2.xaxis.set_major_formatter(ticker.FuncFormatter((lambda y, pos: '{{:.{:1d}f}}'.format(int(np.maximum((- np.log10(y)), 0))).format(y)))) ax2.set_xlim([1, 200]) ax3.yaxis.set_visible(False) ax3.set_ylabel('Cat. Mag.') ax3.set_xlabel('Frac. 
in bin') ax3.yaxis.set_label_position('right') ax3.yaxis.tick_right() ax4.set_xlabel('Depth [km]') ax4.xaxis.set_label_position('top') ax4.yaxis.tick_right() ax4.xaxis.tick_top() ax1.hist(np.log10(Dist), **hkwargs) ax3.hist(M, orientation='horizontal', **hkwargs) ax4.hist(Dep, **hdepkwargs) sout = ax2.scatter(Dist, M, c=A, lw=1, cmap='viridis', s=10) cbaxes = inset_axes(ax2, width='35%', height='3%', loc=2) cbar = fig.colorbar(sout, cax=cbaxes, orientation='horizontal') cbar.set_label('$\\mathrm{log(A [mm]}$)', rotation=0, fontsize=14, horizontalalignment='center')
Plot the event magnitude (M) versus distance relationship, with the relevent side distributions. Bonus, also plots the depth distribution of causitive events, which is also of interest. Args: M (np.array): The event magnitudes. Dist (np.array): The source reciever distance (Rhyp or Repi, km). Dep (np.array): The focal depths of the events (km). A (np.array): The half p-p WA horizontal amplitude (mm).
catops/catops/plotting.py
magnitude_distance_plot
uofuseismo/YPMLRecalibration
0
python
def magnitude_distance_plot(M: np.array, Dist: np.array, Dep: np.array, A: np.array) -> None: 'Plot the event magnitude (M) versus distance relationship,\n with the relevent side distributions. Bonus, also plots\n the depth distribution of causitive events, which is also of \n interest.\n\n Args:\n M (np.array): The event magnitudes.\n Dist (np.array): The source reciever distance (Rhyp or Repi, km).\n Dep (np.array): The focal depths of the events (km).\n A (np.array): The half p-p WA horizontal amplitude (mm).\n ' hkwargs = dict(bottom=0.0, color='.8', edgecolor='k', rwidth=0.8, weights=(np.zeros_like(Dist) + (1.0 / len(Dist)))) hdepkwargs = dict(bottom=0.0, color='.8', edgecolor='k', rwidth=0.8, weights=(np.zeros_like(Dep) + (1.0 / len(Dep)))) fac = 2.6 fig = plt.figure(constrained_layout=False, figsize=((7 * fac), (3 * fac))) gs1 = fig.add_gridspec(nrows=3, ncols=3, left=0.05, right=0.48, wspace=0.2, hspace=0.05) ax1 = fig.add_subplot(gs1[(:1, 0:(- 1))]) ax2 = fig.add_subplot(gs1[(1:, :(- 1))]) ax3 = fig.add_subplot(gs1[(1:, (- 1):)]) ax4 = fig.add_subplot(gs1[(0, (- 1))]) ax1.xaxis.set_visible(False) ax1.set_ylabel('Frac. in bin') ax1.set_xlim(np.log10([1, 200])) ax2.set_xscale('log') ax2.set_ylabel('Cat. Mag.') ax2.set_xlabel('$R_{hyp}$ [km]') ax2.xaxis.set_major_formatter(ticker.FuncFormatter((lambda y, pos: '{{:.{:1d}f}}'.format(int(np.maximum((- np.log10(y)), 0))).format(y)))) ax2.set_xlim([1, 200]) ax3.yaxis.set_visible(False) ax3.set_ylabel('Cat. Mag.') ax3.set_xlabel('Frac. 
in bin') ax3.yaxis.set_label_position('right') ax3.yaxis.tick_right() ax4.set_xlabel('Depth [km]') ax4.xaxis.set_label_position('top') ax4.yaxis.tick_right() ax4.xaxis.tick_top() ax1.hist(np.log10(Dist), **hkwargs) ax3.hist(M, orientation='horizontal', **hkwargs) ax4.hist(Dep, **hdepkwargs) sout = ax2.scatter(Dist, M, c=A, lw=1, cmap='viridis', s=10) cbaxes = inset_axes(ax2, width='35%', height='3%', loc=2) cbar = fig.colorbar(sout, cax=cbaxes, orientation='horizontal') cbar.set_label('$\\mathrm{log(A [mm]}$)', rotation=0, fontsize=14, horizontalalignment='center')
def magnitude_distance_plot(M: np.array, Dist: np.array, Dep: np.array, A: np.array) -> None: 'Plot the event magnitude (M) versus distance relationship,\n with the relevent side distributions. Bonus, also plots\n the depth distribution of causitive events, which is also of \n interest.\n\n Args:\n M (np.array): The event magnitudes.\n Dist (np.array): The source reciever distance (Rhyp or Repi, km).\n Dep (np.array): The focal depths of the events (km).\n A (np.array): The half p-p WA horizontal amplitude (mm).\n ' hkwargs = dict(bottom=0.0, color='.8', edgecolor='k', rwidth=0.8, weights=(np.zeros_like(Dist) + (1.0 / len(Dist)))) hdepkwargs = dict(bottom=0.0, color='.8', edgecolor='k', rwidth=0.8, weights=(np.zeros_like(Dep) + (1.0 / len(Dep)))) fac = 2.6 fig = plt.figure(constrained_layout=False, figsize=((7 * fac), (3 * fac))) gs1 = fig.add_gridspec(nrows=3, ncols=3, left=0.05, right=0.48, wspace=0.2, hspace=0.05) ax1 = fig.add_subplot(gs1[(:1, 0:(- 1))]) ax2 = fig.add_subplot(gs1[(1:, :(- 1))]) ax3 = fig.add_subplot(gs1[(1:, (- 1):)]) ax4 = fig.add_subplot(gs1[(0, (- 1))]) ax1.xaxis.set_visible(False) ax1.set_ylabel('Frac. in bin') ax1.set_xlim(np.log10([1, 200])) ax2.set_xscale('log') ax2.set_ylabel('Cat. Mag.') ax2.set_xlabel('$R_{hyp}$ [km]') ax2.xaxis.set_major_formatter(ticker.FuncFormatter((lambda y, pos: '{{:.{:1d}f}}'.format(int(np.maximum((- np.log10(y)), 0))).format(y)))) ax2.set_xlim([1, 200]) ax3.yaxis.set_visible(False) ax3.set_ylabel('Cat. Mag.') ax3.set_xlabel('Frac. 
in bin') ax3.yaxis.set_label_position('right') ax3.yaxis.tick_right() ax4.set_xlabel('Depth [km]') ax4.xaxis.set_label_position('top') ax4.yaxis.tick_right() ax4.xaxis.tick_top() ax1.hist(np.log10(Dist), **hkwargs) ax3.hist(M, orientation='horizontal', **hkwargs) ax4.hist(Dep, **hdepkwargs) sout = ax2.scatter(Dist, M, c=A, lw=1, cmap='viridis', s=10) cbaxes = inset_axes(ax2, width='35%', height='3%', loc=2) cbar = fig.colorbar(sout, cax=cbaxes, orientation='horizontal') cbar.set_label('$\\mathrm{log(A [mm]}$)', rotation=0, fontsize=14, horizontalalignment='center')<|docstring|>Plot the event magnitude (M) versus distance relationship, with the relevent side distributions. Bonus, also plots the depth distribution of causitive events, which is also of interest. Args: M (np.array): The event magnitudes. Dist (np.array): The source reciever distance (Rhyp or Repi, km). Dep (np.array): The focal depths of the events (km). A (np.array): The half p-p WA horizontal amplitude (mm).<|endoftext|>
f27c142b4b5bba698a6dd0579594d3ee1527df044629d89fcdf02d144f76260d
def spatial_distribution_plot(Lon: np.array, Lat: np.array, Dep: np.array) -> None: '[summary]\n\n Args:\n Lon (np.array): [description]\n Lat (np.array): [description]\n Dep (np.array): [description]\n ' hkwargs = dict(bottom=0.0, color='.8', edgecolor='k', rwidth=0.8, weights=(np.zeros_like(Lon) + (1.0 / len(Lon)))) fac = 2.75 fig = plt.figure(constrained_layout=False, figsize=((7 * fac), (3 * fac))) gs1 = fig.add_gridspec(nrows=3, ncols=3, left=0.05, right=0.48, wspace=0.15, hspace=0.15) ax1 = fig.add_subplot(gs1[(:1, 0:(- 1))]) ax2 = fig.add_subplot(gs1[(1:, :(- 1))]) ax3 = fig.add_subplot(gs1[(1:, (- 1):)]) ax4 = fig.add_subplot(gs1[(0, (- 1))]) ax1.xaxis.set_visible(False) ax1.set_ylabel('Frac. in bin') ax2.set_ylabel('Latitude [deg]') ax2.set_xlabel('Longitude [deg]') ax3.yaxis.set_visible(False) ax3.set_ylabel('Cat. Mag.') ax3.set_xlabel('Frac. in bin') ax3.yaxis.set_label_position('right') ax3.yaxis.tick_right() ax4.set_xlabel('Depth [km]') ax4.xaxis.set_label_position('top') ax4.yaxis.tick_right() ax4.xaxis.tick_top() ax1.hist(Lon, **hkwargs) ax3.hist(Lat, orientation='horizontal', **hkwargs) ax4.hist(Dep, **hkwargs) sout = ax2.scatter(Lon, Lat, c=Dep, lw=1, cmap='Greys_r', s=10) ax2.xaxis.set_major_locator(plt.MultipleLocator(0.5)) ax2.yaxis.set_major_locator(plt.MultipleLocator(0.25)) cbaxes = inset_axes(ax2, width='2.5%', height='55%', loc='lower left') cbar = fig.colorbar(sout, cax=cbaxes) cbar.set_label('Depth [km]', rotation=90)
[summary] Args: Lon (np.array): [description] Lat (np.array): [description] Dep (np.array): [description]
catops/catops/plotting.py
spatial_distribution_plot
uofuseismo/YPMLRecalibration
0
python
def spatial_distribution_plot(Lon: np.array, Lat: np.array, Dep: np.array) -> None: '[summary]\n\n Args:\n Lon (np.array): [description]\n Lat (np.array): [description]\n Dep (np.array): [description]\n ' hkwargs = dict(bottom=0.0, color='.8', edgecolor='k', rwidth=0.8, weights=(np.zeros_like(Lon) + (1.0 / len(Lon)))) fac = 2.75 fig = plt.figure(constrained_layout=False, figsize=((7 * fac), (3 * fac))) gs1 = fig.add_gridspec(nrows=3, ncols=3, left=0.05, right=0.48, wspace=0.15, hspace=0.15) ax1 = fig.add_subplot(gs1[(:1, 0:(- 1))]) ax2 = fig.add_subplot(gs1[(1:, :(- 1))]) ax3 = fig.add_subplot(gs1[(1:, (- 1):)]) ax4 = fig.add_subplot(gs1[(0, (- 1))]) ax1.xaxis.set_visible(False) ax1.set_ylabel('Frac. in bin') ax2.set_ylabel('Latitude [deg]') ax2.set_xlabel('Longitude [deg]') ax3.yaxis.set_visible(False) ax3.set_ylabel('Cat. Mag.') ax3.set_xlabel('Frac. in bin') ax3.yaxis.set_label_position('right') ax3.yaxis.tick_right() ax4.set_xlabel('Depth [km]') ax4.xaxis.set_label_position('top') ax4.yaxis.tick_right() ax4.xaxis.tick_top() ax1.hist(Lon, **hkwargs) ax3.hist(Lat, orientation='horizontal', **hkwargs) ax4.hist(Dep, **hkwargs) sout = ax2.scatter(Lon, Lat, c=Dep, lw=1, cmap='Greys_r', s=10) ax2.xaxis.set_major_locator(plt.MultipleLocator(0.5)) ax2.yaxis.set_major_locator(plt.MultipleLocator(0.25)) cbaxes = inset_axes(ax2, width='2.5%', height='55%', loc='lower left') cbar = fig.colorbar(sout, cax=cbaxes) cbar.set_label('Depth [km]', rotation=90)
def spatial_distribution_plot(Lon: np.array, Lat: np.array, Dep: np.array) -> None: '[summary]\n\n Args:\n Lon (np.array): [description]\n Lat (np.array): [description]\n Dep (np.array): [description]\n ' hkwargs = dict(bottom=0.0, color='.8', edgecolor='k', rwidth=0.8, weights=(np.zeros_like(Lon) + (1.0 / len(Lon)))) fac = 2.75 fig = plt.figure(constrained_layout=False, figsize=((7 * fac), (3 * fac))) gs1 = fig.add_gridspec(nrows=3, ncols=3, left=0.05, right=0.48, wspace=0.15, hspace=0.15) ax1 = fig.add_subplot(gs1[(:1, 0:(- 1))]) ax2 = fig.add_subplot(gs1[(1:, :(- 1))]) ax3 = fig.add_subplot(gs1[(1:, (- 1):)]) ax4 = fig.add_subplot(gs1[(0, (- 1))]) ax1.xaxis.set_visible(False) ax1.set_ylabel('Frac. in bin') ax2.set_ylabel('Latitude [deg]') ax2.set_xlabel('Longitude [deg]') ax3.yaxis.set_visible(False) ax3.set_ylabel('Cat. Mag.') ax3.set_xlabel('Frac. in bin') ax3.yaxis.set_label_position('right') ax3.yaxis.tick_right() ax4.set_xlabel('Depth [km]') ax4.xaxis.set_label_position('top') ax4.yaxis.tick_right() ax4.xaxis.tick_top() ax1.hist(Lon, **hkwargs) ax3.hist(Lat, orientation='horizontal', **hkwargs) ax4.hist(Dep, **hkwargs) sout = ax2.scatter(Lon, Lat, c=Dep, lw=1, cmap='Greys_r', s=10) ax2.xaxis.set_major_locator(plt.MultipleLocator(0.5)) ax2.yaxis.set_major_locator(plt.MultipleLocator(0.25)) cbaxes = inset_axes(ax2, width='2.5%', height='55%', loc='lower left') cbar = fig.colorbar(sout, cax=cbaxes) cbar.set_label('Depth [km]', rotation=90)<|docstring|>[summary] Args: Lon (np.array): [description] Lat (np.array): [description] Dep (np.array): [description]<|endoftext|>
577ef67f9095c7790966721827725d53ad789315b8c3ce9bae1d116130c72ed7
def pytest_generate_tests(metafunc): '\n Parametrize tests over targets\n ' if ('target' in metafunc.fixturenames): targets = [('verilator', None)] if shutil.which('irun'): targets.append(('system-verilog', 'ncsim')) if shutil.which('vcs'): targets.append(('system-verilog', 'vcs')) if shutil.which('iverilog'): targets.append(('system-verilog', 'iverilog')) metafunc.parametrize('target,simulator', targets)
Parametrize tests over targets
tests/test_expressions.py
pytest_generate_tests
standanley/fault
0
python
def pytest_generate_tests(metafunc): '\n \n ' if ('target' in metafunc.fixturenames): targets = [('verilator', None)] if shutil.which('irun'): targets.append(('system-verilog', 'ncsim')) if shutil.which('vcs'): targets.append(('system-verilog', 'vcs')) if shutil.which('iverilog'): targets.append(('system-verilog', 'iverilog')) metafunc.parametrize('target,simulator', targets)
def pytest_generate_tests(metafunc): '\n \n ' if ('target' in metafunc.fixturenames): targets = [('verilator', None)] if shutil.which('irun'): targets.append(('system-verilog', 'ncsim')) if shutil.which('vcs'): targets.append(('system-verilog', 'vcs')) if shutil.which('iverilog'): targets.append(('system-verilog', 'iverilog')) metafunc.parametrize('target,simulator', targets)<|docstring|>Parametrize tests over targets<|endoftext|>
bec389bb93c0b9e87853efea167c7f8a75fb1cc5f193396025a10f89ac72ac79
@pytest.mark.parametrize('op', ['add', 'truediv', 'and_', 'xor', 'or_', 'lshift', 'rshift', 'mod', 'mul', 'rshift', 'sub', 'lt', 'le', 'eq', 'ne', 'gt', 'ge']) def test_binop_two_signals_setattr(target, simulator, op): '\n Test that we can and two output signals for an expect\n ' if (op == 'mod'): pytest.skip('urem missing from coreir verilog backend') BinaryOpCircuit = gen_binary_op_circuit(op) tester = fault.Tester(BinaryOpCircuit) for _ in range(5): (I0, I1) = gen_random_inputs(op) print(I0, I1) tester.eval() tester.circuit.O.expect(getattr(operator, op)(tester.circuit.I0_out, tester.circuit.I1_out)) tester.circuit.O.expect(getattr(operator, op)(tester.circuit.I0, tester.circuit.I1)) run_test(tester, target, simulator)
Test that we can and two output signals for an expect
tests/test_expressions.py
test_binop_two_signals_setattr
standanley/fault
0
python
@pytest.mark.parametrize('op', ['add', 'truediv', 'and_', 'xor', 'or_', 'lshift', 'rshift', 'mod', 'mul', 'rshift', 'sub', 'lt', 'le', 'eq', 'ne', 'gt', 'ge']) def test_binop_two_signals_setattr(target, simulator, op): '\n \n ' if (op == 'mod'): pytest.skip('urem missing from coreir verilog backend') BinaryOpCircuit = gen_binary_op_circuit(op) tester = fault.Tester(BinaryOpCircuit) for _ in range(5): (I0, I1) = gen_random_inputs(op) print(I0, I1) tester.eval() tester.circuit.O.expect(getattr(operator, op)(tester.circuit.I0_out, tester.circuit.I1_out)) tester.circuit.O.expect(getattr(operator, op)(tester.circuit.I0, tester.circuit.I1)) run_test(tester, target, simulator)
@pytest.mark.parametrize('op', ['add', 'truediv', 'and_', 'xor', 'or_', 'lshift', 'rshift', 'mod', 'mul', 'rshift', 'sub', 'lt', 'le', 'eq', 'ne', 'gt', 'ge']) def test_binop_two_signals_setattr(target, simulator, op): '\n \n ' if (op == 'mod'): pytest.skip('urem missing from coreir verilog backend') BinaryOpCircuit = gen_binary_op_circuit(op) tester = fault.Tester(BinaryOpCircuit) for _ in range(5): (I0, I1) = gen_random_inputs(op) print(I0, I1) tester.eval() tester.circuit.O.expect(getattr(operator, op)(tester.circuit.I0_out, tester.circuit.I1_out)) tester.circuit.O.expect(getattr(operator, op)(tester.circuit.I0, tester.circuit.I1)) run_test(tester, target, simulator)<|docstring|>Test that we can and two output signals for an expect<|endoftext|>