code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def _plot_completeness(ax, comw, start_time, end_time): ''' Adds completeness intervals to a plot ''' comw = np.array(comw) comp = np.column_stack([np.hstack([end_time, comw[:, 0], start_time]), np.hstack([comw[0, 1], comw[:, 1], comw[-1, 1]])]) ax.step(comp[:-1, 0], comp[1:, 1], linestyle='-', where="post", linewidth=3, color='brown')
Adds completeness intervals to a plot
Below is the the instruction that describes the task: ### Input: Adds completeness intervals to a plot ### Response: def _plot_completeness(ax, comw, start_time, end_time): ''' Adds completeness intervals to a plot ''' comw = np.array(comw) comp = np.column_stack([np.hstack([end_time, comw[:, 0], start_time]), np.hstack([comw[0, 1], comw[:, 1], comw[-1, 1]])]) ax.step(comp[:-1, 0], comp[1:, 1], linestyle='-', where="post", linewidth=3, color='brown')
def ensure_image_is_hex(input_path):
    """Return a path to a hex version of a firmware image.

    If the input file is already in hex format then input_path is returned
    and nothing is done.  If it is not in hex format then an SCons action
    is added to convert it to hex and the target output file path is
    returned.

    A cache is kept so that each file is only converted once.

    Args:
        input_path (str): A path to a firmware image.

    Returns:
        str: The path to a hex version of input_path, this may be equal to
            input_path if it is already in hex format.

    Raises:
        BuildError: If the file has no extension or an unknown extension.
    """
    input_path = str(input_path)
    image_name = os.path.basename(input_path)

    root, ext = os.path.splitext(image_name)
    if len(ext) == 0:
        raise BuildError("Unknown file format or missing file extension in ensure_image_is_hex",
                         file_name=input_path)

    file_format = ext[1:]
    if file_format == 'hex':
        # Fast path: no build environment or target lookup is needed when
        # the image is already hex (the original did that work up front for
        # every call, including error and no-op cases).
        return input_path

    if file_format == 'elf':
        # Only the elf -> hex conversion actually needs the build directory
        # and an SCons environment, so set them up here.
        family = utilities.get_family('module_settings.json')
        target = family.platform_independent_target()
        build_dir = target.build_dirs()['build']

        if platform.system() == 'Windows':
            env = Environment(tools=['mingw'], ENV=os.environ)
        else:
            env = Environment(tools=['default'], ENV=os.environ)

        new_file = os.path.join(build_dir, root + '.hex')
        # Convert each source file at most once per build invocation.
        if new_file not in CONVERTED_HEX_FILES:
            env.Command(new_file, input_path,
                        action=Action("arm-none-eabi-objcopy -O ihex $SOURCE $TARGET",
                                      "Creating intel hex file from: $SOURCE"))
            CONVERTED_HEX_FILES.add(new_file)

        return new_file

    raise BuildError("Unknown file format extension in ensure_image_is_hex",
                     file_name=input_path, extension=file_format)
def get_row_metadata(gctx_file_path, convert_neg_666=True):
    """ Opens .gctx file and returns only row metadata

    Input:
        Mandatory:
        - gctx_file_path (str): full path to gctx file you want to parse.

        Optional:
        - convert_neg_666 (bool): whether to convert -666 values to num

    Output:
        - row_meta (pandas DataFrame): a DataFrame of all row metadata values.
    """
    full_path = os.path.expanduser(gctx_file_path)
    # Open the file via the context-manager protocol so the HDF5 handle is
    # released even if parsing raises (the original leaked the handle on
    # error because close() was only reached on success).
    with h5py.File(full_path, "r") as gctx_file:
        row_dset = gctx_file[row_meta_group_node]
        row_meta = parse_metadata_df("row", row_dset, convert_neg_666)
    return row_meta
def disagg_prec_cascade(precip_daily, cascade_options, hourly=True, level=9,
                        shift=0, test=False):
    """Precipitation disaggregation with cascade model (Olsson, 1998)

    Parameters
    ----------
    precip_daily : pd.Series
        daily data
    hourly: Boolean (for an hourly resolution disaggregation)
        if False, then returns 5-min disaggregated precipitation
        (disaggregation level depending on the "level" variable)
    cascade_options : cascade object
        including statistical parameters for the cascade model
    shift : int
        shifts the precipitation data by shift steps (eg +7 for 7:00 to 6:00)
    test : bool
        test mode, returns time series of each cascade level
    """
    if len(precip_daily) < 2:
        raise ValueError('Input data must have at least two elements.')

    # set missing values to zero (restored to NaN at the end):
    precip_daily = precip_daily.copy()
    missing_days = precip_daily.index[precip_daily.isnull()]
    precip_daily[missing_days] = 0

    # Number of cascade (halving) levels to run.  For the hourly branch 5
    # halvings take 24 h boxes down to 0.75 h; the result is then split to
    # 0.25 h and re-aggregated to hours below.
    if hourly:
        si = 5  # index of first level
    else:
        si = level

    # statistics for branching into two bins
    wxxcum = np.zeros((7, 2, 4))
    if isinstance(cascade_options, melodist.cascade.CascadeStatistics):
        # this is the standard case considering one data set for all levels
        # get cumulative probabilities for branching
        overwrite_stats = False
        for k in range(0, 7):
            wxxcum[k, :, :] = cascade_options.wxx[k, :, :]
            if k > 0:
                wxxcum[k, :, :] = wxxcum[k-1, :, :] + wxxcum[k, :, :]
    elif isinstance(cascade_options, list):
        # one CascadeStatistics object per cascade level
        if len(cascade_options) == si:  # 5
            overwrite_stats = True
            list_casc = cascade_options
        else:
            raise ValueError('Cascade statistics list must have %s elements!'
                             % si)
    else:
        raise TypeError('cascade_options has invalid type')

    # arrays for each level -- each level doubles the number of boxes
    n = len(precip_daily)
    vdn1 = np.zeros(n*2)
    vdn2 = np.zeros(n*4)
    vdn3 = np.zeros(n*8)
    vdn4 = np.zeros(n*16)
    vdn5 = np.zeros(n*32)
    if not hourly:
        vdn6 = np.zeros(n*64)
        vdn7 = np.zeros(n*128)
        vdn8 = np.zeros(n*256)
        vdn9 = np.zeros(n*512)
        if level == 10 or level == 11:
            vdn10 = np.zeros(n*1024)
            if level == 11:
                vdn11 = np.zeros(n*2048)

    # class boundaries for histograms (7 equal-width classes on [0, 1])
    wclassbounds = np.array([0.0, 0.1429, 0.2857, 0.4286, 0.5714, 0.7143,
                             0.8571, 1.0])

    # disaggregation for each level
    for l in range(1, si+1):
        # select the input/output arrays for this level
        if l == 1:
            vdn_in = precip_daily
            vdn_out = vdn1
        elif l == 2:
            vdn_in = vdn_out
            vdn_out = vdn2
        elif l == 3:
            vdn_in = vdn_out
            vdn_out = vdn3
        elif l == 4:
            vdn_in = vdn_out
            vdn_out = vdn4
        elif l == 5:
            vdn_in = vdn_out
            vdn_out = vdn5
        elif l == 6:
            vdn_in = vdn_out
            vdn_out = vdn6
        elif l == 7:
            vdn_in = vdn_out
            vdn_out = vdn7
        elif l == 8:
            vdn_in = vdn_out
            vdn_out = vdn8
        elif l == 9:
            vdn_in = vdn_out
            vdn_out = vdn9
        elif l == 10:
            vdn_in = vdn_out
            vdn_out = vdn10
        elif l == 11:
            vdn_in = vdn_out
            vdn_out = vdn11

        # NOTE(review): si is reused here as a countdown index into the
        # per-level statistics; this is safe only because range(1, si+1)
        # above was evaluated once before the loop started.
        si -= 1
        if overwrite_stats:
            # level-specific statistics supplied as a list
            cascade_options = list_casc[si]
            for k in range(0, 7):
                wxxcum[k, :, :] = cascade_options.wxx[k, :, :]
                if k > 0:
                    wxxcum[k, :, :] = wxxcum[k-1, :, :] + wxxcum[k, :, :]
            meanvol = cascade_options.threshold[0]
        else:
            meanvol = cascade_options.threshold[si]

        # evaluate mean rainfall intensity for wet boxes
        # these values should be determined during the aggregation phase!!!!!
        # mean volume threshold
        # meanvol = np.mean(vdn_in[vdn_in>0.])
        # use values derived parameter by parameter estimation instead
        # see above

        j = 0
        for i in range(0, len(vdn_in)):
            # it's raining now?
            if vdn_in[i] > 0:
                # determine type of box (position within the wet spell)
                if i == 0:
                    # only starting or isolated
                    if vdn_in[i+1] > 0:
                        vbtype = cascade.BoxTypes.starting
                    else:
                        vbtype = cascade.BoxTypes.isolated
                elif i == len(vdn_in)-1:
                    # only ending or isolated
                    if vdn_in[i-1] > 0:
                        vbtype = cascade.BoxTypes.ending
                    else:
                        vbtype = cascade.BoxTypes.isolated
                else:
                    # neither at at the end nor at the beginning
                    if vdn_in[i-1] == 0 and vdn_in[i+1] == 0:
                        vbtype = cascade.BoxTypes.isolated
                    if vdn_in[i-1] == 0 and vdn_in[i+1] > 0:
                        vbtype = cascade.BoxTypes.starting
                    if vdn_in[i-1] > 0 and vdn_in[i+1] > 0:
                        vbtype = cascade.BoxTypes.enclosed
                    if vdn_in[i-1] > 0 and vdn_in[i+1] == 0:
                        vbtype = cascade.BoxTypes.ending

                # above or below mean?
                if vdn_in[i] > meanvol:
                    belowabove = 1  # above mean
                else:
                    belowabove = 0  # below mean

                #
                p = np.zeros((3, 1))
                p[0] = cascade_options.p01[belowabove, vbtype-1]  # index changed!
                p[1] = cascade_options.p10[belowabove, vbtype-1]
                p[2] = cascade_options.pxx[belowabove, vbtype-1]

                # draw a random number to determine the braching type
                rndp = np.random.random()

                if rndp <= p[0]:
                    # first box 0, second box: 1 P(0/1)
                    vdn_out[j] = 0.0
                    j = j + 1
                    vdn_out[j] = vdn_in[i]
                    j = j + 1
                elif rndp > p[0] and rndp <= p[0] + p[1]:
                    # first box 1, second box: 0 P(1/0)
                    vdn_out[j] = vdn_in[i]
                    j = j + 1
                    vdn_out[j] = 0.0
                    j = j + 1
                else:
                    # both boxes wet
                    # we need a new random number
                    rndw = np.random.random()
                    # guess w1: pick the class whose cumulative probability
                    # first exceeds rndw and use its class center
                    for k in range(0, 7):
                        if rndw <= wxxcum[k, belowabove, vbtype-1]:
                            w1 = wclassbounds[k+1] - 1./14.  # class center
                            break

                    vdn_out[j] = w1 * vdn_in[i]
                    j = j + 1
                    vdn_out[j] = (1. - w1) * vdn_in[i]
                    j = j + 1

                    # check results (in the previous version this error has
                    # never been observed)
                    if w1 < 0 or w1 > 1:
                        print('error')
                        return
            else:
                # add two dry boxes
                vdn_out[j] = 0.0
                j = j + 1
                vdn_out[j] = 0.0
                j = j + 1

    if hourly:
        # uniformly disaggregate 0.75 h values to 0.25 h values
        vdn_025 = np.zeros(len(vdn_out)*3)
        j = 0
        for i in range(0, len(vdn_out)):
            for m in range(0, 3):
                vdn_025[j+m] = vdn_out[i] / 3.
            j = j + 3

        # aggregate to hourly time steps via the cumulative sum
        vdn_025cs = np.cumsum(vdn_025)
        vdn = np.zeros(int(len(vdn_025)/4))
        for i in range(0, len(vdn)+1):
            # for first hour take 4th item
            if i == 0:
                vdn[i] = vdn_025cs[3]
            elif i == 1:
                pass
            else:
                # >1 (starting with 2-1 = 1 item)
                vdn[i-1] = vdn_025cs[(i*4)-1] - vdn_025cs[(i*4)-5]

        disagg_precip = pd.Series(index=melodist.util.hourly_index(precip_daily.index),
                                  data=vdn)
    else:
        precip_sn = pd.Series(index=sub_level_index(precip_daily.index,
                                                    level=level,
                                                    fill_gaps=False),
                              data=vdn_out)
        disagg_precip = precip_sn.resample('5min').sum()

    # set missing days to nan again:
    for date in missing_days:
        disagg_precip[disagg_precip.index.date == date.date()] = np.nan

    # shifts the data by shift steps (fills with nan/cuts edge data )
    if shift != 0:
        disagg_precip = disagg_precip.shift(shift)  #? freq='1U')

    # return time series
    if test:
        if hourly:
            return vdn1, vdn2, vdn3, vdn4, vdn5, vdn_025, disagg_precip
        else:
            if level == 9:
                return vdn1, vdn2, vdn3, vdn4, vdn5, vdn6, vdn7, vdn8, vdn9, precip_sn, disagg_precip
            elif level == 10:
                return vdn1, vdn2, vdn3, vdn4, vdn5, vdn6, vdn7, vdn8, vdn9, vdn10, precip_sn, disagg_precip
            else:
                return vdn1, vdn2, vdn3, vdn4, vdn5, vdn6, vdn7, vdn8, vdn9, vdn10, vdn11, precip_sn, disagg_precip
    else:
        return disagg_precip
def mendelian_errors(args):
    """
    %prog mendelian_errors STR-Mendelian-errors.csv

    Plot Mendelian errors as calculated by mendelian(). File
    `STR-Mendelian-errors.csv` looks like:

    ,Duos - Mendelian errors,Trios - Mendelian errors
    SCA36,1.40%,0.60%
    ULD,0.30%,1.50%
    BPES,0.00%,1.80%

    One TRED disease per line, followed by duo errors and trio errors.
    """
    p = OptionParser(mendelian_errors.__doc__)
    opts, args, iopts = p.set_image_options(args, figsize="6x6")

    if len(args) != 1:
        sys.exit(not p.print_help())

    csvfile, = args
    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(iopts.w, iopts.h))
    root = fig.add_axes([0, 0, 1, 1])
    ymin = -.2
    df = pd.read_csv(csvfile)
    data = []
    for i, d in df.iterrows():
        # Skip diseases in the module-level `ignore` collection.
        if d['TRED'].split()[0] in ignore:
            logging.debug("Ignore {}".format(d['TRED']))
            continue
        data.append(d)

    treds, duos, trios = zip(*data)
    ntreds = len(treds)
    ticks = range(ntreds)
    treds = [x.split()[0] for x in treds]
    duos = [float(x.rstrip('%')) for x in duos]
    trios = [float(x.rstrip('%')) for x in trios]

    # Vertical stem from the baseline up to the larger of the two errors.
    for tick, duo, trio in zip(ticks, duos, trios):
        m = max(duo, trio)
        ax.plot([tick, tick], [ymin, m], "-", lw=2, color='lightslategray')

    # Fix: the original rebound `duos`/`trios` to the Line2D artists,
    # shadowing the data lists; use separate names for the artists.
    duos_line, = ax.plot(duos, "o", mfc='w', mec='g')
    trios_line, = ax.plot(trios, "o", mfc='w', mec='b')
    ax.set_title("Mendelian errors based on trios and duos in HLI samples")
    nduos = "Mendelian errors in 362 duos"
    ntrios = "Mendelian errors in 339 trios"
    ax.legend([trios_line, duos_line], [ntrios, nduos], loc='best')

    ax.set_xticks(ticks)
    ax.set_xticklabels(treds, rotation=45, ha="right", size=8)
    yticklabels = [int(x) for x in ax.get_yticks()]
    ax.set_yticklabels(yticklabels, family='Helvetica')
    # Raw string: "\%" is an invalid escape sequence in a plain literal
    # (SyntaxWarning on modern Python); the backslash is intentional TeX
    # markup and the runtime value is unchanged.
    ax.set_ylabel(r"Mendelian errors (\%)")
    ax.set_ylim(ymin, 20)

    normalize_axes(root)

    image_name = "mendelian_errors." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def _update_scheme(self, scheme, ancestors):
    """
    Updates the current scheme based off special pre-defined keys and
    returns a new updated scheme.

    :param scheme: A :dict:, The scheme defining the validations.
    :param ancestors: A :OrderedDict: that provides a history of its ancestors.
    :rtype: A new :dict: with updated scheme values.
    """
    if not isinstance(ancestors, OrderedDict):
        raise TypeError("ancestors must be an OrderedDict. type: {0} was passed.".format(type(ancestors)))

    if not isinstance(scheme, dict):
        raise TypeError('scheme must be a dict. type: {0} was passed'.format(type(scheme)))

    # TODO: what if we have more than one scheme :P need to fix this.
    definitions = ROOT_SCHEME.get('_')
    if 'inherit' in scheme:
        # Expand inherited definitions before resolving references.
        scheme = self._scheme_propagation(scheme, definitions)

    updated_scheme = {}
    for scheme_key in six.iterkeys(scheme):
        if not isinstance(scheme_key, six.string_types):
            raise TypeError('scheme keys are required to be strings. type: {0} was passed.'.format(scheme_key))

        # NOTE(review): these use membership ('@' in key) rather than
        # key.startswith('@'), yet ref = scheme_key[1:] strips only the
        # first character -- a key with the marker mid-string would
        # produce a bogus ref. Presumably keys are expected to *start*
        # with '@'/'~'; confirm against callers before tightening.
        if '@' in scheme_key:
            # '@<ref>': merge this key's sub-scheme into the referenced
            # scheme and apply it to every key the reference covers.
            ref = scheme_key[1:]
            scheme_reference = self._scheme_references.get(ref)
            if not scheme_reference:
                raise ConfigValidationException(ancestors, ref, scheme_reference, 'required', scheme)
            for reference_key in scheme_reference['keys']:
                # NOTE(review): this mutates the shared reference entry
                # in self._scheme_references; later lookups of the same
                # ref see the merged result -- verify this is intended.
                scheme_reference['scheme'].update(scheme[scheme_key])
                updated_scheme[reference_key] = scheme_reference['scheme']
        elif '~' in scheme_key:
            # '~<ref>': override -- replace the referenced keys' schemes
            # with this key's sub-scheme verbatim.
            ref = scheme_key[1:]
            scheme_reference = self._scheme_references.get(ref)
            if not scheme_reference:
                raise LookupError("was unable to find {0} in scheme reference.".format(ref))
            for reference_key in scheme_reference['keys']:
                updated_scheme[reference_key] = scheme[scheme_key]

    # Merges the expansions into `scheme` in place and returns it (the
    # '@'/'~' marker keys themselves are left in the dict).
    scheme.update(updated_scheme)
    return scheme
def BT(cpu, dest, src):
    """
    Bit Test.

    Selects the bit in a bit string (specified with the first operand,
    called the bit base) at the bit-position designated by the bit offset
    (specified by the second operand) and stores the value of the bit in
    the CF flag.

    The bit base operand can be a register or a memory location; the bit
    offset operand can be a register or an immediate value:

    - If the bit base operand specifies a register, the instruction takes
      the modulo 16, 32, or 64 of the bit offset operand (modulo size
      depends on the mode and register size; 64-bit operands are available
      only in 64-bit mode).
    - If the bit base operand specifies a memory location, the operand
      represents the address of the byte in memory that contains the bit
      base (bit 0 of the specified byte) of the bit string. The range of
      the bit position that can be referenced by the offset operand
      depends on the operand size.

    :param cpu: current CPU.
    :param dest: bit base.
    :param src: bit offset.
    """
    if dest.type == 'register':
        # The bit offset wraps modulo the register width.
        bit_pos = src.read() % dest.size
        cpu.CF = ((dest.read() >> bit_pos) & 1) != 0
    elif dest.type == 'memory':
        # Resolve the byte address and bit position within that byte,
        # apply the DS segment base, then extract the single bit.
        addr, pos = cpu._getMemoryBit(dest, src)
        base, size, ty = cpu.get_descriptor(cpu.DS)
        byte_value = cpu.read_int(addr + base, 8)
        cpu.CF = Operators.EXTRACT(byte_value, pos, 1) == 1
    else:
        raise NotImplementedError(f"Unknown operand for BT: {dest.type}")
def unpause(self, id):  # pylint: disable=invalid-name,redefined-builtin
    """Resume a paused (running) result.

    :param id: Result ID as an int.
    """
    endpoint = '{0}{1}/unpause/'.format(self.base, id)
    return self.service.post(endpoint)
Unpause a running result. :param id: Result ID as an int.
Below is the the instruction that describes the task: ### Input: Unpause a running result. :param id: Result ID as an int. ### Response: def unpause(self, id): # pylint: disable=invalid-name,redefined-builtin """Unpause a running result. :param id: Result ID as an int. """ return self.service.post(self.base+str(id)+'/unpause/')
def WriteSymlink(self, src_arcname, dst_arcname):
    """Writes a symlink entry into the archive.

    :param src_arcname: archive path the link points at (the link target).
    :param dst_arcname: archive name of the symlink entry itself.
    :returns: the bytes accumulated so far by the underlying stream
        (the stream is reset afterwards).
    """
    # Build a fresh tarinfo record bound to the open tar writer.
    info = self._tar_fd.tarinfo()
    info.tarfile = self._tar_fd
    info.name = SmartStr(dst_arcname)
    info.size = 0  # symlink entries carry no payload
    info.mtime = time.time()
    info.type = tarfile.SYMTYPE
    info.linkname = SmartStr(src_arcname)
    self._tar_fd.addfile(info)
    # Hand back whatever the stream buffered for this entry and reset it.
    return self._stream.GetValueAndReset()
Writes a symlink into the archive.
Below is the the instruction that describes the task: ### Input: Writes a symlink into the archive. ### Response: def WriteSymlink(self, src_arcname, dst_arcname): """Writes a symlink into the archive.""" info = self._tar_fd.tarinfo() info.tarfile = self._tar_fd info.name = SmartStr(dst_arcname) info.size = 0 info.mtime = time.time() info.type = tarfile.SYMTYPE info.linkname = SmartStr(src_arcname) self._tar_fd.addfile(info) return self._stream.GetValueAndReset()
def set_matrix(self, matrix):
    """Sets the pattern’s transformation matrix to :obj:`matrix`.

    This matrix is a transformation from user space to pattern space.

    When a pattern is first created it always has the identity matrix for
    its transformation matrix, which means that pattern space is initially
    identical to user space.

    **Important:** Please note that the direction of this transformation
    matrix is from user space to pattern space. This means that if you
    imagine the flow from a pattern to user space (and on to device space),
    then coordinates in that flow will be transformed by the inverse of the
    pattern matrix.

    For example, if you want to make a pattern appear twice as large as it
    does by default the correct code to use is::

        pattern.set_matrix(Matrix(xx=0.5, yy=0.5))

    Meanwhile, using values of 2 rather than 0.5 in the code above would
    cause the pattern to appear at half of its default size.

    Also, please note the discussion of the user-space locking semantics of
    :meth:`Context.set_source`.

    :param matrix: A :class:`Matrix` to be copied into the pattern.

    """
    # cairo copies the matrix, so the caller's Matrix is not retained;
    # any failure surfaces through the pattern status checked below.
    cairo.cairo_pattern_set_matrix(self._pointer, matrix._pointer)
    self._check_status()
Sets the pattern’s transformation matrix to :obj:`matrix`. This matrix is a transformation from user space to pattern space. When a pattern is first created it always has the identity matrix for its transformation matrix, which means that pattern space is initially identical to user space. **Important:** Please note that the direction of this transformation matrix is from user space to pattern space. This means that if you imagine the flow from a pattern to user space (and on to device space), then coordinates in that flow will be transformed by the inverse of the pattern matrix. For example, if you want to make a pattern appear twice as large as it does by default the correct code to use is:: pattern.set_matrix(Matrix(xx=0.5, yy=0.5)) Meanwhile, using values of 2 rather than 0.5 in the code above would cause the pattern to appear at half of its default size. Also, please note the discussion of the user-space locking semantics of :meth:`Context.set_source`. :param matrix: A :class:`Matrix` to be copied into the pattern.
Below is the the instruction that describes the task: ### Input: Sets the pattern’s transformation matrix to :obj:`matrix`. This matrix is a transformation from user space to pattern space. When a pattern is first created it always has the identity matrix for its transformation matrix, which means that pattern space is initially identical to user space. **Important:** Please note that the direction of this transformation matrix is from user space to pattern space. This means that if you imagine the flow from a pattern to user space (and on to device space), then coordinates in that flow will be transformed by the inverse of the pattern matrix. For example, if you want to make a pattern appear twice as large as it does by default the correct code to use is:: pattern.set_matrix(Matrix(xx=0.5, yy=0.5)) Meanwhile, using values of 2 rather than 0.5 in the code above would cause the pattern to appear at half of its default size. Also, please note the discussion of the user-space locking semantics of :meth:`Context.set_source`. :param matrix: A :class:`Matrix` to be copied into the pattern. ### Response: def set_matrix(self, matrix): """Sets the pattern’s transformation matrix to :obj:`matrix`. This matrix is a transformation from user space to pattern space. When a pattern is first created it always has the identity matrix for its transformation matrix, which means that pattern space is initially identical to user space. **Important:** Please note that the direction of this transformation matrix is from user space to pattern space. This means that if you imagine the flow from a pattern to user space (and on to device space), then coordinates in that flow will be transformed by the inverse of the pattern matrix. 
For example, if you want to make a pattern appear twice as large as it does by default the correct code to use is:: pattern.set_matrix(Matrix(xx=0.5, yy=0.5)) Meanwhile, using values of 2 rather than 0.5 in the code above would cause the pattern to appear at half of its default size. Also, please note the discussion of the user-space locking semantics of :meth:`Context.set_source`. :param matrix: A :class:`Matrix` to be copied into the pattern. """ cairo.cairo_pattern_set_matrix(self._pointer, matrix._pointer) self._check_status()
def can_access_objective_hierarchy(self):
    """Tests if this user can perform hierarchy queries.

    A return of true does not guarantee successful authorization.  A
    return of false indicates that it is known all methods in this
    session will result in a PermissionDenied.  This is intended as a
    hint to an application that may not offer traversal functions to
    unauthorized users.

    return: (boolean) - false if hierarchy traversal methods are not
        authorized, true otherwise
    compliance: mandatory - This method must be implemented.
    """
    path = construct_url('authorization', bank_id=self._catalog_idstr)
    hints = self._get_request(path)['objectiveHierarchyHints']
    return hints['canAccessHierarchy']
Tests if this user can perform hierarchy queries. A return of true does not guarantee successful authorization. A return of false indicates that it is known all methods in this session will result in a PermissionDenied. This is intended as a hint to an application that may not offer traversal functions to unauthorized users. return: (boolean) - false if hierarchy traversal methods are not authorized, true otherwise compliance: mandatory - This method must be implemented.
Below is the the instruction that describes the task: ### Input: Tests if this user can perform hierarchy queries. A return of true does not guarantee successful authorization. A return of false indicates that it is known all methods in this session will result in a PermissionDenied. This is intended as a hint to an an application that may not offer traversal functions to unauthorized users. return: (boolean) - false if hierarchy traversal methods are not authorized, true otherwise compliance: mandatory - This method must be implemented. ### Response: def can_access_objective_hierarchy(self): """Tests if this user can perform hierarchy queries. A return of true does not guarantee successful authorization. A return of false indicates that it is known all methods in this session will result in a PermissionDenied. This is intended as a hint to an an application that may not offer traversal functions to unauthorized users. return: (boolean) - false if hierarchy traversal methods are not authorized, true otherwise compliance: mandatory - This method must be implemented. """ url_path = construct_url('authorization', bank_id=self._catalog_idstr) return self._get_request(url_path)['objectiveHierarchyHints']['canAccessHierarchy']
def select_io( hash ):
    """
    Returns the relevant i/o for a method whose call is characterized by the hash

    :param hash: The hash for the CallDescriptor
    :rtype list(tuple( hash, stack, methodname, returnval, args, packet_num )):
    """
    load_cache(True)
    global CACHE_
    res = []
    record_used('cache', hash)
    # Each cached entry is a pickled dict describing one recorded call.
    # NOTE(review): pickle.loads executes arbitrary code on load — the
    # cache file must only ever come from a trusted, local source.
    for d in CACHE_['cache'].get(hash, {}).values():
        d = pickle.loads(d)
        res += [(d['hash'], d['stack'], d['methodname'], d['returnval'], d['args'], d['packet_num'])]
    return res
Returns the relevant i/o for a method whose call is characterized by the hash :param hash: The hash for the CallDescriptor :rtype list(tuple( hash, stack, methodname, returnval, args, packet_num )):
Below is the the instruction that describes the task: ### Input: Returns the relevant i/o for a method whose call is characterized by the hash :param hash: The hash for the CallDescriptor :rtype list(tuple( hash, stack, methodname, returnval, args, packet_num )): ### Response: def select_io( hash ): """ Returns the relevant i/o for a method whose call is characterized by the hash :param hash: The hash for the CallDescriptor :rtype list(tuple( hash, stack, methodname, returnval, args, packet_num )): """ load_cache(True) global CACHE_ res = [] record_used('cache', hash) for d in CACHE_['cache'].get(hash, {}).values(): d = pickle.loads(d) res += [(d['hash'], d['stack'], d['methodname'], d['returnval'], d['args'], d['packet_num'])] return res
def processAbf(abfFname,saveAs=False,dpi=100,show=True):
    """
    automatically generate a single representative image for an ABF.
    If saveAs is given (full path of a jpg of png file), the image will be saved.
    Otherwise, the image will pop up in a matplotlib window.
    """
    # bail out silently on anything that isn't a plausible file path string
    if not type(abfFname) is str or not len(abfFname)>3:
        return
    abf=swhlab.ABF(abfFname)
    plot=swhlab.plotting.ABFplot(abf)
    plot.figure_height=6
    plot.figure_width=10
    plot.subplot=False
    plot.figure(True)
    if abf.get_protocol_sequence(0)==abf.get_protocol_sequence(1) or abf.sweeps<2: # same protocol every time
        if abf.lengthMinutes<2: # short (probably a memtest or tau)
            ax1=plt.subplot(211)
            plot.figure_sweeps()
            plt.title("{} ({} sweeps)".format(abf.ID,abf.sweeps))
            plt.gca().get_xaxis().set_visible(False)
            plt.subplot(212,sharex=ax1)
            plot.figure_protocol()
            plt.title("")
        else: # long (probably a drug experiment)
            plot.figure_chronological()
    else: # protocol changes every sweep
        plots=[211,212] # assume we want 2 images
        if abf.units=='mV': # maybe it's something with APs?
            ap=swhlab.AP(abf) # go ahead and do AP detection
            ap.detect() # try to detect APs
            if len(ap.APs): # if we found some
                plots=[221,223,222,224] # get ready for 4 images
        ax1=plt.subplot(plots[0])
        plot.figure_sweeps()
        plt.title("{} ({} sweeps)".format(abf.ID,abf.sweeps))
        plt.gca().get_xaxis().set_visible(False)
        plt.subplot(plots[1],sharex=ax1)
        plot.figure_protocols()
        plt.title("protocol")
        if len(plots)>2: # assume we want to look at the first AP
            ax2=plt.subplot(plots[2])
            plot.rainbow=False
            plot.kwargs["color"]='b'
            plot.figure_chronological()
            plt.gca().get_xaxis().set_visible(False)
            plt.title("first AP magnitude")
            # velocity plot
            plt.subplot(plots[3],sharex=ax2)
            plot.abf.derivative=True
            plot.rainbow=False
            plot.traceColor='r'
            plot.figure_chronological()
            plt.axis([ap.APs[0]["T"]-.05,ap.APs[0]["T"]+.05,None,None])
            plt.title("first AP velocity")
    # save to disk when requested, otherwise show interactively
    if saveAs:
        print("saving",os.path.abspath(saveAs))
        plt.savefig(os.path.abspath(saveAs),dpi=dpi)
        return
    if show:
        plot.show()
automatically generate a single representative image for an ABF. If saveAs is given (full path of a jpg of png file), the image will be saved. Otherwise, the image will pop up in a matplotlib window.
Below is the the instruction that describes the task: ### Input: automatically generate a single representative image for an ABF. If saveAs is given (full path of a jpg of png file), the image will be saved. Otherwise, the image will pop up in a matplotlib window. ### Response: def processAbf(abfFname,saveAs=False,dpi=100,show=True): """ automatically generate a single representative image for an ABF. If saveAs is given (full path of a jpg of png file), the image will be saved. Otherwise, the image will pop up in a matplotlib window. """ if not type(abfFname) is str or not len(abfFname)>3: return abf=swhlab.ABF(abfFname) plot=swhlab.plotting.ABFplot(abf) plot.figure_height=6 plot.figure_width=10 plot.subplot=False plot.figure(True) if abf.get_protocol_sequence(0)==abf.get_protocol_sequence(1) or abf.sweeps<2: # same protocol every time if abf.lengthMinutes<2: # short (probably a memtest or tau) ax1=plt.subplot(211) plot.figure_sweeps() plt.title("{} ({} sweeps)".format(abf.ID,abf.sweeps)) plt.gca().get_xaxis().set_visible(False) plt.subplot(212,sharex=ax1) plot.figure_protocol() plt.title("") else: # long (probably a drug experiment) plot.figure_chronological() else: # protocol changes every sweep plots=[211,212] # assume we want 2 images if abf.units=='mV': # maybe it's something with APs? 
ap=swhlab.AP(abf) # go ahead and do AP detection ap.detect() # try to detect APs if len(ap.APs): # if we found some plots=[221,223,222,224] # get ready for 4 images ax1=plt.subplot(plots[0]) plot.figure_sweeps() plt.title("{} ({} sweeps)".format(abf.ID,abf.sweeps)) plt.gca().get_xaxis().set_visible(False) plt.subplot(plots[1],sharex=ax1) plot.figure_protocols() plt.title("protocol") if len(plots)>2: # assume we want to look at the first AP ax2=plt.subplot(plots[2]) plot.rainbow=False plot.kwargs["color"]='b' plot.figure_chronological() plt.gca().get_xaxis().set_visible(False) plt.title("first AP magnitude") # velocity plot plt.subplot(plots[3],sharex=ax2) plot.abf.derivative=True plot.rainbow=False plot.traceColor='r' plot.figure_chronological() plt.axis([ap.APs[0]["T"]-.05,ap.APs[0]["T"]+.05,None,None]) plt.title("first AP velocity") if saveAs: print("saving",os.path.abspath(saveAs)) plt.savefig(os.path.abspath(saveAs),dpi=dpi) return if show: plot.show()
def iterSourceCode(paths):
    """
    Yield every Python source file reachable from C{paths}.

    @param paths: A list of paths. Directories will be recursed into and
        any .py files found will be yielded. Any non-directories will be
        yielded as-is.
    """
    for entry in paths:
        if not os.path.isdir(entry):
            yield entry
            continue
        for dirpath, _dirnames, filenames in os.walk(entry):
            for name in filenames:
                if name.endswith('.py'):
                    yield os.path.join(dirpath, name)
Iterate over all Python source files in C{paths}. @param paths: A list of paths. Directories will be recursed into and any .py files found will be yielded. Any non-directories will be yielded as-is.
Below is the the instruction that describes the task: ### Input: Iterate over all Python source files in C{paths}. @param paths: A list of paths. Directories will be recursed into and any .py files found will be yielded. Any non-directories will be yielded as-is. ### Response: def iterSourceCode(paths): """ Iterate over all Python source files in C{paths}. @param paths: A list of paths. Directories will be recursed into and any .py files found will be yielded. Any non-directories will be yielded as-is. """ for path in paths: if os.path.isdir(path): for dirpath, dirnames, filenames in os.walk(path): for filename in filenames: if filename.endswith('.py'): yield os.path.join(dirpath, filename) else: yield path
def run():
    """The entry point from the praw-multiprocess utility.

    Parses the listen address/port options, binds a threading TCP server
    using RequestHandler, and serves until interrupted (Ctrl-C), then
    closes the listening socket and the handler's shared HTTP connection.
    """
    parser = OptionParser(version='%prog {0}'.format(__version__))
    parser.add_option('-a', '--addr', default='localhost',
                      help=('The address or host to listen on. Specify -a '
                            '0.0.0.0 to listen on all addresses. '
                            'Default: localhost'))
    parser.add_option('-p', '--port', type='int', default='10101',
                      help=('The port to listen for requests on. '
                            'Default: 10101'))
    options, _ = parser.parse_args()
    try:
        server = ThreadingTCPServer((options.addr, options.port),
                                    RequestHandler)
    except (socket.error, socket.gaierror) as exc:  # Handle bind errors
        print(exc)
        sys.exit(1)
    print('Listening on {0} port {1}'.format(options.addr, options.port))
    try:
        server.serve_forever()  # pylint: disable=E1101
    except KeyboardInterrupt:
        # Graceful shutdown on Ctrl-C: release the socket and the shared
        # HTTP connection held on the handler class.
        server.socket.close()  # pylint: disable=E1101
        RequestHandler.http.close()
        print('Goodbye!')
The entry point from the praw-multiprocess utility.
Below is the the instruction that describes the task: ### Input: The entry point from the praw-multiprocess utility. ### Response: def run(): """The entry point from the praw-multiprocess utility.""" parser = OptionParser(version='%prog {0}'.format(__version__)) parser.add_option('-a', '--addr', default='localhost', help=('The address or host to listen on. Specify -a ' '0.0.0.0 to listen on all addresses. ' 'Default: localhost')) parser.add_option('-p', '--port', type='int', default='10101', help=('The port to listen for requests on. ' 'Default: 10101')) options, _ = parser.parse_args() try: server = ThreadingTCPServer((options.addr, options.port), RequestHandler) except (socket.error, socket.gaierror) as exc: # Handle bind errors print(exc) sys.exit(1) print('Listening on {0} port {1}'.format(options.addr, options.port)) try: server.serve_forever() # pylint: disable=E1101 except KeyboardInterrupt: server.socket.close() # pylint: disable=E1101 RequestHandler.http.close() print('Goodbye!')
def check_status(self, ignore=(), status=None):
    """
    Checks status of each collection and shard to make sure that:
    a) Cluster state is active
    b) Number of docs matches across replicas for a given shard.
    Returns a dict of results for custom alerting.

    :param ignore: iterable of check messages to skip entirely.
    :param status: pre-fetched cluster status; fetched via
        ``self.clusterstatus()`` when None.
    """
    # NOTE(review): the check table is (re)assigned on the instance every
    # call; it lives here only because the check functions are methods.
    self.SHARD_CHECKS = [
        {'check_msg': 'Bad Core Count Check',
         'f': self._check_shard_count},
        {'check_msg': 'Bad Shard Cluster Status',
         'f': self._check_shard_status}
    ]
    if status is None:
        status = self.clusterstatus()
    out = {}
    for collection in status:
        out[collection] = {}
        out[collection]['coll_status'] = True  # Means it's fine
        out[collection]['coll_messages'] = []
        for shard in status[collection]:
            self.logger.debug("Checking {}/{}".format(collection, shard))
            s_dict = status[collection][shard]
            for check in self.SHARD_CHECKS:
                if check['check_msg'] in ignore:
                    continue
                res = check['f'](s_dict)
                if not res:
                    # any failing shard check marks the whole collection
                    # bad; record each distinct failure message once
                    out[collection]['coll_status'] = False
                    if check['check_msg'] not in out[collection]['coll_messages']:
                        out[collection]['coll_messages'].append(check['check_msg'])
            self.logger.debug(s_dict)
    return out
Checks status of each collection and shard to make sure that: a) Cluster state is active b) Number of docs matches across replicas for a given shard. Returns a dict of results for custom alerting.
Below is the the instruction that describes the task: ### Input: Checks status of each collection and shard to make sure that: a) Cluster state is active b) Number of docs matches across replicas for a given shard. Returns a dict of results for custom alerting. ### Response: def check_status(self, ignore=(), status=None): """ Checks status of each collection and shard to make sure that: a) Cluster state is active b) Number of docs matches across replicas for a given shard. Returns a dict of results for custom alerting. """ self.SHARD_CHECKS = [ {'check_msg': 'Bad Core Count Check', 'f': self._check_shard_count}, {'check_msg': 'Bad Shard Cluster Status', 'f': self._check_shard_status} ] if status is None: status = self.clusterstatus() out = {} for collection in status: out[collection] = {} out[collection]['coll_status'] = True # Means it's fine out[collection]['coll_messages'] = [] for shard in status[collection]: self.logger.debug("Checking {}/{}".format(collection, shard)) s_dict = status[collection][shard] for check in self.SHARD_CHECKS: if check['check_msg'] in ignore: continue res = check['f'](s_dict) if not res: out[collection]['coll_status'] = False if check['check_msg'] not in out[collection]['coll_messages']: out[collection]['coll_messages'].append(check['check_msg']) self.logger.debug(s_dict) return out
def deserialize_object(buffers, g=None):
    """Reconstruct an object serialized by serialize_object from data buffers.

    Parameters
    ----------
    bufs : list of buffers/bytes

    g : globals to be used when uncanning

    Returns
    -------
    (newobj, bufs) : unpacked object, and the list of remaining unused buffers.
    """
    bufs = list(buffers)
    # The first buffer is the pickled "canned" skeleton; any remaining
    # buffers are raw payloads referenced by it.
    # NOTE(review): pickle.loads executes arbitrary code — this must only
    # ever be fed data from a trusted peer.
    pobj = buffer_to_bytes_py2(bufs.pop(0))
    canned = pickle.loads(pobj)
    if istype(canned, sequence_types) and len(canned) < MAX_ITEMS:
        for c in canned:
            _restore_buffers(c, bufs)
        newobj = uncan_sequence(canned, g)
    elif istype(canned, dict) and len(canned) < MAX_ITEMS:
        newobj = {}
        # sorted() keeps buffer consumption order deterministic, matching
        # the order used on the serialization side.
        for k in sorted(canned):
            c = canned[k]
            _restore_buffers(c, bufs)
            newobj[k] = uncan(c, g)
    else:
        _restore_buffers(canned, bufs)
        newobj = uncan(canned, g)
    return newobj, bufs
Reconstruct an object serialized by serialize_object from data buffers. Parameters ---------- bufs : list of buffers/bytes g : globals to be used when uncanning Returns ------- (newobj, bufs) : unpacked object, and the list of remaining unused buffers.
Below is the the instruction that describes the task: ### Input: Reconstruct an object serialized by serialize_object from data buffers. Parameters ---------- bufs : list of buffers/bytes g : globals to be used when uncanning Returns ------- (newobj, bufs) : unpacked object, and the list of remaining unused buffers. ### Response: def deserialize_object(buffers, g=None): """Reconstruct an object serialized by serialize_object from data buffers. Parameters ---------- bufs : list of buffers/bytes g : globals to be used when uncanning Returns ------- (newobj, bufs) : unpacked object, and the list of remaining unused buffers. """ bufs = list(buffers) pobj = buffer_to_bytes_py2(bufs.pop(0)) canned = pickle.loads(pobj) if istype(canned, sequence_types) and len(canned) < MAX_ITEMS: for c in canned: _restore_buffers(c, bufs) newobj = uncan_sequence(canned, g) elif istype(canned, dict) and len(canned) < MAX_ITEMS: newobj = {} for k in sorted(canned): c = canned[k] _restore_buffers(c, bufs) newobj[k] = uncan(c, g) else: _restore_buffers(canned, bufs) newobj = uncan(canned, g) return newobj, bufs
def print_error(error: "GraphQLError") -> str:
    """Render a GraphQLError as a string.

    When location information is available, the rendered string includes
    highlighted source excerpts for the error's position(s); otherwise it
    is just the error message.
    """
    excerpts = []
    if error.nodes:
        for node in error.nodes:
            if node.loc:
                src = node.loc.source
                excerpts.append(
                    highlight_source_at_location(
                        src, src.get_location(node.loc.start)
                    )
                )
    elif error.source and error.locations:
        for loc in error.locations:
            excerpts.append(highlight_source_at_location(error.source, loc))
    if not excerpts:
        return error.message
    return "\n\n".join([error.message] + excerpts) + "\n"
Print a GraphQLError to a string. The printed string will contain useful location information about the error's position in the source.
Below is the the instruction that describes the task: ### Input: Print a GraphQLError to a string. The printed string will contain useful location information about the error's position in the source. ### Response: def print_error(error: "GraphQLError") -> str: """Print a GraphQLError to a string. The printed string will contain useful location information about the error's position in the source. """ printed_locations: List[str] = [] print_location = printed_locations.append if error.nodes: for node in error.nodes: if node.loc: print_location( highlight_source_at_location( node.loc.source, node.loc.source.get_location(node.loc.start) ) ) elif error.source and error.locations: source = error.source for location in error.locations: print_location(highlight_source_at_location(source, location)) if printed_locations: return "\n\n".join([error.message] + printed_locations) + "\n" return error.message
def make_position_choices(self):
    """Build the selectable choice records for the available positions."""
    return [
        {"ResultValue": position, "ResultText": position}
        for position in self.get_available_positions()
    ]
Create choices for available positions
Below is the instruction that describes the task: ### Input: Create choices for available positions ### Response: def make_position_choices(self): """Create choices for available positions """ choices = [] for pos in self.get_available_positions(): choices.append({ "ResultValue": pos, "ResultText": pos, }) return choices
def on_number(self, ctx, value):
    '''
    Number callback: convert the raw numeric token and forward it to the
    stream.  Since this generic handler is registered, the separate
    integer and double callbacks are useless.

    :param ctx: parser context (unused).
    :param value: the numeric token as text.
    '''
    # Prefer an exact int; fall back to float for anything with a
    # fraction or exponent.  (The previous ``value.isdigit()`` test
    # wrongly converted negative integers such as "-3" to floats,
    # because isdigit() is False for a leading minus sign.)
    try:
        value = int(value)
    except ValueError:
        value = float(value)
    top = self._stack[-1]
    if top is JSONCompositeType.OBJECT:
        self.fire(JSONStreamer.VALUE_EVENT, value)
    elif top is JSONCompositeType.ARRAY:
        self.fire(JSONStreamer.ELEMENT_EVENT, value)
    else:
        raise RuntimeError('Invalid json-streamer state')
Since this is defined both integer and double callbacks are useless
Below is the the instruction that describes the task: ### Input: Since this is defined both integer and double callbacks are useless ### Response: def on_number(self, ctx, value): ''' Since this is defined both integer and double callbacks are useless ''' value = int(value) if value.isdigit() else float(value) top = self._stack[-1] if top is JSONCompositeType.OBJECT: self.fire(JSONStreamer.VALUE_EVENT, value) elif top is JSONCompositeType.ARRAY: self.fire(JSONStreamer.ELEMENT_EVENT, value) else: raise RuntimeError('Invalid json-streamer state')
def _load_permissions(self):
    """Load permissions associated to actions.

    Resolves every explicit need into a combined needs/excludes pair,
    consulting (and populating) the per-action cache for needs whose
    method is ``'action'``.  The result is stored on ``self._permissions``.
    """
    result = _P(needs=set(), excludes=set())
    # When access is deny-by-default, the explicit needs themselves are
    # required in addition to whatever the actions below contribute.
    if not self.allow_by_default:
        result.needs.update(self.explicit_needs)
    for explicit_need in self.explicit_needs:
        if explicit_need.method == 'action':
            # Check the cache first to avoid the three DB queries below.
            action = current_access.get_action_cache(
                self._cache_key(explicit_need)
            )
            if action is None:
                action = _P(needs=set(), excludes=set())
                actionsusers = ActionUsers.query_by_action(
                    explicit_need
                ).all()
                actionsroles = ActionRoles.query_by_action(
                    explicit_need
                ).join(
                    ActionRoles.role
                ).all()
                actionssystem = ActionSystemRoles.query_by_action(
                    explicit_need
                ).all()
                for db_action in chain(
                        actionsusers, actionsroles, actionssystem):
                    if db_action.exclude:
                        action.excludes.add(db_action.need)
                    else:
                        action.needs.add(db_action.need)
                current_access.set_action_cache(
                    self._cache_key(explicit_need), action
                )
            # in-place update of results
            result.update(action)
        elif self.allow_by_default:
            result.needs.add(explicit_need)
    self._permissions = result
Load permissions associated to actions.
Below is the the instruction that describes the task: ### Input: Load permissions associated to actions. ### Response: def _load_permissions(self): """Load permissions associated to actions.""" result = _P(needs=set(), excludes=set()) if not self.allow_by_default: result.needs.update(self.explicit_needs) for explicit_need in self.explicit_needs: if explicit_need.method == 'action': action = current_access.get_action_cache( self._cache_key(explicit_need) ) if action is None: action = _P(needs=set(), excludes=set()) actionsusers = ActionUsers.query_by_action( explicit_need ).all() actionsroles = ActionRoles.query_by_action( explicit_need ).join( ActionRoles.role ).all() actionssystem = ActionSystemRoles.query_by_action( explicit_need ).all() for db_action in chain( actionsusers, actionsroles, actionssystem): if db_action.exclude: action.excludes.add(db_action.need) else: action.needs.add(db_action.need) current_access.set_action_cache( self._cache_key(explicit_need), action ) # in-place update of results result.update(action) elif self.allow_by_default: result.needs.add(explicit_need) self._permissions = result
def completed_number(prefix, length):
    """
    Generate a syntactically valid card number ending in a Luhn check digit.

    'prefix' is the start of the CC number, given as a list of digit
    characters (a string of digits also works); any number of digits.
    'length' is the total length of the CC number to generate.
    Typically 13 or 16.

    Returns the full number as a string.
    """
    # Work on a copy so the caller's prefix list is never mutated
    # (the original aliased it and appended in place).
    ccnumber = list(prefix)
    # generate random digits up to (but not including) the check digit
    while len(ccnumber) < (length - 1):
        ccnumber.append(random.choice('0123456789'))
    # Luhn sum over the digits generated so far, walking from the right.
    # Every digit at an even index here lands in an odd position of the
    # final number (after the check digit is appended), so it is doubled.
    total = 0
    pos = 0
    reversed_digits = list(ccnumber)
    reversed_digits.reverse()
    while pos < length - 1:
        doubled = int(reversed_digits[pos]) * 2
        if doubled > 9:
            doubled -= 9
        total += doubled
        if pos != (length - 2):
            total += int(reversed_digits[pos + 1])
        pos += 2
    # The check digit makes the Luhn sum a multiple of 10.  Use pure
    # integer arithmetic: the original ``(sum / 10 + 1) * 10 - sum``
    # relied on Python 2 floor division and produced wrong digits under
    # Python 3 (e.g. total=23 gave 0 instead of 7).
    checkdigit = (10 - total % 10) % 10
    ccnumber.append(str(checkdigit))
    return ''.join(ccnumber)
'prefix' is the start of the CC number as a string, any number of digits. 'length' is the length of the CC number to generate. Typically 13 or 16
Below is the the instruction that describes the task: ### Input: 'prefix' is the start of the CC number as a string, any number of digits. 'length' is the length of the CC number to generate. Typically 13 or 16 ### Response: def completed_number(prefix, length): """ 'prefix' is the start of the CC number as a string, any number of digits. 'length' is the length of the CC number to generate. Typically 13 or 16 """ ccnumber = prefix # generate digits while len(ccnumber) < (length - 1): digit = random.choice(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']) ccnumber.append(digit) # Calculate sum sum = 0 pos = 0 reversedCCnumber = [] reversedCCnumber.extend(ccnumber) reversedCCnumber.reverse() while pos < length - 1: odd = int( reversedCCnumber[pos] ) * 2 if odd > 9: odd -= 9 sum += odd if pos != (length - 2): sum += int( reversedCCnumber[pos+1] ) pos += 2 # Calculate check digit checkdigit = ((sum / 10 + 1) * 10 - sum) % 10 ccnumber.append( str(int(checkdigit)) ) return ''.join(ccnumber)
def condensed_coords(i, j, n):
    """Transform square distance matrix coordinates to the corresponding
    index into a condensed, 1D form of the matrix.

    Parameters
    ----------
    i : int
        Row index.
    j : int
        Column index.
    n : int
        Size of the square matrix (length of first or second dimension).

    Returns
    -------
    ix : int

    Raises
    ------
    ValueError
        If ``i == j`` (the diagonal has no condensed entry) or either
        index is out of range.
    """
    # guard conditions
    if i == j or i >= n or j >= n or i < 0 or j < 0:
        raise ValueError('invalid coordinates: %s, %s' % (i, j))

    # normalise order so that i < j (the condensed form is symmetric)
    i, j = sorted([i, j])

    # number of condensed items in the rows before row i (sum of an
    # arithmetic progression).  The product is always even, so floor
    # division is exact; the original float ``/ 2`` + int() could lose
    # precision once the product exceeded 2**53.
    x = i * ((2 * n) - i - 1) // 2

    # add on previous items in current row
    ix = x + j - i - 1
    return ix
Transform square distance matrix coordinates to the corresponding index into a condensed, 1D form of the matrix. Parameters ---------- i : int Row index. j : int Column index. n : int Size of the square matrix (length of first or second dimension). Returns ------- ix : int
Below is the the instruction that describes the task: ### Input: Transform square distance matrix coordinates to the corresponding index into a condensed, 1D form of the matrix. Parameters ---------- i : int Row index. j : int Column index. n : int Size of the square matrix (length of first or second dimension). Returns ------- ix : int ### Response: def condensed_coords(i, j, n): """Transform square distance matrix coordinates to the corresponding index into a condensed, 1D form of the matrix. Parameters ---------- i : int Row index. j : int Column index. n : int Size of the square matrix (length of first or second dimension). Returns ------- ix : int """ # guard conditions if i == j or i >= n or j >= n or i < 0 or j < 0: raise ValueError('invalid coordinates: %s, %s' % (i, j)) # normalise order i, j = sorted([i, j]) # calculate number of items in rows before this one (sum of arithmetic # progression) x = i * ((2 * n) - i - 1) / 2 # add on previous items in current row ix = x + j - i - 1 return int(ix)
def configure_upload(graph, ns, mappings, exclude_func=None):
    """
    Register Upload endpoints for a resource object.
    """
    UploadConvention(graph, exclude_func).configure(ns, mappings)
Register Upload endpoints for a resource object.
Below is the instruction that describes the task: ### Input: Register Upload endpoints for a resource object. ### Response: def configure_upload(graph, ns, mappings, exclude_func=None): """ Register Upload endpoints for a resource object. """ convention = UploadConvention(graph, exclude_func) convention.configure(ns, mappings)
def contains_ignoring_case(self, *items): """Asserts that val is string and contains the given item or items.""" if len(items) == 0: raise ValueError('one or more args must be given') if isinstance(self.val, str_types): if len(items) == 1: if not isinstance(items[0], str_types): raise TypeError('given arg must be a string') if items[0].lower() not in self.val.lower(): self._err('Expected <%s> to case-insensitive contain item <%s>, but did not.' % (self.val, items[0])) else: missing = [] for i in items: if not isinstance(i, str_types): raise TypeError('given args must all be strings') if i.lower() not in self.val.lower(): missing.append(i) if missing: self._err('Expected <%s> to case-insensitive contain items %s, but did not contain %s.' % (self.val, self._fmt_items(items), self._fmt_items(missing))) elif isinstance(self.val, Iterable): missing = [] for i in items: if not isinstance(i, str_types): raise TypeError('given args must all be strings') found = False for v in self.val: if not isinstance(v, str_types): raise TypeError('val items must all be strings') if i.lower() == v.lower(): found = True break if not found: missing.append(i) if missing: self._err('Expected <%s> to case-insensitive contain items %s, but did not contain %s.' % (self.val, self._fmt_items(items), self._fmt_items(missing))) else: raise TypeError('val is not a string or iterable') return self
Asserts that val is string and contains the given item or items.
Below is the the instruction that describes the task: ### Input: Asserts that val is string and contains the given item or items. ### Response: def contains_ignoring_case(self, *items): """Asserts that val is string and contains the given item or items.""" if len(items) == 0: raise ValueError('one or more args must be given') if isinstance(self.val, str_types): if len(items) == 1: if not isinstance(items[0], str_types): raise TypeError('given arg must be a string') if items[0].lower() not in self.val.lower(): self._err('Expected <%s> to case-insensitive contain item <%s>, but did not.' % (self.val, items[0])) else: missing = [] for i in items: if not isinstance(i, str_types): raise TypeError('given args must all be strings') if i.lower() not in self.val.lower(): missing.append(i) if missing: self._err('Expected <%s> to case-insensitive contain items %s, but did not contain %s.' % (self.val, self._fmt_items(items), self._fmt_items(missing))) elif isinstance(self.val, Iterable): missing = [] for i in items: if not isinstance(i, str_types): raise TypeError('given args must all be strings') found = False for v in self.val: if not isinstance(v, str_types): raise TypeError('val items must all be strings') if i.lower() == v.lower(): found = True break if not found: missing.append(i) if missing: self._err('Expected <%s> to case-insensitive contain items %s, but did not contain %s.' % (self.val, self._fmt_items(items), self._fmt_items(missing))) else: raise TypeError('val is not a string or iterable') return self
def predict(self, X): """Predict the closest cluster each sample in X belongs to. In the vector quantization literature, `cluster_centers_` is called the code book and each value returned by `predict` is the index of the closest code in the code book. Parameters ---------- X : array-like, shape = [n_samples, n_features] New data to predict. Returns ------- Y : array, shape [n_samples,] Index of the closest center each sample belongs to. """ labels, inertia = libdistance.assign_nearest( X, self.cluster_centers_, metric=self.metric) return labels
Predict the closest cluster each sample in X belongs to. In the vector quantization literature, `cluster_centers_` is called the code book and each value returned by `predict` is the index of the closest code in the code book. Parameters ---------- X : array-like, shape = [n_samples, n_features] New data to predict. Returns ------- Y : array, shape [n_samples,] Index of the closest center each sample belongs to.
Below is the the instruction that describes the task: ### Input: Predict the closest cluster each sample in X belongs to. In the vector quantization literature, `cluster_centers_` is called the code book and each value returned by `predict` is the index of the closest code in the code book. Parameters ---------- X : array-like, shape = [n_samples, n_features] New data to predict. Returns ------- Y : array, shape [n_samples,] Index of the closest center each sample belongs to. ### Response: def predict(self, X): """Predict the closest cluster each sample in X belongs to. In the vector quantization literature, `cluster_centers_` is called the code book and each value returned by `predict` is the index of the closest code in the code book. Parameters ---------- X : array-like, shape = [n_samples, n_features] New data to predict. Returns ------- Y : array, shape [n_samples,] Index of the closest center each sample belongs to. """ labels, inertia = libdistance.assign_nearest( X, self.cluster_centers_, metric=self.metric) return labels
def check_completeness(self): """Raise a |RuntimeError| if the |IOSequence.series| contains at least one |numpy.nan| value, if option |Options.checkseries| is enabled. >>> from hydpy import pub >>> pub.timegrids = '2000-01-01', '2000-01-11', '1d' >>> from hydpy.core.sequencetools import IOSequence >>> class Seq(IOSequence): ... NDIM = 0 >>> seq = Seq(None) >>> seq.activate_ram() >>> seq.check_completeness() Traceback (most recent call last): ... RuntimeError: The series array of sequence `seq` contains 10 nan values. >>> seq.series = 1.0 >>> seq.check_completeness() >>> seq.series[3] = numpy.nan >>> seq.check_completeness() Traceback (most recent call last): ... RuntimeError: The series array of sequence `seq` contains 1 nan value. >>> with pub.options.checkseries(False): ... seq.check_completeness() """ if hydpy.pub.options.checkseries: isnan = numpy.isnan(self.series) if numpy.any(isnan): nmb = numpy.sum(isnan) valuestring = 'value' if nmb == 1 else 'values' raise RuntimeError( f'The series array of sequence ' f'{objecttools.devicephrase(self)} contains ' f'{nmb} nan {valuestring}.')
Raise a |RuntimeError| if the |IOSequence.series| contains at least one |numpy.nan| value, if option |Options.checkseries| is enabled. >>> from hydpy import pub >>> pub.timegrids = '2000-01-01', '2000-01-11', '1d' >>> from hydpy.core.sequencetools import IOSequence >>> class Seq(IOSequence): ... NDIM = 0 >>> seq = Seq(None) >>> seq.activate_ram() >>> seq.check_completeness() Traceback (most recent call last): ... RuntimeError: The series array of sequence `seq` contains 10 nan values. >>> seq.series = 1.0 >>> seq.check_completeness() >>> seq.series[3] = numpy.nan >>> seq.check_completeness() Traceback (most recent call last): ... RuntimeError: The series array of sequence `seq` contains 1 nan value. >>> with pub.options.checkseries(False): ... seq.check_completeness()
Below is the the instruction that describes the task: ### Input: Raise a |RuntimeError| if the |IOSequence.series| contains at least one |numpy.nan| value, if option |Options.checkseries| is enabled. >>> from hydpy import pub >>> pub.timegrids = '2000-01-01', '2000-01-11', '1d' >>> from hydpy.core.sequencetools import IOSequence >>> class Seq(IOSequence): ... NDIM = 0 >>> seq = Seq(None) >>> seq.activate_ram() >>> seq.check_completeness() Traceback (most recent call last): ... RuntimeError: The series array of sequence `seq` contains 10 nan values. >>> seq.series = 1.0 >>> seq.check_completeness() >>> seq.series[3] = numpy.nan >>> seq.check_completeness() Traceback (most recent call last): ... RuntimeError: The series array of sequence `seq` contains 1 nan value. >>> with pub.options.checkseries(False): ... seq.check_completeness() ### Response: def check_completeness(self): """Raise a |RuntimeError| if the |IOSequence.series| contains at least one |numpy.nan| value, if option |Options.checkseries| is enabled. >>> from hydpy import pub >>> pub.timegrids = '2000-01-01', '2000-01-11', '1d' >>> from hydpy.core.sequencetools import IOSequence >>> class Seq(IOSequence): ... NDIM = 0 >>> seq = Seq(None) >>> seq.activate_ram() >>> seq.check_completeness() Traceback (most recent call last): ... RuntimeError: The series array of sequence `seq` contains 10 nan values. >>> seq.series = 1.0 >>> seq.check_completeness() >>> seq.series[3] = numpy.nan >>> seq.check_completeness() Traceback (most recent call last): ... RuntimeError: The series array of sequence `seq` contains 1 nan value. >>> with pub.options.checkseries(False): ... seq.check_completeness() """ if hydpy.pub.options.checkseries: isnan = numpy.isnan(self.series) if numpy.any(isnan): nmb = numpy.sum(isnan) valuestring = 'value' if nmb == 1 else 'values' raise RuntimeError( f'The series array of sequence ' f'{objecttools.devicephrase(self)} contains ' f'{nmb} nan {valuestring}.')
def cprint(text, fg=grey, bg=blackbg, w=norm, cr=False, encoding='utf8'): ''' Print a string in a specified color style and then return to normal. def cprint(text, fg=white, bg=blackbg, w=norm, cr=True): ''' colorstart(fg, bg, w) out(text) colorend(cr)
Print a string in a specified color style and then return to normal. def cprint(text, fg=white, bg=blackbg, w=norm, cr=True):
Below is the the instruction that describes the task: ### Input: Print a string in a specified color style and then return to normal. def cprint(text, fg=white, bg=blackbg, w=norm, cr=True): ### Response: def cprint(text, fg=grey, bg=blackbg, w=norm, cr=False, encoding='utf8'): ''' Print a string in a specified color style and then return to normal. def cprint(text, fg=white, bg=blackbg, w=norm, cr=True): ''' colorstart(fg, bg, w) out(text) colorend(cr)
def createReferenceURL(self, pid, name, ref_url, path="", validate=True): """Create a Referenced Content File (.url) :param pid: The HydroShare ID of the resource for which the file should be created :param name: Filename for the referenced file :param ref_url: url to be used in the referenced file :param path: Optional, defaults to contents directory if not provided. Folder path for the file to be created in :return: JsonResponse on success or HttpResponse with error status code on error :raises: HydroShareNotAuthorized if user is not authorized to perform action. :raises: HydroShareNotFound if the resource or resource file was not found. :raises: HydroShareHTTPException if an unexpected HTTP response code is encountered. """ return self.createReferencedFile(pid, path, name, ref_url, validate)
Create a Referenced Content File (.url) :param pid: The HydroShare ID of the resource for which the file should be created :param name: Filename for the referenced file :param ref_url: url to be used in the referenced file :param path: Optional, defaults to contents directory if not provided. Folder path for the file to be created in :return: JsonResponse on success or HttpResponse with error status code on error :raises: HydroShareNotAuthorized if user is not authorized to perform action. :raises: HydroShareNotFound if the resource or resource file was not found. :raises: HydroShareHTTPException if an unexpected HTTP response code is encountered.
Below is the the instruction that describes the task: ### Input: Create a Referenced Content File (.url) :param pid: The HydroShare ID of the resource for which the file should be created :param name: Filename for the referenced file :param ref_url: url to be used in the referenced file :param path: Optional, defaults to contents directory if not provided. Folder path for the file to be created in :return: JsonResponse on success or HttpResponse with error status code on error :raises: HydroShareNotAuthorized if user is not authorized to perform action. :raises: HydroShareNotFound if the resource or resource file was not found. :raises: HydroShareHTTPException if an unexpected HTTP response code is encountered. ### Response: def createReferenceURL(self, pid, name, ref_url, path="", validate=True): """Create a Referenced Content File (.url) :param pid: The HydroShare ID of the resource for which the file should be created :param name: Filename for the referenced file :param ref_url: url to be used in the referenced file :param path: Optional, defaults to contents directory if not provided. Folder path for the file to be created in :return: JsonResponse on success or HttpResponse with error status code on error :raises: HydroShareNotAuthorized if user is not authorized to perform action. :raises: HydroShareNotFound if the resource or resource file was not found. :raises: HydroShareHTTPException if an unexpected HTTP response code is encountered. """ return self.createReferencedFile(pid, path, name, ref_url, validate)
def text_bounding_box(self, size_pt, text): """ Return the bounding box of the given text at the given font size. :param int size_pt: the font size in points :param string text: the text :rtype: tuple (width, height) """ if size_pt == 12: mult = {"h": 9, "w_digit": 5, "w_space": 2} elif size_pt == 18: mult = {"h": 14, "w_digit": 9, "w_space": 2} num_chars = len(text) return (num_chars * mult["w_digit"] + (num_chars - 1) * mult["w_space"] + 1, mult["h"])
Return the bounding box of the given text at the given font size. :param int size_pt: the font size in points :param string text: the text :rtype: tuple (width, height)
Below is the the instruction that describes the task: ### Input: Return the bounding box of the given text at the given font size. :param int size_pt: the font size in points :param string text: the text :rtype: tuple (width, height) ### Response: def text_bounding_box(self, size_pt, text): """ Return the bounding box of the given text at the given font size. :param int size_pt: the font size in points :param string text: the text :rtype: tuple (width, height) """ if size_pt == 12: mult = {"h": 9, "w_digit": 5, "w_space": 2} elif size_pt == 18: mult = {"h": 14, "w_digit": 9, "w_space": 2} num_chars = len(text) return (num_chars * mult["w_digit"] + (num_chars - 1) * mult["w_space"] + 1, mult["h"])
def get_width(self, c, default=0, match_only=None): """ Get the display width of a component. Wraps `getattr()`. Development note: Cannot define this as a `partial()` because I want to maintain the order of arguments in `getattr()`. Args: c (component): The component to look up. default (float): The width to return in the event of no match. match_only (list of str): The component attributes to include in the comparison. Default: All of them. Returns: float. The width of the matching Decor in the Legend. """ return self.getattr(c=c, attr='width', default=default, match_only=match_only)
Get the display width of a component. Wraps `getattr()`. Development note: Cannot define this as a `partial()` because I want to maintain the order of arguments in `getattr()`. Args: c (component): The component to look up. default (float): The width to return in the event of no match. match_only (list of str): The component attributes to include in the comparison. Default: All of them. Returns: float. The width of the matching Decor in the Legend.
Below is the the instruction that describes the task: ### Input: Get the display width of a component. Wraps `getattr()`. Development note: Cannot define this as a `partial()` because I want to maintain the order of arguments in `getattr()`. Args: c (component): The component to look up. default (float): The width to return in the event of no match. match_only (list of str): The component attributes to include in the comparison. Default: All of them. Returns: float. The width of the matching Decor in the Legend. ### Response: def get_width(self, c, default=0, match_only=None): """ Get the display width of a component. Wraps `getattr()`. Development note: Cannot define this as a `partial()` because I want to maintain the order of arguments in `getattr()`. Args: c (component): The component to look up. default (float): The width to return in the event of no match. match_only (list of str): The component attributes to include in the comparison. Default: All of them. Returns: float. The width of the matching Decor in the Legend. """ return self.getattr(c=c, attr='width', default=default, match_only=match_only)
def for_user(cls, user): ''' Returns the user's current cart, or creates a new cart if there isn't one ready yet. ''' try: existing = commerce.Cart.objects.get( user=user, status=commerce.Cart.STATUS_ACTIVE, ) except ObjectDoesNotExist: existing = commerce.Cart.objects.create( user=user, time_last_updated=timezone.now(), reservation_duration=datetime.timedelta(), ) return cls(existing)
Returns the user's current cart, or creates a new cart if there isn't one ready yet.
Below is the the instruction that describes the task: ### Input: Returns the user's current cart, or creates a new cart if there isn't one ready yet. ### Response: def for_user(cls, user): ''' Returns the user's current cart, or creates a new cart if there isn't one ready yet. ''' try: existing = commerce.Cart.objects.get( user=user, status=commerce.Cart.STATUS_ACTIVE, ) except ObjectDoesNotExist: existing = commerce.Cart.objects.create( user=user, time_last_updated=timezone.now(), reservation_duration=datetime.timedelta(), ) return cls(existing)
def clean_account(self): """Ensure this is an income account""" account = self.cleaned_data['account'] if not account: return if account.type != Account.TYPES.income: raise ValidationError('Account must be an income account') try: account.housemate except Housemate.DoesNotExist: pass else: raise ValidationError('Account already has a housemate') return account
Ensure this is an income account
Below is the the instruction that describes the task: ### Input: Ensure this is an income account ### Response: def clean_account(self): """Ensure this is an income account""" account = self.cleaned_data['account'] if not account: return if account.type != Account.TYPES.income: raise ValidationError('Account must be an income account') try: account.housemate except Housemate.DoesNotExist: pass else: raise ValidationError('Account already has a housemate') return account
def get_lock_requests(self): """Take the current context, and the current patch locks, and determine the effective requests that will be added to the main request. Returns: A dict of (PatchLock, [Requirement]) tuples. Each requirement will be a weak package reference. If there is no current context, an empty dict will be returned. """ d = defaultdict(list) if self._context: for variant in self._context.resolved_packages: name = variant.name version = variant.version lock = self.patch_locks.get(name) if lock is None: lock = self.default_patch_lock request = get_lock_request(name, version, lock) if request is not None: d[lock].append(request) return d
Take the current context, and the current patch locks, and determine the effective requests that will be added to the main request. Returns: A dict of (PatchLock, [Requirement]) tuples. Each requirement will be a weak package reference. If there is no current context, an empty dict will be returned.
Below is the the instruction that describes the task: ### Input: Take the current context, and the current patch locks, and determine the effective requests that will be added to the main request. Returns: A dict of (PatchLock, [Requirement]) tuples. Each requirement will be a weak package reference. If there is no current context, an empty dict will be returned. ### Response: def get_lock_requests(self): """Take the current context, and the current patch locks, and determine the effective requests that will be added to the main request. Returns: A dict of (PatchLock, [Requirement]) tuples. Each requirement will be a weak package reference. If there is no current context, an empty dict will be returned. """ d = defaultdict(list) if self._context: for variant in self._context.resolved_packages: name = variant.name version = variant.version lock = self.patch_locks.get(name) if lock is None: lock = self.default_patch_lock request = get_lock_request(name, version, lock) if request is not None: d[lock].append(request) return d
def _determinebase_address(self): """ The basic idea is simple: start from a specific point, try to construct functions as much as we can, and maintain a function distribution graph and a call graph simultaneously. Repeat searching until we come to the end that there is no new function to be found. A function should start with: # some addresses that a call exit leads to, or # certain instructions. They are recoreded in SimArch. For a better performance, instead of blindly scanning the entire process space, we first try to search for instruction patterns that a function may start with, and start scanning at those positions. Then we try to decode anything that is left. """ traced_address = set() self.functions = set() self.call_map = networkx.DiGraph() self.cfg = networkx.DiGraph() initial_state = self.project.factory.blank_state(mode="fastpath") initial_options = initial_state.options - { o.TRACK_CONSTRAINTS } - o.refs initial_options |= { o.SUPER_FASTPATH } # initial_options.remove(o.COW_STATES) initial_state.options = initial_options # Sadly, not all calls to functions are explicitly made by call # instruction - they could be a jmp or b, or something else. So we # should record all exits from a single function, and then add # necessary calling edges in our call map during the post-processing # phase. 
function_exits = defaultdict(set) dump_file_prefix = self.project.filename if self._pickle_intermediate_results and \ os.path.exists(dump_file_prefix + "_indirect_jumps.angr"): l.debug("Loading existing intermediate results.") self._indirect_jumps = pickle.load(open(dump_file_prefix + "_indirect_jumps.angr", "rb")) self.cfg = pickle.load(open(dump_file_prefix + "_coercecfg.angr", "rb")) self._unassured_functions = pickle.load(open(dump_file_prefix + "_unassured_functions.angr", "rb")) else: # Performance boost :-) # Scan for existing function prologues self._scan_function_prologues(traced_address, function_exits, initial_state) if self._pickle_intermediate_results: l.debug("Dumping intermediate results.") pickle.dump(self._indirect_jumps, open(dump_file_prefix + "_indirect_jumps.angr", "wb"), -1) pickle.dump(self.cfg, open(dump_file_prefix + "_coercecfg.angr", "wb"), -1) pickle.dump(self._unassured_functions, open(dump_file_prefix + "_unassured_functions.angr", "wb"), -1) if len(self._indirect_jumps): # We got some indirect jumps! # Gotta execute each basic block and see where it wants to jump to function_starts = self._process_indirect_jumps() self.base_address = self._solve_forbase_address(function_starts, self._unassured_functions) l.info("Base address should be 0x%x", self.base_address) else: l.debug("No indirect jumps are found. We switch to the slowpath mode.") # TODO: Slowpath mode... 
while True: next_addr = self._get_next_code_addr(initial_state) percentage = self._seg_list.occupied_size * 100.0 / (self._valid_memory_region_size) l.info("Analyzing %xh, progress %0.04f%%", next_addr, percentage) if next_addr is None: break self.call_map.add_node(next_addr) self._scan_code(traced_address, function_exits, initial_state, next_addr) # Post-processing: Map those calls that are not made by call/blr # instructions to their targets in our map for src, s in function_exits.items(): if src in self.call_map: for target in s: if target in self.call_map: self.call_map.add_edge(src, target) nodes = sorted(self.call_map.nodes()) for i in range(len(nodes) - 1): if nodes[i] >= nodes[i + 1] - 4: for dst in self.call_map.successors(nodes[i + 1]): self.call_map.add_edge(nodes[i], dst) for src in self.call_map.predecessors(nodes[i + 1]): self.call_map.add_edge(src, nodes[i]) self.call_map.remove_node(nodes[i + 1]) l.debug("Construction finished.")
The basic idea is simple: start from a specific point, try to construct functions as much as we can, and maintain a function distribution graph and a call graph simultaneously. Repeat searching until we come to the end that there is no new function to be found. A function should start with: # some addresses that a call exit leads to, or # certain instructions. They are recoreded in SimArch. For a better performance, instead of blindly scanning the entire process space, we first try to search for instruction patterns that a function may start with, and start scanning at those positions. Then we try to decode anything that is left.
Below is the the instruction that describes the task: ### Input: The basic idea is simple: start from a specific point, try to construct functions as much as we can, and maintain a function distribution graph and a call graph simultaneously. Repeat searching until we come to the end that there is no new function to be found. A function should start with: # some addresses that a call exit leads to, or # certain instructions. They are recoreded in SimArch. For a better performance, instead of blindly scanning the entire process space, we first try to search for instruction patterns that a function may start with, and start scanning at those positions. Then we try to decode anything that is left. ### Response: def _determinebase_address(self): """ The basic idea is simple: start from a specific point, try to construct functions as much as we can, and maintain a function distribution graph and a call graph simultaneously. Repeat searching until we come to the end that there is no new function to be found. A function should start with: # some addresses that a call exit leads to, or # certain instructions. They are recoreded in SimArch. For a better performance, instead of blindly scanning the entire process space, we first try to search for instruction patterns that a function may start with, and start scanning at those positions. Then we try to decode anything that is left. """ traced_address = set() self.functions = set() self.call_map = networkx.DiGraph() self.cfg = networkx.DiGraph() initial_state = self.project.factory.blank_state(mode="fastpath") initial_options = initial_state.options - { o.TRACK_CONSTRAINTS } - o.refs initial_options |= { o.SUPER_FASTPATH } # initial_options.remove(o.COW_STATES) initial_state.options = initial_options # Sadly, not all calls to functions are explicitly made by call # instruction - they could be a jmp or b, or something else. 
So we # should record all exits from a single function, and then add # necessary calling edges in our call map during the post-processing # phase. function_exits = defaultdict(set) dump_file_prefix = self.project.filename if self._pickle_intermediate_results and \ os.path.exists(dump_file_prefix + "_indirect_jumps.angr"): l.debug("Loading existing intermediate results.") self._indirect_jumps = pickle.load(open(dump_file_prefix + "_indirect_jumps.angr", "rb")) self.cfg = pickle.load(open(dump_file_prefix + "_coercecfg.angr", "rb")) self._unassured_functions = pickle.load(open(dump_file_prefix + "_unassured_functions.angr", "rb")) else: # Performance boost :-) # Scan for existing function prologues self._scan_function_prologues(traced_address, function_exits, initial_state) if self._pickle_intermediate_results: l.debug("Dumping intermediate results.") pickle.dump(self._indirect_jumps, open(dump_file_prefix + "_indirect_jumps.angr", "wb"), -1) pickle.dump(self.cfg, open(dump_file_prefix + "_coercecfg.angr", "wb"), -1) pickle.dump(self._unassured_functions, open(dump_file_prefix + "_unassured_functions.angr", "wb"), -1) if len(self._indirect_jumps): # We got some indirect jumps! # Gotta execute each basic block and see where it wants to jump to function_starts = self._process_indirect_jumps() self.base_address = self._solve_forbase_address(function_starts, self._unassured_functions) l.info("Base address should be 0x%x", self.base_address) else: l.debug("No indirect jumps are found. We switch to the slowpath mode.") # TODO: Slowpath mode... 
while True: next_addr = self._get_next_code_addr(initial_state) percentage = self._seg_list.occupied_size * 100.0 / (self._valid_memory_region_size) l.info("Analyzing %xh, progress %0.04f%%", next_addr, percentage) if next_addr is None: break self.call_map.add_node(next_addr) self._scan_code(traced_address, function_exits, initial_state, next_addr) # Post-processing: Map those calls that are not made by call/blr # instructions to their targets in our map for src, s in function_exits.items(): if src in self.call_map: for target in s: if target in self.call_map: self.call_map.add_edge(src, target) nodes = sorted(self.call_map.nodes()) for i in range(len(nodes) - 1): if nodes[i] >= nodes[i + 1] - 4: for dst in self.call_map.successors(nodes[i + 1]): self.call_map.add_edge(nodes[i], dst) for src in self.call_map.predecessors(nodes[i + 1]): self.call_map.add_edge(src, nodes[i]) self.call_map.remove_node(nodes[i + 1]) l.debug("Construction finished.")
def wantFile(self, file): """Is the file a wanted test file? The file must be a python source file and match testMatch or include, and not match exclude. Files that match ignore are *never* wanted, regardless of plugin, testMatch, include or exclude settings. """ # never, ever load files that match anything in ignore # (.* _* and *setup*.py by default) base = op_basename(file) ignore_matches = [ ignore_this for ignore_this in self.ignoreFiles if ignore_this.search(base) ] if ignore_matches: log.debug('%s matches ignoreFiles pattern; skipped', base) return False if not self.config.includeExe and os.access(file, os.X_OK): log.info('%s is executable; skipped', file) return False dummy, ext = op_splitext(base) pysrc = ext == '.py' wanted = pysrc and self.matches(base) plug_wants = self.plugins.wantFile(file) if plug_wants is not None: log.debug("plugin setting want %s to %s", file, plug_wants) wanted = plug_wants log.debug("wantFile %s? %s", file, wanted) return wanted
Is the file a wanted test file? The file must be a python source file and match testMatch or include, and not match exclude. Files that match ignore are *never* wanted, regardless of plugin, testMatch, include or exclude settings.
Below is the the instruction that describes the task: ### Input: Is the file a wanted test file? The file must be a python source file and match testMatch or include, and not match exclude. Files that match ignore are *never* wanted, regardless of plugin, testMatch, include or exclude settings. ### Response: def wantFile(self, file): """Is the file a wanted test file? The file must be a python source file and match testMatch or include, and not match exclude. Files that match ignore are *never* wanted, regardless of plugin, testMatch, include or exclude settings. """ # never, ever load files that match anything in ignore # (.* _* and *setup*.py by default) base = op_basename(file) ignore_matches = [ ignore_this for ignore_this in self.ignoreFiles if ignore_this.search(base) ] if ignore_matches: log.debug('%s matches ignoreFiles pattern; skipped', base) return False if not self.config.includeExe and os.access(file, os.X_OK): log.info('%s is executable; skipped', file) return False dummy, ext = op_splitext(base) pysrc = ext == '.py' wanted = pysrc and self.matches(base) plug_wants = self.plugins.wantFile(file) if plug_wants is not None: log.debug("plugin setting want %s to %s", file, plug_wants) wanted = plug_wants log.debug("wantFile %s? %s", file, wanted) return wanted
def next(self, data): """ Derive a new set of internal and output data from given input data and the data stored internally. Use the key derivation function to derive new data. The kdf gets supplied with the current key and the data passed to this method. :param data: A bytes-like object encoding the data to pass to the key derivation function. :returns: A bytes-like object encoding the output material. """ self.__length += 1 result = self.__kdf.calculate(self.__key, data, 64) self.__key = result[:32] return result[32:]
Derive a new set of internal and output data from given input data and the data stored internally. Use the key derivation function to derive new data. The kdf gets supplied with the current key and the data passed to this method. :param data: A bytes-like object encoding the data to pass to the key derivation function. :returns: A bytes-like object encoding the output material.
Below is the the instruction that describes the task: ### Input: Derive a new set of internal and output data from given input data and the data stored internally. Use the key derivation function to derive new data. The kdf gets supplied with the current key and the data passed to this method. :param data: A bytes-like object encoding the data to pass to the key derivation function. :returns: A bytes-like object encoding the output material. ### Response: def next(self, data): """ Derive a new set of internal and output data from given input data and the data stored internally. Use the key derivation function to derive new data. The kdf gets supplied with the current key and the data passed to this method. :param data: A bytes-like object encoding the data to pass to the key derivation function. :returns: A bytes-like object encoding the output material. """ self.__length += 1 result = self.__kdf.calculate(self.__key, data, 64) self.__key = result[:32] return result[32:]
async def action_handler(self):
    """
    Call vtep controller in sequence, merge multiple calls if possible.

    When a bind relationship is updated, we always send all logical ports
    to a logicalswitch, to make sure it recovers from some failed updates
    (so called idempotency). When multiple calls are pending, we only need
    to send the last of them.
    """
    bind_event = VtepControllerCall.createMatcher(self._conn)
    event_queue = []
    # One-element list so the nested coroutine can mutate the flag.
    timeout_flag = [False]

    async def handle_action():
        # Drain the queue (and any pending timeout tick) before returning.
        while event_queue or timeout_flag[0]:
            events = event_queue[:]
            del event_queue[:]
            for e in events:
                # every event must have physname , phyiname
                # physname: physical switch name - must be same with OVSDB-VTEP switch
                # phyiname: physical port name - must be same with the corresponding port
                physname = e.physname
                phyiname = e.phyiname
                if e.type == VtepControllerCall.UNBINDALL:
                    # clear all other event info
                    self._store_event[(physname, phyiname)] = {"all": e}
                elif e.type == VtepControllerCall.BIND:
                    # bind will combine bind event before
                    vlanid = e.vlanid
                    if (physname, phyiname) in self._store_event:
                        v = self._store_event[(physname, phyiname)]
                        if vlanid in v:
                            logicalports = e.logicalports
                            v.update({vlanid: (e.type, e.logicalnetworkid, e.vni, logicalports)})
                            self._store_event[(physname, phyiname)] = v
                        else:
                            # new bind info , no combined event
                            v.update({vlanid: (e.type, e.logicalnetworkid, e.vni, e.logicalports)})
                            self._store_event[(physname, phyiname)] = v
                    else:
                        self._store_event[(physname, phyiname)] = {vlanid: (e.type, e.logicalnetworkid,
                                                                            e.vni, e.logicalports)}
                elif e.type == VtepControllerCall.UNBIND:
                    vlanid = e.vlanid
                    if (physname, phyiname) in self._store_event:
                        v = self._store_event[(physname, phyiname)]
                        v.update({vlanid: (e.type, e.logicalnetworkid)})
                        self._store_event[(physname, phyiname)] = v
                    else:
                        self._store_event[(physname, phyiname)] = {vlanid: (e.type, e.logicalnetworkid)}
                else:
                    # BUGFIX: the original format string contained a %r
                    # placeholder but no matching argument, so the logging
                    # call itself raised a formatting error; pass the
                    # offending event explicitly.
                    self._parent._logger.warning("catch error type event %r , ignore it", e, exc_info=True)
                    continue
            call = []
            target_name = "vtepcontroller"
            for k, v in self._store_event.items():
                if "all" in v:
                    # send unbindall
                    call.append(self.api(self, target_name, "unbindphysicalport",
                                         {"physicalswitch": k[0],
                                          "physicalport": k[1]}, timeout=10))
                    # unbindall , del it whatever
                    del v["all"]
            try:
                await self.execute_all(call)
            except Exception:
                self._parent._logger.warning("unbindall remove call failed", exc_info=True)
            for k, v in self._store_event.items():
                # Iterate over a copy: entries are deleted on success.
                for vlanid, e in dict(v).items():
                    if vlanid != "all":
                        if e[0] == VtepControllerCall.BIND:
                            params = {"physicalswitch": k[0],
                                      "physicalport": k[1],
                                      "vlanid": vlanid,
                                      "logicalnetwork": e[1],
                                      "vni": e[2],
                                      "logicalports": e[3]}
                            try:
                                await self.api(self, target_name, "updatelogicalswitch",
                                               params, timeout=10)
                            except Exception:
                                # Keep the entry so the call is retried next round.
                                self._parent._logger.warning("update logical switch error,try next %r", params,
                                                             exc_info=True)
                            else:
                                del self._store_event[k][vlanid]
                        elif e[0] == VtepControllerCall.UNBIND:
                            params = {"logicalnetwork": e[1],
                                      "physicalswitch": k[0],
                                      "physicalport": k[1],
                                      "vlanid": vlanid}
                            try:
                                await self.api(self, target_name, "unbindlogicalswitch",
                                               params, timeout=10)
                            except Exception:
                                self._parent._logger.warning("unbind logical switch error,try next %r", params,
                                                             exc_info=True)
                            else:
                                del self._store_event[k][vlanid]
            # Drop (physname, phyiname) entries whose pending work was flushed.
            self._store_event = dict((k, v) for k, v in self._store_event.items() if v)
            if timeout_flag[0]:
                timeout_flag[0] = False

    def append_event(event, matcher):
        # Events arriving while handle_action runs are queued for later.
        event_queue.append(event)

    while True:
        timeout, ev, m = await self.wait_with_timeout(10, bind_event)
        if not timeout:
            event_queue.append(ev)
        else:
            timeout_flag[0] = True
        await self.with_callback(handle_action(), append_event, bind_event)
Call vtep controller in sequence, merge multiple calls if possible When a bind relationship is updated, we always send all logical ports to a logicalswitch, to make sure it recovers from some failed updates (so called idempotency). When multiple calls are pending, we only need to send the last of them.
Below is the the instruction that describes the task: ### Input: Call vtep controller in sequence, merge mutiple calls if possible When a bind relationship is updated, we always send all logical ports to a logicalswitch, to make sure it recovers from some failed updates (so called idempotency). When multiple calls are pending, we only need to send the last of them. ### Response: async def action_handler(self): """ Call vtep controller in sequence, merge mutiple calls if possible When a bind relationship is updated, we always send all logical ports to a logicalswitch, to make sure it recovers from some failed updates (so called idempotency). When multiple calls are pending, we only need to send the last of them. """ bind_event = VtepControllerCall.createMatcher(self._conn) event_queue = [] timeout_flag = [False] async def handle_action(): while event_queue or timeout_flag[0]: events = event_queue[:] del event_queue[:] for e in events: # every event must have physname , phyiname # physname: physical switch name - must be same with OVSDB-VTEP switch # phyiname: physical port name - must be same with the corresponding port physname = e.physname phyiname = e.phyiname if e.type == VtepControllerCall.UNBINDALL: # clear all other event info self._store_event[(physname,phyiname)] = {"all":e} elif e.type == VtepControllerCall.BIND: # bind will combine bind event before vlanid = e.vlanid if (physname,phyiname) in self._store_event: v = self._store_event[(physname,phyiname)] if vlanid in v: logicalports = e.logicalports v.update({vlanid:(e.type,e.logicalnetworkid,e.vni,logicalports)}) self._store_event[(physname,phyiname)] = v else: # new bind info , no combind event v.update({vlanid:(e.type,e.logicalnetworkid,e.vni,e.logicalports)}) self._store_event[(physname,phyiname)] = v else: self._store_event[(physname,phyiname)] = {vlanid:(e.type,e.logicalnetworkid, e.vni,e.logicalports)} elif e.type == VtepControllerCall.UNBIND: vlanid = e.vlanid if (physname,phyiname) in 
self._store_event: v = self._store_event[(physname,phyiname)] v.update({vlanid:(e.type,e.logicalnetworkid)}) self._store_event[(physname,phyiname)] = v else: self._store_event[(physname,phyiname)] = {vlanid:(e.type,e.logicalnetworkid)} else: self._parent._logger.warning("catch error type event %r , ignore it", exc_info=True) continue call = [] target_name = "vtepcontroller" for k,v in self._store_event.items(): if "all" in v: # send unbindall call.append(self.api(self,target_name,"unbindphysicalport", {"physicalswitch": k[0], "physicalport": k[1]}, timeout=10)) # unbindall , del it whatever del v["all"] try: await self.execute_all(call) except Exception: self._parent._logger.warning("unbindall remove call failed", exc_info=True) for k,v in self._store_event.items(): for vlanid , e in dict(v).items(): if vlanid != "all": if e[0] == VtepControllerCall.BIND: params = {"physicalswitch": k[0], "physicalport": k[1], "vlanid": vlanid, "logicalnetwork": e[1], "vni":e[2], "logicalports": e[3]} try: await self.api(self,target_name,"updatelogicalswitch", params,timeout=10) except Exception: self._parent._logger.warning("update logical switch error,try next %r",params, exc_info=True) else: del self._store_event[k][vlanid] elif e[0] == VtepControllerCall.UNBIND: params = {"logicalnetwork":e[1], "physicalswitch":k[0], "physicalport":k[1], "vlanid":vlanid} try: await self.api(self,target_name,"unbindlogicalswitch", params,timeout=10) except Exception: self._parent._logger.warning("unbind logical switch error,try next %r",params, exc_info=True) else: del self._store_event[k][vlanid] self._store_event = dict((k,v) for k,v in self._store_event.items() if v) if timeout_flag[0]: timeout_flag[0] = False def append_event(event, matcher): event_queue.append(event) while True: timeout, ev, m = await self.wait_with_timeout(10, bind_event) if not timeout: event_queue.append(ev) else: timeout_flag[0] = True await self.with_callback(handle_action(), append_event, bind_event)
def build(self): """ The decoder computational graph consists of three components: (1) the input node `decoder_input` (2) the embedding node `decoder_embed` (3) the recurrent (RNN) part `decoder_rnn` (4) the output of the decoder RNN `decoder_output` (5) the classification output layer `decoder_dense` """ # Grab hyperparameters from self.config: hidden_dim = self.config['encoding-layer-width'] recurrent_unit = self.config['recurrent-unit-type'] bidirectional = False #self.config['encoding-layer-bidirectional'] vocab_size = self.data.properties.vocab_size embedding_dim = math.ceil(math.log(vocab_size, 2)) # self.config['embedding-dim'] input_length = self.data.properties['max-utterance-length'] + 1 # Assemble the network components: decoder_input = Input(shape=(None,)) decoder_embed = Embedding(vocab_size, embedding_dim, mask_zero=True)(decoder_input) #, input_length=input_length)(decoder_input) if recurrent_unit == 'lstm': decoder_rnn = LSTM(hidden_dim, return_sequences=True, return_state=True) decoder_output, decoder_h, decoder_c = decoder_rnn(decoder_embed, initial_state=self.encoder.encoder_hidden_state) elif recurrent_unit == 'gru': decoder_rnn = GRU(hidden_dim, return_sequences=True, return_state=True) decoder_output, _ = decoder_rnn(decoder_embed, initial_state=self.encoder.encoder_hidden_state) else: raise Exception('Invalid recurrent unit type: {}'.format(recurrent_unit)) # make the RNN component bidirectional, if desired if bidirectional: decoder_rnn = Bidirectional(decoder_rnn, merge_mode='ave') decoder_dense = Dense(vocab_size, activation='softmax') decoder_output = decoder_dense(decoder_output) # save the four Decoder components as class state self.decoder_input = decoder_input self.decoder_embed = decoder_embed self.decoder_rnn = decoder_rnn self.decoder_dense = decoder_dense self.decoder_output = decoder_output return
The decoder computational graph consists of three components: (1) the input node `decoder_input` (2) the embedding node `decoder_embed` (3) the recurrent (RNN) part `decoder_rnn` (4) the output of the decoder RNN `decoder_output` (5) the classification output layer `decoder_dense`
Below is the the instruction that describes the task: ### Input: The decoder computational graph consists of three components: (1) the input node `decoder_input` (2) the embedding node `decoder_embed` (3) the recurrent (RNN) part `decoder_rnn` (4) the output of the decoder RNN `decoder_output` (5) the classification output layer `decoder_dense` ### Response: def build(self): """ The decoder computational graph consists of three components: (1) the input node `decoder_input` (2) the embedding node `decoder_embed` (3) the recurrent (RNN) part `decoder_rnn` (4) the output of the decoder RNN `decoder_output` (5) the classification output layer `decoder_dense` """ # Grab hyperparameters from self.config: hidden_dim = self.config['encoding-layer-width'] recurrent_unit = self.config['recurrent-unit-type'] bidirectional = False #self.config['encoding-layer-bidirectional'] vocab_size = self.data.properties.vocab_size embedding_dim = math.ceil(math.log(vocab_size, 2)) # self.config['embedding-dim'] input_length = self.data.properties['max-utterance-length'] + 1 # Assemble the network components: decoder_input = Input(shape=(None,)) decoder_embed = Embedding(vocab_size, embedding_dim, mask_zero=True)(decoder_input) #, input_length=input_length)(decoder_input) if recurrent_unit == 'lstm': decoder_rnn = LSTM(hidden_dim, return_sequences=True, return_state=True) decoder_output, decoder_h, decoder_c = decoder_rnn(decoder_embed, initial_state=self.encoder.encoder_hidden_state) elif recurrent_unit == 'gru': decoder_rnn = GRU(hidden_dim, return_sequences=True, return_state=True) decoder_output, _ = decoder_rnn(decoder_embed, initial_state=self.encoder.encoder_hidden_state) else: raise Exception('Invalid recurrent unit type: {}'.format(recurrent_unit)) # make the RNN component bidirectional, if desired if bidirectional: decoder_rnn = Bidirectional(decoder_rnn, merge_mode='ave') decoder_dense = Dense(vocab_size, activation='softmax') decoder_output = decoder_dense(decoder_output) # 
save the four Decoder components as class state self.decoder_input = decoder_input self.decoder_embed = decoder_embed self.decoder_rnn = decoder_rnn self.decoder_dense = decoder_dense self.decoder_output = decoder_output return
def updateNodeCapabilities(self, nodeId, node, vendorSpecific=None):
    """Update node capabilities and report success as a boolean.

    Thin convenience wrapper: issues the request through
    updateNodeCapabilitiesResponse() and interprets the raw response
    with _read_boolean_response().

    See Also: updateNodeCapabilitiesResponse()

    Args:
      nodeId:
      node:
      vendorSpecific:

    Returns:
    """
    return self._read_boolean_response(
        self.updateNodeCapabilitiesResponse(nodeId, node, vendorSpecific)
    )
See Also: updateNodeCapabilitiesResponse() Args: nodeId: node: vendorSpecific: Returns:
Below is the the instruction that describes the task: ### Input: See Also: updateNodeCapabilitiesResponse() Args: nodeId: node: vendorSpecific: Returns: ### Response: def updateNodeCapabilities(self, nodeId, node, vendorSpecific=None): """See Also: updateNodeCapabilitiesResponse() Args: nodeId: node: vendorSpecific: Returns: """ response = self.updateNodeCapabilitiesResponse(nodeId, node, vendorSpecific) return self._read_boolean_response(response)
def update_preference_type(self, type, address, notification, notification_preferences_frequency):
    """
    Update a preference.

    Change the preference for a single notification for a single
    communication channel.
    """
    # All three path segments are REQUIRED IDs.
    path = {
        "type": type,
        "address": address,
        "notification": notification,
    }
    # REQUIRED - the desired frequency for this notification.
    data = {"notification_preferences[frequency]": notification_preferences_frequency}
    params = {}

    self.logger.debug("PUT /api/v1/users/self/communication_channels/{type}/{address}/notification_preferences/{notification} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/users/self/communication_channels/{type}/{address}/notification_preferences/{notification}".format(**path), data=data, params=params, no_data=True)
Update a preference. Change the preference for a single notification for a single communication channel
Below is the the instruction that describes the task: ### Input: Update a preference. Change the preference for a single notification for a single communication channel ### Response: def update_preference_type(self, type, address, notification, notification_preferences_frequency): """ Update a preference. Change the preference for a single notification for a single communication channel """ path = {} data = {} params = {} # REQUIRED - PATH - type """ID""" path["type"] = type # REQUIRED - PATH - address """ID""" path["address"] = address # REQUIRED - PATH - notification """ID""" path["notification"] = notification # REQUIRED - notification_preferences[frequency] """The desired frequency for this notification""" data["notification_preferences[frequency]"] = notification_preferences_frequency self.logger.debug("PUT /api/v1/users/self/communication_channels/{type}/{address}/notification_preferences/{notification} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/users/self/communication_channels/{type}/{address}/notification_preferences/{notification}".format(**path), data=data, params=params, no_data=True)
def geocoding(ctx, query, forward, include_headers, lat, lon, place_type,
              output, dataset, country, bbox, features, limit):
    """This command returns places matching an address (forward mode) or
    places matching coordinates (reverse mode).

    In forward (the default) mode the query argument shall be an address
    such as '1600 pennsylvania ave nw'.

      $ mapbox geocoding '1600 pennsylvania ave nw'

    In reverse mode the query argument shall be a JSON encoded array
    of longitude and latitude (in that order) in decimal degrees.

      $ mapbox geocoding --reverse '[-77.4371, 37.5227]'

    An access token is required, see `mapbox --help`.
    """
    # Token comes from the CLI context, if the group stored one there.
    access_token = (ctx.obj and ctx.obj.get('access_token')) or None
    stdout = click.open_file(output, 'w')
    geocoder = Geocoder(name=dataset, access_token=access_token)
    if forward:
        if country:
            # Country filter: comma-separated list, normalized to lowercase.
            country = [x.lower() for x in country.split(",")]
        if bbox:
            # Accept either a comma-separated "minx,miny,maxx,maxy" string
            # or a JSON array literal.
            try:
                bbox = tuple(map(float, bbox.split(',')))
            except ValueError:
                bbox = json.loads(bbox)
        for q in iter_query(query):
            try:
                resp = geocoder.forward(
                    q, types=place_type, lat=lat, lon=lon,
                    country=country, bbox=bbox, limit=limit)
            except mapbox.errors.ValidationError as exc:
                raise click.BadParameter(str(exc))
            if include_headers:
                echo_headers(resp.headers, file=stdout)
            if resp.status_code == 200:
                if features:
                    # Emit one GeoJSON feature per line instead of the
                    # whole FeatureCollection body.
                    collection = json.loads(resp.text)
                    for feat in collection['features']:
                        click.echo(json.dumps(feat), file=stdout)
                else:
                    click.echo(resp.text, file=stdout)
            else:
                raise MapboxCLIException(resp.text.strip())
    else:
        # Reverse mode: each query item is a "[lon, lat]" JSON pair.
        for lon, lat in map(coords_from_query, iter_query(query)):
            try:
                resp = geocoder.reverse(
                    lon=lon, lat=lat, types=place_type, limit=limit)
            except mapbox.errors.ValidationError as exc:
                raise click.BadParameter(str(exc))
            if include_headers:
                echo_headers(resp.headers, file=stdout)
            if resp.status_code == 200:
                if features:
                    collection = json.loads(resp.text)
                    for feat in collection['features']:
                        click.echo(json.dumps(feat), file=stdout)
                else:
                    click.echo(resp.text, file=stdout)
            else:
                raise MapboxCLIException(resp.text.strip())
This command returns places matching an address (forward mode) or places matching coordinates (reverse mode). In forward (the default) mode the query argument shall be an address such as '1600 pennsylvania ave nw'. $ mapbox geocoding '1600 pennsylvania ave nw' In reverse mode the query argument shall be a JSON encoded array of longitude and latitude (in that order) in decimal degrees. $ mapbox geocoding --reverse '[-77.4371, 37.5227]' An access token is required, see `mapbox --help`.
Below is the the instruction that describes the task: ### Input: This command returns places matching an address (forward mode) or places matching coordinates (reverse mode). In forward (the default) mode the query argument shall be an address such as '1600 pennsylvania ave nw'. $ mapbox geocoding '1600 pennsylvania ave nw' In reverse mode the query argument shall be a JSON encoded array of longitude and latitude (in that order) in decimal degrees. $ mapbox geocoding --reverse '[-77.4371, 37.5227]' An access token is required, see `mapbox --help`. ### Response: def geocoding(ctx, query, forward, include_headers, lat, lon, place_type, output, dataset, country, bbox, features, limit): """This command returns places matching an address (forward mode) or places matching coordinates (reverse mode). In forward (the default) mode the query argument shall be an address such as '1600 pennsylvania ave nw'. $ mapbox geocoding '1600 pennsylvania ave nw' In reverse mode the query argument shall be a JSON encoded array of longitude and latitude (in that order) in decimal degrees. $ mapbox geocoding --reverse '[-77.4371, 37.5227]' An access token is required, see `mapbox --help`. 
""" access_token = (ctx.obj and ctx.obj.get('access_token')) or None stdout = click.open_file(output, 'w') geocoder = Geocoder(name=dataset, access_token=access_token) if forward: if country: country = [x.lower() for x in country.split(",")] if bbox: try: bbox = tuple(map(float, bbox.split(','))) except ValueError: bbox = json.loads(bbox) for q in iter_query(query): try: resp = geocoder.forward( q, types=place_type, lat=lat, lon=lon, country=country, bbox=bbox, limit=limit) except mapbox.errors.ValidationError as exc: raise click.BadParameter(str(exc)) if include_headers: echo_headers(resp.headers, file=stdout) if resp.status_code == 200: if features: collection = json.loads(resp.text) for feat in collection['features']: click.echo(json.dumps(feat), file=stdout) else: click.echo(resp.text, file=stdout) else: raise MapboxCLIException(resp.text.strip()) else: for lon, lat in map(coords_from_query, iter_query(query)): try: resp = geocoder.reverse( lon=lon, lat=lat, types=place_type, limit=limit) except mapbox.errors.ValidationError as exc: raise click.BadParameter(str(exc)) if include_headers: echo_headers(resp.headers, file=stdout) if resp.status_code == 200: if features: collection = json.loads(resp.text) for feat in collection['features']: click.echo(json.dumps(feat), file=stdout) else: click.echo(resp.text, file=stdout) else: raise MapboxCLIException(resp.text.strip())
def setup(cls, cron_cfg="cron"): """ Set up the runtime environment. """ random.seed() logging_cfg = cls.LOGGING_CFG if "%s" in logging_cfg: logging_cfg = logging_cfg % (cron_cfg if "--cron" in sys.argv[1:] else "scripts",) logging_cfg = os.path.expanduser(logging_cfg) if os.path.exists(logging_cfg): logging.HERE = os.path.dirname(logging_cfg) logging.config.fileConfig(logging_cfg) else: logging.basicConfig(level=logging.INFO) logging.getLogger().debug("Logging config read from '%s'" % logging_cfg)
Set up the runtime environment.
Below is the the instruction that describes the task: ### Input: Set up the runtime environment. ### Response: def setup(cls, cron_cfg="cron"): """ Set up the runtime environment. """ random.seed() logging_cfg = cls.LOGGING_CFG if "%s" in logging_cfg: logging_cfg = logging_cfg % (cron_cfg if "--cron" in sys.argv[1:] else "scripts",) logging_cfg = os.path.expanduser(logging_cfg) if os.path.exists(logging_cfg): logging.HERE = os.path.dirname(logging_cfg) logging.config.fileConfig(logging_cfg) else: logging.basicConfig(level=logging.INFO) logging.getLogger().debug("Logging config read from '%s'" % logging_cfg)
def router_add(self, params):
    """Add a new router (mongos) to the existing sharded configuration."""
    if self.uses_rs_configdb:
        # Config servers run as a replica set: "rsId/host1,host2,...".
        rs_id = self._configsvrs[0]
        member_hosts = ','.join(m['host'] for m in ReplicaSets().members(rs_id))
        configdb = '%s/%s' % (rs_id, member_hosts)
    else:
        # Legacy standalone config servers: plain comma-separated host list.
        configdb = ','.join(Servers().hostname(item) for item in self._configsvrs)

    server_id = params.pop('server_id', None)
    version = params.pop('version', self._version)
    params.update({'configdb': configdb})
    if self.enable_ipv6:
        common.enable_ipv6_single(params)
    # Remove flags that turn auth on.
    params = self._strip_auth(params)

    new_router = Servers().create('mongos', params,
                                  sslParams=self.sslParams,
                                  autostart=True,
                                  version=version,
                                  server_id=server_id)
    self._routers.append(new_router)
    return {'id': new_router,
            'hostname': Servers().hostname(new_router)}
add new router (mongos) into existing configuration
Below is the the instruction that describes the task: ### Input: add new router (mongos) into existing configuration ### Response: def router_add(self, params): """add new router (mongos) into existing configuration""" if self.uses_rs_configdb: # Replica set configdb. rs_id = self._configsvrs[0] config_members = ReplicaSets().members(rs_id) configdb = '%s/%s' % ( rs_id, ','.join(m['host'] for m in config_members)) else: configdb = ','.join(Servers().hostname(item) for item in self._configsvrs) server_id = params.pop('server_id', None) version = params.pop('version', self._version) params.update({'configdb': configdb}) if self.enable_ipv6: common.enable_ipv6_single(params) # Remove flags that turn auth on. params = self._strip_auth(params) self._routers.append(Servers().create( 'mongos', params, sslParams=self.sslParams, autostart=True, version=version, server_id=server_id)) return {'id': self._routers[-1], 'hostname': Servers().hostname(self._routers[-1])}
def create(self):
    """
    Create the server.

    Calls Srv_Create in the snap7 native library and stores the
    resulting opaque handle in ``self.pointer`` for later Srv_* calls.
    """
    logger.info("creating server")
    # Declare the C return type before calling so ctypes converts the
    # returned handle correctly instead of assuming the default int.
    self.library.Srv_Create.restype = snap7.snap7types.S7Object
    self.pointer = snap7.snap7types.S7Object(self.library.Srv_Create())
create the server.
Below is the the instruction that describes the task: ### Input: create the server. ### Response: def create(self): """ create the server. """ logger.info("creating server") self.library.Srv_Create.restype = snap7.snap7types.S7Object self.pointer = snap7.snap7types.S7Object(self.library.Srv_Create())
def autoExpand(self, level=None):
    """
    Return whether the widget should auto-expand at the given level.

    Falls back to the level-independent default (keyed by ``None``)
    when no explicit setting exists for *level*, and to ``False`` when
    there is no default either.

    :param level | <int> || None
    :return <bool>
    """
    default = self._autoExpand.get(None, False)
    return self._autoExpand.get(level, default)
Returns whether or not to expand for the given level. :param level | <int> || None :return <bool>
Below is the the instruction that describes the task: ### Input: Returns whether or not to expand for the inputed level. :param level | <int> || None :return <bool> ### Response: def autoExpand(self, level=None): """ Returns whether or not to expand for the inputed level. :param level | <int> || None :return <bool> """ return self._autoExpand.get(level, self._autoExpand.get(None, False))
def click_text(self, text, exact_match=False):
    """Click text identified by ``text``.

    By default the first element whose text *contains* ``text`` is
    clicked; set ``exact_match`` to `True` to require an exactly
    matching text. When ``text`` occurs several times and the first
    hit is not the wanted one, use a `locator` with `Get Web Elements`
    instead.
    """
    element = self._element_find_by_text(text, exact_match)
    element.click()
Click text identified by ``text``. By default tries to click first text involves given ``text``, if you would like to click exactly matching text, then set ``exact_match`` to `True`. If there are multiple use of ``text`` and you do not want first one, use `locator` with `Get Web Elements` instead.
Below is the the instruction that describes the task: ### Input: Click text identified by ``text``. By default tries to click first text involves given ``text``, if you would like to click exactly matching text, then set ``exact_match`` to `True`. If there are multiple use of ``text`` and you do not want first one, use `locator` with `Get Web Elements` instead. ### Response: def click_text(self, text, exact_match=False): """Click text identified by ``text``. By default tries to click first text involves given ``text``, if you would like to click exactly matching text, then set ``exact_match`` to `True`. If there are multiple use of ``text`` and you do not want first one, use `locator` with `Get Web Elements` instead. """ self._element_find_by_text(text,exact_match).click()
def exists(self, **kwargs):
    """Check existence, dropping any partition argument.

    Providing a partition is not necessary on topology objects and
    causes errors, so a caller-supplied value is discarded before
    delegating to ``_exists`` with name transformation enabled.
    """
    cleaned = {k: v for k, v in kwargs.items() if k != 'partition'}
    cleaned['transform_name'] = True
    return self._exists(**cleaned)
Providing a partition is not necessary on topology; causes errors
Below is the the instruction that describes the task: ### Input: Providing a partition is not necessary on topology; causes errors ### Response: def exists(self, **kwargs): """Providing a partition is not necessary on topology; causes errors""" kwargs.pop('partition', None) kwargs['transform_name'] = True return self._exists(**kwargs)
async def reset(self, von_wallet: Wallet, seed: str = None) -> Wallet:
    """
    Close and delete (open) VON anchor wallet and then create, open, and
    return a replacement on the current link secret.

    Note that this operation effectively destroys private keys for keyed
    data structures such as credential offers or credential definitions.

    Raise WalletState if the wallet is closed.

    :param von_wallet: open wallet
    :param seed: seed to use for new wallet (default random)
    :return: replacement wallet
    """
    LOGGER.debug('WalletManager.reset >>> von_wallet %s', von_wallet)

    if not von_wallet.handle:
        LOGGER.debug('WalletManager.reset <!< Wallet %s is closed', von_wallet.name)
        raise WalletState('Wallet {} is closed'.format(von_wallet.name))

    # The wallet is under reset anyway, so mutate its config in place.
    cfg = von_wallet.config
    cfg['did'] = von_wallet.did
    cfg['seed'] = seed
    cfg['auto_create'] = von_wallet.auto_create  # in case both auto_remove+auto_create set (create every open)
    cfg['auto_remove'] = von_wallet.auto_remove

    label = await von_wallet.get_link_secret_label()
    if label:
        cfg['link_secret_label'] = label

    await von_wallet.close()
    if not von_wallet.auto_remove:
        await self.remove(von_wallet)

    replacement = await self.create(cfg, von_wallet.access)
    await replacement.open()

    LOGGER.debug('WalletManager.reset <<< %s', replacement)
    return replacement
Close and delete (open) VON anchor wallet and then create, open, and return replacement on current link secret. Note that this operation effectively destroys private keys for keyed data structures such as credential offers or credential definitions. Raise WalletState if the wallet is closed. :param von_wallet: open wallet :param seed: seed to use for new wallet (default random) :return: replacement wallet
Below is the the instruction that describes the task: ### Input: Close and delete (open) VON anchor wallet and then create, open, and return replacement on current link secret. Note that this operation effectively destroys private keys for keyed data structures such as credential offers or credential definitions. Raise WalletState if the wallet is closed. :param von_wallet: open wallet :param seed: seed to use for new wallet (default random) :return: replacement wallet ### Response: async def reset(self, von_wallet: Wallet, seed: str = None) -> Wallet: """ Close and delete (open) VON anchor wallet and then create, open, and return replacement on current link secret. Note that this operation effectively destroys private keys for keyed data structures such as credential offers or credential definitions. Raise WalletState if the wallet is closed. :param von_wallet: open wallet :param seed: seed to use for new wallet (default random) :return: replacement wallet """ LOGGER.debug('WalletManager.reset >>> von_wallet %s', von_wallet) if not von_wallet.handle: LOGGER.debug('WalletManager.reset <!< Wallet %s is closed', von_wallet.name) raise WalletState('Wallet {} is closed'.format(von_wallet.name)) w_config = von_wallet.config # wallet under reset, no need to make copy w_config['did'] = von_wallet.did w_config['seed'] = seed w_config['auto_create'] = von_wallet.auto_create # in case both auto_remove+auto_create set (create every open) w_config['auto_remove'] = von_wallet.auto_remove label = await von_wallet.get_link_secret_label() if label: w_config['link_secret_label'] = label await von_wallet.close() if not von_wallet.auto_remove: await self.remove(von_wallet) rv = await self.create(w_config, von_wallet.access) await rv.open() LOGGER.debug('WalletManager.reset <<< %s', rv) return rv
def get_model_details(self, model_name):
    """Get details of the specified model from CloudML Service.

    Args:
      model_name: the name of the model. It can be a model full name
          ("projects/[project_id]/models/[model_name]") or just [model_name].
    Returns:
      a dictionary of the model details.
    """
    if model_name.startswith('projects/'):
        full_name = model_name
    else:
        # Qualify a bare model name with the current project.
        full_name = 'projects/%s/models/%s' % (self._project_id, model_name)
    return self._api.projects().models().get(name=full_name).execute()
Get details of the specified model from CloudML Service. Args: model_name: the name of the model. It can be a model full name ("projects/[project_id]/models/[model_name]") or just [model_name]. Returns: a dictionary of the model details.
Below is the the instruction that describes the task: ### Input: Get details of the specified model from CloudML Service. Args: model_name: the name of the model. It can be a model full name ("projects/[project_id]/models/[model_name]") or just [model_name]. Returns: a dictionary of the model details. ### Response: def get_model_details(self, model_name): """Get details of the specified model from CloudML Service. Args: model_name: the name of the model. It can be a model full name ("projects/[project_id]/models/[model_name]") or just [model_name]. Returns: a dictionary of the model details. """ full_name = model_name if not model_name.startswith('projects/'): full_name = ('projects/%s/models/%s' % (self._project_id, model_name)) return self._api.projects().models().get(name=full_name).execute()
def rebuild(self):
    """Rebuild RIFF tree and index from streams."""
    # Replace the 'movi' chunk list with freshly combined streams, then
    # regenerate the index so it matches the new chunk layout.
    movi_list = self.riff.find('LIST', 'movi')
    movi_list.chunks = self.combine_streams()
    self.rebuild_index()
Rebuild RIFF tree and index from streams.
Below is the instruction that describes the task: ### Input: Rebuild RIFF tree and index from streams. ### Response: def rebuild(self): """Rebuild RIFF tree and index from streams.""" movi = self.riff.find('LIST', 'movi') movi.chunks = self.combine_streams() self.rebuild_index()
def asRGB(self):
    """
    Return image as RGB pixels.

    RGB colour images are passed through unchanged; greyscales are
    expanded into RGB triplets (there is a small speed overhead for
    doing this).

    An alpha channel in the source image will raise an exception.

    The return values are as for the :meth:`read` method except that
    the *metadata* reflect the returned pixels, not the source image.
    In particular, for this method ``metadata['greyscale']`` will be
    ``False``.
    """
    width, height, pixels, meta = self.asDirect()
    if meta['alpha']:
        raise Error("will not convert image with alpha channel to RGB")
    if not meta['greyscale']:
        # Already RGB; pass rows through unchanged.
        return width, height, pixels, meta
    meta['greyscale'] = False
    # Pick the row-buffer constructor matching the sample depth
    # (byte array up to 8 bits, 16-bit array above).
    if meta['bitdepth'] > 8:
        make_row = newHarray
    else:
        make_row = newBarray

    def expand_rows():
        # Replicate each grey sample into the R, G and B slots.
        for row in pixels:
            triple = make_row(3 * width)
            for channel in range(3):
                triple[channel::3] = row
            yield triple

    return width, height, expand_rows(), meta
Return image as RGB pixels. RGB colour images are passed through unchanged; greyscales are expanded into RGB triplets (there is a small speed overhead for doing this). An alpha channel in the source image will raise an exception. The return values are as for the :meth:`read` method except that the *metadata* reflect the returned pixels, not the source image. In particular, for this method ``metadata['greyscale']`` will be ``False``.
Below is the the instruction that describes the task: ### Input: Return image as RGB pixels. RGB colour images are passed through unchanged; greyscales are expanded into RGB triplets (there is a small speed overhead for doing this). An alpha channel in the source image will raise an exception. The return values are as for the :meth:`read` method except that the *metadata* reflect the returned pixels, not the source image. In particular, for this method ``metadata['greyscale']`` will be ``False``. ### Response: def asRGB(self): """ Return image as RGB pixels. RGB colour images are passed through unchanged; greyscales are expanded into RGB triplets (there is a small speed overhead for doing this). An alpha channel in the source image will raise an exception. The return values are as for the :meth:`read` method except that the *metadata* reflect the returned pixels, not the source image. In particular, for this method ``metadata['greyscale']`` will be ``False``. """ width, height, pixels, meta = self.asDirect() if meta['alpha']: raise Error("will not convert image with alpha channel to RGB") if not meta['greyscale']: return width, height, pixels, meta meta['greyscale'] = False newarray = (newBarray, newHarray)[meta['bitdepth'] > 8] def iterrgb(): for row in pixels: a = newarray(3 * width) for i in range(3): a[i::3] = row yield a return width, height, iterrgb(), meta
def markAsSpam(self, thread_id=None):
    """
    Mark a thread as spam and delete it

    :param thread_id: User/Group ID to mark as spam. See :ref:`intro_threads`
    :return: Whether the request was successful
    :raises: FBchatException if request failed
    """
    # Resolve the thread id; the thread type is irrelevant for this request.
    thread_id, _ = self._getThread(thread_id, None)
    response = self._post(self.req_url.MARK_SPAM, {"id": thread_id})
    return response.ok
Mark a thread as spam and delete it :param thread_id: User/Group ID to mark as spam. See :ref:`intro_threads` :return: Whether the request was successful :raises: FBchatException if request failed
Below is the the instruction that describes the task: ### Input: Mark a thread as spam and delete it :param thread_id: User/Group ID to mark as spam. See :ref:`intro_threads` :return: Whether the request was successful :raises: FBchatException if request failed ### Response: def markAsSpam(self, thread_id=None): """ Mark a thread as spam and delete it :param thread_id: User/Group ID to mark as spam. See :ref:`intro_threads` :return: Whether the request was successful :raises: FBchatException if request failed """ thread_id, thread_type = self._getThread(thread_id, None) r = self._post(self.req_url.MARK_SPAM, {"id": thread_id}) return r.ok
def create_failure(self, exception=None):
    """
    This returns an object implementing IFailedFuture.

    If exception is None (the default) we MUST be called within an
    "except" block (such that sys.exc_info() returns useful
    information).
    """
    # Build the (type, value, traceback) triple either from the explicit
    # exception or from whatever is currently being handled.
    if exception:
        info = (type(exception), exception, None)
    else:
        info = sys.exc_info()
    return FailedFuture(*info)
This returns an object implementing IFailedFuture. If exception is None (the default) we MUST be called within an "except" block (such that sys.exc_info() returns useful information).
Below is the the instruction that describes the task: ### Input: This returns an object implementing IFailedFuture. If exception is None (the default) we MUST be called within an "except" block (such that sys.exc_info() returns useful information). ### Response: def create_failure(self, exception=None): """ This returns an object implementing IFailedFuture. If exception is None (the default) we MUST be called within an "except" block (such that sys.exc_info() returns useful information). """ if exception: return FailedFuture(type(exception), exception, None) return FailedFuture(*sys.exc_info())
def _set_autoupload_param(self, v, load=False):
    """
    Setter method for autoupload_param, mapped from YANG variable
    /support/autoupload_param (container)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_autoupload_param is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_autoupload_param() directly.
    """
    # Auto-generated YANG setter: wrap the incoming value in a YANGDynClass
    # container and store it on the name-mangled private attribute.
    if hasattr(v, "_utype"):
        # Value carries its own conversion helper; normalise it first.
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=autoupload_param.autoupload_param, is_container='container', presence=False, yang_name="autoupload-param", rest_name="autoupload-param", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure autoupload parameters', u'callpoint': u'RASAutoUploadCallPoint', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ras', defining_module='brocade-ras', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured error describing the expected type and
        # the full generated constructor expression for diagnostics.
        raise ValueError({
            'error-string': """autoupload_param must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=autoupload_param.autoupload_param, is_container='container', presence=False, yang_name="autoupload-param", rest_name="autoupload-param", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure autoupload parameters', u'callpoint': u'RASAutoUploadCallPoint', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ras', defining_module='brocade-ras', yang_type='container', is_config=True)""",
        })
    self.__autoupload_param = t
    if hasattr(self, '_set'):
        # Notify the parent tree that this node changed.
        self._set()
Setter method for autoupload_param, mapped from YANG variable /support/autoupload_param (container) If this variable is read-only (config: false) in the source YANG file, then _set_autoupload_param is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_autoupload_param() directly.
Below is the the instruction that describes the task: ### Input: Setter method for autoupload_param, mapped from YANG variable /support/autoupload_param (container) If this variable is read-only (config: false) in the source YANG file, then _set_autoupload_param is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_autoupload_param() directly. ### Response: def _set_autoupload_param(self, v, load=False): """ Setter method for autoupload_param, mapped from YANG variable /support/autoupload_param (container) If this variable is read-only (config: false) in the source YANG file, then _set_autoupload_param is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_autoupload_param() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=autoupload_param.autoupload_param, is_container='container', presence=False, yang_name="autoupload-param", rest_name="autoupload-param", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure autoupload parameters', u'callpoint': u'RASAutoUploadCallPoint', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ras', defining_module='brocade-ras', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """autoupload_param must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=autoupload_param.autoupload_param, is_container='container', presence=False, yang_name="autoupload-param", rest_name="autoupload-param", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure autoupload parameters', u'callpoint': 
u'RASAutoUploadCallPoint', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ras', defining_module='brocade-ras', yang_type='container', is_config=True)""", }) self.__autoupload_param = t if hasattr(self, '_set'): self._set()
def _check_update_(self):
    """Check if the current version of the library is outdated."""
    try:
        response = requests.get("https://pypi.python.org/pypi/jira/json", timeout=2.001)
        released_version = response.json()['info']['version']
        outdated = parse_version(released_version) > parse_version(__version__)
        if outdated:
            warnings.warn(
                "You are running an outdated version of JIRA Python %s. Current version is %s. Do not file any bugs against older versions." % (
                    __version__, released_version))
    except requests.RequestException:
        # Network problems are expected and not worth reporting.
        pass
    except Exception as e:
        logging.warning(e)
Check if the current version of the library is outdated.
Below is the the instruction that describes the task: ### Input: Check if the current version of the library is outdated. ### Response: def _check_update_(self): """Check if the current version of the library is outdated.""" try: data = requests.get("https://pypi.python.org/pypi/jira/json", timeout=2.001).json() released_version = data['info']['version'] if parse_version(released_version) > parse_version(__version__): warnings.warn( "You are running an outdated version of JIRA Python %s. Current version is %s. Do not file any bugs against older versions." % ( __version__, released_version)) except requests.RequestException: pass except Exception as e: logging.warning(e)
def _get_result(self, resource): """ Converts the given resource to a result to be returned from the view. Unless a custom renderer is employed, this will involve creating a representer and using it to convert the resource to a string. :param resource: Resource to convert. :type resource: Object implementing :class:`evererst.interfaces.IResource`. :returns: :class:`pyramid.reposnse.Response` object or a dictionary with a single key "context" mapped to the given resource (to be passed on to a custom renderer). """ if self._convert_response: self._update_response_body(resource) result = self.request.response else: result = dict(context=resource) return result
Converts the given resource to a result to be returned from the view. Unless a custom renderer is employed, this will involve creating a representer and using it to convert the resource to a string. :param resource: Resource to convert. :type resource: Object implementing :class:`everest.interfaces.IResource`. :returns: :class:`pyramid.response.Response` object or a dictionary with a single key "context" mapped to the given resource (to be passed on to a custom renderer).
Below is the the instruction that describes the task: ### Input: Converts the given resource to a result to be returned from the view. Unless a custom renderer is employed, this will involve creating a representer and using it to convert the resource to a string. :param resource: Resource to convert. :type resource: Object implementing :class:`evererst.interfaces.IResource`. :returns: :class:`pyramid.reposnse.Response` object or a dictionary with a single key "context" mapped to the given resource (to be passed on to a custom renderer). ### Response: def _get_result(self, resource): """ Converts the given resource to a result to be returned from the view. Unless a custom renderer is employed, this will involve creating a representer and using it to convert the resource to a string. :param resource: Resource to convert. :type resource: Object implementing :class:`evererst.interfaces.IResource`. :returns: :class:`pyramid.reposnse.Response` object or a dictionary with a single key "context" mapped to the given resource (to be passed on to a custom renderer). """ if self._convert_response: self._update_response_body(resource) result = self.request.response else: result = dict(context=resource) return result
def weight_unit(self, weight_unit):
    """Sets the weight_unit of this MeasurementSettings.

    :param weight_unit: The weight_unit of this MeasurementSettings.
    :type: str
    """
    allowed_values = ["pound", "kilogram"]  # noqa: E501
    # None is always accepted (clears the setting); anything else must be
    # one of the allowed unit names.
    is_valid = weight_unit is None or weight_unit in allowed_values
    if not is_valid:
        raise ValueError(
            "Invalid value for `weight_unit` ({0}), must be one of {1}"  # noqa: E501
            .format(weight_unit, allowed_values)
        )
    self._weight_unit = weight_unit
Sets the weight_unit of this MeasurementSettings. :param weight_unit: The weight_unit of this MeasurementSettings. :type: str
Below is the the instruction that describes the task: ### Input: Sets the weight_unit of this MeasurementSettings. :param weight_unit: The weight_unit of this MeasurementSettings. :type: str ### Response: def weight_unit(self, weight_unit): """Sets the weight_unit of this MeasurementSettings. :param weight_unit: The weight_unit of this MeasurementSettings. :type: str """ allowed_values = ["pound", "kilogram"] # noqa: E501 if weight_unit is not None and weight_unit not in allowed_values: raise ValueError( "Invalid value for `weight_unit` ({0}), must be one of {1}" # noqa: E501 .format(weight_unit, allowed_values) ) self._weight_unit = weight_unit
def notify3_d_event(self, type_p, data):
    """Notifies framebuffer about 3D backend event.

    in type_p of type int
        event type. Currently only VBOX3D_NOTIFY_EVENT_TYPE_VISIBLE_3DDATA is supported.

    in data of type str
        event-specific data, depends on the supplied event type
    """
    # Validate arguments before forwarding to the generated API call.
    if not isinstance(type_p, baseinteger):
        raise TypeError("type_p can only be an instance of type baseinteger")
    if not isinstance(data, list):
        raise TypeError("data can only be an instance of type list")
    # Only the first ten entries are sampled for element type checking,
    # mirroring the generated VirtualBox binding behaviour.
    if any(not isinstance(entry, basestring) for entry in data[:10]):
        raise TypeError(
            "array can only contain objects of type basestring")
    self._call("notify3DEvent", in_p=[type_p, data])
Notifies framebuffer about 3D backend event. in type_p of type int event type. Currently only VBOX3D_NOTIFY_EVENT_TYPE_VISIBLE_3DDATA is supported. in data of type str event-specific data, depends on the supplied event type
Below is the the instruction that describes the task: ### Input: Notifies framebuffer about 3D backend event. in type_p of type int event type. Currently only VBOX3D_NOTIFY_EVENT_TYPE_VISIBLE_3DDATA is supported. in data of type str event-specific data, depends on the supplied event type ### Response: def notify3_d_event(self, type_p, data): """Notifies framebuffer about 3D backend event. in type_p of type int event type. Currently only VBOX3D_NOTIFY_EVENT_TYPE_VISIBLE_3DDATA is supported. in data of type str event-specific data, depends on the supplied event type """ if not isinstance(type_p, baseinteger): raise TypeError("type_p can only be an instance of type baseinteger") if not isinstance(data, list): raise TypeError("data can only be an instance of type list") for a in data[:10]: if not isinstance(a, basestring): raise TypeError( "array can only contain objects of type basestring") self._call("notify3DEvent", in_p=[type_p, data])
def backward_word(event):
    """
    Move back to the start of the current or previous word. Words are
    composed of letters and digits.
    """
    buff = event.current_buffer
    # Relative cursor offset to the previous word start (presumably
    # negative); falsy when there is no previous word — TODO confirm.
    offset = buff.document.find_previous_word_beginning(count=event.arg)
    if offset:
        buff.cursor_position += offset
Move back to the start of the current or previous word. Words are composed of letters and digits.
Below is the instruction that describes the task: ### Input: Move back to the start of the current or previous word. Words are composed of letters and digits. ### Response: def backward_word(event): """ Move back to the start of the current or previous word. Words are composed of letters and digits. """ buff = event.current_buffer pos = buff.document.find_previous_word_beginning(count=event.arg) if pos: buff.cursor_position += pos
def stream_bloom_filters(dataset,  # type: Iterable[Sequence[Text]]
                         keys,     # type: Sequence[Sequence[bytes]]
                         schema    # type: Schema
                         ):
    # type: (...) -> Iterable[Tuple[bitarray, Text, int]]
    """
    Compute composite Bloom filters (CLKs) for every record in an
    iterable dataset.

    :param dataset: An iterable of indexable records.
    :param schema: An instantiated Schema instance
    :param keys: A tuple of two lists of secret keys used in the HMAC.
    :return: Generator yielding bloom filters as 3-tuples
    """
    # Build one tokenizer per schema field up front (eagerly, so this
    # happens once per call rather than per record).
    tokenizers = []
    for field in schema.fields:
        tokenizers.append(tokenizer.get_tokenizer(field.hashing_properties))
    # Lazily hash each record as the caller iterates.
    return (crypto_bloom_filter(record, tokenizers, schema, keys)
            for record in dataset)
Compute composite Bloom filters (CLKs) for every record in an iterable dataset. :param dataset: An iterable of indexable records. :param schema: An instantiated Schema instance :param keys: A tuple of two lists of secret keys used in the HMAC. :return: Generator yielding bloom filters as 3-tuples
Below is the the instruction that describes the task: ### Input: Compute composite Bloom filters (CLKs) for every record in an iterable dataset. :param dataset: An iterable of indexable records. :param schema: An instantiated Schema instance :param keys: A tuple of two lists of secret keys used in the HMAC. :return: Generator yielding bloom filters as 3-tuples ### Response: def stream_bloom_filters(dataset, # type: Iterable[Sequence[Text]] keys, # type: Sequence[Sequence[bytes]] schema # type: Schema ): # type: (...) -> Iterable[Tuple[bitarray, Text, int]] """ Compute composite Bloom filters (CLKs) for every record in an iterable dataset. :param dataset: An iterable of indexable records. :param schema: An instantiated Schema instance :param keys: A tuple of two lists of secret keys used in the HMAC. :return: Generator yielding bloom filters as 3-tuples """ tokenizers = [tokenizer.get_tokenizer(field.hashing_properties) for field in schema.fields] return (crypto_bloom_filter(s, tokenizers, schema, keys) for s in dataset)
def to_line_string(self, closed=True):
    """
    Convert this polygon's `exterior` to a ``LineString`` instance.

    Parameters
    ----------
    closed : bool, optional
        Whether to close the line string, i.e. to add the first point of
        the `exterior` also as the last point at the end of the line
        string. This has no effect if the polygon has a single point or
        zero points.

    Returns
    -------
    imgaug.augmentables.lines.LineString
        Exterior of the polygon as a line string.
    """
    from imgaug.augmentables.lines import LineString
    points = self.exterior
    if closed and len(points) > 1:
        # Close the loop by appending the first exterior point again.
        points = np.concatenate([points, points[0:1, :]], axis=0)
    return LineString(points, label=self.label)
Convert this polygon's `exterior` to a ``LineString`` instance. Parameters ---------- closed : bool, optional Whether to close the line string, i.e. to add the first point of the `exterior` also as the last point at the end of the line string. This has no effect if the polygon has a single point or zero points. Returns ------- imgaug.augmentables.lines.LineString Exterior of the polygon as a line string.
Below is the the instruction that describes the task: ### Input: Convert this polygon's `exterior` to a ``LineString`` instance. Parameters ---------- closed : bool, optional Whether to close the line string, i.e. to add the first point of the `exterior` also as the last point at the end of the line string. This has no effect if the polygon has a single point or zero points. Returns ------- imgaug.augmentables.lines.LineString Exterior of the polygon as a line string. ### Response: def to_line_string(self, closed=True): """ Convert this polygon's `exterior` to a ``LineString`` instance. Parameters ---------- closed : bool, optional Whether to close the line string, i.e. to add the first point of the `exterior` also as the last point at the end of the line string. This has no effect if the polygon has a single point or zero points. Returns ------- imgaug.augmentables.lines.LineString Exterior of the polygon as a line string. """ from imgaug.augmentables.lines import LineString if not closed or len(self.exterior) <= 1: return LineString(self.exterior, label=self.label) return LineString( np.concatenate([self.exterior, self.exterior[0:1, :]], axis=0), label=self.label)
async def _dhcp_handler(self):
    """
    Mini DHCP server: respond to DHCP packets punted to the controller
    by OpenFlow.

    Waits for PACKET_IN events from the l3input table and answers
    DISCOVER/REQUEST/INFORM messages for ports registered in
    ``self._dhcpentries``. DECLINE/RELEASE messages and packets from
    unknown ports are ignored; malformed packets are logged.
    """
    conn = self._connection
    ofdef = self._connection.openflowdef
    l3 = self._parent._gettableindex('l3input', self._connection.protocol.vhost)
    dhcp_packet_matcher = OpenflowAsyncMessageEvent.createMatcher(
        ofdef.OFPT_PACKET_IN, None, None, l3, 1,
        self._connection, self._connection.connmark)
    # These tags are important options. They are sent first to make sure the
    # client correctly receives these options.
    required_tags = [d.OPTION_MESSAGE_TYPE,
                     d.OPTION_SERVER_IDENTIFIER,
                     d.OPTION_NETMASK,
                     d.OPTION_ROUTER,
                     d.OPTION_DNSSERVER,
                     d.OPTION_BROADCAST,
                     d.OPTION_MTU,
                     d.OPTION_LEASE_TIME,
                     d.OPTION_T1,
                     d.OPTION_T2]
    server_mac = mac_addr(self._parent.servermac)
    # IP fragment identifier, randomly seeded and incremented per reply
    trans_id = uint16.create(os.urandom(2))

    def set_options(payload, option_dict, provide_options, message_type, remove_lease=False):
        """
        Set DHCP options to output payload regarding the incoming request

        :param payload: output DHCP payload
        :param option_dict: incoming DHCP options in request
        :param provide_options: all DHCP options that are ready to sent to the client
        :param message_type: output DHCP message type
        :param remove_lease: remove all leases options. DHCPINFORM cannot contain
                             leases options.
                             See https://tools.ietf.org/html/rfc2131#section-3.4
        """
        message_type_opt = d.dhcp_option_message_type(value=message_type)
        if d.OPTION_REQUESTED_OPTIONS in option_dict:
            # First requested options, then required options, then others
            reqs = set(option_dict[d.OPTION_REQUESTED_OPTIONS].value)
            send_tags = [t for t in option_dict[d.OPTION_REQUESTED_OPTIONS].value
                         if t == d.OPTION_MESSAGE_TYPE or t in provide_options] \
                      + [t for t in required_tags
                         if (t in provide_options or t == d.OPTION_MESSAGE_TYPE) and t not in reqs] \
                      + [t for t in provide_options if t not in reqs and t not in required_tags]
        else:
            # Required options, then others
            send_tags = [t for t in required_tags if t in provide_options or t == d.OPTION_MESSAGE_TYPE] \
                      + [t for t in set(provide_options.keys()).difference(required_tags)]
        # If the client has sent an option for max message size, use it
        # (clamped between 576 and 1400); or use the RFC required 576 bytes.
        # BUGFIX: the lease filter previously referenced the bare name
        # OPTION_T2 (missing the `d.` prefix), raising NameError whenever
        # remove_lease was True (i.e. for every DHCPINFORM reply).
        d.build_options(
            payload,
            [message_type_opt if t == d.OPTION_MESSAGE_TYPE else provide_options[t]
             for t in send_tags
             if not remove_lease or (t != d.OPTION_LEASE_TIME and t != d.OPTION_T1 and t != d.OPTION_T2)],
            max(min(option_dict[d.OPTION_MAX_MESSAGE_SIZE].value, 1400), 576)
            if d.OPTION_MAX_MESSAGE_SIZE in option_dict else 576)

    async def send_packet(pid, packet):
        """
        Send DHCP packet to specified port
        """
        await self.execute_commands(conn, [ofdef.ofp_packet_out(
            buffer_id=ofdef.OFP_NO_BUFFER,
            in_port=ofdef.OFPP_CONTROLLER,
            actions=[ofdef.ofp_action_output(port=pid, max_len=ofdef.OFPCML_NO_BUFFER)],
            data=packet._tobytes()
        )])

    while True:
        ev = await dhcp_packet_matcher
        msg = ev.message
        try:
            in_port = ofdef.ofp_port_no.create(ofdef.get_oxm(msg.match.oxm_fields, ofdef.OXM_OF_IN_PORT))
            if in_port not in self._dhcpentries:
                # Not a DHCP-enabled port
                continue
            port_mac, port_ip, server_ip, provide_options = self._dhcpentries[in_port]
            # Fragmented DHCP packets are not supported - this is allowed
            # according to RFC: server and clients are only needed to support
            # at least 576 bytes DHCP messages.
            l7_packet = ethernet_l7.create(msg.data)
            dhcp_packet = d.dhcp_payload.create(l7_packet.data)
            # We only process a DHCP request directly sent from the logical port
            if (dhcp_packet.op != d.BOOTREQUEST                       # A DHCP server packet
                    or dhcp_packet.hlen != 6 or dhcp_packet.htype != 1  # Hardware address not ethernet (48-bit)
                    or dhcp_packet.magic_cookie != d.BOOTP_MAGIC_COOKIE  # Magic cookie not match
                    or dhcp_packet.giaddr != 0):                      # A relayed DHCP message
                raise ValueError('Unsupported DHCP packet')
            # Reassemble DHCP options
            options = d.reassemble_options(dhcp_packet)
            option_dict = dict((o.tag, o) for o in options)
            if d.OPTION_MESSAGE_TYPE not in option_dict:
                raise ValueError('Message type not found')
            message_type = option_dict[d.OPTION_MESSAGE_TYPE].value
            is_nak = False
            if message_type == d.DHCPDISCOVER:
                # A DHCPDISCOVER should get a DHCPOFFER response
                if dhcp_packet.chaddr[:6].ljust(6, b'\x00') != mac_addr.tobytes(port_mac):
                    # MAC address not match, ignore this packet
                    continue
                dhcp_reply = d.dhcp_payload(op=d.BOOTREPLY,
                                            htype=1,
                                            hlen=6,
                                            hops=0,
                                            xid=dhcp_packet.xid,
                                            secs=0,
                                            flags=dhcp_packet.flags,
                                            ciaddr=0,
                                            yiaddr=port_ip,
                                            siaddr=0,
                                            giaddr=dhcp_packet.giaddr,
                                            chaddr=dhcp_packet.chaddr,
                                            magic_cookie=d.BOOTP_MAGIC_COOKIE
                                            )
                set_options(dhcp_reply, option_dict, provide_options, d.DHCPOFFER)
            elif message_type == d.DHCPREQUEST:
                # A DHCPREQUEST should get a DHCPACK reply
                if d.OPTION_SERVER_IDENTIFIER in option_dict and option_dict[d.OPTION_SERVER_IDENTIFIER].value != server_ip:
                    # Ignore packets to wrong address
                    continue
                if dhcp_packet.chaddr[:6].ljust(6, b'\x00') != mac_addr.tobytes(port_mac) \
                        or (d.OPTION_REQUESTED_IP in option_dict and option_dict[d.OPTION_REQUESTED_IP].value != port_ip) \
                        or (dhcp_packet.ciaddr != 0 and dhcp_packet.ciaddr != port_ip):
                    # Requested MAC or IP not matched, send a NACK
                    dhcp_reply = d.dhcp_payload(op=d.BOOTREPLY,
                                                htype=1,
                                                hlen=6,
                                                hops=0,
                                                xid=dhcp_packet.xid,
                                                secs=0,
                                                flags=dhcp_packet.flags,
                                                ciaddr=0,
                                                yiaddr=0,
                                                siaddr=0,
                                                giaddr=dhcp_packet.giaddr,
                                                chaddr=dhcp_packet.chaddr,
                                                magic_cookie=d.BOOTP_MAGIC_COOKIE
                                                )
                    # Do not send more options in NACK
                    d.build_options(dhcp_reply,
                                    [d.dhcp_option_message_type(value=d.DHCPNAK),
                                     d.dhcp_option_address(tag=d.OPTION_SERVER_IDENTIFIER, value=server_ip)],
                                    576, 0)
                    is_nak = True
                else:
                    dhcp_reply = d.dhcp_payload(op=d.BOOTREPLY,
                                                htype=1,
                                                hlen=6,
                                                hops=0,
                                                xid=dhcp_packet.xid,
                                                secs=0,
                                                flags=dhcp_packet.flags,
                                                ciaddr=dhcp_packet.ciaddr,
                                                yiaddr=port_ip,
                                                siaddr=0,
                                                giaddr=dhcp_packet.giaddr,
                                                chaddr=dhcp_packet.chaddr,
                                                magic_cookie=d.BOOTP_MAGIC_COOKIE
                                                )
                    set_options(dhcp_reply, option_dict, provide_options, d.DHCPACK)
            elif message_type == d.DHCPDECLINE:
                # Address already in use?
                # BUGFIX: the format string has three placeholders, so the
                # port number must be passed as the third argument.
                self._logger.warning('DHCP client reports DHCPDECLINE, there should be some problems.'\
                                     ' Connection = %r(%016x), port = %d.',
                                     self._connection, self._connection.openflow_datapathid, in_port)
                # BUGFIX: no reply is built for DHCPDECLINE; without this
                # `continue` the code below would hit an undefined
                # `dhcp_reply` and mislog the packet as invalid.
                continue
            elif message_type == d.DHCPRELEASE:
                # Safe to ignore, we do not use a dynamic IP address pool
                continue
            elif message_type == d.DHCPINFORM:
                # Client setup IP addresses itself, but requesting more information
                # DHCPINFORM reply cannot have lease options, and yiaddr = 0
                dhcp_reply = d.dhcp_payload(op=d.BOOTREPLY,
                                            htype=1,
                                            hlen=6,
                                            hops=0,
                                            xid=dhcp_packet.xid,
                                            secs=0,
                                            flags=dhcp_packet.flags,
                                            ciaddr=dhcp_packet.ciaddr,
                                            yiaddr=0,
                                            siaddr=0,
                                            giaddr=dhcp_packet.giaddr,
                                            chaddr=dhcp_packet.chaddr,
                                            magic_cookie=d.BOOTP_MAGIC_COOKIE
                                            )
                set_options(dhcp_reply, option_dict, provide_options, d.DHCPACK, True)
            trans_id = (trans_id + 1) & 0xffff
            if (dhcp_packet.flags & d.DHCPFLAG_BROADCAST) or is_nak:
                # client request broadcast, or DHCPNAK
                # RFC requires that DHCPNAK uses broadcast
                dl_dst = [0xff, 0xff, 0xff, 0xff, 0xff, 0xff]
                ip_dst = 0xffffffff
            else:
                dl_dst = l7_packet.dl_src
                ip_dst = port_ip
            reply_packet = ip4_packet_l7((ip4_payload, ip4_udp_payload),
                                         dl_src=server_mac,
                                         dl_dst=dl_dst,
                                         identifier=trans_id,
                                         ttl=128,
                                         ip_src=server_ip,
                                         ip_dst=ip_dst,
                                         sport=67,
                                         dport=68,
                                         data=dhcp_reply._tobytes()
                                         )
            # Send packet to the incoming port
            self.subroutine(send_packet(in_port, reply_packet), True)
        except Exception:
            self._logger.info('Invalid DHCP packet received: %r', msg.data, exc_info=True)
Mini DHCP server, respond DHCP packets from OpenFlow
Below is the the instruction that describes the task: ### Input: Mini DHCP server, respond DHCP packets from OpenFlow ### Response: async def _dhcp_handler(self): """ Mini DHCP server, respond DHCP packets from OpenFlow """ conn = self._connection ofdef = self._connection.openflowdef l3 = self._parent._gettableindex('l3input', self._connection.protocol.vhost) dhcp_packet_matcher = OpenflowAsyncMessageEvent.createMatcher(ofdef.OFPT_PACKET_IN, None, None, l3, 1, self._connection, self._connection.connmark) # These tags are important options. They are sent first to make sure the client # correctly receive these options. required_tags = [d.OPTION_MESSAGE_TYPE, d.OPTION_SERVER_IDENTIFIER, d.OPTION_NETMASK, d.OPTION_ROUTER, d.OPTION_DNSSERVER, d.OPTION_BROADCAST, d.OPTION_MTU, d.OPTION_LEASE_TIME, d.OPTION_T1, d.OPTION_T2] server_mac = mac_addr(self._parent.servermac) # IP fragment identifier trans_id = uint16.create(os.urandom(2)) def set_options(payload, option_dict, provide_options, message_type, remove_lease = False): """ Set DHCP options to output payload regarding the incoming request :param payload: output DHCP payload :param option_dict: incoming DHCP options in request :param provide_options: all DHCP options that are ready to sent to the client :param message_type: output DHCP message type :param remove_lease: remove all leases options. DHCPINFORM cannot contain leases options. 
See https://tools.ietf.org/html/rfc2131#section-3.4 """ message_type_opt = d.dhcp_option_message_type(value = message_type) if d.OPTION_REQUESTED_OPTIONS in option_dict: # First requested options, then required options, then others reqs = set(option_dict[d.OPTION_REQUESTED_OPTIONS].value) send_tags = [t for t in option_dict[d.OPTION_REQUESTED_OPTIONS].value if t == d.OPTION_MESSAGE_TYPE or t in provide_options] \ + [t for t in required_tags if (t in provide_options or t == d.OPTION_MESSAGE_TYPE) and t not in reqs] \ + [t for t in provide_options if t not in reqs and t not in required_tags] else: # Required options, then others send_tags = [t for t in required_tags if t in provide_options or t == d.OPTION_MESSAGE_TYPE] \ + [t for t in set(provide_options.keys()).difference(required_tags)] # If the client has sent an option for max message size, use it; or use the RFC required 576 bytes not_finished = d.build_options( payload, [message_type_opt if t == d.OPTION_MESSAGE_TYPE else provide_options[t] for t in send_tags if not remove_lease or (t != d.OPTION_LEASE_TIME and t != d.OPTION_T1 and t != OPTION_T2)], max(min(option_dict[d.OPTION_MAX_MESSAGE_SIZE].value, 1400), 576) if d.OPTION_MAX_MESSAGE_SIZE in option_dict else 576) async def send_packet(pid, packet): """ Send DHCP packet to specified port """ await self.execute_commands(conn, [ofdef.ofp_packet_out( buffer_id = ofdef.OFP_NO_BUFFER, in_port = ofdef.OFPP_CONTROLLER, actions = [ofdef.ofp_action_output(port = pid, max_len = ofdef.OFPCML_NO_BUFFER)], data = packet._tobytes() )]) while True: ev = await dhcp_packet_matcher msg = ev.message try: in_port = ofdef.ofp_port_no.create(ofdef.get_oxm(msg.match.oxm_fields, ofdef.OXM_OF_IN_PORT)) if in_port not in self._dhcpentries: # Not a DHCP-enabled port continue port_mac, port_ip, server_ip, provide_options = self._dhcpentries[in_port] # Fragmented DHCP packets are not supported - this is allowed according # to RFC: server and clients are only needed to support at least 
576 bytes # DHCP messages. l7_packet = ethernet_l7.create(msg.data) dhcp_packet = d.dhcp_payload.create(l7_packet.data) # We only process a DHCP request directly sent from the logical port if (dhcp_packet.op != d.BOOTREQUEST # A DHCP server packet or dhcp_packet.hlen != 6 or dhcp_packet.htype != 1 # Hardware address not ethernet (48-bit) or dhcp_packet.magic_cookie != d.BOOTP_MAGIC_COOKIE # Magic cookie not match or dhcp_packet.giaddr != 0): # A relayed DHCP message raise ValueError('Unsupported DHCP packet') # Reassemble DHCP options options = d.reassemble_options(dhcp_packet) option_dict = dict((o.tag, o) for o in options) if d.OPTION_MESSAGE_TYPE not in option_dict: raise ValueError('Message type not found') message_type = option_dict[d.OPTION_MESSAGE_TYPE].value is_nak = False if message_type == d.DHCPDISCOVER: # A DHCPDISCOVER should get a DHCPOFFER response if dhcp_packet.chaddr[:6].ljust(6, b'\x00') != mac_addr.tobytes(port_mac): # MAC address not match, ignore this packet continue dhcp_reply = d.dhcp_payload(op = d.BOOTREPLY, htype = 1, hlen = 6, hops = 0, xid = dhcp_packet.xid, secs = 0, flags = dhcp_packet.flags, ciaddr = 0, yiaddr = port_ip, siaddr = 0, giaddr = dhcp_packet.giaddr, chaddr = dhcp_packet.chaddr, magic_cookie = d.BOOTP_MAGIC_COOKIE ) set_options(dhcp_reply, option_dict, provide_options, d.DHCPOFFER) elif message_type == d.DHCPREQUEST: # A DHCPREQUEST should get a DHCPACK reply if d.OPTION_SERVER_IDENTIFIER in option_dict and option_dict[d.OPTION_SERVER_IDENTIFIER].value != server_ip: # Ignore packets to wrong address continue if dhcp_packet.chaddr[:6].ljust(6, b'\x00') != mac_addr.tobytes(port_mac) \ or (d.OPTION_REQUESTED_IP in option_dict and option_dict[d.OPTION_REQUESTED_IP].value != port_ip) \ or (dhcp_packet.ciaddr != 0 and dhcp_packet.ciaddr != port_ip): # Requested MAC or IP not matched, send a NACK dhcp_reply = d.dhcp_payload(op = d.BOOTREPLY, htype = 1, hlen = 6, hops = 0, xid = dhcp_packet.xid, secs = 0, flags = 
dhcp_packet.flags, ciaddr = 0, yiaddr = 0, siaddr = 0, giaddr = dhcp_packet.giaddr, chaddr = dhcp_packet.chaddr, magic_cookie = d.BOOTP_MAGIC_COOKIE ) # Do not send more options in NACK d.build_options(dhcp_reply, [d.dhcp_option_message_type(value = d.DHCPNAK), d.dhcp_option_address(tag = d.OPTION_SERVER_IDENTIFIER, value = server_ip)], 576, 0) is_nak = True else: dhcp_reply = d.dhcp_payload(op = d.BOOTREPLY, htype = 1, hlen = 6, hops = 0, xid = dhcp_packet.xid, secs = 0, flags = dhcp_packet.flags, ciaddr = dhcp_packet.ciaddr, yiaddr = port_ip, siaddr = 0, giaddr = dhcp_packet.giaddr, chaddr = dhcp_packet.chaddr, magic_cookie = d.BOOTP_MAGIC_COOKIE ) set_options(dhcp_reply, option_dict, provide_options, d.DHCPACK) elif message_type == d.DHCPDECLINE: # Address already in use? self._logger.warning('DHCP client reports DHCPDECLINE, there should be some problems.'\ ' Connection = %r(%016x), port = %d.', self._connection, self._connection.openflow_datapathid) elif message_type == d.DHCPRELEASE: # Safe to ignore, we do not use a dynamic IP address pool continue elif message_type == d.DHCPINFORM: # Client setup IP addresses itself, but requesting more information # DHCPINFORM reply cannot have lease options, and yiaddr = 0 dhcp_reply = d.dhcp_payload(op = d.BOOTREPLY, htype = 1, hlen = 6, hops = 0, xid = dhcp_packet.xid, secs = 0, flags = dhcp_packet.flags, ciaddr = dhcp_packet.ciaddr, yiaddr = 0, siaddr = 0, giaddr = dhcp_packet.giaddr, chaddr = dhcp_packet.chaddr, magic_cookie = d.BOOTP_MAGIC_COOKIE ) set_options(dhcp_reply, option_dict, provide_options, d.DHCPACK, True) trans_id = (trans_id + 1) & 0xffff if (dhcp_packet.flags & d.DHCPFLAG_BROADCAST) or is_nak: # client request broadcast, or DHCPNAK # RFC requires that DHCPNAK uses broadcast dl_dst = [0xff, 0xff, 0xff, 0xff, 0xff, 0xff] ip_dst = 0xffffffff else: dl_dst = l7_packet.dl_src ip_dst = port_ip reply_packet = ip4_packet_l7((ip4_payload, ip4_udp_payload), dl_src = server_mac, dl_dst = dl_dst, identifier = 
trans_id, ttl = 128, ip_src = server_ip, ip_dst = ip_dst, sport = 67, dport = 68, data = dhcp_reply._tobytes() ) # Send packet to the incoming port self.subroutine(send_packet(in_port, reply_packet), True) except Exception: self._logger.info('Invalid DHCP packet received: %r', msg.data, exc_info = True)
def _validate_inputs(actual_inputs, required_inputs, keypath=None): """ Validate inputs. Raise exception if something is missing. args: actual_inputs: the object/dictionary passed to a subclass of PublishablePayload required_inputs: the object/dictionary containing keys (and subkeys) for required fields. (See get_common_payload_template.) keypath: used internally in recursive function calls. return: Nothing. An exception will be raised if a problem is encountered. """ actual_keys = set(actual_inputs.keys()) required_keys = set(required_inputs.keys()) if actual_keys.intersection(required_keys) != required_keys: prefix = '%s.' if keypath else '' output_keys = {'%s%s' % (prefix, key) for key in required_keys} raise Exception("Missing input fields. Expected %s." % ', '.join(output_keys)) for key in required_keys: # TODO: review the following usage of isinstance. # Will this always be appropriate, given duck typing? if isinstance(required_inputs[key], dict): new_keypath = key if not keypath else '%s.%s' % (keypath, key) _validate_inputs( actual_inputs=actual_inputs[key], required_inputs=required_inputs[key], keypath=new_keypath )
Validate inputs. Raise exception if something is missing. args: actual_inputs: the object/dictionary passed to a subclass of PublishablePayload required_inputs: the object/dictionary containing keys (and subkeys) for required fields. (See get_common_payload_template.) keypath: used internally in recursive function calls. return: Nothing. An exception will be raised if a problem is encountered.
Below is the the instruction that describes the task: ### Input: Validate inputs. Raise exception if something is missing. args: actual_inputs: the object/dictionary passed to a subclass of PublishablePayload required_inputs: the object/dictionary containing keys (and subkeys) for required fields. (See get_common_payload_template.) keypath: used internally in recursive function calls. return: Nothing. An exception will be raised if a problem is encountered. ### Response: def _validate_inputs(actual_inputs, required_inputs, keypath=None): """ Validate inputs. Raise exception if something is missing. args: actual_inputs: the object/dictionary passed to a subclass of PublishablePayload required_inputs: the object/dictionary containing keys (and subkeys) for required fields. (See get_common_payload_template.) keypath: used internally in recursive function calls. return: Nothing. An exception will be raised if a problem is encountered. """ actual_keys = set(actual_inputs.keys()) required_keys = set(required_inputs.keys()) if actual_keys.intersection(required_keys) != required_keys: prefix = '%s.' if keypath else '' output_keys = {'%s%s' % (prefix, key) for key in required_keys} raise Exception("Missing input fields. Expected %s." % ', '.join(output_keys)) for key in required_keys: # TODO: review the following usage of isinstance. # Will this always be appropriate, given duck typing? if isinstance(required_inputs[key], dict): new_keypath = key if not keypath else '%s.%s' % (keypath, key) _validate_inputs( actual_inputs=actual_inputs[key], required_inputs=required_inputs[key], keypath=new_keypath )
def for_version(self, version_guid):
    """
    Return a UsageLocator that points at the same block within the given
    version of the library.
    """
    versioned_library_key = self.library_key.for_version(version_guid)
    return self.replace(library_key=versioned_library_key)
Return a UsageLocator for the same block in a different version of the library.
Below is the instruction that describes the task: ### Input: Return a UsageLocator for the same block in a different version of the library. ### Response: def for_version(self, version_guid): """ Return a UsageLocator for the same block in a different version of the library. """ return self.replace(library_key=self.library_key.for_version(version_guid))
def init_debug():
    """Initialise debug_stats and QueueLength (this is not a reset)"""
    global debug_stats
    global QueueLength
    if debug_stats is not None:
        # Already initialised; keep the accumulated data untouched.
        return
    # Two-level defaultdict: debug_stats[outer][inner] is a list.
    debug_stats = defaultdict(partial(defaultdict, list))
    QueueLength = []
Initialise debug_stats and QueueLength (this is not a reset)
Below is the the instruction that describes the task: ### Input: Initialise debug_stats and QueueLength (this is not a reset) ### Response: def init_debug(): """Initialise debug_stats and QueueLength (this is not a reset)""" global debug_stats global QueueLength if debug_stats is None: list_defaultdict = partial(defaultdict, list) debug_stats = defaultdict(list_defaultdict) QueueLength = []
def _load_config(config_filepath: str) -> Dict[str, Any]:
    """
    Loads YAML config file to dictionary

    :param config_filepath: path to the YAML config file
    :returns: dict from YAML config file
    :raises Exception: if the file cannot be opened or parsed
    """
    try:
        # BUG FIX: the original called open() without ever closing the file
        # handle; use a context manager.  It also called yaml.load() with no
        # Loader, which is unsafe on untrusted input and raises TypeError on
        # PyYAML >= 6.  safe_load is the documented replacement for plain
        # config data.
        with open(config_filepath, "r") as config_file:
            config: Dict[str, Any] = yaml.safe_load(config_file)
    except Exception as e:
        raise Exception(f"Invalid DAG Factory config file; err: {e}")
    return config
Loads YAML config file to dictionary :returns: dict from YAML config file
Below is the the instruction that describes the task: ### Input: Loads YAML config file to dictionary :returns: dict from YAML config file ### Response: def _load_config(config_filepath: str) -> Dict[str, Any]: """ Loads YAML config file to dictionary :returns: dict from YAML config file """ try: config: Dict[str, Any] = yaml.load(stream=open(config_filepath, "r")) except Exception as e: raise Exception(f"Invalid DAG Factory config file; err: {e}") return config
def add_clause(self, clause):
    """Add a new clause to the existing query.

    :param clause: The clause to add
    :type clause: MongoClause
    :return: None
    :raises RuntimeError: if the clause's location is not a known one
    """
    # Dispatch table mapping each known clause location to its target list.
    targets = {
        MongoClause.LOC_MAIN: self._main,
        MongoClause.LOC_MAIN2: self._main2,
        MongoClause.LOC_WHERE: self._where,
    }
    location = clause.query_loc
    if location not in targets:
        raise RuntimeError('bad clause location: {}'.format(location))
    targets[location].append(clause)
Add a new clause to the existing query. :param clause: The clause to add :type clause: MongoClause :return: None
Below is the the instruction that describes the task: ### Input: Add a new clause to the existing query. :param clause: The clause to add :type clause: MongoClause :return: None ### Response: def add_clause(self, clause): """Add a new clause to the existing query. :param clause: The clause to add :type clause: MongoClause :return: None """ if clause.query_loc == MongoClause.LOC_MAIN: self._main.append(clause) elif clause.query_loc == MongoClause.LOC_MAIN2: self._main2.append(clause) elif clause.query_loc == MongoClause.LOC_WHERE: self._where.append(clause) else: raise RuntimeError('bad clause location: {}'.format(clause.query_loc))
def _exec_command(adb_cmd): """ Format adb command and execute it in shell :param adb_cmd: list adb command to execute :return: string '0' and shell command output if successful, otherwise raise CalledProcessError exception and return error code """ t = tempfile.TemporaryFile() final_adb_cmd = [] for e in adb_cmd: if e != '': # avoid items with empty string... final_adb_cmd.append(e) # ... so that final command doesn't # contain extra spaces print('\n*** Executing ' + ' '.join(adb_cmd) + ' ' + 'command') try: output = check_output(final_adb_cmd, stderr=t) except CalledProcessError as e: t.seek(0) result = e.returncode, t.read() else: result = 0, output print('\n' + result[1]) return result
Format adb command and execute it in shell :param adb_cmd: list adb command to execute :return: string '0' and shell command output if successful, otherwise raise CalledProcessError exception and return error code
Below is the the instruction that describes the task: ### Input: Format adb command and execute it in shell :param adb_cmd: list adb command to execute :return: string '0' and shell command output if successful, otherwise raise CalledProcessError exception and return error code ### Response: def _exec_command(adb_cmd): """ Format adb command and execute it in shell :param adb_cmd: list adb command to execute :return: string '0' and shell command output if successful, otherwise raise CalledProcessError exception and return error code """ t = tempfile.TemporaryFile() final_adb_cmd = [] for e in adb_cmd: if e != '': # avoid items with empty string... final_adb_cmd.append(e) # ... so that final command doesn't # contain extra spaces print('\n*** Executing ' + ' '.join(adb_cmd) + ' ' + 'command') try: output = check_output(final_adb_cmd, stderr=t) except CalledProcessError as e: t.seek(0) result = e.returncode, t.read() else: result = 0, output print('\n' + result[1]) return result
def _realloc(self, ptr, size): """ Handler for any libc `realloc` SimProcedure call. If the heap has faithful support for `realloc`, it ought to be implemented in a `realloc` function (as opposed to the `_realloc` function). :param ptr: the location in memory to be reallocated :param size: the new size desired for the allocation """ raise NotImplementedError("%s not implemented for %s" % (self._realloc.__func__.__name__, self.__class__.__name__))
Handler for any libc `realloc` SimProcedure call. If the heap has faithful support for `realloc`, it ought to be implemented in a `realloc` function (as opposed to the `_realloc` function). :param ptr: the location in memory to be reallocated :param size: the new size desired for the allocation
Below is the the instruction that describes the task: ### Input: Handler for any libc `realloc` SimProcedure call. If the heap has faithful support for `realloc`, it ought to be implemented in a `realloc` function (as opposed to the `_realloc` function). :param ptr: the location in memory to be reallocated :param size: the new size desired for the allocation ### Response: def _realloc(self, ptr, size): """ Handler for any libc `realloc` SimProcedure call. If the heap has faithful support for `realloc`, it ought to be implemented in a `realloc` function (as opposed to the `_realloc` function). :param ptr: the location in memory to be reallocated :param size: the new size desired for the allocation """ raise NotImplementedError("%s not implemented for %s" % (self._realloc.__func__.__name__, self.__class__.__name__))
def remove_record(self, common_name):
    """Delete the record associated with this common name.

    Refuses to delete a certificate authority that still has signees, and
    unlinks the record from its parent CA (if any) before deleting it.

    :param common_name: common name identifying the record to delete
    :return: a copy of the removed record dict
    :raises CertificateAuthorityInUseError: if the record is a CA that has
        signed one or more certificates
    """
    bundle = self.get_files(common_name)
    # Count *distinct* signees: Counter de-duplicates repeated entries.
    num_signees = len(Counter(bundle.record['signees']))
    if bundle.is_ca() and num_signees > 0:
        raise CertificateAuthorityInUseError(
            "Authority {name} has signed {x} certificates"
            .format(name=common_name, x=num_signees)
        )
    try:
        # NOTE(review): a missing 'parent_ca' key would raise an uncaught
        # KeyError here — presumably every record carries it; confirm.
        ca_name = bundle.record['parent_ca']
        # get_record() acts as an existence check for the parent CA: if it
        # raises CertNotFoundError we skip unlinking.  Its return value is
        # otherwise unused.
        ca_record = self.get_record(ca_name)
        self.remove_sign_link(ca_name, common_name)
    except CertNotFoundError:
        pass
    # Copy before deleting so the caller still receives the removed data.
    record_copy = dict(self.store[common_name])
    del self.store[common_name]
    # Persist the store now that the entry is gone.
    self.save()
    return record_copy
Delete the record associated with this common name
Below is the the instruction that describes the task: ### Input: Delete the record associated with this common name ### Response: def remove_record(self, common_name): """Delete the record associated with this common name""" bundle = self.get_files(common_name) num_signees = len(Counter(bundle.record['signees'])) if bundle.is_ca() and num_signees > 0: raise CertificateAuthorityInUseError( "Authority {name} has signed {x} certificates" .format(name=common_name, x=num_signees) ) try: ca_name = bundle.record['parent_ca'] ca_record = self.get_record(ca_name) self.remove_sign_link(ca_name, common_name) except CertNotFoundError: pass record_copy = dict(self.store[common_name]) del self.store[common_name] self.save() return record_copy
def thread_stopped(self):
    """ :meth:`.WThreadTask._polling_iteration` implementation """
    current = self.__current_task
    if current is None:
        # Nothing is running; nothing to stop.
        return
    self.__task_chain[current].stop()
    self.__current_task = None
:meth:`.WThreadTask._polling_iteration` implementation
Below is the the instruction that describes the task: ### Input: :meth:`.WThreadTask._polling_iteration` implementation ### Response: def thread_stopped(self): """ :meth:`.WThreadTask._polling_iteration` implementation """ if self.__current_task is not None: task = self.__task_chain[self.__current_task] task.stop() self.__current_task = None
def wrap_httplib_request(request_func):
    """Wrap the httplib request function to trace.

    Create a new span and update and close the span in the response later.

    :param request_func: the original ``request`` function being wrapped
        (``HTTPConnection.request`` or compatible).
    :return: a wrapper with the same signature that records a CLIENT span
        around each outgoing request.
    """
    def call(self, method, url, body, headers, *args, **kwargs):
        _tracer = execution_context.get_opencensus_tracer()
        blacklist_hostnames = execution_context.get_opencensus_attr(
            'blacklist_hostnames')
        # Skip tracing entirely for blacklisted destination hosts.
        dest_url = '{}:{}'.format(self._dns_host, self.port)
        if utils.disable_tracing_hostname(dest_url, blacklist_hostnames):
            return request_func(self, method, url, body,
                                headers, *args, **kwargs)
        # NOTE: the span is opened here but intentionally NOT closed; the
        # matching response handler closes it later (see module docstring).
        _span = _tracer.start_span()
        _span.span_kind = span_module.SpanKind.CLIENT
        _span.name = '[httplib]{}'.format(request_func.__name__)

        # Add the request url to attributes
        _tracer.add_attribute_to_current_span(HTTP_URL, url)

        # Add the request method to attributes
        _tracer.add_attribute_to_current_span(HTTP_METHOD, method)

        # Store the current span id to thread local.
        execution_context.set_opencensus_attr(
            'httplib/current_span_id', _span.span_id)

        try:
            # Propagate the trace context downstream via request headers.
            # Copy first so the caller's dict is not mutated.
            headers = headers.copy()
            headers.update(_tracer.propagator.to_headers(
                _span.context_tracer.span_context))
        except Exception:  # pragma: NO COVER
            # Best-effort propagation: e.g. headers may be None or lack
            # .copy(); the request still proceeds without trace headers.
            pass

        return request_func(self, method, url, body, headers,
                            *args, **kwargs)

    return call
Wrap the httplib request function to trace. Create a new span and update and close the span in the response later.
Below is the the instruction that describes the task: ### Input: Wrap the httplib request function to trace. Create a new span and update and close the span in the response later. ### Response: def wrap_httplib_request(request_func): """Wrap the httplib request function to trace. Create a new span and update and close the span in the response later. """ def call(self, method, url, body, headers, *args, **kwargs): _tracer = execution_context.get_opencensus_tracer() blacklist_hostnames = execution_context.get_opencensus_attr( 'blacklist_hostnames') dest_url = '{}:{}'.format(self._dns_host, self.port) if utils.disable_tracing_hostname(dest_url, blacklist_hostnames): return request_func(self, method, url, body, headers, *args, **kwargs) _span = _tracer.start_span() _span.span_kind = span_module.SpanKind.CLIENT _span.name = '[httplib]{}'.format(request_func.__name__) # Add the request url to attributes _tracer.add_attribute_to_current_span(HTTP_URL, url) # Add the request method to attributes _tracer.add_attribute_to_current_span(HTTP_METHOD, method) # Store the current span id to thread local. execution_context.set_opencensus_attr( 'httplib/current_span_id', _span.span_id) try: headers = headers.copy() headers.update(_tracer.propagator.to_headers( _span.context_tracer.span_context)) except Exception: # pragma: NO COVER pass return request_func(self, method, url, body, headers, *args, **kwargs) return call
def get_existing_path(path, topmost_path=None):
    """Return the longest existing ancestor of `path` (or `path` itself).

    If `path` exists, it is returned unchanged.

    Args:
        path (str): Path to test.
        topmost_path (str): Do not test this path or above.

    Returns:
        str: Existing path, or None if no path was found.
    """
    if topmost_path:
        topmost_path = os.path.normpath(topmost_path)

    previous = None
    candidate = path
    while True:
        if os.path.exists(candidate):
            return candidate
        previous, candidate = candidate, os.path.dirname(candidate)
        if candidate == previous:
            # dirname() reached its fixed point — nothing above exists.
            return None
        if topmost_path and os.path.normpath(candidate) == topmost_path:
            return None
Get the longest parent path in `path` that exists. If `path` exists, it is returned. Args: path (str): Path to test topmost_path (str): Do not test this path or above Returns: str: Existing path, or None if no path was found.
Below is the the instruction that describes the task: ### Input: Get the longest parent path in `path` that exists. If `path` exists, it is returned. Args: path (str): Path to test topmost_path (str): Do not test this path or above Returns: str: Existing path, or None if no path was found. ### Response: def get_existing_path(path, topmost_path=None): """Get the longest parent path in `path` that exists. If `path` exists, it is returned. Args: path (str): Path to test topmost_path (str): Do not test this path or above Returns: str: Existing path, or None if no path was found. """ prev_path = None if topmost_path: topmost_path = os.path.normpath(topmost_path) while True: if os.path.exists(path): return path path = os.path.dirname(path) if path == prev_path: return None if topmost_path and os.path.normpath(path) == topmost_path: return None prev_path = path
def output(data, **kwargs):  # pylint: disable=unused-argument
    '''
    Print the output data in JSON
    '''
    try:
        if 'output_indent' not in __opts__:
            return salt.utils.json.dumps(data, default=repr, indent=4)

        indent = __opts__.get('output_indent')
        sort_keys = False

        if indent == 'pretty':
            indent = 4
            sort_keys = True
        elif isinstance(indent, int) and indent < 0:
            # A negative indent collapses to single-line output.
            indent = None
        # Any other value (None, non-negative int, or something else
        # entirely) is passed straight through to dumps().

        return salt.utils.json.dumps(
            data, default=repr, indent=indent, sort_keys=sort_keys)
    except UnicodeDecodeError as exc:
        log.error('Unable to serialize output to json')
        return salt.utils.json.dumps(
            {'error': 'Unable to serialize output to json',
             'message': six.text_type(exc)}
        )
    except TypeError:
        log.debug('An error occurred while outputting JSON', exc_info=True)
        # Return valid JSON for unserializable objects
        return salt.utils.json.dumps({})
Print the output data in JSON
Below is the the instruction that describes the task: ### Input: Print the output data in JSON ### Response: def output(data, **kwargs): # pylint: disable=unused-argument ''' Print the output data in JSON ''' try: if 'output_indent' not in __opts__: return salt.utils.json.dumps(data, default=repr, indent=4) indent = __opts__.get('output_indent') sort_keys = False if indent is None: indent = None elif indent == 'pretty': indent = 4 sort_keys = True elif isinstance(indent, int): if indent >= 0: indent = indent else: indent = None return salt.utils.json.dumps(data, default=repr, indent=indent, sort_keys=sort_keys) except UnicodeDecodeError as exc: log.error('Unable to serialize output to json') return salt.utils.json.dumps( {'error': 'Unable to serialize output to json', 'message': six.text_type(exc)} ) except TypeError: log.debug('An error occurred while outputting JSON', exc_info=True) # Return valid JSON for unserializable objects return salt.utils.json.dumps({})
def get_class_members(self, cls_name, cls):
    """Returns the list of class members to document in `cls`.

    This function filters the class member to ONLY return those
    defined by the class.  It drops the inherited ones.

    Args:
      cls_name: Qualified name of `cls`.
      cls: An inspect object of type 'class'.

    Yields:
      name, member tuples.
    """
    for name, member in inspect.getmembers(cls):
        # Only show methods and properties presently. In Python 3,
        # methods register as isfunction.
        is_routine = inspect.ismethod(member) or inspect.isfunction(member)
        if not is_routine and not isinstance(member, property):
            continue
        # Constructors are always documented; everything else is subject
        # to the inclusion filter.
        is_constructor = is_routine and member.__name__ == "__init__"
        if is_constructor or self._should_include_member(name, member):
            yield name, ("%s.%s" % (cls_name, name), member)
Returns the list of class members to document in `cls`. This function filters the class member to ONLY return those defined by the class. It drops the inherited ones. Args: cls_name: Qualified name of `cls`. cls: An inspect object of type 'class'. Yields: name, member tuples.
Below is the the instruction that describes the task: ### Input: Returns the list of class members to document in `cls`. This function filters the class member to ONLY return those defined by the class. It drops the inherited ones. Args: cls_name: Qualified name of `cls`. cls: An inspect object of type 'class'. Yields: name, member tuples. ### Response: def get_class_members(self, cls_name, cls): """Returns the list of class members to document in `cls`. This function filters the class member to ONLY return those defined by the class. It drops the inherited ones. Args: cls_name: Qualified name of `cls`. cls: An inspect object of type 'class'. Yields: name, member tuples. """ for name, member in inspect.getmembers(cls): # Only show methods and properties presently. In Python 3, # methods register as isfunction. is_method = inspect.ismethod(member) or inspect.isfunction(member) if not (is_method or isinstance(member, property)): continue if ((is_method and member.__name__ == "__init__") or self._should_include_member(name, member)): yield name, ("%s.%s" % (cls_name, name), member)
def RelaxNGSetSchema(self, schema):
    """Use RelaxNG to validate the document as it is processed.
       Activation is only possible before the first Read(). if
       @schema is None, then RelaxNG validation is desactivated. @
       The @schema should not be freed until the reader is
       deallocated or its use has been deactivated. """
    # Unwrap the Python proxy to the underlying C object (None disables
    # validation).
    schema__o = None if schema is None else schema._o
    return libxml2mod.xmlTextReaderRelaxNGSetSchema(self._o, schema__o)
Use RelaxNG to validate the document as it is processed. Activation is only possible before the first Read(). if @schema is None, then RelaxNG validation is desactivated. @ The @schema should not be freed until the reader is deallocated or its use has been deactivated.
Below is the the instruction that describes the task: ### Input: Use RelaxNG to validate the document as it is processed. Activation is only possible before the first Read(). if @schema is None, then RelaxNG validation is desactivated. @ The @schema should not be freed until the reader is deallocated or its use has been deactivated. ### Response: def RelaxNGSetSchema(self, schema): """Use RelaxNG to validate the document as it is processed. Activation is only possible before the first Read(). if @schema is None, then RelaxNG validation is desactivated. @ The @schema should not be freed until the reader is deallocated or its use has been deactivated. """ if schema is None: schema__o = None else: schema__o = schema._o ret = libxml2mod.xmlTextReaderRelaxNGSetSchema(self._o, schema__o) return ret
def parse(self, data):
    """Parse a 9 bytes packet in the Temperature format and return a
    dictionary containing the data extracted. An example of a return value
    would be:

    .. code-block:: python

        {
            'id': "0x2EB2",
            'packet_length': 8,
            'packet_type': 80,
            'packet_type_name': 'Temperature sensors',
            'sequence_number': 0,
            'packet_subtype': 1,
            'packet_subtype_name': "THR128/138, THC138",
            'temperature': 21.3,
            'signal_level': 9,
            'battery_level': 6,
        }

    :param data: bytearray to be parsed
    :type data: bytearray

    :return: Data dictionary containing the parsed values
    :rtype: dict
    """
    self.validate_packet(data)

    # Temperature is sign-magnitude: bit 7 of byte 6 carries the sign, the
    # remaining 15 bits encode tenths of a degree.
    magnitude = ((data[6] & 0x7f) * 256 + data[7]) / 10
    temperature = -magnitude if data[6] & 0x80 else magnitude

    results = self.parse_header_part(data)
    results.update(RfxPacketUtils.parse_signal_and_battery(data[8]))
    results.update({
        'id': self.dump_hex(data[4:6]),
        # 'channel': data[5],  TBC
        'temperature': temperature,
    })
    return results
Parse a 9 bytes packet in the Temperature format and return a dictionary containing the data extracted. An example of a return value would be: .. code-block:: python { 'id': "0x2EB2", 'packet_length': 8, 'packet_type': 80, 'packet_type_name': 'Temperature sensors', 'sequence_number': 0, 'packet_subtype': 1, 'packet_subtype_name': "THR128/138, THC138", 'temperature': 21.3, 'signal_level': 9, 'battery_level': 6, } :param data: bytearray to be parsed :type data: bytearray :return: Data dictionary containing the parsed values :rtype: dict
Below is the the instruction that describes the task: ### Input: Parse a 9 bytes packet in the Temperature format and return a dictionary containing the data extracted. An example of a return value would be: .. code-block:: python { 'id': "0x2EB2", 'packet_length': 8, 'packet_type': 80, 'packet_type_name': 'Temperature sensors', 'sequence_number': 0, 'packet_subtype': 1, 'packet_subtype_name': "THR128/138, THC138", 'temperature': 21.3, 'signal_level': 9, 'battery_level': 6, } :param data: bytearray to be parsed :type data: bytearray :return: Data dictionary containing the parsed values :rtype: dict ### Response: def parse(self, data): """Parse a 9 bytes packet in the Temperature format and return a dictionary containing the data extracted. An example of a return value would be: .. code-block:: python { 'id': "0x2EB2", 'packet_length': 8, 'packet_type': 80, 'packet_type_name': 'Temperature sensors', 'sequence_number': 0, 'packet_subtype': 1, 'packet_subtype_name': "THR128/138, THC138", 'temperature': 21.3, 'signal_level': 9, 'battery_level': 6, } :param data: bytearray to be parsed :type data: bytearray :return: Data dictionary containing the parsed values :rtype: dict """ self.validate_packet(data) id_ = self.dump_hex(data[4:6]) # channel = data[5] TBC temperature = ((data[6] & 0x7f) * 256 + data[7]) / 10 signbit = data[6] & 0x80 if signbit != 0: temperature = -temperature sensor_specific = { 'id': id_, # 'channel': channel, TBC 'temperature': temperature } results = self.parse_header_part(data) results.update(RfxPacketUtils.parse_signal_and_battery(data[8])) results.update(sensor_specific) return results
def repeat(
    df,
    column: str,
    *,
    times: int,
    new_column: str = None
):
    """
    Duplicate each string in `column` by indicated number of time

    See [pandas doc](
    https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.repeat.html)
    for more information

    ---

    ### Parameters

    *mandatory :*
    - `column` (*str*): the column
    - `times` (*int*): times to repeat the string

    *optional :*
    - `new_column` (*str*): the destination column (if not set, `column` will be used)
    """
    # Fall back to overwriting the source column when no destination given.
    destination = new_column or column
    repeated = df[column].str.repeat(times)
    df.loc[:, destination] = repeated
    return df
Duplicate each string in `column` by indicated number of time See [pandas doc]( https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.repeat.html) for more information --- ### Parameters *mandatory :* - `column` (*str*): the column - `times` (*int*): times to repeat the string *optional :* - `new_column` (*str*): the destination column (if not set, `column` will be used)
Below is the the instruction that describes the task: ### Input: Duplicate each string in `column` by indicated number of time See [pandas doc]( https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.repeat.html) for more information --- ### Parameters *mandatory :* - `column` (*str*): the column - `times` (*int*): times to repeat the string *optional :* - `new_column` (*str*): the destination column (if not set, `column` will be used) ### Response: def repeat( df, column: str, *, times: int, new_column: str = None ): """ Duplicate each string in `column` by indicated number of time See [pandas doc]( https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.repeat.html) for more information --- ### Parameters *mandatory :* - `column` (*str*): the column - `times` (*int*): times to repeat the string *optional :* - `new_column` (*str*): the destination column (if not set, `column` will be used) """ new_column = new_column or column df.loc[:, new_column] = df[column].str.repeat(times) return df
def while_do(self, classical_reg, q_program): """ While a classical register at index classical_reg is 1, loop q_program Equivalent to the following construction: .. code:: WHILE [c]: instr... => LABEL @START JUMP-UNLESS @END [c] instr... JUMP @START LABEL @END :param int classical_reg: The classical register to check :param Program q_program: The Quil program to loop. :return: The Quil Program with the loop instructions added. :rtype: Program """ label_start = LabelPlaceholder("START") label_end = LabelPlaceholder("END") self.inst(JumpTarget(label_start)) self.inst(JumpUnless(target=label_end, condition=classical_reg)) self.inst(q_program) self.inst(Jump(label_start)) self.inst(JumpTarget(label_end)) return self
While a classical register at index classical_reg is 1, loop q_program Equivalent to the following construction: .. code:: WHILE [c]: instr... => LABEL @START JUMP-UNLESS @END [c] instr... JUMP @START LABEL @END :param int classical_reg: The classical register to check :param Program q_program: The Quil program to loop. :return: The Quil Program with the loop instructions added. :rtype: Program
Below is the the instruction that describes the task: ### Input: While a classical register at index classical_reg is 1, loop q_program Equivalent to the following construction: .. code:: WHILE [c]: instr... => LABEL @START JUMP-UNLESS @END [c] instr... JUMP @START LABEL @END :param int classical_reg: The classical register to check :param Program q_program: The Quil program to loop. :return: The Quil Program with the loop instructions added. :rtype: Program ### Response: def while_do(self, classical_reg, q_program): """ While a classical register at index classical_reg is 1, loop q_program Equivalent to the following construction: .. code:: WHILE [c]: instr... => LABEL @START JUMP-UNLESS @END [c] instr... JUMP @START LABEL @END :param int classical_reg: The classical register to check :param Program q_program: The Quil program to loop. :return: The Quil Program with the loop instructions added. :rtype: Program """ label_start = LabelPlaceholder("START") label_end = LabelPlaceholder("END") self.inst(JumpTarget(label_start)) self.inst(JumpUnless(target=label_end, condition=classical_reg)) self.inst(q_program) self.inst(Jump(label_start)) self.inst(JumpTarget(label_end)) return self
def _db_install(self, db_name): """ Install nipap database schema """ self._logger.info("Installing NIPAP database schemas into db") self._execute(db_schema.ip_net % (db_name)) self._execute(db_schema.functions) self._execute(db_schema.triggers)
Install nipap database schema
Below is the the instruction that describes the task: ### Input: Install nipap database schema ### Response: def _db_install(self, db_name): """ Install nipap database schema """ self._logger.info("Installing NIPAP database schemas into db") self._execute(db_schema.ip_net % (db_name)) self._execute(db_schema.functions) self._execute(db_schema.triggers)
def pic_loggedrequiredremoterelease_v2(self): """Update the receiver link sequence.""" log = self.sequences.logs.fastaccess rec = self.sequences.receivers.fastaccess log.loggedrequiredremoterelease[0] = rec.s[0]
Update the receiver link sequence.
Below is the the instruction that describes the task: ### Input: Update the receiver link sequence. ### Response: def pic_loggedrequiredremoterelease_v2(self): """Update the receiver link sequence.""" log = self.sequences.logs.fastaccess rec = self.sequences.receivers.fastaccess log.loggedrequiredremoterelease[0] = rec.s[0]
def apply_tile_to_image(image, size, tile, tile_size, tile_corner): """ Copies a tile with a given offset onto an image :param image: The image the file is to be copied onto (as a list of (R,G,B) tuples) :param size: The size of the image as a tuple (width, height) :param tile: The tile to be copied over (as a list of (R,G,B) tuples) :param tile_size: The size of the tile as a tuple (width, height) :param tile_corner: The top left corner of the tile, in terms of the coordinates of the image, as a tuple (x,y) """ for y in range(tile_size[1]): for x in range(tile_size[0]): img_coords = (x + tile_corner[0], y + tile_corner[1]) image[coords_to_index(img_coords, size[0])] = tile[coords_to_index((x, y), tile_size[0])]
Copies a tile with a given offset onto an image :param image: The image the file is to be copied onto (as a list of (R,G,B) tuples) :param size: The size of the image as a tuple (width, height) :param tile: The tile to be copied over (as a list of (R,G,B) tuples) :param tile_size: The size of the tile as a tuple (width, height) :param tile_corner: The top left corner of the tile, in terms of the coordinates of the image, as a tuple (x,y)
Below is the the instruction that describes the task: ### Input: Copies a tile with a given offset onto an image :param image: The image the file is to be copied onto (as a list of (R,G,B) tuples) :param size: The size of the image as a tuple (width, height) :param tile: The tile to be copied over (as a list of (R,G,B) tuples) :param tile_size: The size of the tile as a tuple (width, height) :param tile_corner: The top left corner of the tile, in terms of the coordinates of the image, as a tuple (x,y) ### Response: def apply_tile_to_image(image, size, tile, tile_size, tile_corner): """ Copies a tile with a given offset onto an image :param image: The image the file is to be copied onto (as a list of (R,G,B) tuples) :param size: The size of the image as a tuple (width, height) :param tile: The tile to be copied over (as a list of (R,G,B) tuples) :param tile_size: The size of the tile as a tuple (width, height) :param tile_corner: The top left corner of the tile, in terms of the coordinates of the image, as a tuple (x,y) """ for y in range(tile_size[1]): for x in range(tile_size[0]): img_coords = (x + tile_corner[0], y + tile_corner[1]) image[coords_to_index(img_coords, size[0])] = tile[coords_to_index((x, y), tile_size[0])]
def dropEvent(self, event): """Allow user to drop supported files""" urls = mimedata2url(event.mimeData()) if urls: event.setDropAction(Qt.CopyAction) event.accept() self.sig_files_dropped.emit(urls) else: event.ignore()
Allow user to drop supported files
Below is the the instruction that describes the task: ### Input: Allow user to drop supported files ### Response: def dropEvent(self, event): """Allow user to drop supported files""" urls = mimedata2url(event.mimeData()) if urls: event.setDropAction(Qt.CopyAction) event.accept() self.sig_files_dropped.emit(urls) else: event.ignore()
def get(self, id, seq, line): # pylint: disable=invalid-name,redefined-builtin """Get a highlight. :param id: Result ID as an int. :param seq: TestResult sequence ID as an int. :param line: Line number in TestResult's logfile as an int. :return: :class:`highlights.Highlight <highlights.Highlight>` object """ schema = HighlightSchema() resp = self.service.get_id(self._base(id, seq), line) return self.service.decode(schema, resp)
Get a highlight. :param id: Result ID as an int. :param seq: TestResult sequence ID as an int. :param line: Line number in TestResult's logfile as an int. :return: :class:`highlights.Highlight <highlights.Highlight>` object
Below is the the instruction that describes the task: ### Input: Get a highlight. :param id: Result ID as an int. :param seq: TestResult sequence ID as an int. :param line: Line number in TestResult's logfile as an int. :return: :class:`highlights.Highlight <highlights.Highlight>` object ### Response: def get(self, id, seq, line): # pylint: disable=invalid-name,redefined-builtin """Get a highlight. :param id: Result ID as an int. :param seq: TestResult sequence ID as an int. :param line: Line number in TestResult's logfile as an int. :return: :class:`highlights.Highlight <highlights.Highlight>` object """ schema = HighlightSchema() resp = self.service.get_id(self._base(id, seq), line) return self.service.decode(schema, resp)
def load_object(import_path): """ Shamelessly stolen from https://github.com/ojii/django-load Loads an object from an 'import_path', like in MIDDLEWARE_CLASSES and the likes. Import paths should be: "mypackage.mymodule.MyObject". It then imports the module up until the last dot and tries to get the attribute after that dot from the imported module. If the import path does not contain any dots, a TypeError is raised. If the module cannot be imported, an ImportError is raised. If the attribute does not exist in the module, a AttributeError is raised. """ if '.' not in import_path: raise TypeError( "'import_path' argument to 'load_object' must " "contain at least one dot." ) module_name, object_name = import_path.rsplit('.', 1) module = import_module(module_name) return getattr(module, object_name)
Shamelessly stolen from https://github.com/ojii/django-load Loads an object from an 'import_path', like in MIDDLEWARE_CLASSES and the likes. Import paths should be: "mypackage.mymodule.MyObject". It then imports the module up until the last dot and tries to get the attribute after that dot from the imported module. If the import path does not contain any dots, a TypeError is raised. If the module cannot be imported, an ImportError is raised. If the attribute does not exist in the module, a AttributeError is raised.
Below is the the instruction that describes the task: ### Input: Shamelessly stolen from https://github.com/ojii/django-load Loads an object from an 'import_path', like in MIDDLEWARE_CLASSES and the likes. Import paths should be: "mypackage.mymodule.MyObject". It then imports the module up until the last dot and tries to get the attribute after that dot from the imported module. If the import path does not contain any dots, a TypeError is raised. If the module cannot be imported, an ImportError is raised. If the attribute does not exist in the module, a AttributeError is raised. ### Response: def load_object(import_path): """ Shamelessly stolen from https://github.com/ojii/django-load Loads an object from an 'import_path', like in MIDDLEWARE_CLASSES and the likes. Import paths should be: "mypackage.mymodule.MyObject". It then imports the module up until the last dot and tries to get the attribute after that dot from the imported module. If the import path does not contain any dots, a TypeError is raised. If the module cannot be imported, an ImportError is raised. If the attribute does not exist in the module, a AttributeError is raised. """ if '.' not in import_path: raise TypeError( "'import_path' argument to 'load_object' must " "contain at least one dot." ) module_name, object_name = import_path.rsplit('.', 1) module = import_module(module_name) return getattr(module, object_name)
def run_dot(self, args, name, parts=0, urls={}, graph_options={}, node_options={}, edge_options={}): """ Run graphviz 'dot' over this graph, returning whatever 'dot' writes to stdout. *args* will be passed along as commandline arguments. *name* is the name of the graph *urls* is a dictionary mapping class names to http urls Raises DotException for any of the many os and installation-related errors that may occur. """ try: dot = subprocess.Popen(['dot'] + list(args), stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True) except OSError: raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?") except ValueError: raise DotException("'dot' called with invalid arguments") except: raise DotException("Unexpected error calling 'dot'") self.generate_dot(dot.stdin, name, parts, urls, graph_options, node_options, edge_options) dot.stdin.close() result = dot.stdout.read() returncode = dot.wait() if returncode != 0: raise DotException("'dot' returned the errorcode %d" % returncode) return result
Run graphviz 'dot' over this graph, returning whatever 'dot' writes to stdout. *args* will be passed along as commandline arguments. *name* is the name of the graph *urls* is a dictionary mapping class names to http urls Raises DotException for any of the many os and installation-related errors that may occur.
Below is the the instruction that describes the task: ### Input: Run graphviz 'dot' over this graph, returning whatever 'dot' writes to stdout. *args* will be passed along as commandline arguments. *name* is the name of the graph *urls* is a dictionary mapping class names to http urls Raises DotException for any of the many os and installation-related errors that may occur. ### Response: def run_dot(self, args, name, parts=0, urls={}, graph_options={}, node_options={}, edge_options={}): """ Run graphviz 'dot' over this graph, returning whatever 'dot' writes to stdout. *args* will be passed along as commandline arguments. *name* is the name of the graph *urls* is a dictionary mapping class names to http urls Raises DotException for any of the many os and installation-related errors that may occur. """ try: dot = subprocess.Popen(['dot'] + list(args), stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True) except OSError: raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?") except ValueError: raise DotException("'dot' called with invalid arguments") except: raise DotException("Unexpected error calling 'dot'") self.generate_dot(dot.stdin, name, parts, urls, graph_options, node_options, edge_options) dot.stdin.close() result = dot.stdout.read() returncode = dot.wait() if returncode != 0: raise DotException("'dot' returned the errorcode %d" % returncode) return result
def retry(self, queue=None, delay=None, max_retries=None): """ Marks the current job as needing to be retried. Interrupts it. """ max_retries = max_retries if max_retries is None: max_retries = self.max_retries if self.data.get("retry_count", 0) >= max_retries: raise MaxRetriesInterrupt() exc = RetryInterrupt() exc.queue = queue or self.queue or self.data.get("queue") or "default" exc.retry_count = self.data.get("retry_count", 0) + 1 exc.delay = delay if exc.delay is None: exc.delay = self.retry_delay self._attach_original_exception(exc) raise exc
Marks the current job as needing to be retried. Interrupts it.
Below is the the instruction that describes the task: ### Input: Marks the current job as needing to be retried. Interrupts it. ### Response: def retry(self, queue=None, delay=None, max_retries=None): """ Marks the current job as needing to be retried. Interrupts it. """ max_retries = max_retries if max_retries is None: max_retries = self.max_retries if self.data.get("retry_count", 0) >= max_retries: raise MaxRetriesInterrupt() exc = RetryInterrupt() exc.queue = queue or self.queue or self.data.get("queue") or "default" exc.retry_count = self.data.get("retry_count", 0) + 1 exc.delay = delay if exc.delay is None: exc.delay = self.retry_delay self._attach_original_exception(exc) raise exc
def resize(self, width, height, **kwargs): """Resizes the image to the supplied width/height. Returns the instance. Supports the following optional keyword arguments: mode - The resizing mode to use, see Image.MODES filter - The filter to use: see Image.FILTERS background - The hexadecimal background fill color, RGB or ARGB position - The position used to crop: see Image.POSITIONS for pre-defined positions or a custom position ratio retain - The minimum percentage of the original image to retain when cropping """ opts = Image._normalize_options(kwargs) size = self._get_size(width, height) if opts["mode"] == "adapt": self._adapt(size, opts) elif opts["mode"] == "clip": self._clip(size, opts) elif opts["mode"] == "fill": self._fill(size, opts) elif opts["mode"] == "scale": self._scale(size, opts) else: self._crop(size, opts) return self
Resizes the image to the supplied width/height. Returns the instance. Supports the following optional keyword arguments: mode - The resizing mode to use, see Image.MODES filter - The filter to use: see Image.FILTERS background - The hexadecimal background fill color, RGB or ARGB position - The position used to crop: see Image.POSITIONS for pre-defined positions or a custom position ratio retain - The minimum percentage of the original image to retain when cropping
Below is the the instruction that describes the task: ### Input: Resizes the image to the supplied width/height. Returns the instance. Supports the following optional keyword arguments: mode - The resizing mode to use, see Image.MODES filter - The filter to use: see Image.FILTERS background - The hexadecimal background fill color, RGB or ARGB position - The position used to crop: see Image.POSITIONS for pre-defined positions or a custom position ratio retain - The minimum percentage of the original image to retain when cropping ### Response: def resize(self, width, height, **kwargs): """Resizes the image to the supplied width/height. Returns the instance. Supports the following optional keyword arguments: mode - The resizing mode to use, see Image.MODES filter - The filter to use: see Image.FILTERS background - The hexadecimal background fill color, RGB or ARGB position - The position used to crop: see Image.POSITIONS for pre-defined positions or a custom position ratio retain - The minimum percentage of the original image to retain when cropping """ opts = Image._normalize_options(kwargs) size = self._get_size(width, height) if opts["mode"] == "adapt": self._adapt(size, opts) elif opts["mode"] == "clip": self._clip(size, opts) elif opts["mode"] == "fill": self._fill(size, opts) elif opts["mode"] == "scale": self._scale(size, opts) else: self._crop(size, opts) return self
def update_configuration(self, timeout=-1): """ Reapplies the appliance's configuration on the enclosure. This includes running the same configure steps that were performed as part of the enclosure add. Args: timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: Enclosure """ uri = "{}/configuration".format(self.data['uri']) return self.update_with_zero_body(uri=uri, timeout=timeout)
Reapplies the appliance's configuration on the enclosure. This includes running the same configure steps that were performed as part of the enclosure add. Args: timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: Enclosure
Below is the the instruction that describes the task: ### Input: Reapplies the appliance's configuration on the enclosure. This includes running the same configure steps that were performed as part of the enclosure add. Args: timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: Enclosure ### Response: def update_configuration(self, timeout=-1): """ Reapplies the appliance's configuration on the enclosure. This includes running the same configure steps that were performed as part of the enclosure add. Args: timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: Enclosure """ uri = "{}/configuration".format(self.data['uri']) return self.update_with_zero_body(uri=uri, timeout=timeout)
def _in_conf_dir(filename): """ Put the `filename` to the configuration directory context / path. """ return os.path.join( os.path.dirname(__file__), "templates/conf", filename )
Put the `filename` to the configuration directory context / path.
Below is the the instruction that describes the task: ### Input: Put the `filename` to the configuration directory context / path. ### Response: def _in_conf_dir(filename): """ Put the `filename` to the configuration directory context / path. """ return os.path.join( os.path.dirname(__file__), "templates/conf", filename )
def diff_with_target(self, binary_im): """ Creates a color image to visualize the overlap between two images. Nonzero pixels that match in both images are green. Nonzero pixels of this image that aren't in the other image are yellow Nonzero pixels of the other image that aren't in this image are red Parameters ---------- binary_im : :obj:`BinaryImage` binary image to take the difference with Returns ------- :obj:`ColorImage` color image to visualize the image difference """ red = np.array([BINARY_IM_MAX_VAL, 0, 0]) yellow = np.array([BINARY_IM_MAX_VAL, BINARY_IM_MAX_VAL, 0]) green = np.array([0, BINARY_IM_MAX_VAL, 0]) overlap_data = np.zeros([self.height, self.width, 3]) unfilled_px = np.where((self.data == 0) & (binary_im.data > 0)) overlap_data[unfilled_px[0], unfilled_px[1], :] = red filled_px = np.where((self.data > 0) & (binary_im.data > 0)) overlap_data[filled_px[0], filled_px[1], :] = green spurious_px = np.where((self.data > 0) & (binary_im.data == 0)) overlap_data[spurious_px[0], spurious_px[1], :] = yellow return ColorImage(overlap_data.astype(np.uint8), frame=self.frame)
Creates a color image to visualize the overlap between two images. Nonzero pixels that match in both images are green. Nonzero pixels of this image that aren't in the other image are yellow Nonzero pixels of the other image that aren't in this image are red Parameters ---------- binary_im : :obj:`BinaryImage` binary image to take the difference with Returns ------- :obj:`ColorImage` color image to visualize the image difference
Below is the the instruction that describes the task: ### Input: Creates a color image to visualize the overlap between two images. Nonzero pixels that match in both images are green. Nonzero pixels of this image that aren't in the other image are yellow Nonzero pixels of the other image that aren't in this image are red Parameters ---------- binary_im : :obj:`BinaryImage` binary image to take the difference with Returns ------- :obj:`ColorImage` color image to visualize the image difference ### Response: def diff_with_target(self, binary_im): """ Creates a color image to visualize the overlap between two images. Nonzero pixels that match in both images are green. Nonzero pixels of this image that aren't in the other image are yellow Nonzero pixels of the other image that aren't in this image are red Parameters ---------- binary_im : :obj:`BinaryImage` binary image to take the difference with Returns ------- :obj:`ColorImage` color image to visualize the image difference """ red = np.array([BINARY_IM_MAX_VAL, 0, 0]) yellow = np.array([BINARY_IM_MAX_VAL, BINARY_IM_MAX_VAL, 0]) green = np.array([0, BINARY_IM_MAX_VAL, 0]) overlap_data = np.zeros([self.height, self.width, 3]) unfilled_px = np.where((self.data == 0) & (binary_im.data > 0)) overlap_data[unfilled_px[0], unfilled_px[1], :] = red filled_px = np.where((self.data > 0) & (binary_im.data > 0)) overlap_data[filled_px[0], filled_px[1], :] = green spurious_px = np.where((self.data > 0) & (binary_im.data == 0)) overlap_data[spurious_px[0], spurious_px[1], :] = yellow return ColorImage(overlap_data.astype(np.uint8), frame=self.frame)
def plugins(self, deduplicate=False): ''' Returns a flattened list of all plugins used by page components. ''' plugins = [] for c in self.components: if hasattr(c, 'plugins'): plugins += c.plugins() elif isinstance(c, Lib): plugins.append(c) elif hasattr(c, 'is_plugin') and c.is_plugin: plugins.append(c) if deduplicate: plugins = list(OrderedDict.fromkeys(plugins)) return plugins
Returns a flattened list of all plugins used by page components.
Below is the the instruction that describes the task: ### Input: Returns a flattened list of all plugins used by page components. ### Response: def plugins(self, deduplicate=False): ''' Returns a flattened list of all plugins used by page components. ''' plugins = [] for c in self.components: if hasattr(c, 'plugins'): plugins += c.plugins() elif isinstance(c, Lib): plugins.append(c) elif hasattr(c, 'is_plugin') and c.is_plugin: plugins.append(c) if deduplicate: plugins = list(OrderedDict.fromkeys(plugins)) return plugins
def reset_crops(self): """ Reset all known crops to the default crop. If settings.ASSET_CELERY is specified then the task will be run async """ if self._can_crop(): if settings.CELERY or settings.USE_CELERY_DECORATOR: # this means that we are using celery tasks.reset_crops.apply_async(args=[self.pk], countdown=5) else: tasks.reset_crops(None, asset=self)
Reset all known crops to the default crop. If settings.ASSET_CELERY is specified then the task will be run async
Below is the the instruction that describes the task: ### Input: Reset all known crops to the default crop. If settings.ASSET_CELERY is specified then the task will be run async ### Response: def reset_crops(self): """ Reset all known crops to the default crop. If settings.ASSET_CELERY is specified then the task will be run async """ if self._can_crop(): if settings.CELERY or settings.USE_CELERY_DECORATOR: # this means that we are using celery tasks.reset_crops.apply_async(args=[self.pk], countdown=5) else: tasks.reset_crops(None, asset=self)
def header(self, name, value): """ Defines a URL path to match. Only call this method if the URL has no path already defined. Arguments: path (str): URL path value to match. E.g: ``/api/users``. Returns: self: current Mock instance. """ headers = {name: value} self._request.headers = headers self.add_matcher(matcher('HeadersMatcher', headers))
Defines a URL path to match. Only call this method if the URL has no path already defined. Arguments: path (str): URL path value to match. E.g: ``/api/users``. Returns: self: current Mock instance.
Below is the the instruction that describes the task: ### Input: Defines a URL path to match. Only call this method if the URL has no path already defined. Arguments: path (str): URL path value to match. E.g: ``/api/users``. Returns: self: current Mock instance. ### Response: def header(self, name, value): """ Defines a URL path to match. Only call this method if the URL has no path already defined. Arguments: path (str): URL path value to match. E.g: ``/api/users``. Returns: self: current Mock instance. """ headers = {name: value} self._request.headers = headers self.add_matcher(matcher('HeadersMatcher', headers))
def clean_doi(doi_string): """ Use regex to extract all DOI ids from string (i.e. 10.1029/2005pa001215) :param str doi_string: Raw DOI string value from input file. Often not properly formatted. :return list: DOI ids. May contain 0, 1, or multiple ids. """ regex = re.compile(r'\b(10[.][0-9]{3,}(?:[.][0-9]+)*/(?:(?!["&\'<>,])\S)+)\b') try: # Returns a list of matching strings m = re.findall(regex, doi_string) except TypeError as e: # If doi_string is None type, return empty list print("TypeError cleaning DOI: {}, {}".format(doi_string, e)) m = [] return m
Use regex to extract all DOI ids from string (i.e. 10.1029/2005pa001215) :param str doi_string: Raw DOI string value from input file. Often not properly formatted. :return list: DOI ids. May contain 0, 1, or multiple ids.
Below is the the instruction that describes the task: ### Input: Use regex to extract all DOI ids from string (i.e. 10.1029/2005pa001215) :param str doi_string: Raw DOI string value from input file. Often not properly formatted. :return list: DOI ids. May contain 0, 1, or multiple ids. ### Response: def clean_doi(doi_string): """ Use regex to extract all DOI ids from string (i.e. 10.1029/2005pa001215) :param str doi_string: Raw DOI string value from input file. Often not properly formatted. :return list: DOI ids. May contain 0, 1, or multiple ids. """ regex = re.compile(r'\b(10[.][0-9]{3,}(?:[.][0-9]+)*/(?:(?!["&\'<>,])\S)+)\b') try: # Returns a list of matching strings m = re.findall(regex, doi_string) except TypeError as e: # If doi_string is None type, return empty list print("TypeError cleaning DOI: {}, {}".format(doi_string, e)) m = [] return m
def find_charged(self, mol): """Looks for positive charges in arginine, histidine or lysine, for negative in aspartic and glutamic acid.""" data = namedtuple('pcharge', 'atoms atoms_orig_idx type center restype resnr reschain') a_set = [] # Iterate through all residue, exclude those in chains defined as peptides for res in [r for r in pybel.ob.OBResidueIter(mol.OBMol) if not r.GetChain() in config.PEPTIDES]: if config.INTRA is not None: if res.GetChain() != config.INTRA: continue a_contributing = [] a_contributing_orig_idx = [] if res.GetName() in ('ARG', 'HIS', 'LYS'): # Arginine, Histidine or Lysine have charged sidechains for a in pybel.ob.OBResidueAtomIter(res): if a.GetType().startswith('N') and res.GetAtomProperty(a, 8) \ and not self.Mapper.mapid(a.GetIdx(), mtype='protein') in self.altconf: a_contributing.append(pybel.Atom(a)) a_contributing_orig_idx.append(self.Mapper.mapid(a.GetIdx(), mtype='protein')) if not len(a_contributing) == 0: a_set.append(data(atoms=a_contributing, atoms_orig_idx=a_contributing_orig_idx, type='positive', center=centroid([ac.coords for ac in a_contributing]), restype=res.GetName(), resnr=res.GetNum(), reschain=res.GetChain())) if res.GetName() in ('GLU', 'ASP'): # Aspartic or Glutamic Acid for a in pybel.ob.OBResidueAtomIter(res): if a.GetType().startswith('O') and res.GetAtomProperty(a, 8) \ and not self.Mapper.mapid(a.GetIdx(), mtype='protein') in self.altconf: a_contributing.append(pybel.Atom(a)) a_contributing_orig_idx.append(self.Mapper.mapid(a.GetIdx(), mtype='protein')) if not len(a_contributing) == 0: a_set.append(data(atoms=a_contributing, atoms_orig_idx=a_contributing_orig_idx, type='negative', center=centroid([ac.coords for ac in a_contributing]), restype=res.GetName(), resnr=res.GetNum(), reschain=res.GetChain())) return a_set
Looks for positive charges in arginine, histidine or lysine, for negative in aspartic and glutamic acid.
Below is the the instruction that describes the task: ### Input: Looks for positive charges in arginine, histidine or lysine, for negative in aspartic and glutamic acid. ### Response: def find_charged(self, mol): """Looks for positive charges in arginine, histidine or lysine, for negative in aspartic and glutamic acid.""" data = namedtuple('pcharge', 'atoms atoms_orig_idx type center restype resnr reschain') a_set = [] # Iterate through all residue, exclude those in chains defined as peptides for res in [r for r in pybel.ob.OBResidueIter(mol.OBMol) if not r.GetChain() in config.PEPTIDES]: if config.INTRA is not None: if res.GetChain() != config.INTRA: continue a_contributing = [] a_contributing_orig_idx = [] if res.GetName() in ('ARG', 'HIS', 'LYS'): # Arginine, Histidine or Lysine have charged sidechains for a in pybel.ob.OBResidueAtomIter(res): if a.GetType().startswith('N') and res.GetAtomProperty(a, 8) \ and not self.Mapper.mapid(a.GetIdx(), mtype='protein') in self.altconf: a_contributing.append(pybel.Atom(a)) a_contributing_orig_idx.append(self.Mapper.mapid(a.GetIdx(), mtype='protein')) if not len(a_contributing) == 0: a_set.append(data(atoms=a_contributing, atoms_orig_idx=a_contributing_orig_idx, type='positive', center=centroid([ac.coords for ac in a_contributing]), restype=res.GetName(), resnr=res.GetNum(), reschain=res.GetChain())) if res.GetName() in ('GLU', 'ASP'): # Aspartic or Glutamic Acid for a in pybel.ob.OBResidueAtomIter(res): if a.GetType().startswith('O') and res.GetAtomProperty(a, 8) \ and not self.Mapper.mapid(a.GetIdx(), mtype='protein') in self.altconf: a_contributing.append(pybel.Atom(a)) a_contributing_orig_idx.append(self.Mapper.mapid(a.GetIdx(), mtype='protein')) if not len(a_contributing) == 0: a_set.append(data(atoms=a_contributing, atoms_orig_idx=a_contributing_orig_idx, type='negative', center=centroid([ac.coords for ac in a_contributing]), restype=res.GetName(), resnr=res.GetNum(), reschain=res.GetChain())) return a_set
def get_ancestor_item(self, tree_alias, base_item):
    """Resolves the topmost (root) ancestor for the given tree item.

    :param str|unicode tree_alias:
    :param TreeItemBase base_item:
    :rtype: TreeItemBase
    """
    # Follow parent links upwards; the first item without a parent is the root.
    item = base_item
    while hasattr(item, 'parent') and item.parent is not None:
        item = self.get_item_by_id(tree_alias, item.parent.id)
    return item
Climbs up the site tree to resolve root item for chosen one. :param str|unicode tree_alias: :param TreeItemBase base_item: :rtype: TreeItemBase
Below is the instruction that describes the task: ### Input: Climbs up the site tree to resolve root item for chosen one. :param str|unicode tree_alias: :param TreeItemBase base_item: :rtype: TreeItemBase ### Response: def get_ancestor_item(self, tree_alias, base_item): """Climbs up the site tree to resolve root item for chosen one. :param str|unicode tree_alias: :param TreeItemBase base_item: :rtype: TreeItemBase """ parent = None if hasattr(base_item, 'parent') and base_item.parent is not None: parent = self.get_ancestor_item(tree_alias, self.get_item_by_id(tree_alias, base_item.parent.id)) if parent is None: return base_item return parent
def node_path(self, node):
    """Return two lists describing the path from this node to another

    Parameters
    ----------
    node : instance of Node
        The other node.

    Returns
    -------
    p1 : list
        Path from this node up to and including the deepest parent it
        shares with `node`.
    p2 : list
        The remaining hops from that shared parent down to `node`,
        excluding the shared parent itself.

    Notes
    -----
    Both endpoints contribute their parent chains; the first entry of
    this node's chain that also occurs in the other node's chain is
    taken as the common parent. A ``RuntimeError`` is raised when the
    chains share no entry at all.
    """
    own_chain = self.parent_chain()
    other_chain = node.parent_chain()
    # First of our ancestors (self included) that also appears in the
    # other chain is the closest common ancestor.
    common = next((entry for entry in own_chain if entry in other_chain), None)
    if common is None:
        raise RuntimeError("No single-path common parent between nodes %s "
                           "and %s." % (self, node))
    descent = own_chain[:own_chain.index(common) + 1]
    ascent = other_chain[:other_chain.index(common)][::-1]
    return descent, ascent
Return two lists describing the path from this node to another Parameters ---------- node : instance of Node The other node. Returns ------- p1 : list First path (see below). p2 : list Second path (see below). Notes ----- The first list starts with this node and ends with the common parent between the endpoint nodes. The second list contains the remainder of the path from the common parent to the specified ending node. For example, consider the following scenegraph:: A --- B --- C --- D \ --- E --- F Calling `D.node_path(F)` will return:: ([D, C, B], [E, F])
Below is the instruction that describes the task: ### Input: Return two lists describing the path from this node to another Parameters ---------- node : instance of Node The other node. Returns ------- p1 : list First path (see below). p2 : list Second path (see below). Notes ----- The first list starts with this node and ends with the common parent between the endpoint nodes. The second list contains the remainder of the path from the common parent to the specified ending node. For example, consider the following scenegraph:: A --- B --- C --- D \ --- E --- F Calling `D.node_path(F)` will return:: ([D, C, B], [E, F]) ### Response: def node_path(self, node): """Return two lists describing the path from this node to another Parameters ---------- node : instance of Node The other node. Returns ------- p1 : list First path (see below). p2 : list Second path (see below). Notes ----- The first list starts with this node and ends with the common parent between the endpoint nodes. The second list contains the remainder of the path from the common parent to the specified ending node. For example, consider the following scenegraph:: A --- B --- C --- D \ --- E --- F Calling `D.node_path(F)` will return:: ([D, C, B], [E, F]) """ p1 = self.parent_chain() p2 = node.parent_chain() cp = None for p in p1: if p in p2: cp = p break if cp is None: raise RuntimeError("No single-path common parent between nodes %s " "and %s." % (self, node)) p1 = p1[:p1.index(cp)+1] p2 = p2[:p2.index(cp)][::-1] return p1, p2
def rounder(input_number, digit=5):
    """
    Round input number and convert to str.

    :param input_number: input number
    :type input_number : anything
    :param digit: scale (the number of digits to the right of the decimal point in a number.)
    :type digit : int
    :return: round number as str
    """
    def _fmt(value):
        # Only values recognized as floats are rounded; everything else
        # is stringified unchanged.
        if isfloat(value):
            return str(numpy.around(value, digit))
        return str(value)

    if isinstance(input_number, tuple):
        return "(" + ",".join(_fmt(member) for member in input_number) + ")"
    return _fmt(input_number)
Round input number and convert to str. :param input_number: input number :type input_number : anything :param digit: scale (the number of digits to the right of the decimal point in a number.) :type digit : int :return: round number as str
Below is the instruction that describes the task: ### Input: Round input number and convert to str. :param input_number: input number :type input_number : anything :param digit: scale (the number of digits to the right of the decimal point in a number.) :type digit : int :return: round number as str ### Response: def rounder(input_number, digit=5): """ Round input number and convert to str. :param input_number: input number :type input_number : anything :param digit: scale (the number of digits to the right of the decimal point in a number.) :type digit : int :return: round number as str """ if isinstance(input_number, tuple): tuple_list = list(input_number) tuple_str = [] for i in tuple_list: if isfloat(i): tuple_str.append(str(numpy.around(i, digit))) else: tuple_str.append(str(i)) return "(" + ",".join(tuple_str) + ")" if isfloat(input_number): return str(numpy.around(input_number, digit)) return str(input_number)