body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
5eef1271b9ba01e9fcb76086cd060ab988e39c0b686493f6b0ec207282577d29
def __init__(self, mean, tiny, grayscale, enc_add_res_block=0, dec_add_res_block=0, num_task_channel=3, num_pos_channel=1, num_gn_channel=32, num_mlr=0, num_unfrozen_encoder=0, full_size_output=False): '\n Constructor.\n @param mean Mean offset for task output.\n @param tiny Flag for tiny network.\n @param grayscale Flag for grayscale image input.\n @param enc_add_res_block Number of additional DSAC* style residual block for encoder.\n @param dec_add_res_block Number of additional DSAC* style residual block for decoder.\n @param num_task_channel Number of channels for underlying task.\n @param num_pos_channel Number of channels for additional task w/ positive values, e.g., uncertainty.\n @param num_gn_channel Number of group normalization channels, a hyper-parameter.\n @param num_mlr Number of homogeneous mid-level representations encoders.\n @param num_unfrozen_encoder Number of encoders that are not frozen.\n @param full_size_output Flag for full-size network output (by using DUC-style layers).\n\n Note: if enc_add_res_block == dec_add_res_block == 0 && num_task_channel == 3 && num_pos_channel = 0,\n the model become DSAC* net + group normalization only.\n ' super(TransPoseNet, self).__init__() 'Init' self.register_buffer('mean', torch.tensor(mean.size()).cuda()) self.mean = mean.clone() self.tiny = tiny self.grayscale = grayscale self.enc_add_res_block = enc_add_res_block self.dec_add_res_block = dec_add_res_block self.num_task_channel = num_task_channel self.num_pos_channel = num_pos_channel self.num_gn_channel = num_gn_channel self.num_mlr = num_mlr self.full_size_output = full_size_output self.OUTPUT_SUBSAMPLE = (1 if full_size_output else 8) 'Vanilla encoder' if (num_mlr == 0): self.encoder = TransPoseNetEncoder(tiny, grayscale, enc_add_res_block, num_gn_channel) self.encoder_ls = [self.encoder] else: self.encoder = nn.Identity() self.encoder_ls = [self.encoder] 'MLR encoders' if ((num_mlr > 0) and isinstance(num_mlr, int)): assert (0 <= num_unfrozen_encoder 
<= num_mlr) self.mlr_encoder_ls = [TransPoseNetEncoder(tiny, grayscale, enc_add_res_block, num_gn_channel) for _ in range(num_mlr)] for (i, block) in enumerate(self.mlr_encoder_ls): if (i >= num_unfrozen_encoder): for param in block.parameters(): param.requires_grad = False self.add_module('mlr_encoder_{:d}'.format((i + 1)), block) self.mlr_norm = nn.GroupNorm(num_gn_channel, ((512, 128)[tiny] * num_mlr)) self.mlr_forward = _create_mlr_concatenator(num_mlr, tiny, num_gn_channel) self.mlr_skip = _create_mlr_skip_layer(num_mlr, tiny, num_gn_channel) else: self.mlr_encoder_ls = [nn.Identity()] self.mlr_norm = nn.Identity() self.mlr_forward = nn.Identity() self.mlr_skip = nn.Identity() self.mlr_ls = (self.mlr_encoder_ls + [self.mlr_norm, self.mlr_forward, self.mlr_skip]) 'Decoder' self.decoder = TransPoseNetDecoder(mean, tiny, dec_add_res_block, num_task_channel, num_pos_channel, num_gn_channel, full_size_output) self.decoder_ls = [self.decoder] 'Print out' safe_printout('Initialized network w/ group normalization, Tiny net: {}, Grayscale input: {}, Fullsize output: {}.'.format(self.tiny, self.grayscale, self.full_size_output)) safe_printout('#Aadditional residual blocks: Encoder: {:d}, Decoder: {:d}'.format(self.enc_add_res_block, self.dec_add_res_block)) safe_printout('#Task output channel {:d}, #Positive-value output channel {:d}, #Group normalization channel: {:d}.'.format(self.num_task_channel, self.num_pos_channel, self.num_gn_channel)) safe_printout('#MLR: {:d}, #Unfrozen encoder: {:d}'.format(num_mlr, num_unfrozen_encoder)) ttl_num_param = 0 param_info = 'Separation of #trainable parameters: ' for (name, struct) in zip(['Vanilla encoder', 'MLR encoder', 'Decoder'], [self.encoder_ls, self.mlr_ls, self.decoder_ls]): num_param = sum([param.numel() for layer in struct for param in layer.parameters() if param.requires_grad]) ttl_num_param += num_param param_info += '{:s}: {:,d}, '.format(name, num_param) param_info += 'Total: {:,d}.'.format(ttl_num_param) 
safe_printout(param_info)
Constructor. @param mean Mean offset for task output. @param tiny Flag for tiny network. @param grayscale Flag for grayscale image input. @param enc_add_res_block Number of additional DSAC* style residual block for encoder. @param dec_add_res_block Number of additional DSAC* style residual block for decoder. @param num_task_channel Number of channels for underlying task. @param num_pos_channel Number of channels for additional task w/ positive values, e.g., uncertainty. @param num_gn_channel Number of group normalization channels, a hyper-parameter. @param num_mlr Number of homogeneous mid-level representations encoders. @param num_unfrozen_encoder Number of encoders that are not frozen. @param full_size_output Flag for full-size network output (by using DUC-style layers). Note: if enc_add_res_block == dec_add_res_block == 0 && num_task_channel == 3 && num_pos_channel = 0, the model become DSAC* net + group normalization only.
networks/networks.py
__init__
TOPO-EPFL/CrossLoc
16
python
def __init__(self, mean, tiny, grayscale, enc_add_res_block=0, dec_add_res_block=0, num_task_channel=3, num_pos_channel=1, num_gn_channel=32, num_mlr=0, num_unfrozen_encoder=0, full_size_output=False): '\n Constructor.\n @param mean Mean offset for task output.\n @param tiny Flag for tiny network.\n @param grayscale Flag for grayscale image input.\n @param enc_add_res_block Number of additional DSAC* style residual block for encoder.\n @param dec_add_res_block Number of additional DSAC* style residual block for decoder.\n @param num_task_channel Number of channels for underlying task.\n @param num_pos_channel Number of channels for additional task w/ positive values, e.g., uncertainty.\n @param num_gn_channel Number of group normalization channels, a hyper-parameter.\n @param num_mlr Number of homogeneous mid-level representations encoders.\n @param num_unfrozen_encoder Number of encoders that are not frozen.\n @param full_size_output Flag for full-size network output (by using DUC-style layers).\n\n Note: if enc_add_res_block == dec_add_res_block == 0 && num_task_channel == 3 && num_pos_channel = 0,\n the model become DSAC* net + group normalization only.\n ' super(TransPoseNet, self).__init__() 'Init' self.register_buffer('mean', torch.tensor(mean.size()).cuda()) self.mean = mean.clone() self.tiny = tiny self.grayscale = grayscale self.enc_add_res_block = enc_add_res_block self.dec_add_res_block = dec_add_res_block self.num_task_channel = num_task_channel self.num_pos_channel = num_pos_channel self.num_gn_channel = num_gn_channel self.num_mlr = num_mlr self.full_size_output = full_size_output self.OUTPUT_SUBSAMPLE = (1 if full_size_output else 8) 'Vanilla encoder' if (num_mlr == 0): self.encoder = TransPoseNetEncoder(tiny, grayscale, enc_add_res_block, num_gn_channel) self.encoder_ls = [self.encoder] else: self.encoder = nn.Identity() self.encoder_ls = [self.encoder] 'MLR encoders' if ((num_mlr > 0) and isinstance(num_mlr, int)): assert (0 <= num_unfrozen_encoder 
<= num_mlr) self.mlr_encoder_ls = [TransPoseNetEncoder(tiny, grayscale, enc_add_res_block, num_gn_channel) for _ in range(num_mlr)] for (i, block) in enumerate(self.mlr_encoder_ls): if (i >= num_unfrozen_encoder): for param in block.parameters(): param.requires_grad = False self.add_module('mlr_encoder_{:d}'.format((i + 1)), block) self.mlr_norm = nn.GroupNorm(num_gn_channel, ((512, 128)[tiny] * num_mlr)) self.mlr_forward = _create_mlr_concatenator(num_mlr, tiny, num_gn_channel) self.mlr_skip = _create_mlr_skip_layer(num_mlr, tiny, num_gn_channel) else: self.mlr_encoder_ls = [nn.Identity()] self.mlr_norm = nn.Identity() self.mlr_forward = nn.Identity() self.mlr_skip = nn.Identity() self.mlr_ls = (self.mlr_encoder_ls + [self.mlr_norm, self.mlr_forward, self.mlr_skip]) 'Decoder' self.decoder = TransPoseNetDecoder(mean, tiny, dec_add_res_block, num_task_channel, num_pos_channel, num_gn_channel, full_size_output) self.decoder_ls = [self.decoder] 'Print out' safe_printout('Initialized network w/ group normalization, Tiny net: {}, Grayscale input: {}, Fullsize output: {}.'.format(self.tiny, self.grayscale, self.full_size_output)) safe_printout('#Aadditional residual blocks: Encoder: {:d}, Decoder: {:d}'.format(self.enc_add_res_block, self.dec_add_res_block)) safe_printout('#Task output channel {:d}, #Positive-value output channel {:d}, #Group normalization channel: {:d}.'.format(self.num_task_channel, self.num_pos_channel, self.num_gn_channel)) safe_printout('#MLR: {:d}, #Unfrozen encoder: {:d}'.format(num_mlr, num_unfrozen_encoder)) ttl_num_param = 0 param_info = 'Separation of #trainable parameters: ' for (name, struct) in zip(['Vanilla encoder', 'MLR encoder', 'Decoder'], [self.encoder_ls, self.mlr_ls, self.decoder_ls]): num_param = sum([param.numel() for layer in struct for param in layer.parameters() if param.requires_grad]) ttl_num_param += num_param param_info += '{:s}: {:,d}, '.format(name, num_param) param_info += 'Total: {:,d}.'.format(ttl_num_param) 
safe_printout(param_info)
def __init__(self, mean, tiny, grayscale, enc_add_res_block=0, dec_add_res_block=0, num_task_channel=3, num_pos_channel=1, num_gn_channel=32, num_mlr=0, num_unfrozen_encoder=0, full_size_output=False): '\n Constructor.\n @param mean Mean offset for task output.\n @param tiny Flag for tiny network.\n @param grayscale Flag for grayscale image input.\n @param enc_add_res_block Number of additional DSAC* style residual block for encoder.\n @param dec_add_res_block Number of additional DSAC* style residual block for decoder.\n @param num_task_channel Number of channels for underlying task.\n @param num_pos_channel Number of channels for additional task w/ positive values, e.g., uncertainty.\n @param num_gn_channel Number of group normalization channels, a hyper-parameter.\n @param num_mlr Number of homogeneous mid-level representations encoders.\n @param num_unfrozen_encoder Number of encoders that are not frozen.\n @param full_size_output Flag for full-size network output (by using DUC-style layers).\n\n Note: if enc_add_res_block == dec_add_res_block == 0 && num_task_channel == 3 && num_pos_channel = 0,\n the model become DSAC* net + group normalization only.\n ' super(TransPoseNet, self).__init__() 'Init' self.register_buffer('mean', torch.tensor(mean.size()).cuda()) self.mean = mean.clone() self.tiny = tiny self.grayscale = grayscale self.enc_add_res_block = enc_add_res_block self.dec_add_res_block = dec_add_res_block self.num_task_channel = num_task_channel self.num_pos_channel = num_pos_channel self.num_gn_channel = num_gn_channel self.num_mlr = num_mlr self.full_size_output = full_size_output self.OUTPUT_SUBSAMPLE = (1 if full_size_output else 8) 'Vanilla encoder' if (num_mlr == 0): self.encoder = TransPoseNetEncoder(tiny, grayscale, enc_add_res_block, num_gn_channel) self.encoder_ls = [self.encoder] else: self.encoder = nn.Identity() self.encoder_ls = [self.encoder] 'MLR encoders' if ((num_mlr > 0) and isinstance(num_mlr, int)): assert (0 <= num_unfrozen_encoder 
<= num_mlr) self.mlr_encoder_ls = [TransPoseNetEncoder(tiny, grayscale, enc_add_res_block, num_gn_channel) for _ in range(num_mlr)] for (i, block) in enumerate(self.mlr_encoder_ls): if (i >= num_unfrozen_encoder): for param in block.parameters(): param.requires_grad = False self.add_module('mlr_encoder_{:d}'.format((i + 1)), block) self.mlr_norm = nn.GroupNorm(num_gn_channel, ((512, 128)[tiny] * num_mlr)) self.mlr_forward = _create_mlr_concatenator(num_mlr, tiny, num_gn_channel) self.mlr_skip = _create_mlr_skip_layer(num_mlr, tiny, num_gn_channel) else: self.mlr_encoder_ls = [nn.Identity()] self.mlr_norm = nn.Identity() self.mlr_forward = nn.Identity() self.mlr_skip = nn.Identity() self.mlr_ls = (self.mlr_encoder_ls + [self.mlr_norm, self.mlr_forward, self.mlr_skip]) 'Decoder' self.decoder = TransPoseNetDecoder(mean, tiny, dec_add_res_block, num_task_channel, num_pos_channel, num_gn_channel, full_size_output) self.decoder_ls = [self.decoder] 'Print out' safe_printout('Initialized network w/ group normalization, Tiny net: {}, Grayscale input: {}, Fullsize output: {}.'.format(self.tiny, self.grayscale, self.full_size_output)) safe_printout('#Aadditional residual blocks: Encoder: {:d}, Decoder: {:d}'.format(self.enc_add_res_block, self.dec_add_res_block)) safe_printout('#Task output channel {:d}, #Positive-value output channel {:d}, #Group normalization channel: {:d}.'.format(self.num_task_channel, self.num_pos_channel, self.num_gn_channel)) safe_printout('#MLR: {:d}, #Unfrozen encoder: {:d}'.format(num_mlr, num_unfrozen_encoder)) ttl_num_param = 0 param_info = 'Separation of #trainable parameters: ' for (name, struct) in zip(['Vanilla encoder', 'MLR encoder', 'Decoder'], [self.encoder_ls, self.mlr_ls, self.decoder_ls]): num_param = sum([param.numel() for layer in struct for param in layer.parameters() if param.requires_grad]) ttl_num_param += num_param param_info += '{:s}: {:,d}, '.format(name, num_param) param_info += 'Total: {:,d}.'.format(ttl_num_param) 
safe_printout(param_info)<|docstring|>Constructor. @param mean Mean offset for task output. @param tiny Flag for tiny network. @param grayscale Flag for grayscale image input. @param enc_add_res_block Number of additional DSAC* style residual block for encoder. @param dec_add_res_block Number of additional DSAC* style residual block for decoder. @param num_task_channel Number of channels for underlying task. @param num_pos_channel Number of channels for additional task w/ positive values, e.g., uncertainty. @param num_gn_channel Number of group normalization channels, a hyper-parameter. @param num_mlr Number of homogeneous mid-level representations encoders. @param num_unfrozen_encoder Number of encoders that are not frozen. @param full_size_output Flag for full-size network output (by using DUC-style layers). Note: if enc_add_res_block == dec_add_res_block == 0 && num_task_channel == 3 && num_pos_channel = 0, the model become DSAC* net + group normalization only.<|endoftext|>
4276527c0c872664403e8efc1ffeb77dc24621a056f4a3dab166f457a7ffdbde
def forward(self, inputs): '\n Forward pass.\n\n @param inputs 4D data tensor (BxCxHxW)\n ' x = inputs (up_height, up_width) = inputs.size()[2:4] 'Vanilla encoder' if (self.num_mlr == 0): res = self.encoder(x) else: res = None 'MLR encoder' if self.num_mlr: mlr_activation_ls = [mlr_enc(inputs) for mlr_enc in self.mlr_encoder_ls] mlr = torch.cat(mlr_activation_ls, dim=1) res = self.mlr_skip(mlr) mlr = self.mlr_norm(mlr) mlr = self.mlr_forward(mlr) res = F.relu((res + mlr)) 'Decoder' if self.full_size_output: sc = self.decoder(res, up_height, up_width) else: sc = self.decoder(res) return sc
Forward pass. @param inputs 4D data tensor (BxCxHxW)
networks/networks.py
forward
TOPO-EPFL/CrossLoc
16
python
def forward(self, inputs): '\n Forward pass.\n\n @param inputs 4D data tensor (BxCxHxW)\n ' x = inputs (up_height, up_width) = inputs.size()[2:4] 'Vanilla encoder' if (self.num_mlr == 0): res = self.encoder(x) else: res = None 'MLR encoder' if self.num_mlr: mlr_activation_ls = [mlr_enc(inputs) for mlr_enc in self.mlr_encoder_ls] mlr = torch.cat(mlr_activation_ls, dim=1) res = self.mlr_skip(mlr) mlr = self.mlr_norm(mlr) mlr = self.mlr_forward(mlr) res = F.relu((res + mlr)) 'Decoder' if self.full_size_output: sc = self.decoder(res, up_height, up_width) else: sc = self.decoder(res) return sc
def forward(self, inputs): '\n Forward pass.\n\n @param inputs 4D data tensor (BxCxHxW)\n ' x = inputs (up_height, up_width) = inputs.size()[2:4] 'Vanilla encoder' if (self.num_mlr == 0): res = self.encoder(x) else: res = None 'MLR encoder' if self.num_mlr: mlr_activation_ls = [mlr_enc(inputs) for mlr_enc in self.mlr_encoder_ls] mlr = torch.cat(mlr_activation_ls, dim=1) res = self.mlr_skip(mlr) mlr = self.mlr_norm(mlr) mlr = self.mlr_forward(mlr) res = F.relu((res + mlr)) 'Decoder' if self.full_size_output: sc = self.decoder(res, up_height, up_width) else: sc = self.decoder(res) return sc<|docstring|>Forward pass. @param inputs 4D data tensor (BxCxHxW)<|endoftext|>
23520102a861245018c6829bf2c55c1cba432731ad6f3ed7e13545cdfcae9950
def forward(self, inputs): '\n @inputs [B, C, H, W], H=60, W=90 if no data augmentation.\n ' x = F.relu(self.norm1(self.conv1(inputs))) x = F.relu(self.norm2(self.conv2(x))) x = F.relu(self.norm3(self.conv3(x))) x = F.relu(self.norm4(self.conv4(x))) x = self.avgpool(x) x = torch.flatten(x, 1) return x
@inputs [B, C, H, W], H=60, W=90 if no data augmentation.
networks/networks.py
forward
TOPO-EPFL/CrossLoc
16
python
def forward(self, inputs): '\n \n ' x = F.relu(self.norm1(self.conv1(inputs))) x = F.relu(self.norm2(self.conv2(x))) x = F.relu(self.norm3(self.conv3(x))) x = F.relu(self.norm4(self.conv4(x))) x = self.avgpool(x) x = torch.flatten(x, 1) return x
def forward(self, inputs): '\n \n ' x = F.relu(self.norm1(self.conv1(inputs))) x = F.relu(self.norm2(self.conv2(x))) x = F.relu(self.norm3(self.conv3(x))) x = F.relu(self.norm4(self.conv4(x))) x = self.avgpool(x) x = torch.flatten(x, 1) return x<|docstring|>@inputs [B, C, H, W], H=60, W=90 if no data augmentation.<|endoftext|>
dff5f0924117287b8a9d996b0d00f28f55c01912c2c95fa39a2fe596adc4d7d8
def tabulate_results(results, csv_path, method='stillness'): "\n Tabulate the results as calculated by the sequential pipeline.\n\n Parameters\n ----------\n results : {dict, str}\n Either a dictionary of the results, or the path to the h5 file where the results were stored.\n csv_path : str\n Path to save the tabular data at\n method : {'stillness', 'displacement'}, optional\n Which method to tabulate results for. Default is 'stillness'.\n " (days, times, duration, vdisp, mxa, mna, sparc) = ([], [], [], [], [], [], []) mtd = f'{method.capitalize()} Method' if isinstance(results, dict): day_list = [i for i in results['Processed']['Sit2Stand'] if ('Day' in i)] for day in day_list: days.extend(([int(day[4:])] * results['Processed']['Sit2Stand'][day][mtd]['STS Times'].shape[0])) times.extend(results['Processed']['Sit2Stand'][day][mtd]['STS Times']) duration.extend(results['Processed']['Sit2Stand'][day][mtd]['Duration']) vdisp.extend(results['Processed']['Sit2Stand'][day][mtd]['Vertical Displacement']) mxa.extend(results['Processed']['Sit2Stand'][day][mtd]['Max. Accel.']) mna.extend(results['Processed']['Sit2Stand'][day][mtd]['Min. Accel.']) sparc.extend(results['Processed']['Sit2Stand'][day][mtd]['SPARC']) else: with h5py.File(results, 'r') as f: day_list = [i for i in f['Processed/Sit2Stand'] if ('Day' in i)] for day in day_list: days.extend(([int(day[4:])] * f[f'Processed/Sit2Stand/{day}/{mtd}/STS Times'].shape[0])) times.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/STS Times']) duration.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/Duration']) vdisp.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/Vertical Displacement']) mxa.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/Max. Accel.']) mna.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/Min. 
Accel.']) sparc.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/SPARC']) table = zeros((len(days), 12), dtype='object') table[(:, 0)] = days table[(:, 1:3)] = array(times) table[(:, 7)] = duration table[(:, 9)] = mxa table[(:, 10)] = mna table[(:, 11)] = sparc for (i, ts) in enumerate(table[(:, 1)]): dt = udt.utcfromtimestamp(ts) table[(i, 3)] = dt.strftime('%Y-%m-%d %H:%M:%S.%f') table[(i, 4)] = dt.hour table[(i, 5)] = dt.minute table[(i, 6)] = (dt.weekday() >= 5) hdr = 'Day,Start Unix Time,End Unix Time,Start Time,Hour,Minute,Weekend,Duration,Vertical Displacement,Max. Accel.,Min. Accel., SPARC' fmt = '%d, %f, %f, %s, %i, %i, %s, %f, %f, %f, %f, %f' savetxt(csv_path, table, header=hdr, fmt=fmt)
Tabulate the results as calculated by the sequential pipeline. Parameters ---------- results : {dict, str} Either a dictionary of the results, or the path to the h5 file where the results were stored. csv_path : str Path to save the tabular data at method : {'stillness', 'displacement'}, optional Which method to tabulate results for. Default is 'stillness'.
sit2standpy/v2/utility.py
tabulate_results
PfizerRD/pysit2stand
1
python
def tabulate_results(results, csv_path, method='stillness'): "\n Tabulate the results as calculated by the sequential pipeline.\n\n Parameters\n ----------\n results : {dict, str}\n Either a dictionary of the results, or the path to the h5 file where the results were stored.\n csv_path : str\n Path to save the tabular data at\n method : {'stillness', 'displacement'}, optional\n Which method to tabulate results for. Default is 'stillness'.\n " (days, times, duration, vdisp, mxa, mna, sparc) = ([], [], [], [], [], [], []) mtd = f'{method.capitalize()} Method' if isinstance(results, dict): day_list = [i for i in results['Processed']['Sit2Stand'] if ('Day' in i)] for day in day_list: days.extend(([int(day[4:])] * results['Processed']['Sit2Stand'][day][mtd]['STS Times'].shape[0])) times.extend(results['Processed']['Sit2Stand'][day][mtd]['STS Times']) duration.extend(results['Processed']['Sit2Stand'][day][mtd]['Duration']) vdisp.extend(results['Processed']['Sit2Stand'][day][mtd]['Vertical Displacement']) mxa.extend(results['Processed']['Sit2Stand'][day][mtd]['Max. Accel.']) mna.extend(results['Processed']['Sit2Stand'][day][mtd]['Min. Accel.']) sparc.extend(results['Processed']['Sit2Stand'][day][mtd]['SPARC']) else: with h5py.File(results, 'r') as f: day_list = [i for i in f['Processed/Sit2Stand'] if ('Day' in i)] for day in day_list: days.extend(([int(day[4:])] * f[f'Processed/Sit2Stand/{day}/{mtd}/STS Times'].shape[0])) times.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/STS Times']) duration.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/Duration']) vdisp.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/Vertical Displacement']) mxa.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/Max. Accel.']) mna.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/Min. 
Accel.']) sparc.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/SPARC']) table = zeros((len(days), 12), dtype='object') table[(:, 0)] = days table[(:, 1:3)] = array(times) table[(:, 7)] = duration table[(:, 9)] = mxa table[(:, 10)] = mna table[(:, 11)] = sparc for (i, ts) in enumerate(table[(:, 1)]): dt = udt.utcfromtimestamp(ts) table[(i, 3)] = dt.strftime('%Y-%m-%d %H:%M:%S.%f') table[(i, 4)] = dt.hour table[(i, 5)] = dt.minute table[(i, 6)] = (dt.weekday() >= 5) hdr = 'Day,Start Unix Time,End Unix Time,Start Time,Hour,Minute,Weekend,Duration,Vertical Displacement,Max. Accel.,Min. Accel., SPARC' fmt = '%d, %f, %f, %s, %i, %i, %s, %f, %f, %f, %f, %f' savetxt(csv_path, table, header=hdr, fmt=fmt)
def tabulate_results(results, csv_path, method='stillness'): "\n Tabulate the results as calculated by the sequential pipeline.\n\n Parameters\n ----------\n results : {dict, str}\n Either a dictionary of the results, or the path to the h5 file where the results were stored.\n csv_path : str\n Path to save the tabular data at\n method : {'stillness', 'displacement'}, optional\n Which method to tabulate results for. Default is 'stillness'.\n " (days, times, duration, vdisp, mxa, mna, sparc) = ([], [], [], [], [], [], []) mtd = f'{method.capitalize()} Method' if isinstance(results, dict): day_list = [i for i in results['Processed']['Sit2Stand'] if ('Day' in i)] for day in day_list: days.extend(([int(day[4:])] * results['Processed']['Sit2Stand'][day][mtd]['STS Times'].shape[0])) times.extend(results['Processed']['Sit2Stand'][day][mtd]['STS Times']) duration.extend(results['Processed']['Sit2Stand'][day][mtd]['Duration']) vdisp.extend(results['Processed']['Sit2Stand'][day][mtd]['Vertical Displacement']) mxa.extend(results['Processed']['Sit2Stand'][day][mtd]['Max. Accel.']) mna.extend(results['Processed']['Sit2Stand'][day][mtd]['Min. Accel.']) sparc.extend(results['Processed']['Sit2Stand'][day][mtd]['SPARC']) else: with h5py.File(results, 'r') as f: day_list = [i for i in f['Processed/Sit2Stand'] if ('Day' in i)] for day in day_list: days.extend(([int(day[4:])] * f[f'Processed/Sit2Stand/{day}/{mtd}/STS Times'].shape[0])) times.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/STS Times']) duration.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/Duration']) vdisp.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/Vertical Displacement']) mxa.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/Max. Accel.']) mna.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/Min. 
Accel.']) sparc.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/SPARC']) table = zeros((len(days), 12), dtype='object') table[(:, 0)] = days table[(:, 1:3)] = array(times) table[(:, 7)] = duration table[(:, 9)] = mxa table[(:, 10)] = mna table[(:, 11)] = sparc for (i, ts) in enumerate(table[(:, 1)]): dt = udt.utcfromtimestamp(ts) table[(i, 3)] = dt.strftime('%Y-%m-%d %H:%M:%S.%f') table[(i, 4)] = dt.hour table[(i, 5)] = dt.minute table[(i, 6)] = (dt.weekday() >= 5) hdr = 'Day,Start Unix Time,End Unix Time,Start Time,Hour,Minute,Weekend,Duration,Vertical Displacement,Max. Accel.,Min. Accel., SPARC' fmt = '%d, %f, %f, %s, %i, %i, %s, %f, %f, %f, %f, %f' savetxt(csv_path, table, header=hdr, fmt=fmt)<|docstring|>Tabulate the results as calculated by the sequential pipeline. Parameters ---------- results : {dict, str} Either a dictionary of the results, or the path to the h5 file where the results were stored. csv_path : str Path to save the tabular data at method : {'stillness', 'displacement'}, optional Which method to tabulate results for. Default is 'stillness'.<|endoftext|>
ad906f8b0d492b6e28f773e3f04fc79780d51052e51f1e74232f34c4669cbfaf
def mov_stats(seq, window): '\n Compute the centered moving average and standard deviation.\n\n Parameters\n ----------\n seq : numpy.ndarray\n Data to take the moving average and standard deviation on.\n window : int\n Window size for the moving average/standard deviation.\n\n Returns\n -------\n m_mn : numpy.ndarray\n Moving average\n m_st : numpy.ndarray\n Moving standard deviation\n pad : int\n Padding at beginning of the moving average and standard deviation\n ' def rolling_window(x, wind): if (not x.flags['C_CONTIGUOUS']): raise ValueError('Data must be C-contiguous to be able to window for moving statistics') shape = (x.shape[:(- 1)] + (((x.shape[(- 1)] - wind) + 1), wind)) strides = (x.strides + (x.strides[(- 1)],)) return stride_tricks.as_strided(x, shape=shape, strides=strides) m_mn = zeros(seq.shape) m_st = zeros(seq.shape) if (window < 2): window = 2 pad = int(ceil((window / 2))) rw_seq = rolling_window(seq, window) n = rw_seq.shape[0] m_mn[pad:(pad + n)] = mean(rw_seq, axis=(- 1)) m_st[pad:(pad + n)] = std(rw_seq, axis=(- 1), ddof=1) (m_mn[:pad], m_mn[(pad + n):]) = (m_mn[pad], m_mn[((- pad) - 1)]) (m_st[:pad], m_st[(pad + n):]) = (m_st[pad], m_st[((- pad) - 1)]) return (m_mn, m_st, pad)
Compute the centered moving average and standard deviation. Parameters ---------- seq : numpy.ndarray Data to take the moving average and standard deviation on. window : int Window size for the moving average/standard deviation. Returns ------- m_mn : numpy.ndarray Moving average m_st : numpy.ndarray Moving standard deviation pad : int Padding at beginning of the moving average and standard deviation
sit2standpy/v2/utility.py
mov_stats
PfizerRD/pysit2stand
1
python
def mov_stats(seq, window): '\n Compute the centered moving average and standard deviation.\n\n Parameters\n ----------\n seq : numpy.ndarray\n Data to take the moving average and standard deviation on.\n window : int\n Window size for the moving average/standard deviation.\n\n Returns\n -------\n m_mn : numpy.ndarray\n Moving average\n m_st : numpy.ndarray\n Moving standard deviation\n pad : int\n Padding at beginning of the moving average and standard deviation\n ' def rolling_window(x, wind): if (not x.flags['C_CONTIGUOUS']): raise ValueError('Data must be C-contiguous to be able to window for moving statistics') shape = (x.shape[:(- 1)] + (((x.shape[(- 1)] - wind) + 1), wind)) strides = (x.strides + (x.strides[(- 1)],)) return stride_tricks.as_strided(x, shape=shape, strides=strides) m_mn = zeros(seq.shape) m_st = zeros(seq.shape) if (window < 2): window = 2 pad = int(ceil((window / 2))) rw_seq = rolling_window(seq, window) n = rw_seq.shape[0] m_mn[pad:(pad + n)] = mean(rw_seq, axis=(- 1)) m_st[pad:(pad + n)] = std(rw_seq, axis=(- 1), ddof=1) (m_mn[:pad], m_mn[(pad + n):]) = (m_mn[pad], m_mn[((- pad) - 1)]) (m_st[:pad], m_st[(pad + n):]) = (m_st[pad], m_st[((- pad) - 1)]) return (m_mn, m_st, pad)
def mov_stats(seq, window): '\n Compute the centered moving average and standard deviation.\n\n Parameters\n ----------\n seq : numpy.ndarray\n Data to take the moving average and standard deviation on.\n window : int\n Window size for the moving average/standard deviation.\n\n Returns\n -------\n m_mn : numpy.ndarray\n Moving average\n m_st : numpy.ndarray\n Moving standard deviation\n pad : int\n Padding at beginning of the moving average and standard deviation\n ' def rolling_window(x, wind): if (not x.flags['C_CONTIGUOUS']): raise ValueError('Data must be C-contiguous to be able to window for moving statistics') shape = (x.shape[:(- 1)] + (((x.shape[(- 1)] - wind) + 1), wind)) strides = (x.strides + (x.strides[(- 1)],)) return stride_tricks.as_strided(x, shape=shape, strides=strides) m_mn = zeros(seq.shape) m_st = zeros(seq.shape) if (window < 2): window = 2 pad = int(ceil((window / 2))) rw_seq = rolling_window(seq, window) n = rw_seq.shape[0] m_mn[pad:(pad + n)] = mean(rw_seq, axis=(- 1)) m_st[pad:(pad + n)] = std(rw_seq, axis=(- 1), ddof=1) (m_mn[:pad], m_mn[(pad + n):]) = (m_mn[pad], m_mn[((- pad) - 1)]) (m_st[:pad], m_st[(pad + n):]) = (m_st[pad], m_st[((- pad) - 1)]) return (m_mn, m_st, pad)<|docstring|>Compute the centered moving average and standard deviation. Parameters ---------- seq : numpy.ndarray Data to take the moving average and standard deviation on. window : int Window size for the moving average/standard deviation. Returns ------- m_mn : numpy.ndarray Moving average m_st : numpy.ndarray Moving standard deviation pad : int Padding at beginning of the moving average and standard deviation<|endoftext|>
ea1ad910ae9ed98bf2996f3b2d07e5ebf69b01d34ee6d7208795dfd691c43850
def get_stillness(filt_accel, dt, window, gravity, thresholds): '\n Stillness determination based on filtered acceleration magnitude and jerk magnitude\n\n Parameters\n ----------\n filt_accel : numpy.ndarray\n 1D array of filtered magnitude of acceleration data, units of m/s^2\n dt : float\n Sampling time, in seconds\n window : float\n Moving statistics window length, in seconds\n gravity : float\n Gravitational acceleration, as measured by the sensor during static periods.\n thresholds : dict\n Dictionary of the 4 thresholds to be used - accel moving avg, accel moving std, \n jerk moving avg, and jerk moving std. \n Acceleration average thresholds should be for difference from gravitional acceleration.\n\n Returns\n -------\n still : numpy.ndarray\n (N, ) boolean array of stillness (True)\n starts : numpy.ndarray\n (Q, ) array of indices where stillness starts. Includes index 0 if still[0] is True. Q < (N/2)\n stops : numpy.ndarray\n (Q, ) array of indices where stillness ends. Includes index N-1 if still[-1] is True. Q < (N/2)\n ' n_window = int(around((window / dt))) (acc_rm, acc_rsd, _) = mov_stats(filt_accel, n_window) jerk = gradient(filt_accel, dt, edge_order=2) (jerk_rm, jerk_rsd, _) = mov_stats(jerk, n_window) arm_mask = (abs((acc_rm - gravity)) < thresholds['accel moving avg']) arsd_mask = (acc_rsd < thresholds['accel moving std']) jrm_mask = (abs(jerk_rm) < thresholds['jerk moving avg']) jrsd_mask = (jerk_rsd < thresholds['jerk moving std']) still = (((arm_mask & arsd_mask) & jrm_mask) & jrsd_mask) starts = where((diff(still.astype(int)) == 1))[0] stops = where((diff(still.astype(int)) == (- 1)))[0] if still[0]: starts = insert(starts, 0, 0) if still[(- 1)]: stops = append(stops, (len(still) - 1)) return (still, starts, stops)
Stillness determination based on filtered acceleration magnitude and jerk magnitude Parameters ---------- filt_accel : numpy.ndarray 1D array of filtered magnitude of acceleration data, units of m/s^2 dt : float Sampling time, in seconds window : float Moving statistics window length, in seconds gravity : float Gravitational acceleration, as measured by the sensor during static periods. thresholds : dict Dictionary of the 4 thresholds to be used - accel moving avg, accel moving std, jerk moving avg, and jerk moving std. Acceleration average thresholds should be for difference from gravitional acceleration. Returns ------- still : numpy.ndarray (N, ) boolean array of stillness (True) starts : numpy.ndarray (Q, ) array of indices where stillness starts. Includes index 0 if still[0] is True. Q < (N/2) stops : numpy.ndarray (Q, ) array of indices where stillness ends. Includes index N-1 if still[-1] is True. Q < (N/2)
sit2standpy/v2/utility.py
get_stillness
PfizerRD/pysit2stand
1
python
def get_stillness(filt_accel, dt, window, gravity, thresholds): '\n Stillness determination based on filtered acceleration magnitude and jerk magnitude\n\n Parameters\n ----------\n filt_accel : numpy.ndarray\n 1D array of filtered magnitude of acceleration data, units of m/s^2\n dt : float\n Sampling time, in seconds\n window : float\n Moving statistics window length, in seconds\n gravity : float\n Gravitational acceleration, as measured by the sensor during static periods.\n thresholds : dict\n Dictionary of the 4 thresholds to be used - accel moving avg, accel moving std, \n jerk moving avg, and jerk moving std. \n Acceleration average thresholds should be for difference from gravitional acceleration.\n\n Returns\n -------\n still : numpy.ndarray\n (N, ) boolean array of stillness (True)\n starts : numpy.ndarray\n (Q, ) array of indices where stillness starts. Includes index 0 if still[0] is True. Q < (N/2)\n stops : numpy.ndarray\n (Q, ) array of indices where stillness ends. Includes index N-1 if still[-1] is True. Q < (N/2)\n ' n_window = int(around((window / dt))) (acc_rm, acc_rsd, _) = mov_stats(filt_accel, n_window) jerk = gradient(filt_accel, dt, edge_order=2) (jerk_rm, jerk_rsd, _) = mov_stats(jerk, n_window) arm_mask = (abs((acc_rm - gravity)) < thresholds['accel moving avg']) arsd_mask = (acc_rsd < thresholds['accel moving std']) jrm_mask = (abs(jerk_rm) < thresholds['jerk moving avg']) jrsd_mask = (jerk_rsd < thresholds['jerk moving std']) still = (((arm_mask & arsd_mask) & jrm_mask) & jrsd_mask) starts = where((diff(still.astype(int)) == 1))[0] stops = where((diff(still.astype(int)) == (- 1)))[0] if still[0]: starts = insert(starts, 0, 0) if still[(- 1)]: stops = append(stops, (len(still) - 1)) return (still, starts, stops)
def get_stillness(filt_accel, dt, window, gravity, thresholds): '\n Stillness determination based on filtered acceleration magnitude and jerk magnitude\n\n Parameters\n ----------\n filt_accel : numpy.ndarray\n 1D array of filtered magnitude of acceleration data, units of m/s^2\n dt : float\n Sampling time, in seconds\n window : float\n Moving statistics window length, in seconds\n gravity : float\n Gravitational acceleration, as measured by the sensor during static periods.\n thresholds : dict\n Dictionary of the 4 thresholds to be used - accel moving avg, accel moving std, \n jerk moving avg, and jerk moving std. \n Acceleration average thresholds should be for difference from gravitional acceleration.\n\n Returns\n -------\n still : numpy.ndarray\n (N, ) boolean array of stillness (True)\n starts : numpy.ndarray\n (Q, ) array of indices where stillness starts. Includes index 0 if still[0] is True. Q < (N/2)\n stops : numpy.ndarray\n (Q, ) array of indices where stillness ends. Includes index N-1 if still[-1] is True. 
Q < (N/2)\n ' n_window = int(around((window / dt))) (acc_rm, acc_rsd, _) = mov_stats(filt_accel, n_window) jerk = gradient(filt_accel, dt, edge_order=2) (jerk_rm, jerk_rsd, _) = mov_stats(jerk, n_window) arm_mask = (abs((acc_rm - gravity)) < thresholds['accel moving avg']) arsd_mask = (acc_rsd < thresholds['accel moving std']) jrm_mask = (abs(jerk_rm) < thresholds['jerk moving avg']) jrsd_mask = (jerk_rsd < thresholds['jerk moving std']) still = (((arm_mask & arsd_mask) & jrm_mask) & jrsd_mask) starts = where((diff(still.astype(int)) == 1))[0] stops = where((diff(still.astype(int)) == (- 1)))[0] if still[0]: starts = insert(starts, 0, 0) if still[(- 1)]: stops = append(stops, (len(still) - 1)) return (still, starts, stops)<|docstring|>Stillness determination based on filtered acceleration magnitude and jerk magnitude Parameters ---------- filt_accel : numpy.ndarray 1D array of filtered magnitude of acceleration data, units of m/s^2 dt : float Sampling time, in seconds window : float Moving statistics window length, in seconds gravity : float Gravitational acceleration, as measured by the sensor during static periods. thresholds : dict Dictionary of the 4 thresholds to be used - accel moving avg, accel moving std, jerk moving avg, and jerk moving std. Acceleration average thresholds should be for difference from gravitional acceleration. Returns ------- still : numpy.ndarray (N, ) boolean array of stillness (True) starts : numpy.ndarray (Q, ) array of indices where stillness starts. Includes index 0 if still[0] is True. Q < (N/2) stops : numpy.ndarray (Q, ) array of indices where stillness ends. Includes index N-1 if still[-1] is True. Q < (N/2)<|endoftext|>
8f137d6d7cc904229f474f415fdb6524466048e0a51a9edee6156bf100376040
def remove_primers(workflow, fwd_primer, rev_primer, input_folder, output_folder, pair_id, threads): ' Identifies primers and N filters samples\n Args:\n workflow (anadama2.workflow): an instance of the workflow class\n input_folder (string): path to input folder\n output_folder (string): path to output folder\n fwd_primer (string): forward primer\n rev_primer (string): reverse primer\n pair_id (string): pair identifier\n threads (string): number of threads\n\n Requires:\n dada2, Biostrings, ShortRead, tools r packages\n\n Returns:\n string: path to folder with primers removed files\n ' script_path = utilities.get_package_file('identify_primers', 'Rscript') filtN_folder = os.path.join(output_folder, 'filtN') primers_folder = os.path.join(output_folder, 'primers') fwd_primer_file = os.path.join(primers_folder, 'fwd_primer_file.txt') rev_primer_file = os.path.join(primers_folder, 'rev_primer_file.txt') cutadapt_folder = os.path.join(output_folder, 'cutadapt') workflow.add_task('[vars[0]] --input_dir=[args[3]] --filtn_dir=[vars[1]] --primers_dir=[vars[2]] --threads=[args[4]] --fwd_primer_file=[targets[0]] --rev_primer_file=[targets[1]] --fwd_primer=[args[0]] --rev_primer=[args[1]] --pair_id=[args[2]]', targets=[fwd_primer_file, rev_primer_file, TrackedDirectory(filtN_folder)], args=[fwd_primer, rev_primer, pair_id, input_folder, threads], vars=[script_path, filtN_folder, primers_folder, output_folder], name='identify_primers') pair_id2 = pair_id.replace('1', '2', 1) fwd_files = sorted(fnmatch.filter(os.listdir(input_folder), (('*' + pair_id) + '*.fastq*'))) rev_files = sorted(fnmatch.filter(os.listdir(input_folder), (('*' + pair_id2) + '*.fastq*'))) for i in range(0, len(fwd_files)): fwd_file = os.path.join(input_folder, fwd_files[i]) rev_file = os.path.join(input_folder, rev_files[i]) workflow.add_task(cutadapt_do, depends=[fwd_primer_file, rev_primer_file, fwd_file, rev_file, TrackedDirectory(filtN_folder), TrackedExecutable('cutadapt', version_command="echo 
'cutadapt' `cutadapt --version`")], targets=[TrackedDirectory(cutadapt_folder)], name='remove_primers') return cutadapt_folder
Identifies primers and N filters samples Args: workflow (anadama2.workflow): an instance of the workflow class input_folder (string): path to input folder output_folder (string): path to output folder fwd_primer (string): forward primer rev_primer (string): reverse primer pair_id (string): pair identifier threads (string): number of threads Requires: dada2, Biostrings, ShortRead, tools r packages Returns: string: path to folder with primers removed files
biobakery_workflows/tasks/dadatwo.py
remove_primers
tkuntz-hsph/biobakery_workflows
47
python
def remove_primers(workflow, fwd_primer, rev_primer, input_folder, output_folder, pair_id, threads): ' Identifies primers and N filters samples\n Args:\n workflow (anadama2.workflow): an instance of the workflow class\n input_folder (string): path to input folder\n output_folder (string): path to output folder\n fwd_primer (string): forward primer\n rev_primer (string): reverse primer\n pair_id (string): pair identifier\n threads (string): number of threads\n\n Requires:\n dada2, Biostrings, ShortRead, tools r packages\n\n Returns:\n string: path to folder with primers removed files\n ' script_path = utilities.get_package_file('identify_primers', 'Rscript') filtN_folder = os.path.join(output_folder, 'filtN') primers_folder = os.path.join(output_folder, 'primers') fwd_primer_file = os.path.join(primers_folder, 'fwd_primer_file.txt') rev_primer_file = os.path.join(primers_folder, 'rev_primer_file.txt') cutadapt_folder = os.path.join(output_folder, 'cutadapt') workflow.add_task('[vars[0]] --input_dir=[args[3]] --filtn_dir=[vars[1]] --primers_dir=[vars[2]] --threads=[args[4]] --fwd_primer_file=[targets[0]] --rev_primer_file=[targets[1]] --fwd_primer=[args[0]] --rev_primer=[args[1]] --pair_id=[args[2]]', targets=[fwd_primer_file, rev_primer_file, TrackedDirectory(filtN_folder)], args=[fwd_primer, rev_primer, pair_id, input_folder, threads], vars=[script_path, filtN_folder, primers_folder, output_folder], name='identify_primers') pair_id2 = pair_id.replace('1', '2', 1) fwd_files = sorted(fnmatch.filter(os.listdir(input_folder), (('*' + pair_id) + '*.fastq*'))) rev_files = sorted(fnmatch.filter(os.listdir(input_folder), (('*' + pair_id2) + '*.fastq*'))) for i in range(0, len(fwd_files)): fwd_file = os.path.join(input_folder, fwd_files[i]) rev_file = os.path.join(input_folder, rev_files[i]) workflow.add_task(cutadapt_do, depends=[fwd_primer_file, rev_primer_file, fwd_file, rev_file, TrackedDirectory(filtN_folder), TrackedExecutable('cutadapt', version_command="echo 
'cutadapt' `cutadapt --version`")], targets=[TrackedDirectory(cutadapt_folder)], name='remove_primers') return cutadapt_folder
def remove_primers(workflow, fwd_primer, rev_primer, input_folder, output_folder, pair_id, threads): ' Identifies primers and N filters samples\n Args:\n workflow (anadama2.workflow): an instance of the workflow class\n input_folder (string): path to input folder\n output_folder (string): path to output folder\n fwd_primer (string): forward primer\n rev_primer (string): reverse primer\n pair_id (string): pair identifier\n threads (string): number of threads\n\n Requires:\n dada2, Biostrings, ShortRead, tools r packages\n\n Returns:\n string: path to folder with primers removed files\n ' script_path = utilities.get_package_file('identify_primers', 'Rscript') filtN_folder = os.path.join(output_folder, 'filtN') primers_folder = os.path.join(output_folder, 'primers') fwd_primer_file = os.path.join(primers_folder, 'fwd_primer_file.txt') rev_primer_file = os.path.join(primers_folder, 'rev_primer_file.txt') cutadapt_folder = os.path.join(output_folder, 'cutadapt') workflow.add_task('[vars[0]] --input_dir=[args[3]] --filtn_dir=[vars[1]] --primers_dir=[vars[2]] --threads=[args[4]] --fwd_primer_file=[targets[0]] --rev_primer_file=[targets[1]] --fwd_primer=[args[0]] --rev_primer=[args[1]] --pair_id=[args[2]]', targets=[fwd_primer_file, rev_primer_file, TrackedDirectory(filtN_folder)], args=[fwd_primer, rev_primer, pair_id, input_folder, threads], vars=[script_path, filtN_folder, primers_folder, output_folder], name='identify_primers') pair_id2 = pair_id.replace('1', '2', 1) fwd_files = sorted(fnmatch.filter(os.listdir(input_folder), (('*' + pair_id) + '*.fastq*'))) rev_files = sorted(fnmatch.filter(os.listdir(input_folder), (('*' + pair_id2) + '*.fastq*'))) for i in range(0, len(fwd_files)): fwd_file = os.path.join(input_folder, fwd_files[i]) rev_file = os.path.join(input_folder, rev_files[i]) workflow.add_task(cutadapt_do, depends=[fwd_primer_file, rev_primer_file, fwd_file, rev_file, TrackedDirectory(filtN_folder), TrackedExecutable('cutadapt', version_command="echo 
'cutadapt' `cutadapt --version`")], targets=[TrackedDirectory(cutadapt_folder)], name='remove_primers') return cutadapt_folder<|docstring|>Identifies primers and N filters samples Args: workflow (anadama2.workflow): an instance of the workflow class input_folder (string): path to input folder output_folder (string): path to output folder fwd_primer (string): forward primer rev_primer (string): reverse primer pair_id (string): pair identifier threads (string): number of threads Requires: dada2, Biostrings, ShortRead, tools r packages Returns: string: path to folder with primers removed files<|endoftext|>
9ce2db5d8810b7a08ad5010f77937b6b22c02a1a1bae5ee11f92caef28b0e181
def cutadapt_do(task): 'Reads primers from the files and runs cutadapt task\n Args:\n task (anadama2.task): an instance of the task class' from anadama2.util import get_name with open(get_name(task.depends[0])) as f: FWD = f.read().splitlines() with open(get_name(task.depends[1])) as f: REV = f.read().splitlines() cutadapt_folder = get_name(task.targets[0]) filtN_folder = get_name(task.depends[4]) fwd_filename = os.path.basename(get_name(task.depends[2])) rev_filename = os.path.basename(get_name(task.depends[3])) fwd_reads_out = os.path.join(cutadapt_folder, fwd_filename) rev_reads_out = os.path.join(cutadapt_folder, rev_filename) fwd_reads_in = os.path.join(filtN_folder, fwd_filename) rev_reads_in = os.path.join(filtN_folder, rev_filename) if (not os.path.exists(cutadapt_folder)): os.mkdir(cutadapt_folder) command = (((((((((((((((('cutadapt -g ' + FWD[0]) + ' -a ') + REV[1]) + ' -G ') + REV[0]) + ' -A ') + FWD[1]) + ' -n 2 -o ') + fwd_reads_out) + ' -p ') + rev_reads_out) + ' ') + fwd_reads_in) + ' ') + rev_reads_in) + ' --minimum-length 10') utilities.run_task(command, depends=task.depends, targets=task.targets)
Reads primers from the files and runs cutadapt task Args: task (anadama2.task): an instance of the task class
biobakery_workflows/tasks/dadatwo.py
cutadapt_do
tkuntz-hsph/biobakery_workflows
47
python
def cutadapt_do(task): 'Reads primers from the files and runs cutadapt task\n Args:\n task (anadama2.task): an instance of the task class' from anadama2.util import get_name with open(get_name(task.depends[0])) as f: FWD = f.read().splitlines() with open(get_name(task.depends[1])) as f: REV = f.read().splitlines() cutadapt_folder = get_name(task.targets[0]) filtN_folder = get_name(task.depends[4]) fwd_filename = os.path.basename(get_name(task.depends[2])) rev_filename = os.path.basename(get_name(task.depends[3])) fwd_reads_out = os.path.join(cutadapt_folder, fwd_filename) rev_reads_out = os.path.join(cutadapt_folder, rev_filename) fwd_reads_in = os.path.join(filtN_folder, fwd_filename) rev_reads_in = os.path.join(filtN_folder, rev_filename) if (not os.path.exists(cutadapt_folder)): os.mkdir(cutadapt_folder) command = (((((((((((((((('cutadapt -g ' + FWD[0]) + ' -a ') + REV[1]) + ' -G ') + REV[0]) + ' -A ') + FWD[1]) + ' -n 2 -o ') + fwd_reads_out) + ' -p ') + rev_reads_out) + ' ') + fwd_reads_in) + ' ') + rev_reads_in) + ' --minimum-length 10') utilities.run_task(command, depends=task.depends, targets=task.targets)
def cutadapt_do(task): 'Reads primers from the files and runs cutadapt task\n Args:\n task (anadama2.task): an instance of the task class' from anadama2.util import get_name with open(get_name(task.depends[0])) as f: FWD = f.read().splitlines() with open(get_name(task.depends[1])) as f: REV = f.read().splitlines() cutadapt_folder = get_name(task.targets[0]) filtN_folder = get_name(task.depends[4]) fwd_filename = os.path.basename(get_name(task.depends[2])) rev_filename = os.path.basename(get_name(task.depends[3])) fwd_reads_out = os.path.join(cutadapt_folder, fwd_filename) rev_reads_out = os.path.join(cutadapt_folder, rev_filename) fwd_reads_in = os.path.join(filtN_folder, fwd_filename) rev_reads_in = os.path.join(filtN_folder, rev_filename) if (not os.path.exists(cutadapt_folder)): os.mkdir(cutadapt_folder) command = (((((((((((((((('cutadapt -g ' + FWD[0]) + ' -a ') + REV[1]) + ' -G ') + REV[0]) + ' -A ') + FWD[1]) + ' -n 2 -o ') + fwd_reads_out) + ' -p ') + rev_reads_out) + ' ') + fwd_reads_in) + ' ') + rev_reads_in) + ' --minimum-length 10') utilities.run_task(command, depends=task.depends, targets=task.targets)<|docstring|>Reads primers from the files and runs cutadapt task Args: task (anadama2.task): an instance of the task class<|endoftext|>
fc301987072b613d46acdcce4f82f47fb669bc57902c49d85098d80b1f02f69e
def filter_trim(workflow, input_folder, output_folder, maxee, trunc_len_max, pair_id, threads, trunc_len_rev_offset): ' Filters samples by maxee and trims them, renders quality control plots\n of forward and reverse reads for each sample, creates read counts tsv and rds files.\n \n Args:\n workflow (anadama2.workflow): an instance of the workflow class\n input_folder (string): path to input folder\n output_folder (string): path to output folder\n maxee (string): maxee value to use for filtering\n trunc_len_max (string): max length for truncating reads\n pair_id (string): pair identifier\n threads (int): number of threads\n \n Requires:\n dada2, gridExtra,tools r packages\n \n Returns:\n string: path to file that contains read counts before and after filtering\n string: path to folder with filtered and trimmed sample files\n ' reads_plotF_png = files.SixteenS.path('readF_qc', output_folder) reads_plotR_png = files.SixteenS.path('readR_qc', output_folder) readcounts_tsv_path = os.path.join(output_folder, 'Read_counts_after_filtering.tsv') readcounts_rds_path = os.path.join(output_folder, 'Read_counts_filt.rds') filtered_dir = 'filtered_input' script_path = utilities.get_package_file('filter_and_trim', 'Rscript') workflow.add_task("[vars[0]] --input_dir=[args[0]] --output_dir=[args[1]] --filtered_dir=[vars[1]] --maxee=[args[2]] --trunc_len_max=[args[3]] --readcounts_tsv_path=[targets[0]] --readcounts_rds_path=[targets[1]] --reads_plotF=[targets[2]] --reads_plotR=[args[7]] --pair_id=[args[4]] --threads=[args[5]] --trunc_len_rev_offset='[args[6]]'", depends=[TrackedDirectory(input_folder)], targets=[readcounts_tsv_path, readcounts_rds_path, reads_plotF_png], args=[input_folder, output_folder, maxee, trunc_len_max, pair_id, threads, trunc_len_rev_offset, reads_plotR_png], vars=[script_path, filtered_dir], name='filter_and_trim') return (readcounts_tsv_path, filtered_dir)
Filters samples by maxee and trims them, renders quality control plots of forward and reverse reads for each sample, creates read counts tsv and rds files. Args: workflow (anadama2.workflow): an instance of the workflow class input_folder (string): path to input folder output_folder (string): path to output folder maxee (string): maxee value to use for filtering trunc_len_max (string): max length for truncating reads pair_id (string): pair identifier threads (int): number of threads Requires: dada2, gridExtra,tools r packages Returns: string: path to file that contains read counts before and after filtering string: path to folder with filtered and trimmed sample files
biobakery_workflows/tasks/dadatwo.py
filter_trim
tkuntz-hsph/biobakery_workflows
47
python
def filter_trim(workflow, input_folder, output_folder, maxee, trunc_len_max, pair_id, threads, trunc_len_rev_offset): ' Filters samples by maxee and trims them, renders quality control plots\n of forward and reverse reads for each sample, creates read counts tsv and rds files.\n \n Args:\n workflow (anadama2.workflow): an instance of the workflow class\n input_folder (string): path to input folder\n output_folder (string): path to output folder\n maxee (string): maxee value to use for filtering\n trunc_len_max (string): max length for truncating reads\n pair_id (string): pair identifier\n threads (int): number of threads\n \n Requires:\n dada2, gridExtra,tools r packages\n \n Returns:\n string: path to file that contains read counts before and after filtering\n string: path to folder with filtered and trimmed sample files\n ' reads_plotF_png = files.SixteenS.path('readF_qc', output_folder) reads_plotR_png = files.SixteenS.path('readR_qc', output_folder) readcounts_tsv_path = os.path.join(output_folder, 'Read_counts_after_filtering.tsv') readcounts_rds_path = os.path.join(output_folder, 'Read_counts_filt.rds') filtered_dir = 'filtered_input' script_path = utilities.get_package_file('filter_and_trim', 'Rscript') workflow.add_task("[vars[0]] --input_dir=[args[0]] --output_dir=[args[1]] --filtered_dir=[vars[1]] --maxee=[args[2]] --trunc_len_max=[args[3]] --readcounts_tsv_path=[targets[0]] --readcounts_rds_path=[targets[1]] --reads_plotF=[targets[2]] --reads_plotR=[args[7]] --pair_id=[args[4]] --threads=[args[5]] --trunc_len_rev_offset='[args[6]]'", depends=[TrackedDirectory(input_folder)], targets=[readcounts_tsv_path, readcounts_rds_path, reads_plotF_png], args=[input_folder, output_folder, maxee, trunc_len_max, pair_id, threads, trunc_len_rev_offset, reads_plotR_png], vars=[script_path, filtered_dir], name='filter_and_trim') return (readcounts_tsv_path, filtered_dir)
def filter_trim(workflow, input_folder, output_folder, maxee, trunc_len_max, pair_id, threads, trunc_len_rev_offset): ' Filters samples by maxee and trims them, renders quality control plots\n of forward and reverse reads for each sample, creates read counts tsv and rds files.\n \n Args:\n workflow (anadama2.workflow): an instance of the workflow class\n input_folder (string): path to input folder\n output_folder (string): path to output folder\n maxee (string): maxee value to use for filtering\n trunc_len_max (string): max length for truncating reads\n pair_id (string): pair identifier\n threads (int): number of threads\n \n Requires:\n dada2, gridExtra,tools r packages\n \n Returns:\n string: path to file that contains read counts before and after filtering\n string: path to folder with filtered and trimmed sample files\n ' reads_plotF_png = files.SixteenS.path('readF_qc', output_folder) reads_plotR_png = files.SixteenS.path('readR_qc', output_folder) readcounts_tsv_path = os.path.join(output_folder, 'Read_counts_after_filtering.tsv') readcounts_rds_path = os.path.join(output_folder, 'Read_counts_filt.rds') filtered_dir = 'filtered_input' script_path = utilities.get_package_file('filter_and_trim', 'Rscript') workflow.add_task("[vars[0]] --input_dir=[args[0]] --output_dir=[args[1]] --filtered_dir=[vars[1]] --maxee=[args[2]] --trunc_len_max=[args[3]] --readcounts_tsv_path=[targets[0]] --readcounts_rds_path=[targets[1]] --reads_plotF=[targets[2]] --reads_plotR=[args[7]] --pair_id=[args[4]] --threads=[args[5]] --trunc_len_rev_offset='[args[6]]'", depends=[TrackedDirectory(input_folder)], targets=[readcounts_tsv_path, readcounts_rds_path, reads_plotF_png], args=[input_folder, output_folder, maxee, trunc_len_max, pair_id, threads, trunc_len_rev_offset, reads_plotR_png], vars=[script_path, filtered_dir], name='filter_and_trim') return (readcounts_tsv_path, filtered_dir)<|docstring|>Filters samples by maxee and trims them, renders quality control plots of forward and 
reverse reads for each sample, creates read counts tsv and rds files. Args: workflow (anadama2.workflow): an instance of the workflow class input_folder (string): path to input folder output_folder (string): path to output folder maxee (string): maxee value to use for filtering trunc_len_max (string): max length for truncating reads pair_id (string): pair identifier threads (int): number of threads Requires: dada2, gridExtra,tools r packages Returns: string: path to file that contains read counts before and after filtering string: path to folder with filtered and trimmed sample files<|endoftext|>
7e1ba373c9d0083e1b47db7d52a95d7d3939d41b1a8ed70342f76f40fa56cc83
def learn_error(workflow, output_folder, filtered_dir, readcounts_tsv_path, threads): ' Learns error rates for each sample, renders error rates plots for forward and reverse reads\n \n Args:\n workflow (anadama2.workflow): an instance of the workflow class\n output_folder (string): path to output folder\n filtered_dir (string): path to directory with filtered files\n readcounts_tsv_path (string): path to read counts after filtering tsv file\n threads (int): number of threads\n\n Requires:\n dada2, ggplot2 r packages\n\n Returns:\n string: path to file that contains error rates of forward reads\n string: path to file that contains error rates of reverse reads\n ' error_ratesF_png = files.SixteenS.path('error_ratesF', output_folder) error_ratesR_png = files.SixteenS.path('error_ratesR', output_folder) error_ratesF_path = os.path.join(output_folder, 'error_ratesFWD.rds') error_ratesR_path = os.path.join(output_folder, 'error_ratesREV.rds') script_path = utilities.get_package_file('learn_error_rates', 'Rscript') workflow.add_task('[vars[0]] --output_dir=[args[0]] --filtered_dir=[args[1]] --error_ratesF_png=[targets[0]] --error_ratesR_png=[args[2]] --error_ratesF_path=[targets[1]] --error_ratesR_path=[args[3]] --threads=[vars[1]]', depends=[readcounts_tsv_path], targets=[error_ratesF_png, error_ratesF_path], args=[output_folder, filtered_dir, error_ratesR_png, error_ratesR_path], vars=[script_path, threads], name='learn_error_rates') return (error_ratesF_path, error_ratesR_path)
Learns error rates for each sample, renders error rates plots for forward and reverse reads Args: workflow (anadama2.workflow): an instance of the workflow class output_folder (string): path to output folder filtered_dir (string): path to directory with filtered files readcounts_tsv_path (string): path to read counts after filtering tsv file threads (int): number of threads Requires: dada2, ggplot2 r packages Returns: string: path to file that contains error rates of forward reads string: path to file that contains error rates of reverse reads
biobakery_workflows/tasks/dadatwo.py
learn_error
tkuntz-hsph/biobakery_workflows
47
python
def learn_error(workflow, output_folder, filtered_dir, readcounts_tsv_path, threads): ' Learns error rates for each sample, renders error rates plots for forward and reverse reads\n \n Args:\n workflow (anadama2.workflow): an instance of the workflow class\n output_folder (string): path to output folder\n filtered_dir (string): path to directory with filtered files\n readcounts_tsv_path (string): path to read counts after filtering tsv file\n threads (int): number of threads\n\n Requires:\n dada2, ggplot2 r packages\n\n Returns:\n string: path to file that contains error rates of forward reads\n string: path to file that contains error rates of reverse reads\n ' error_ratesF_png = files.SixteenS.path('error_ratesF', output_folder) error_ratesR_png = files.SixteenS.path('error_ratesR', output_folder) error_ratesF_path = os.path.join(output_folder, 'error_ratesFWD.rds') error_ratesR_path = os.path.join(output_folder, 'error_ratesREV.rds') script_path = utilities.get_package_file('learn_error_rates', 'Rscript') workflow.add_task('[vars[0]] --output_dir=[args[0]] --filtered_dir=[args[1]] --error_ratesF_png=[targets[0]] --error_ratesR_png=[args[2]] --error_ratesF_path=[targets[1]] --error_ratesR_path=[args[3]] --threads=[vars[1]]', depends=[readcounts_tsv_path], targets=[error_ratesF_png, error_ratesF_path], args=[output_folder, filtered_dir, error_ratesR_png, error_ratesR_path], vars=[script_path, threads], name='learn_error_rates') return (error_ratesF_path, error_ratesR_path)
def learn_error(workflow, output_folder, filtered_dir, readcounts_tsv_path, threads): ' Learns error rates for each sample, renders error rates plots for forward and reverse reads\n \n Args:\n workflow (anadama2.workflow): an instance of the workflow class\n output_folder (string): path to output folder\n filtered_dir (string): path to directory with filtered files\n readcounts_tsv_path (string): path to read counts after filtering tsv file\n threads (int): number of threads\n\n Requires:\n dada2, ggplot2 r packages\n\n Returns:\n string: path to file that contains error rates of forward reads\n string: path to file that contains error rates of reverse reads\n ' error_ratesF_png = files.SixteenS.path('error_ratesF', output_folder) error_ratesR_png = files.SixteenS.path('error_ratesR', output_folder) error_ratesF_path = os.path.join(output_folder, 'error_ratesFWD.rds') error_ratesR_path = os.path.join(output_folder, 'error_ratesREV.rds') script_path = utilities.get_package_file('learn_error_rates', 'Rscript') workflow.add_task('[vars[0]] --output_dir=[args[0]] --filtered_dir=[args[1]] --error_ratesF_png=[targets[0]] --error_ratesR_png=[args[2]] --error_ratesF_path=[targets[1]] --error_ratesR_path=[args[3]] --threads=[vars[1]]', depends=[readcounts_tsv_path], targets=[error_ratesF_png, error_ratesF_path], args=[output_folder, filtered_dir, error_ratesR_png, error_ratesR_path], vars=[script_path, threads], name='learn_error_rates') return (error_ratesF_path, error_ratesR_path)<|docstring|>Learns error rates for each sample, renders error rates plots for forward and reverse reads Args: workflow (anadama2.workflow): an instance of the workflow class output_folder (string): path to output folder filtered_dir (string): path to directory with filtered files readcounts_tsv_path (string): path to read counts after filtering tsv file threads (int): number of threads Requires: dada2, ggplot2 r packages Returns: string: path to file that contains error rates of forward reads 
string: path to file that contains error rates of reverse reads<|endoftext|>
2299ab290e6523e07579a3bfda345a84c929eacba7aa1b31d605d7f46ae1addb
def merge_paired_ends(workflow, output_dir, filtered_dir, error_ratesF_path, error_ratesR_path, threads, minoverlap, maxmismatch): ' Dereplicates and merges paired reads\n \n Args:\n workflow (anadama2.workflow): an instance of the workflow class\n output_folder (string): path to output folder\n filtered_dir (string): path to directory with filtered files\n error_ratesF_path (string): path to rds file that contains error rates of forward reads\n error_ratesR_path (string): path to rds file that contains error rates of reverse reads\n threads (int): number of threads\n minoverlap (int): the min number of pairs for overlap for the merge step\n maxmismatch (int): the max number of mismatch for pairs to merge\n Requires:\n dada2, tools r packages\n \n Returns:\n string: path to rds file that contains merged and dereplicated reads\n ' mergers_file_path = os.path.join(output_dir, 'mergers.rds') script_path = utilities.get_package_file('merge_paired_ends', 'Rscript') workflow.add_task('[vars[0]] --output_dir=[args[0]] --filtered_dir=[args[1]] --error_ratesF_path=[depends[0]] --error_ratesR_path=[args[4]] --mergers_file_path=[targets[0]] --threads=[vars[1]] --minoverlap=[args[2]] --maxmismatch=[args[3]]', depends=[error_ratesF_path], targets=[mergers_file_path], args=[output_dir, filtered_dir, minoverlap, maxmismatch, error_ratesR_path], vars=[script_path, threads], name='dereplicate_and_merge') return mergers_file_path
Dereplicates and merges paired reads Args: workflow (anadama2.workflow): an instance of the workflow class output_folder (string): path to output folder filtered_dir (string): path to directory with filtered files error_ratesF_path (string): path to rds file that contains error rates of forward reads error_ratesR_path (string): path to rds file that contains error rates of reverse reads threads (int): number of threads minoverlap (int): the min number of pairs for overlap for the merge step maxmismatch (int): the max number of mismatch for pairs to merge Requires: dada2, tools r packages Returns: string: path to rds file that contains merged and dereplicated reads
biobakery_workflows/tasks/dadatwo.py
merge_paired_ends
tkuntz-hsph/biobakery_workflows
47
python
def merge_paired_ends(workflow, output_dir, filtered_dir, error_ratesF_path, error_ratesR_path, threads, minoverlap, maxmismatch): ' Dereplicates and merges paired reads\n \n Args:\n workflow (anadama2.workflow): an instance of the workflow class\n output_folder (string): path to output folder\n filtered_dir (string): path to directory with filtered files\n error_ratesF_path (string): path to rds file that contains error rates of forward reads\n error_ratesR_path (string): path to rds file that contains error rates of reverse reads\n threads (int): number of threads\n minoverlap (int): the min number of pairs for overlap for the merge step\n maxmismatch (int): the max number of mismatch for pairs to merge\n Requires:\n dada2, tools r packages\n \n Returns:\n string: path to rds file that contains merged and dereplicated reads\n ' mergers_file_path = os.path.join(output_dir, 'mergers.rds') script_path = utilities.get_package_file('merge_paired_ends', 'Rscript') workflow.add_task('[vars[0]] --output_dir=[args[0]] --filtered_dir=[args[1]] --error_ratesF_path=[depends[0]] --error_ratesR_path=[args[4]] --mergers_file_path=[targets[0]] --threads=[vars[1]] --minoverlap=[args[2]] --maxmismatch=[args[3]]', depends=[error_ratesF_path], targets=[mergers_file_path], args=[output_dir, filtered_dir, minoverlap, maxmismatch, error_ratesR_path], vars=[script_path, threads], name='dereplicate_and_merge') return mergers_file_path
def merge_paired_ends(workflow, output_dir, filtered_dir, error_ratesF_path, error_ratesR_path, threads, minoverlap, maxmismatch): ' Dereplicates and merges paired reads\n \n Args:\n workflow (anadama2.workflow): an instance of the workflow class\n output_folder (string): path to output folder\n filtered_dir (string): path to directory with filtered files\n error_ratesF_path (string): path to rds file that contains error rates of forward reads\n error_ratesR_path (string): path to rds file that contains error rates of reverse reads\n threads (int): number of threads\n minoverlap (int): the min number of pairs for overlap for the merge step\n maxmismatch (int): the max number of mismatch for pairs to merge\n Requires:\n dada2, tools r packages\n \n Returns:\n string: path to rds file that contains merged and dereplicated reads\n ' mergers_file_path = os.path.join(output_dir, 'mergers.rds') script_path = utilities.get_package_file('merge_paired_ends', 'Rscript') workflow.add_task('[vars[0]] --output_dir=[args[0]] --filtered_dir=[args[1]] --error_ratesF_path=[depends[0]] --error_ratesR_path=[args[4]] --mergers_file_path=[targets[0]] --threads=[vars[1]] --minoverlap=[args[2]] --maxmismatch=[args[3]]', depends=[error_ratesF_path], targets=[mergers_file_path], args=[output_dir, filtered_dir, minoverlap, maxmismatch, error_ratesR_path], vars=[script_path, threads], name='dereplicate_and_merge') return mergers_file_path<|docstring|>Dereplicates and merges paired reads Args: workflow (anadama2.workflow): an instance of the workflow class output_folder (string): path to output folder filtered_dir (string): path to directory with filtered files error_ratesF_path (string): path to rds file that contains error rates of forward reads error_ratesR_path (string): path to rds file that contains error rates of reverse reads threads (int): number of threads minoverlap (int): the min number of pairs for overlap for the merge step maxmismatch (int): the max number of mismatch for pairs 
to merge Requires: dada2, tools r packages Returns: string: path to rds file that contains merged and dereplicated reads<|endoftext|>
ec96af9c78223d3e999dde1e0fc3a266ad77827ce8bc4b5e46b3e7ca4f682ed0
def const_seq_table(workflow, output_folder, filtered_dir, mergers_file_path, threads): ' Builds ASV table, removes chimeras, creates read counts at each step, and fasta file with all sequences\n \n Args:\n workflow (anadama2.workflow): an instance of the workflow class\n output_folder (string): path to output folder\n filtered_dir (string): path to directory with filtered files\n mergers_file_path (string): path to rds file that contains merged reads\n threads (int): number of threads\n \n Requires:\n dada2, tools, seqinr r packages\n\n Returns:\n string: path to rds file that contains ASV data\n string: path to read counts at each step tsv file\n string: path to fasta file with all sequences\n ' read_counts_steps_path = files.SixteenS.path('counts_each_step', output_folder) seqtab_file_path = os.path.join(output_folder, 'seqtab_final.rds') seqs_fasta_path = os.path.join(output_folder, 'sequences.fasta') readcounts_rds = 'Read_counts_filt.rds' asv_tsv = 'all_samples_SV_counts.tsv' script_path = utilities.get_package_file('const_seq_table', 'Rscript') version_script = utilities.get_package_file('dada2_version', 'Rscript') version_command = 'echo \'r\' `r -e \'packageVersion("dada2")\' | grep -C 1 dada2`' workflow.add_task('[vars[0]] --output_dir=[args[0]] --filtered_dir=[args[1]] --merged_file_path=[depends[0]] --read_counts_steps_path=[targets[0]] --readcounts_rds=[vars[2]] --asv_tsv=[vars[3]] --seqtab_file_path=[targets[1]] --seqs_fasta_path=[targets[2]] --threads=[vars[1]]', depends=[mergers_file_path, TrackedExecutable('R', version_command=(((("echo '" + version_script) + "' `") + version_script) + '`'))], targets=[read_counts_steps_path, seqtab_file_path, seqs_fasta_path], args=[output_folder, filtered_dir], vars=[script_path, threads, readcounts_rds, asv_tsv], name='construct_sequence_table') return (seqtab_file_path, read_counts_steps_path, seqs_fasta_path)
Builds ASV table, removes chimeras, creates read counts at each step, and fasta file with all sequences Args: workflow (anadama2.workflow): an instance of the workflow class output_folder (string): path to output folder filtered_dir (string): path to directory with filtered files mergers_file_path (string): path to rds file that contains merged reads threads (int): number of threads Requires: dada2, tools, seqinr r packages Returns: string: path to rds file that contains ASV data string: path to read counts at each step tsv file string: path to fasta file with all sequences
biobakery_workflows/tasks/dadatwo.py
const_seq_table
tkuntz-hsph/biobakery_workflows
47
python
def const_seq_table(workflow, output_folder, filtered_dir, mergers_file_path, threads): ' Builds ASV table, removes chimeras, creates read counts at each step, and fasta file with all sequences\n \n Args:\n workflow (anadama2.workflow): an instance of the workflow class\n output_folder (string): path to output folder\n filtered_dir (string): path to directory with filtered files\n mergers_file_path (string): path to rds file that contains merged reads\n threads (int): number of threads\n \n Requires:\n dada2, tools, seqinr r packages\n\n Returns:\n string: path to rds file that contains ASV data\n string: path to read counts at each step tsv file\n string: path to fasta file with all sequences\n ' read_counts_steps_path = files.SixteenS.path('counts_each_step', output_folder) seqtab_file_path = os.path.join(output_folder, 'seqtab_final.rds') seqs_fasta_path = os.path.join(output_folder, 'sequences.fasta') readcounts_rds = 'Read_counts_filt.rds' asv_tsv = 'all_samples_SV_counts.tsv' script_path = utilities.get_package_file('const_seq_table', 'Rscript') version_script = utilities.get_package_file('dada2_version', 'Rscript') version_command = 'echo \'r\' `r -e \'packageVersion("dada2")\' | grep -C 1 dada2`' workflow.add_task('[vars[0]] --output_dir=[args[0]] --filtered_dir=[args[1]] --merged_file_path=[depends[0]] --read_counts_steps_path=[targets[0]] --readcounts_rds=[vars[2]] --asv_tsv=[vars[3]] --seqtab_file_path=[targets[1]] --seqs_fasta_path=[targets[2]] --threads=[vars[1]]', depends=[mergers_file_path, TrackedExecutable('R', version_command=(((("echo '" + version_script) + "' `") + version_script) + '`'))], targets=[read_counts_steps_path, seqtab_file_path, seqs_fasta_path], args=[output_folder, filtered_dir], vars=[script_path, threads, readcounts_rds, asv_tsv], name='construct_sequence_table') return (seqtab_file_path, read_counts_steps_path, seqs_fasta_path)
def const_seq_table(workflow, output_folder, filtered_dir, mergers_file_path, threads): ' Builds ASV table, removes chimeras, creates read counts at each step, and fasta file with all sequences\n \n Args:\n workflow (anadama2.workflow): an instance of the workflow class\n output_folder (string): path to output folder\n filtered_dir (string): path to directory with filtered files\n mergers_file_path (string): path to rds file that contains merged reads\n threads (int): number of threads\n \n Requires:\n dada2, tools, seqinr r packages\n\n Returns:\n string: path to rds file that contains ASV data\n string: path to read counts at each step tsv file\n string: path to fasta file with all sequences\n ' read_counts_steps_path = files.SixteenS.path('counts_each_step', output_folder) seqtab_file_path = os.path.join(output_folder, 'seqtab_final.rds') seqs_fasta_path = os.path.join(output_folder, 'sequences.fasta') readcounts_rds = 'Read_counts_filt.rds' asv_tsv = 'all_samples_SV_counts.tsv' script_path = utilities.get_package_file('const_seq_table', 'Rscript') version_script = utilities.get_package_file('dada2_version', 'Rscript') version_command = 'echo \'r\' `r -e \'packageVersion("dada2")\' | grep -C 1 dada2`' workflow.add_task('[vars[0]] --output_dir=[args[0]] --filtered_dir=[args[1]] --merged_file_path=[depends[0]] --read_counts_steps_path=[targets[0]] --readcounts_rds=[vars[2]] --asv_tsv=[vars[3]] --seqtab_file_path=[targets[1]] --seqs_fasta_path=[targets[2]] --threads=[vars[1]]', depends=[mergers_file_path, TrackedExecutable('R', version_command=(((("echo '" + version_script) + "' `") + version_script) + '`'))], targets=[read_counts_steps_path, seqtab_file_path, seqs_fasta_path], args=[output_folder, filtered_dir], vars=[script_path, threads, readcounts_rds, asv_tsv], name='construct_sequence_table') return (seqtab_file_path, read_counts_steps_path, seqs_fasta_path)<|docstring|>Builds ASV table, removes chimeras, creates read counts at each step, and fasta file with 
all sequences Args: workflow (anadama2.workflow): an instance of the workflow class output_folder (string): path to output folder filtered_dir (string): path to directory with filtered files mergers_file_path (string): path to rds file that contains merged reads threads (int): number of threads Requires: dada2, tools, seqinr r packages Returns: string: path to rds file that contains ASV data string: path to read counts at each step tsv file string: path to fasta file with all sequences<|endoftext|>
b1ae30f0d9b1e45b2abece8a49fa8b7339ad50027599d84bdbb37ffcab108309
def assign_taxonomy(workflow, output_folder, seqtab_file_path, ref_path, threads, tryRC): ' Assigns taxonomy using green genes, silva, or rdp database, creates closed reference file\n \n Args:\n workflow (anadama2.workflow): an instance of the workflow class\n output_folder (string): path to output folder\n seqtab_file_path (string): path to rds file that contains ASV data\n ref_path (string): reference database name\n threads (int):\n \n Requires:\n dada2 r package\n \n Returns:\n string: path to closed reference file\n ' otu_closed_ref_path = files.SixteenS.path('otu_table_closed_reference', output_folder) if (ref_path == 'unite'): refdb_path = config.SixteenS().unite refdb_species_path = 'None' elif (ref_path == 'silva'): refdb_path = config.SixteenS().silva_dada2 refdb_species_path = config.SixteenS().silva_species_dada2 elif (ref_path == 'rdp'): refdb_path = config.SixteenS().rdp_dada2 refdb_species_path = config.SixteenS().rdp_species_dada2 else: refdb_path = config.SixteenS().greengenes_dada2 refdb_species_path = 'None' script_path = utilities.get_package_file('assign_taxonomy', 'Rscript') workflow.add_task('[vars[2]] --output_dir=[args[0]] --refdb_path=[vars[0]] --refdb_species_path=[vars[1]] --seqtab_file_path=[depends[0]] --otu_closed_ref_path=[targets[0]] --threads=[vars[3]] --tryRC=[vars[4]] && check_for_reverse_reads.py --input [targets[0]]', depends=[seqtab_file_path], targets=[otu_closed_ref_path], args=[output_folder], vars=[refdb_path, refdb_species_path, script_path, threads, tryRC], name='assign_taxonomy') return otu_closed_ref_path
Assigns taxonomy using green genes, silva, or rdp database, creates closed reference file Args: workflow (anadama2.workflow): an instance of the workflow class output_folder (string): path to output folder seqtab_file_path (string): path to rds file that contains ASV data ref_path (string): reference database name threads (int): Requires: dada2 r package Returns: string: path to closed reference file
biobakery_workflows/tasks/dadatwo.py
assign_taxonomy
tkuntz-hsph/biobakery_workflows
47
python
def assign_taxonomy(workflow, output_folder, seqtab_file_path, ref_path, threads, tryRC): ' Assigns taxonomy using green genes, silva, or rdp database, creates closed reference file\n \n Args:\n workflow (anadama2.workflow): an instance of the workflow class\n output_folder (string): path to output folder\n seqtab_file_path (string): path to rds file that contains ASV data\n ref_path (string): reference database name\n threads (int):\n \n Requires:\n dada2 r package\n \n Returns:\n string: path to closed reference file\n ' otu_closed_ref_path = files.SixteenS.path('otu_table_closed_reference', output_folder) if (ref_path == 'unite'): refdb_path = config.SixteenS().unite refdb_species_path = 'None' elif (ref_path == 'silva'): refdb_path = config.SixteenS().silva_dada2 refdb_species_path = config.SixteenS().silva_species_dada2 elif (ref_path == 'rdp'): refdb_path = config.SixteenS().rdp_dada2 refdb_species_path = config.SixteenS().rdp_species_dada2 else: refdb_path = config.SixteenS().greengenes_dada2 refdb_species_path = 'None' script_path = utilities.get_package_file('assign_taxonomy', 'Rscript') workflow.add_task('[vars[2]] --output_dir=[args[0]] --refdb_path=[vars[0]] --refdb_species_path=[vars[1]] --seqtab_file_path=[depends[0]] --otu_closed_ref_path=[targets[0]] --threads=[vars[3]] --tryRC=[vars[4]] && check_for_reverse_reads.py --input [targets[0]]', depends=[seqtab_file_path], targets=[otu_closed_ref_path], args=[output_folder], vars=[refdb_path, refdb_species_path, script_path, threads, tryRC], name='assign_taxonomy') return otu_closed_ref_path
def assign_taxonomy(workflow, output_folder, seqtab_file_path, ref_path, threads, tryRC): ' Assigns taxonomy using green genes, silva, or rdp database, creates closed reference file\n \n Args:\n workflow (anadama2.workflow): an instance of the workflow class\n output_folder (string): path to output folder\n seqtab_file_path (string): path to rds file that contains ASV data\n ref_path (string): reference database name\n threads (int):\n \n Requires:\n dada2 r package\n \n Returns:\n string: path to closed reference file\n ' otu_closed_ref_path = files.SixteenS.path('otu_table_closed_reference', output_folder) if (ref_path == 'unite'): refdb_path = config.SixteenS().unite refdb_species_path = 'None' elif (ref_path == 'silva'): refdb_path = config.SixteenS().silva_dada2 refdb_species_path = config.SixteenS().silva_species_dada2 elif (ref_path == 'rdp'): refdb_path = config.SixteenS().rdp_dada2 refdb_species_path = config.SixteenS().rdp_species_dada2 else: refdb_path = config.SixteenS().greengenes_dada2 refdb_species_path = 'None' script_path = utilities.get_package_file('assign_taxonomy', 'Rscript') workflow.add_task('[vars[2]] --output_dir=[args[0]] --refdb_path=[vars[0]] --refdb_species_path=[vars[1]] --seqtab_file_path=[depends[0]] --otu_closed_ref_path=[targets[0]] --threads=[vars[3]] --tryRC=[vars[4]] && check_for_reverse_reads.py --input [targets[0]]', depends=[seqtab_file_path], targets=[otu_closed_ref_path], args=[output_folder], vars=[refdb_path, refdb_species_path, script_path, threads, tryRC], name='assign_taxonomy') return otu_closed_ref_path<|docstring|>Assigns taxonomy using green genes, silva, or rdp database, creates closed reference file Args: workflow (anadama2.workflow): an instance of the workflow class output_folder (string): path to output folder seqtab_file_path (string): path to rds file that contains ASV data ref_path (string): reference database name threads (int): Requires: dada2 r package Returns: string: path to closed reference 
file<|endoftext|>
22c3c2c21fb709488e3ee25c254af2d5b1ecb1d79c9fc6a731fc262693631d30
def remove_tmp_files(workflow, output_folder, otu_closed_ref_path, msa_fasta_path, fasttree_file_path): ' Removes temporary rds files\n \n Args:\n workflow (anadama2.workflow): an instance of the workflow class\n output_folder (string): path to output folder.\n otu_closed_ref_path (string): path to closed reference file\n msa_fasta_path (string): path to msa file\n fasttree_file_path (string): path to phylogenetic tree file\n\n Requires:\n None\n \n Returns:\n None\n ' rm_out_file = os.path.join(output_folder, 'tmp_rm.txt') workflow.add_task('rm [args[0]]/*.rds &>[targets[0]] ', depends=[otu_closed_ref_path, msa_fasta_path, fasttree_file_path], args=[output_folder], targets=[rm_out_file], name='rm_tmp_files')
Removes temporary rds files Args: workflow (anadama2.workflow): an instance of the workflow class output_folder (string): path to output folder. otu_closed_ref_path (string): path to closed reference file msa_fasta_path (string): path to msa file fasttree_file_path (string): path to phylogenetic tree file Requires: None Returns: None
biobakery_workflows/tasks/dadatwo.py
remove_tmp_files
tkuntz-hsph/biobakery_workflows
47
python
def remove_tmp_files(workflow, output_folder, otu_closed_ref_path, msa_fasta_path, fasttree_file_path): ' Removes temporary rds files\n \n Args:\n workflow (anadama2.workflow): an instance of the workflow class\n output_folder (string): path to output folder.\n otu_closed_ref_path (string): path to closed reference file\n msa_fasta_path (string): path to msa file\n fasttree_file_path (string): path to phylogenetic tree file\n\n Requires:\n None\n \n Returns:\n None\n ' rm_out_file = os.path.join(output_folder, 'tmp_rm.txt') workflow.add_task('rm [args[0]]/*.rds &>[targets[0]] ', depends=[otu_closed_ref_path, msa_fasta_path, fasttree_file_path], args=[output_folder], targets=[rm_out_file], name='rm_tmp_files')
def remove_tmp_files(workflow, output_folder, otu_closed_ref_path, msa_fasta_path, fasttree_file_path): ' Removes temporary rds files\n \n Args:\n workflow (anadama2.workflow): an instance of the workflow class\n output_folder (string): path to output folder.\n otu_closed_ref_path (string): path to closed reference file\n msa_fasta_path (string): path to msa file\n fasttree_file_path (string): path to phylogenetic tree file\n\n Requires:\n None\n \n Returns:\n None\n ' rm_out_file = os.path.join(output_folder, 'tmp_rm.txt') workflow.add_task('rm [args[0]]/*.rds &>[targets[0]] ', depends=[otu_closed_ref_path, msa_fasta_path, fasttree_file_path], args=[output_folder], targets=[rm_out_file], name='rm_tmp_files')<|docstring|>Removes temporary rds files Args: workflow (anadama2.workflow): an instance of the workflow class output_folder (string): path to output folder. otu_closed_ref_path (string): path to closed reference file msa_fasta_path (string): path to msa file fasttree_file_path (string): path to phylogenetic tree file Requires: None Returns: None<|endoftext|>
8f5bbf0be0384edfdfb96757032078b41a996c66d5233493da4790a8dda7e76b
def invalidate_all_tokens(self, user=None): '\n To invalidate all tokens the user uuid is changed\n ' if (user is None): user = self._user user.uuid = getUUID() user.save() log.warning('User uuid changed to: {}', user.uuid) return True
To invalidate all tokens the user uuid is changed
restapi/services/authentication/mongo.py
invalidate_all_tokens
fossabot/http-api
0
python
def invalidate_all_tokens(self, user=None): '\n \n ' if (user is None): user = self._user user.uuid = getUUID() user.save() log.warning('User uuid changed to: {}', user.uuid) return True
def invalidate_all_tokens(self, user=None): '\n \n ' if (user is None): user = self._user user.uuid = getUUID() user.save() log.warning('User uuid changed to: {}', user.uuid) return True<|docstring|>To invalidate all tokens the user uuid is changed<|endoftext|>
46fba7fdac2ac30b098a2797cf886eff3b88106dee0869e84e06b0d090804123
def build_url_string(uri_scheme=None, uri_authority=None, uri_path=None, uri_query=None, uri_fragment=None, query_string_parameters=None): '\n https://tools.ietf.org/html/rfc3986#section-3\n URI Components: scheme, authority, path, query, fragment\n uri_query and query_parameter_string are mutually-exclusive.\n :param uri_scheme: [optional] (Default: "https")\n :type uri_scheme: str\n :param uri_authority: [required]\n :type uri_authority: str\n :param uri_path: [required]\n :type uri_path: str\n :param uri_query: [optional] (Default: None)\n :type uri_query: str\n :param uri_fragment: [optional] (Default: None)\n :type uri_fragment: str\n :param query_string_parameters:\n :type query_string_parameters: dict\n :return:\n ' if (uri_scheme is None): pass if (uri_authority is None): raise ValueError('uri_authority? Nein danke!') if (uri_path is None): raise ValueError('uri_path? Nein danke!') if (uri_query is None): pass if (uri_fragment is None): pass if (query_string_parameters is None): pass if (uri_query and query_string_parameters): print('Are you trying to get a rise out of me, Agent Kujan') print('PS. What shall we do if you pass a query /and/ query_string_parameters?') if (query_string_parameters is not None): urlencode(query_string_parameters) uri_scheme = (DEFAULT_URI_SCHEME if (uri_scheme is None) else uri_scheme) url_string = '{uri_scheme}://{uri_authority}{uri_path}'.format(uri_scheme=uri_scheme, uri_authority=uri_authority, uri_path=uri_path) if uri_query: if (not uri_query.startswith('?')): uri_query = ('?' + uri_query) url_string = (url_string + uri_query) if uri_fragment: if (not uri_fragment.startswith('#')): uri_fragment = ('#' + uri_fragment) url_string = (url_string + uri_fragment) return url_string
https://tools.ietf.org/html/rfc3986#section-3 URI Components: scheme, authority, path, query, fragment uri_query and query_parameter_string are mutually-exclusive. :param uri_scheme: [optional] (Default: "https") :type uri_scheme: str :param uri_authority: [required] :type uri_authority: str :param uri_path: [required] :type uri_path: str :param uri_query: [optional] (Default: None) :type uri_query: str :param uri_fragment: [optional] (Default: None) :type uri_fragment: str :param query_string_parameters: :type query_string_parameters: dict :return:
google_places_api_cacher/ats_utilities/ats_url.py
build_url_string
james-w-balcomb/google-places-api-cacher
1
python
def build_url_string(uri_scheme=None, uri_authority=None, uri_path=None, uri_query=None, uri_fragment=None, query_string_parameters=None): '\n https://tools.ietf.org/html/rfc3986#section-3\n URI Components: scheme, authority, path, query, fragment\n uri_query and query_parameter_string are mutually-exclusive.\n :param uri_scheme: [optional] (Default: "https")\n :type uri_scheme: str\n :param uri_authority: [required]\n :type uri_authority: str\n :param uri_path: [required]\n :type uri_path: str\n :param uri_query: [optional] (Default: None)\n :type uri_query: str\n :param uri_fragment: [optional] (Default: None)\n :type uri_fragment: str\n :param query_string_parameters:\n :type query_string_parameters: dict\n :return:\n ' if (uri_scheme is None): pass if (uri_authority is None): raise ValueError('uri_authority? Nein danke!') if (uri_path is None): raise ValueError('uri_path? Nein danke!') if (uri_query is None): pass if (uri_fragment is None): pass if (query_string_parameters is None): pass if (uri_query and query_string_parameters): print('Are you trying to get a rise out of me, Agent Kujan') print('PS. What shall we do if you pass a query /and/ query_string_parameters?') if (query_string_parameters is not None): urlencode(query_string_parameters) uri_scheme = (DEFAULT_URI_SCHEME if (uri_scheme is None) else uri_scheme) url_string = '{uri_scheme}://{uri_authority}{uri_path}'.format(uri_scheme=uri_scheme, uri_authority=uri_authority, uri_path=uri_path) if uri_query: if (not uri_query.startswith('?')): uri_query = ('?' + uri_query) url_string = (url_string + uri_query) if uri_fragment: if (not uri_fragment.startswith('#')): uri_fragment = ('#' + uri_fragment) url_string = (url_string + uri_fragment) return url_string
def build_url_string(uri_scheme=None, uri_authority=None, uri_path=None, uri_query=None, uri_fragment=None, query_string_parameters=None): '\n https://tools.ietf.org/html/rfc3986#section-3\n URI Components: scheme, authority, path, query, fragment\n uri_query and query_parameter_string are mutually-exclusive.\n :param uri_scheme: [optional] (Default: "https")\n :type uri_scheme: str\n :param uri_authority: [required]\n :type uri_authority: str\n :param uri_path: [required]\n :type uri_path: str\n :param uri_query: [optional] (Default: None)\n :type uri_query: str\n :param uri_fragment: [optional] (Default: None)\n :type uri_fragment: str\n :param query_string_parameters:\n :type query_string_parameters: dict\n :return:\n ' if (uri_scheme is None): pass if (uri_authority is None): raise ValueError('uri_authority? Nein danke!') if (uri_path is None): raise ValueError('uri_path? Nein danke!') if (uri_query is None): pass if (uri_fragment is None): pass if (query_string_parameters is None): pass if (uri_query and query_string_parameters): print('Are you trying to get a rise out of me, Agent Kujan') print('PS. What shall we do if you pass a query /and/ query_string_parameters?') if (query_string_parameters is not None): urlencode(query_string_parameters) uri_scheme = (DEFAULT_URI_SCHEME if (uri_scheme is None) else uri_scheme) url_string = '{uri_scheme}://{uri_authority}{uri_path}'.format(uri_scheme=uri_scheme, uri_authority=uri_authority, uri_path=uri_path) if uri_query: if (not uri_query.startswith('?')): uri_query = ('?' + uri_query) url_string = (url_string + uri_query) if uri_fragment: if (not uri_fragment.startswith('#')): uri_fragment = ('#' + uri_fragment) url_string = (url_string + uri_fragment) return url_string<|docstring|>https://tools.ietf.org/html/rfc3986#section-3 URI Components: scheme, authority, path, query, fragment uri_query and query_parameter_string are mutually-exclusive. 
:param uri_scheme: [optional] (Default: "https") :type uri_scheme: str :param uri_authority: [required] :type uri_authority: str :param uri_path: [required] :type uri_path: str :param uri_query: [optional] (Default: None) :type uri_query: str :param uri_fragment: [optional] (Default: None) :type uri_fragment: str :param query_string_parameters: :type query_string_parameters: dict :return:<|endoftext|>
ee571aac756235e61e86a338ea8d0d1296cb5cda85ea0dc03d9374862871591e
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback) -> None: 'Set up device tracker for Freebox component.' router: FreeboxRouter = hass.data[DOMAIN][entry.unique_id] tracked: set[str] = set() @callback def update_router() -> None: 'Update the values of the router.' add_entities(router, async_add_entities, tracked) entry.async_on_unload(async_dispatcher_connect(hass, router.signal_device_new, update_router)) update_router()
Set up device tracker for Freebox component.
homeassistant/components/freebox/device_tracker.py
async_setup_entry
GrandMoff100/homeassistant-core
30,023
python
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback) -> None: router: FreeboxRouter = hass.data[DOMAIN][entry.unique_id] tracked: set[str] = set() @callback def update_router() -> None: 'Update the values of the router.' add_entities(router, async_add_entities, tracked) entry.async_on_unload(async_dispatcher_connect(hass, router.signal_device_new, update_router)) update_router()
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback) -> None: router: FreeboxRouter = hass.data[DOMAIN][entry.unique_id] tracked: set[str] = set() @callback def update_router() -> None: 'Update the values of the router.' add_entities(router, async_add_entities, tracked) entry.async_on_unload(async_dispatcher_connect(hass, router.signal_device_new, update_router)) update_router()<|docstring|>Set up device tracker for Freebox component.<|endoftext|>
28750987f52c6816a35011df36550cd98a94a3fe78a5a011c2fc67800c243de1
@callback def add_entities(router: FreeboxRouter, async_add_entities: AddEntitiesCallback, tracked: set[str]) -> None: 'Add new tracker entities from the router.' new_tracked = [] for (mac, device) in router.devices.items(): if (mac in tracked): continue new_tracked.append(FreeboxDevice(router, device)) tracked.add(mac) if new_tracked: async_add_entities(new_tracked, True)
Add new tracker entities from the router.
homeassistant/components/freebox/device_tracker.py
add_entities
GrandMoff100/homeassistant-core
30,023
python
@callback def add_entities(router: FreeboxRouter, async_add_entities: AddEntitiesCallback, tracked: set[str]) -> None: new_tracked = [] for (mac, device) in router.devices.items(): if (mac in tracked): continue new_tracked.append(FreeboxDevice(router, device)) tracked.add(mac) if new_tracked: async_add_entities(new_tracked, True)
@callback def add_entities(router: FreeboxRouter, async_add_entities: AddEntitiesCallback, tracked: set[str]) -> None: new_tracked = [] for (mac, device) in router.devices.items(): if (mac in tracked): continue new_tracked.append(FreeboxDevice(router, device)) tracked.add(mac) if new_tracked: async_add_entities(new_tracked, True)<|docstring|>Add new tracker entities from the router.<|endoftext|>
0c9a430cd83103f94462838570bcb58a553d92a339f942bf3c561be3e59e979d
def icon_for_freebox_device(device) -> str:
    """Return a device icon from its type."""
    host_type = device['host_type']
    # Fall back to a generic network icon for unknown host types.
    return DEVICE_ICONS.get(host_type, 'mdi:help-network')
Return a device icon from its type.
homeassistant/components/freebox/device_tracker.py
icon_for_freebox_device
GrandMoff100/homeassistant-core
30,023
python
def icon_for_freebox_device(device) -> str: return DEVICE_ICONS.get(device['host_type'], 'mdi:help-network')
def icon_for_freebox_device(device) -> str: return DEVICE_ICONS.get(device['host_type'], 'mdi:help-network')<|docstring|>Return a device icon from its type.<|endoftext|>
2671b2c751969dd1fcbeace75482830fd5491f0b22a241f2290d7342f40ae2a7
@callback
def update_router() -> None:
    """Update the values of the router."""
    # Closure over router / async_add_entities / tracked from the setup scope.
    add_entities(router, async_add_entities, tracked)
Update the values of the router.
homeassistant/components/freebox/device_tracker.py
update_router
GrandMoff100/homeassistant-core
30,023
python
@callback def update_router() -> None: add_entities(router, async_add_entities, tracked)
@callback def update_router() -> None: add_entities(router, async_add_entities, tracked)<|docstring|>Update the values of the router.<|endoftext|>
23930a3b90294df4393762645c87bcd128e1626fa31706710a9d2359c6ffc7a1
def __init__(self, router: FreeboxRouter, device: dict[(str, Any)]) -> None:
    """Initialize a Freebox device."""
    self._router = router
    # An all-whitespace primary name falls back to the default device name.
    primary_name = device['primary_name'].strip()
    self._name = primary_name if primary_name else DEFAULT_DEVICE_NAME
    self._mac = device['l2ident']['id']
    self._manufacturer = device['vendor_name']
    self._icon = icon_for_freebox_device(device)
    self._active = False
    self._attrs: dict[(str, Any)] = {}
Initialize a Freebox device.
homeassistant/components/freebox/device_tracker.py
__init__
GrandMoff100/homeassistant-core
30,023
python
def __init__(self, router: FreeboxRouter, device: dict[(str, Any)]) -> None: self._router = router self._name = (device['primary_name'].strip() or DEFAULT_DEVICE_NAME) self._mac = device['l2ident']['id'] self._manufacturer = device['vendor_name'] self._icon = icon_for_freebox_device(device) self._active = False self._attrs: dict[(str, Any)] = {}
def __init__(self, router: FreeboxRouter, device: dict[(str, Any)]) -> None: self._router = router self._name = (device['primary_name'].strip() or DEFAULT_DEVICE_NAME) self._mac = device['l2ident']['id'] self._manufacturer = device['vendor_name'] self._icon = icon_for_freebox_device(device) self._active = False self._attrs: dict[(str, Any)] = {}<|docstring|>Initialize a Freebox device.<|endoftext|>
8f9f1838e83633be6fd5001ff6301560ce93c5d8caf65d0c3fd05da935d73b88
@callback
def async_update_state(self) -> None:
    """Update the Freebox device."""
    device = self._router.devices[self._mac]
    self._active = device['active']
    attrs = device.get('attrs')
    if attrs is not None:
        # The router already provides pre-built attributes.
        self._attrs = attrs
    else:
        # Synthesize attributes from the raw epoch timestamps.
        self._attrs = {
            'last_time_reachable': datetime.fromtimestamp(device['last_time_reachable']),
            'last_time_activity': datetime.fromtimestamp(device['last_activity']),
        }
Update the Freebox device.
homeassistant/components/freebox/device_tracker.py
async_update_state
GrandMoff100/homeassistant-core
30,023
python
@callback def async_update_state(self) -> None: device = self._router.devices[self._mac] self._active = device['active'] if (device.get('attrs') is None): self._attrs = {'last_time_reachable': datetime.fromtimestamp(device['last_time_reachable']), 'last_time_activity': datetime.fromtimestamp(device['last_activity'])} else: self._attrs = device['attrs']
@callback def async_update_state(self) -> None: device = self._router.devices[self._mac] self._active = device['active'] if (device.get('attrs') is None): self._attrs = {'last_time_reachable': datetime.fromtimestamp(device['last_time_reachable']), 'last_time_activity': datetime.fromtimestamp(device['last_activity'])} else: self._attrs = device['attrs']<|docstring|>Update the Freebox device.<|endoftext|>
9fe0c6e2d94931e1186f599d3a7f0c3f4887a435c556fe7fd759afa2708b8cb2
@property
def mac_address(self) -> str:
    """Return a unique ID."""
    return self._mac
Return a unique ID.
homeassistant/components/freebox/device_tracker.py
mac_address
GrandMoff100/homeassistant-core
30,023
python
@property def mac_address(self) -> str: return self._mac
@property def mac_address(self) -> str: return self._mac<|docstring|>Return a unique ID.<|endoftext|>
6461a800075af75cf8e501085571d402bafd8cc18e72608adbf1e1cee60aa916
@property
def name(self) -> str:
    """Return the name."""
    return self._name
Return the name.
homeassistant/components/freebox/device_tracker.py
name
GrandMoff100/homeassistant-core
30,023
python
@property def name(self) -> str: return self._name
@property def name(self) -> str: return self._name<|docstring|>Return the name.<|endoftext|>
48241ed7ec4118214370c7fcb068b9f9677c729f15dd9d8b35119f2ebd99abc4
@property
def is_connected(self):
    """Return true if the device is connected to the network."""
    return self._active
Return true if the device is connected to the network.
homeassistant/components/freebox/device_tracker.py
is_connected
GrandMoff100/homeassistant-core
30,023
python
@property def is_connected(self): return self._active
@property def is_connected(self): return self._active<|docstring|>Return true if the device is connected to the network.<|endoftext|>
46f58a1645c6343dbb1258cb8ee12371687f459946be74f83fe6e94e57f78b1f
@property
def source_type(self) -> str:
    """Return the source type."""
    return SOURCE_TYPE_ROUTER
Return the source type.
homeassistant/components/freebox/device_tracker.py
source_type
GrandMoff100/homeassistant-core
30,023
python
@property def source_type(self) -> str: return SOURCE_TYPE_ROUTER
@property def source_type(self) -> str: return SOURCE_TYPE_ROUTER<|docstring|>Return the source type.<|endoftext|>
05dab95798aa59b72c55641def3865cbb29c704573ab7fbebf793e753b0ff8bb
@property
def icon(self) -> str:
    """Return the icon."""
    return self._icon
Return the icon.
homeassistant/components/freebox/device_tracker.py
icon
GrandMoff100/homeassistant-core
30,023
python
@property def icon(self) -> str: return self._icon
@property def icon(self) -> str: return self._icon<|docstring|>Return the icon.<|endoftext|>
b7adc6c3a39d1f5b956797a5d2ff5b51927419d8e51d04930116cc3dd0897edb
@property
def extra_state_attributes(self) -> dict[(str, Any)]:
    """Return the attributes."""
    return self._attrs
Return the attributes.
homeassistant/components/freebox/device_tracker.py
extra_state_attributes
GrandMoff100/homeassistant-core
30,023
python
@property def extra_state_attributes(self) -> dict[(str, Any)]: return self._attrs
@property def extra_state_attributes(self) -> dict[(str, Any)]: return self._attrs<|docstring|>Return the attributes.<|endoftext|>
9ecb1e6c2d5a7ad8bb2880d8b6acd725a49d529d4ea0beecec22b00711318188
@property
def should_poll(self) -> bool:
    """No polling needed."""
    return False
No polling needed.
homeassistant/components/freebox/device_tracker.py
should_poll
GrandMoff100/homeassistant-core
30,023
python
@property def should_poll(self) -> bool: return False
@property def should_poll(self) -> bool: return False<|docstring|>No polling needed.<|endoftext|>
1c923b940e9ccfb3d7d0692d79a6561666537efa4f4562149620a1081be74581
@callback
def async_on_demand_update(self):
    """Update state."""
    # Refresh internal state first, then push it to Home Assistant.
    self.async_update_state()
    self.async_write_ha_state()
Update state.
homeassistant/components/freebox/device_tracker.py
async_on_demand_update
GrandMoff100/homeassistant-core
30,023
python
@callback def async_on_demand_update(self): self.async_update_state() self.async_write_ha_state()
@callback def async_on_demand_update(self): self.async_update_state() self.async_write_ha_state()<|docstring|>Update state.<|endoftext|>
4e1c46e5a85fe447717eabffffe33e3d0e5aaccf231ba50a1c0ccd641768aed2
async def async_added_to_hass(self):
    """Register state update callback."""
    self.async_update_state()
    # Auto-disconnect the dispatcher listener when the entity is removed.
    self.async_on_remove(
        async_dispatcher_connect(
            self.hass, self._router.signal_device_update, self.async_on_demand_update
        )
    )
Register state update callback.
homeassistant/components/freebox/device_tracker.py
async_added_to_hass
GrandMoff100/homeassistant-core
30,023
python
async def async_added_to_hass(self): self.async_update_state() self.async_on_remove(async_dispatcher_connect(self.hass, self._router.signal_device_update, self.async_on_demand_update))
async def async_added_to_hass(self): self.async_update_state() self.async_on_remove(async_dispatcher_connect(self.hass, self._router.signal_device_update, self.async_on_demand_update))<|docstring|>Register state update callback.<|endoftext|>
bf86529f1020548201b338ade45db081ca7b35313f1373503086e7046e920bc5
def __init__(self, name=None, level='info'):
    """HEV logging class.

    Centralizing all logging capabilities.
    """
    log_format = '%(asctime)s [%(levelname)s\t] [%(name)s] %(message)s'
    self.name = name
    self.level = self._level(level)
    self.logging = logging
    # Configure the root handler, then keep a named child logger.
    self.logging.basicConfig(format=log_format, level=self.level)
    self.logging = self.logging.getLogger(name)
    self.logging.setLevel(self.level)
HEV logging class Centralizing all logging capabilities
core/helpers/hevlog.py
__init__
N7SALab/HEV
0
python
def __init__(self, name=None, level='info'): ' HEV logging class\n\n Centralizing all logging capabilities\n ' FORMAT = '%(asctime)s [%(levelname)s\t] [%(name)s] %(message)s' self.name = name self.level = self._level(level) self.logging = logging self.logging.basicConfig(format=FORMAT, level=self.level) self.logging = self.logging.getLogger(name) self.logging.setLevel(self.level)
def __init__(self, name=None, level='info'): ' HEV logging class\n\n Centralizing all logging capabilities\n ' FORMAT = '%(asctime)s [%(levelname)s\t] [%(name)s] %(message)s' self.name = name self.level = self._level(level) self.logging = logging self.logging.basicConfig(format=FORMAT, level=self.level) self.logging = self.logging.getLogger(name) self.logging.setLevel(self.level)<|docstring|>HEV logging class Centralizing all logging capabilities<|endoftext|>
fe4cdd399bc3558bce5ba52c58c21093aea62398a5559947bd0285ccd3c5913e
def _level(self, level): ' a way to set the level\n ' if (level is None): return INFO elif ((level.lower() == 'notset') or (level.lower() == 'off')): return NOTSET elif ((level.lower() == 'debug') or (level.lower() == 'd')): return DEBUG elif ((level.lower() == 'info') or (level.lower() == 'i')): return INFO elif ((level.lower() == 'warning') or (level.lower() == 'w')): return WARN elif ((level.lower() == 'error') or (level.lower() == 'e')): return ERROR elif ((level.lower() == 'fatal') or (level.lower() == 'f')): return FATAL elif ((level.lower() == 'critical') or (level.lower() == 'c')): return CRITICAL else: return INFO
a way to set the level
core/helpers/hevlog.py
_level
N7SALab/HEV
0
python
def _level(self, level): ' \n ' if (level is None): return INFO elif ((level.lower() == 'notset') or (level.lower() == 'off')): return NOTSET elif ((level.lower() == 'debug') or (level.lower() == 'd')): return DEBUG elif ((level.lower() == 'info') or (level.lower() == 'i')): return INFO elif ((level.lower() == 'warning') or (level.lower() == 'w')): return WARN elif ((level.lower() == 'error') or (level.lower() == 'e')): return ERROR elif ((level.lower() == 'fatal') or (level.lower() == 'f')): return FATAL elif ((level.lower() == 'critical') or (level.lower() == 'c')): return CRITICAL else: return INFO
def _level(self, level): ' \n ' if (level is None): return INFO elif ((level.lower() == 'notset') or (level.lower() == 'off')): return NOTSET elif ((level.lower() == 'debug') or (level.lower() == 'd')): return DEBUG elif ((level.lower() == 'info') or (level.lower() == 'i')): return INFO elif ((level.lower() == 'warning') or (level.lower() == 'w')): return WARN elif ((level.lower() == 'error') or (level.lower() == 'e')): return ERROR elif ((level.lower() == 'fatal') or (level.lower() == 'f')): return FATAL elif ((level.lower() == 'critical') or (level.lower() == 'c')): return CRITICAL else: return INFO<|docstring|>a way to set the level<|endoftext|>
ad8e8d84d6b7be24405fae623b0f1e16310d625fd4dfc55497470b37988429da
def rotate(self, nums: List[int], k: int) -> None:
    """Do not return anything, modify nums in-place instead.

    Rotates nums right by k using the triple-reversal trick.
    """
    n = len(nums)
    k %= n

    def _flip(lo: int, hi: int) -> None:
        # Reverse nums[lo:hi + 1] in place.
        while lo < hi:
            nums[lo], nums[hi] = nums[hi], nums[lo]
            lo += 1
            hi -= 1

    _flip(0, n - 1)
    _flip(0, k - 1)
    _flip(k, n - 1)
Do not return anything, modify nums in-place instead.
189_rotate.py
rotate
xinming365/LeetCode
0
python
def rotate(self, nums: List[int], k: int) -> None: '\n \n ' k = (k % len(nums)) def reverse(nums, left, right): while (left <= right): (nums[left], nums[right]) = (nums[right], nums[left]) left += 1 right -= 1 reverse(nums, 0, (len(nums) - 1)) reverse(nums, 0, (k - 1)) reverse(nums, k, (len(nums) - 1))
def rotate(self, nums: List[int], k: int) -> None: '\n \n ' k = (k % len(nums)) def reverse(nums, left, right): while (left <= right): (nums[left], nums[right]) = (nums[right], nums[left]) left += 1 right -= 1 reverse(nums, 0, (len(nums) - 1)) reverse(nums, 0, (k - 1)) reverse(nums, k, (len(nums) - 1))<|docstring|>Do not return anything, modify nums in-place instead.<|endoftext|>
0bb1b573e720d0886368ec3b60a0dced52160d2b1a4aa9fe7b59a4c9e22aea09
def rotate(self, nums: List[int], k: int) -> None:
    """Do not return anything, modify nums in-place instead.

    Rotates nums right by k steps via slice reassignment.
    """
    n = len(nums)
    if n == 0:
        # Nothing to rotate; also avoids k % 0 below.
        return
    # BUG FIX: without this modulo, k >= 2 * n makes n - k <= -n, the
    # negative slice bounds clamp, and the list is left unrotated.
    k %= n
    nums[:] = nums[(n - k):] + nums[:(n - k)]
Do not return anything, modify nums in-place instead.
189_rotate.py
rotate
xinming365/LeetCode
0
python
def rotate(self, nums: List[int], k: int) -> None: '\n \n ' n = len(nums) nums[:] = (nums[(n - k):] + nums[:(n - k)])
def rotate(self, nums: List[int], k: int) -> None: '\n \n ' n = len(nums) nums[:] = (nums[(n - k):] + nums[:(n - k)])<|docstring|>Do not return anything, modify nums in-place instead.<|endoftext|>
f867caa28e3d6a428493db1d9f44b246fda433600f4315352a5b469e3b18e644
@decorators.idempotent_id('319b765e-7f3d-4b3d-8b37-3ca3876ee768')
def test_basic_metadata_definition_namespaces(self):
    """Test operations of image metadata definition namespaces"""
    # Pick any existing resource type to associate with the namespace.
    resource_types = self.resource_types_client.list_resource_types()
    resource_name = resource_types['resource_types'][0]['name']
    associations = [{'name': resource_name}]
    namespace_name = data_utils.rand_name('namespace')
    # Create a protected, public namespace.
    self.namespaces_client.create_namespace(
        namespace=namespace_name, visibility='public', description='Tempest',
        display_name=namespace_name, resource_type_associations=associations,
        protected=True)
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    self._cleanup_namespace, namespace_name)
    # The new namespace must appear in the listing.
    listed = self.namespaces_client.list_namespaces()['namespaces']
    names = [namespace['namespace'] for namespace in listed]
    self.assertIn(namespace_name, names)
    shown = self.namespaces_client.show_namespace(namespace_name)
    self.assertEqual(namespace_name, shown['namespace'])
    self.assertEqual('public', shown['visibility'])
    # Deleting a protected namespace is forbidden.
    self.assertRaises(lib_exc.Forbidden,
                      self.namespaces_client.delete_namespace, namespace_name)
    # Unprotect and make private, then deletion succeeds.
    updated = self.namespaces_client.update_namespace(
        namespace=namespace_name, description='Tempest', visibility='private',
        display_name=namespace_name, protected=False)
    self.assertEqual('private', updated['visibility'])
    self.assertEqual(False, updated['protected'])
    self.namespaces_client.delete_namespace(namespace_name)
Test operations of image metadata definition namespaces
tempest/api/image/v2/admin/test_images_metadefs_namespaces.py
test_basic_metadata_definition_namespaces
jonringer/tempest
254
python
@decorators.idempotent_id('319b765e-7f3d-4b3d-8b37-3ca3876ee768') def test_basic_metadata_definition_namespaces(self): body = self.resource_types_client.list_resource_types() resource_name = body['resource_types'][0]['name'] name = [{'name': resource_name}] namespace_name = data_utils.rand_name('namespace') body = self.namespaces_client.create_namespace(namespace=namespace_name, visibility='public', description='Tempest', display_name=namespace_name, resource_type_associations=name, protected=True) self.addCleanup(test_utils.call_and_ignore_notfound_exc, self._cleanup_namespace, namespace_name) bodys = self.namespaces_client.list_namespaces()['namespaces'] body = [namespace['namespace'] for namespace in bodys] self.assertIn(namespace_name, body) body = self.namespaces_client.show_namespace(namespace_name) self.assertEqual(namespace_name, body['namespace']) self.assertEqual('public', body['visibility']) self.assertRaises(lib_exc.Forbidden, self.namespaces_client.delete_namespace, namespace_name) body = self.namespaces_client.update_namespace(namespace=namespace_name, description='Tempest', visibility='private', display_name=namespace_name, protected=False) self.assertEqual('private', body['visibility']) self.assertEqual(False, body['protected']) self.namespaces_client.delete_namespace(namespace_name)
@decorators.idempotent_id('319b765e-7f3d-4b3d-8b37-3ca3876ee768') def test_basic_metadata_definition_namespaces(self): body = self.resource_types_client.list_resource_types() resource_name = body['resource_types'][0]['name'] name = [{'name': resource_name}] namespace_name = data_utils.rand_name('namespace') body = self.namespaces_client.create_namespace(namespace=namespace_name, visibility='public', description='Tempest', display_name=namespace_name, resource_type_associations=name, protected=True) self.addCleanup(test_utils.call_and_ignore_notfound_exc, self._cleanup_namespace, namespace_name) bodys = self.namespaces_client.list_namespaces()['namespaces'] body = [namespace['namespace'] for namespace in bodys] self.assertIn(namespace_name, body) body = self.namespaces_client.show_namespace(namespace_name) self.assertEqual(namespace_name, body['namespace']) self.assertEqual('public', body['visibility']) self.assertRaises(lib_exc.Forbidden, self.namespaces_client.delete_namespace, namespace_name) body = self.namespaces_client.update_namespace(namespace=namespace_name, description='Tempest', visibility='private', display_name=namespace_name, protected=False) self.assertEqual('private', body['visibility']) self.assertEqual(False, body['protected']) self.namespaces_client.delete_namespace(namespace_name)<|docstring|>Test operations of image metadata definition namespaces<|endoftext|>
a15bef1692c74feb7aecf1055a590b19b3227348211bb39b307ad7b2526cfd0f
def reset(self):
    """Reset all environments and return the stacked observations."""
    obs = self.venv.reset()
    n_channels = obs.shape[-1]
    # Clear history, then place the fresh observation in the last slot.
    self.stackedobs[...] = 0
    self.stackedobs[..., -n_channels:] = obs
    return self.stackedobs
Reset all environments
pytorch-a2c-ppo-acktr/vec_env/vec_frame_stack.py
reset
sahandrez/gym-miniworld
499
python
def reset(self): '\n \n ' obs = self.venv.reset() self.stackedobs[...] = 0 self.stackedobs[(..., (- obs.shape[(- 1)]):)] = obs return self.stackedobs
def reset(self): '\n \n ' obs = self.venv.reset() self.stackedobs[...] = 0 self.stackedobs[(..., (- obs.shape[(- 1)]):)] = obs return self.stackedobs<|docstring|>Reset all environments<|endoftext|>
7df925753194d2cff4191113a7f676b0915f9ac0f9e0d21904bee67a45da8474
def categorical_sample(prob_n, np_random):
    """Sample from a categorical distribution."""
    cdf = np.cumsum(np.asarray(prob_n))
    # First index whose cumulative mass exceeds the uniform draw.
    return (cdf > np_random.rand()).argmax()
Sample from a categorical distribution.
rlsuite/envs/aliased_gridworld/aliased_gridworld.py
categorical_sample
christopher-wolff/lab
2
python
def categorical_sample(prob_n, np_random): prob_n = np.asarray(prob_n) csprob_n = np.cumsum(prob_n) return (csprob_n > np_random.rand()).argmax()
def categorical_sample(prob_n, np_random): prob_n = np.asarray(prob_n) csprob_n = np.cumsum(prob_n) return (csprob_n > np_random.rand()).argmax()<|docstring|>Sample from a categorical distribution.<|endoftext|>
089f4fdec7f3084497c5941d7cc07177ac2dede1f214068db10afdf28eec9285
def do_query_update(query, params=None):
    """Execute one query statement,
    Return result and statistics

    FIX: the default for ``params`` was a mutable ``{}``; use the None
    sentinel instead (backward-compatible — the dict was only read, but a
    shared mutable default is a latent bug if any caller ever mutates it).
    """
    if params is None:
        params = {}
    url = get_neo4j_api_url('/db/data/transaction/commit')
    query_request = {'statements': [{'statement': query, 'parameters': params, 'includeStats': True}]}
    query_request = json.dumps(query_request).encode()
    response = requests.post(url, data=query_request)
    # Raise if the transactional endpoint reported statement errors.
    raise_for_update_errors(response)
    return response
Execute one query statement, Return result and statistics
graph_data/neo4j.py
do_query_update
andreisirghi/graph-data
0
python
def do_query_update(query, params={}): '\n Execute one query statement,\n Return result and statistics\n ' url = get_neo4j_api_url('/db/data/transaction/commit') query_request = {'statements': [{'statement': query, 'parameters': params, 'includeStats': True}]} query_request = json.dumps(query_request).encode() response = requests.post(url, data=query_request) raise_for_update_errors(response) return response
def do_query_update(query, params={}): '\n Execute one query statement,\n Return result and statistics\n ' url = get_neo4j_api_url('/db/data/transaction/commit') query_request = {'statements': [{'statement': query, 'parameters': params, 'includeStats': True}]} query_request = json.dumps(query_request).encode() response = requests.post(url, data=query_request) raise_for_update_errors(response) return response<|docstring|>Execute one query statement, Return result and statistics<|endoftext|>
f2e304901b26385e9ecc2c8d7b2eebb017e65b7d6114e4722f7ba467291ae95a
def do_query_update_batch(queries):
    """Execute multiple query statements,
    Return result and statistics for each of the statements
    """
    url = get_neo4j_api_url('/db/data/transaction/commit')
    statements = [
        {'statement': query['statement'],
         'parameters': query['params'],
         'includeStats': True}
        for query in queries
    ]
    body = json.dumps({'statements': statements}).encode()
    response = requests.post(url, data=body)
    raise_for_update_errors(response)
    return response
Execute multiple query statements, Return result and statistics for each of the statements
graph_data/neo4j.py
do_query_update_batch
andreisirghi/graph-data
0
python
def do_query_update_batch(queries): '\n Execute multiple query statements,\n Return result and statistics for each of the statements\n ' url = get_neo4j_api_url('/db/data/transaction/commit') statements = [] for query in queries: statement = {'statement': query['statement'], 'parameters': query['params'], 'includeStats': True} statements.append(statement) query_request = {'statements': statements} query_request = json.dumps(query_request).encode() response = requests.post(url, data=query_request) raise_for_update_errors(response) return response
def do_query_update_batch(queries): '\n Execute multiple query statements,\n Return result and statistics for each of the statements\n ' url = get_neo4j_api_url('/db/data/transaction/commit') statements = [] for query in queries: statement = {'statement': query['statement'], 'parameters': query['params'], 'includeStats': True} statements.append(statement) query_request = {'statements': statements} query_request = json.dumps(query_request).encode() response = requests.post(url, data=query_request) raise_for_update_errors(response) return response<|docstring|>Execute multiple query statements, Return result and statistics for each of the statements<|endoftext|>
85335c9a6ed086216b9dc5466c137d9812d15b03888e07c413c719aa892afc04
def prepare_query(query, params):
    """Replace template query {parameter}s with the
    values provided in the dictionary
    """
    return query.format(**params)
Replace template query {parameter}s with the values provided in the dictionary
graph_data/neo4j.py
prepare_query
andreisirghi/graph-data
0
python
def prepare_query(query, params): 'Replace template query {parameter}s with the\n values provided in the dictionary\n ' return query.format(**params)
def prepare_query(query, params): 'Replace template query {parameter}s with the\n values provided in the dictionary\n ' return query.format(**params)<|docstring|>Replace template query {parameter}s with the values provided in the dictionary<|endoftext|>
53ebf6d2a70341235b63c432b68d1626f72917ababc0b4ba85c253ebb498037d
def create_schema():
    """Create database indexes and uniqueness constraints."""
    logger.info('graph.db.schema.prepare')
    # Uniqueness constraints on the two node identity properties.
    do_query_update(prepare_query(
        Q_CR_UNIQUE_CONSTRAINT, {'type': 'Student', 'property': 'idno'}))
    do_query_update(prepare_query(
        Q_CR_UNIQUE_CONSTRAINT, {'type': 'Characteristic', 'property': 'id'}))
    # Plain indexes on the remaining Characteristic lookup properties.
    for prop in ('type', 'value'):
        do_query_update(prepare_query(
            Q_CR_INDEX, {'type': 'Characteristic', 'property': prop}))
    logger.info('graph.db.schema.created')
Create database indexes and uniqueness constraints
graph_data/neo4j.py
create_schema
andreisirghi/graph-data
0
python
def create_schema(): '\n \n ' logger.info('graph.db.schema.prepare') params = {'type': 'Student', 'property': 'idno'} do_query_update(prepare_query(Q_CR_UNIQUE_CONSTRAINT, params)) params = {'type': 'Characteristic', 'property': 'id'} do_query_update(prepare_query(Q_CR_UNIQUE_CONSTRAINT, params)) params['property'] = 'type' do_query_update(prepare_query(Q_CR_INDEX, params)) params['property'] = 'value' do_query_update(prepare_query(Q_CR_INDEX, params)) logger.info('graph.db.schema.created')
def create_schema(): '\n \n ' logger.info('graph.db.schema.prepare') params = {'type': 'Student', 'property': 'idno'} do_query_update(prepare_query(Q_CR_UNIQUE_CONSTRAINT, params)) params = {'type': 'Characteristic', 'property': 'id'} do_query_update(prepare_query(Q_CR_UNIQUE_CONSTRAINT, params)) params['property'] = 'type' do_query_update(prepare_query(Q_CR_INDEX, params)) params['property'] = 'value' do_query_update(prepare_query(Q_CR_INDEX, params)) logger.info('graph.db.schema.created')<|docstring|>Create database indexes and uniqueness constraints<|endoftext|>
dab0634bdfbabab7ee9efe237dd7618df5eac33271d0a0bc767e8c0bacd26355
def extractSelfishTranslation(item):
    """'Selfish Translation'"""
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Previews and titles with no chapter/volume/fragment info are skipped.
    if 'preview' in item['title'].lower():
        return None
    if not (chp or vol or frag):
        return None
    return False
'Selfish Translation'
WebMirror/management/rss_parser_funcs/feed_parse_extractSelfishTranslation.py
extractSelfishTranslation
fake-name/ReadableWebProxy
193
python
def extractSelfishTranslation(item): "\n\t\n\t" (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title']) if ((not (chp or vol or frag)) or ('preview' in item['title'].lower())): return None return False
def extractSelfishTranslation(item): "\n\t\n\t" (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title']) if ((not (chp or vol or frag)) or ('preview' in item['title'].lower())): return None return False<|docstring|>'Selfish Translation'<|endoftext|>
eb23890ca2a4cd549f51c80ae6767f77ce0570c8f9091c389681596e55676551
@swagger_auto_schema(request_body=serializers.FindingNoteSerializer, responses={status.HTTP_200_OK: ''})
@action(detail=True, methods=['patch'])
def remove_note(self, request, pk=None):
    """Remove Note From Finding Note"""
    finding = get_object_or_404(Finding.objects, id=pk)
    notes = finding.notes.all()
    # Guard clauses: a note id is required and must belong to this finding.
    if not request.data['note_id']:
        return Response({'error': "('note_id') parameter missing"},
                        status=status.HTTP_400_BAD_REQUEST)
    note = get_object_or_404(Notes.objects, id=request.data['note_id'])
    if note not in notes:
        return Response({'error': 'Selected Note is not assigned to this Finding'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Only the note's author or staff may delete it.
    if (note.author.username != request.user.username) and (not request.user.is_staff):
        return Response({'error': "Delete Failed, You are not the Note's author"},
                        status=status.HTTP_400_BAD_REQUEST)
    finding.notes.remove(note)
    note.delete()
    return Response({'Success': 'Selected Note has been Removed successfully'},
                    status=status.HTTP_200_OK)
Remove Note From Finding Note
dojo/api_v2/views.py
remove_note
satackey/django-DefectDojo
0
python
@swagger_auto_schema(request_body=serializers.FindingNoteSerializer, responses={status.HTTP_200_OK: }) @action(detail=True, methods=['patch']) def remove_note(self, request, pk=None): finding = get_object_or_404(Finding.objects, id=pk) notes = finding.notes.all() if request.data['note_id']: note = get_object_or_404(Notes.objects, id=request.data['note_id']) if (note not in notes): return Response({'error': 'Selected Note is not assigned to this Finding'}, status=status.HTTP_400_BAD_REQUEST) else: return Response({'error': "('note_id') parameter missing"}, status=status.HTTP_400_BAD_REQUEST) if ((note.author.username == request.user.username) or request.user.is_staff): finding.notes.remove(note) note.delete() else: return Response({'error': "Delete Failed, You are not the Note's author"}, status=status.HTTP_400_BAD_REQUEST) return Response({'Success': 'Selected Note has been Removed successfully'}, status=status.HTTP_200_OK)
@swagger_auto_schema(request_body=serializers.FindingNoteSerializer, responses={status.HTTP_200_OK: }) @action(detail=True, methods=['patch']) def remove_note(self, request, pk=None): finding = get_object_or_404(Finding.objects, id=pk) notes = finding.notes.all() if request.data['note_id']: note = get_object_or_404(Notes.objects, id=request.data['note_id']) if (note not in notes): return Response({'error': 'Selected Note is not assigned to this Finding'}, status=status.HTTP_400_BAD_REQUEST) else: return Response({'error': "('note_id') parameter missing"}, status=status.HTTP_400_BAD_REQUEST) if ((note.author.username == request.user.username) or request.user.is_staff): finding.notes.remove(note) note.delete() else: return Response({'error': "Delete Failed, You are not the Note's author"}, status=status.HTTP_400_BAD_REQUEST) return Response({'Success': 'Selected Note has been Removed successfully'}, status=status.HTTP_200_OK)<|docstring|>Remove Note From Finding Note<|endoftext|>
84655a3cd2a9b80151fa58808c0e62301e18ac44673fd77995b227c196a9e485
@swagger_auto_schema(responses={status.HTTP_200_OK: ''}, methods=['put', 'patch'], request_body=serializers.TagSerializer)
@action(detail=True, methods=['put', 'patch'])
def remove_tags(self, request, pk=None):
    """Remove Tag(s) from finding list of tags"""
    finding = get_object_or_404(Finding.objects, id=pk)
    delete_tags = serializers.TagSerializer(data=request.data)
    # Guard clause: reject malformed payloads up front.
    if not delete_tags.is_valid():
        return Response(delete_tags.errors, status=status.HTTP_400_BAD_REQUEST)
    all_tags = serializers.TagSerializer({'tags': finding.tags}).data['tags']
    del_tags = delete_tags.validated_data['tags']
    if len(del_tags) < 1:
        return Response({'error': 'Empty Tag List Not Allowed'},
                        status=status.HTTP_400_BAD_REQUEST)
    # Every requested tag must currently be on the finding; remove as we go.
    for tag in del_tags:
        if tag not in all_tags:
            return Response({'error': "'{}' is not a valid tag in list".format(tag)},
                            status=status.HTTP_400_BAD_REQUEST)
        all_tags.remove(tag)
    finding.tags = ', '.join(all_tags)
    finding.save()
    return Response({'success': 'Tag(s) Removed'}, status=status.HTTP_200_OK)
Remove Tag(s) from finding list of tags
dojo/api_v2/views.py
remove_tags
satackey/django-DefectDojo
0
python
@swagger_auto_schema(responses={status.HTTP_200_OK: }, methods=['put', 'patch'], request_body=serializers.TagSerializer) @action(detail=True, methods=['put', 'patch']) def remove_tags(self, request, pk=None): ' ' finding = get_object_or_404(Finding.objects, id=pk) delete_tags = serializers.TagSerializer(data=request.data) if delete_tags.is_valid(): all_tags = finding.tags all_tags = serializers.TagSerializer({'tags': all_tags}).data['tags'] del_tags = delete_tags.validated_data['tags'] if (len(del_tags) < 1): return Response({'error': 'Empty Tag List Not Allowed'}, status=status.HTTP_400_BAD_REQUEST) for tag in del_tags: if (tag not in all_tags): return Response({'error': "'{}' is not a valid tag in list".format(tag)}, status=status.HTTP_400_BAD_REQUEST) all_tags.remove(tag) t = ', '.join(all_tags) finding.tags = t finding.save() return Response({'success': 'Tag(s) Removed'}, status=status.HTTP_200_OK) else: return Response(delete_tags.errors, status=status.HTTP_400_BAD_REQUEST)
@swagger_auto_schema(responses={status.HTTP_200_OK: }, methods=['put', 'patch'], request_body=serializers.TagSerializer) @action(detail=True, methods=['put', 'patch']) def remove_tags(self, request, pk=None): ' ' finding = get_object_or_404(Finding.objects, id=pk) delete_tags = serializers.TagSerializer(data=request.data) if delete_tags.is_valid(): all_tags = finding.tags all_tags = serializers.TagSerializer({'tags': all_tags}).data['tags'] del_tags = delete_tags.validated_data['tags'] if (len(del_tags) < 1): return Response({'error': 'Empty Tag List Not Allowed'}, status=status.HTTP_400_BAD_REQUEST) for tag in del_tags: if (tag not in all_tags): return Response({'error': "'{}' is not a valid tag in list".format(tag)}, status=status.HTTP_400_BAD_REQUEST) all_tags.remove(tag) t = ', '.join(all_tags) finding.tags = t finding.save() return Response({'success': 'Tag(s) Removed'}, status=status.HTTP_200_OK) else: return Response(delete_tags.errors, status=status.HTTP_400_BAD_REQUEST)<|docstring|>Remove Tag(s) from finding list of tags<|endoftext|>
8a03c24c02805d37927e6a9e7db4fbe3ec6f256bace2efe9491085e454f4e3cb
def __clone_cloudbiolinux(cbl_config): 'Clone CloudBioLinux to a temporary directory.\n\n TODO: Support particular revision.\n ' cbl_url = cbl_config.get('repository', DEFAULT_CBL_URL) cbl_dir = mkdtemp(suffix='cbl') check_call(['git', 'clone', cbl_url, cbl_dir]) revision = cbl_config.get('revision', None) if revision: git_dir = os.path.join(cbl_dir, '.git') check_call(['git', '--work-tree', cbl_dir, '--git-dir', git_dir, 'checkout', revision]) return cbl_dir
Clone CloudBioLinux to a temporary directory. TODO: Support particular revision.
deploy/test_install_galaxy_tool.py
__clone_cloudbiolinux
bcbio/cloudbiolinux
122
python
def __clone_cloudbiolinux(cbl_config): 'Clone CloudBioLinux to a temporary directory.\n\n TODO: Support particular revision.\n ' cbl_url = cbl_config.get('repository', DEFAULT_CBL_URL) cbl_dir = mkdtemp(suffix='cbl') check_call(['git', 'clone', cbl_url, cbl_dir]) revision = cbl_config.get('revision', None) if revision: git_dir = os.path.join(cbl_dir, '.git') check_call(['git', '--work-tree', cbl_dir, '--git-dir', git_dir, 'checkout', revision]) return cbl_dir
def __clone_cloudbiolinux(cbl_config): 'Clone CloudBioLinux to a temporary directory.\n\n TODO: Support particular revision.\n ' cbl_url = cbl_config.get('repository', DEFAULT_CBL_URL) cbl_dir = mkdtemp(suffix='cbl') check_call(['git', 'clone', cbl_url, cbl_dir]) revision = cbl_config.get('revision', None) if revision: git_dir = os.path.join(cbl_dir, '.git') check_call(['git', '--work-tree', cbl_dir, '--git-dir', git_dir, 'checkout', revision]) return cbl_dir<|docstring|>Clone CloudBioLinux to a temporary directory. TODO: Support particular revision.<|endoftext|>
87f9698fd8ac07ced4ee449c8260c136e6bfa4009bf58c919ededbd8e429243c
def hexdump(source: bytearray, length: int=16, separator: string='.', show_raw: bool=False, base: int=0) -> string: '\n Produces a `hexdump` command like output version of the bytearray given.\n ' result = [] for i in range(0, len(source), length): s = source[i:(i + length)] hexa = ' '.join([('%02X' % c) for c in s]) text = ''.join([(chr(c) if (32 <= c < 127) else separator) for c in s]) if show_raw: result.append(hexa) else: result.append(('%#-.*x %-*s %s' % (16, (base + i), (3 * length), hexa, text))) return '\n'.join(result)
Produces a `hexdump` command like output version of the bytearray given.
cemu/utils.py
hexdump
hugsy/cemu
823
python
def hexdump(source: bytearray, length: int=16, separator: string='.', show_raw: bool=False, base: int=0) -> string: '\n \n ' result = [] for i in range(0, len(source), length): s = source[i:(i + length)] hexa = ' '.join([('%02X' % c) for c in s]) text = .join([(chr(c) if (32 <= c < 127) else separator) for c in s]) if show_raw: result.append(hexa) else: result.append(('%#-.*x %-*s %s' % (16, (base + i), (3 * length), hexa, text))) return '\n'.join(result)
def hexdump(source: bytearray, length: int=16, separator: string='.', show_raw: bool=False, base: int=0) -> string: '\n \n ' result = [] for i in range(0, len(source), length): s = source[i:(i + length)] hexa = ' '.join([('%02X' % c) for c in s]) text = .join([(chr(c) if (32 <= c < 127) else separator) for c in s]) if show_raw: result.append(hexa) else: result.append(('%#-.*x %-*s %s' % (16, (base + i), (3 * length), hexa, text))) return '\n'.join(result)<|docstring|>Produces a `hexdump` command like output version of the bytearray given.<|endoftext|>
3b06c6aad27a973e2b78b64ce46e19002ffdac4d1b4bd9453f7a24a430f472c8
def assemble(asm_code: string, mode: int) -> Tuple[(bytearray, int)]: "\n Helper function to assemble code receive in parameter `asm_code` using Keystone.\n\n @param asm_code : assembly code in bytes (multiple instructions must be separated by ';')\n @param mode : defines the mode to use Keystone with\n @return a tuple of bytecodes as bytearray, along with the number of instruction compiled. If failed, the\n bytearray will be empty, the count of instruction will be the negative number for the faulty line.\n " (arch, mode, endian) = get_arch_mode('keystone', mode) ks = keystone.Ks(arch, (mode | endian)) if (is_x86(mode) and (mode.syntax == Syntax.ATT)): ks.syntax = keystone.KS_OPT_SYNTAX_ATT try: (bytecode, cnt) = ks.asm(asm_code, as_bytes=True) except keystone.keystone.KsError as kse: return (b'', kse.get_asm_count()) return (bytecode, cnt)
Helper function to assemble code receive in parameter `asm_code` using Keystone. @param asm_code : assembly code in bytes (multiple instructions must be separated by ';') @param mode : defines the mode to use Keystone with @return a tuple of bytecodes as bytearray, along with the number of instruction compiled. If failed, the bytearray will be empty, the count of instruction will be the negative number for the faulty line.
cemu/utils.py
assemble
hugsy/cemu
823
python
def assemble(asm_code: string, mode: int) -> Tuple[(bytearray, int)]: "\n Helper function to assemble code receive in parameter `asm_code` using Keystone.\n\n @param asm_code : assembly code in bytes (multiple instructions must be separated by ';')\n @param mode : defines the mode to use Keystone with\n @return a tuple of bytecodes as bytearray, along with the number of instruction compiled. If failed, the\n bytearray will be empty, the count of instruction will be the negative number for the faulty line.\n " (arch, mode, endian) = get_arch_mode('keystone', mode) ks = keystone.Ks(arch, (mode | endian)) if (is_x86(mode) and (mode.syntax == Syntax.ATT)): ks.syntax = keystone.KS_OPT_SYNTAX_ATT try: (bytecode, cnt) = ks.asm(asm_code, as_bytes=True) except keystone.keystone.KsError as kse: return (b, kse.get_asm_count()) return (bytecode, cnt)
def assemble(asm_code: string, mode: int) -> Tuple[(bytearray, int)]: "\n Helper function to assemble code receive in parameter `asm_code` using Keystone.\n\n @param asm_code : assembly code in bytes (multiple instructions must be separated by ';')\n @param mode : defines the mode to use Keystone with\n @return a tuple of bytecodes as bytearray, along with the number of instruction compiled. If failed, the\n bytearray will be empty, the count of instruction will be the negative number for the faulty line.\n " (arch, mode, endian) = get_arch_mode('keystone', mode) ks = keystone.Ks(arch, (mode | endian)) if (is_x86(mode) and (mode.syntax == Syntax.ATT)): ks.syntax = keystone.KS_OPT_SYNTAX_ATT try: (bytecode, cnt) = ks.asm(asm_code, as_bytes=True) except keystone.keystone.KsError as kse: return (b, kse.get_asm_count()) return (bytecode, cnt)<|docstring|>Helper function to assemble code receive in parameter `asm_code` using Keystone. @param asm_code : assembly code in bytes (multiple instructions must be separated by ';') @param mode : defines the mode to use Keystone with @return a tuple of bytecodes as bytearray, along with the number of instruction compiled. If failed, the bytearray will be empty, the count of instruction will be the negative number for the faulty line.<|endoftext|>
860cab0f1dd580a080047999b23ea93901d5074c117de013765b6166581c8d85
def get_cursor_row_number(widget: QTextEdit) -> int: '\n Get the cursor row number from the QTextEdit widget\n ' assert isinstance(widget, QTextEdit) pos = widget.textCursor().position() text = widget.toPlainText() return text[:pos].count('\n')
Get the cursor row number from the QTextEdit widget
cemu/utils.py
get_cursor_row_number
hugsy/cemu
823
python
def get_cursor_row_number(widget: QTextEdit) -> int: '\n \n ' assert isinstance(widget, QTextEdit) pos = widget.textCursor().position() text = widget.toPlainText() return text[:pos].count('\n')
def get_cursor_row_number(widget: QTextEdit) -> int: '\n \n ' assert isinstance(widget, QTextEdit) pos = widget.textCursor().position() text = widget.toPlainText() return text[:pos].count('\n')<|docstring|>Get the cursor row number from the QTextEdit widget<|endoftext|>
4dc9735736835ac583bbb9c41ee85deb348c40c7a5e2b85aa13e3bac76c87753
def get_cursor_column_number(widget: QTextEdit) -> int: '\n Get the cursor column number from the QTextEdit widget\n ' assert isinstance(widget, QTextEdit) pos = widget.textCursor().position() text = widget.toPlainText() return len(text[:pos].split('\n')[(- 1)])
Get the cursor column number from the QTextEdit widget
cemu/utils.py
get_cursor_column_number
hugsy/cemu
823
python
def get_cursor_column_number(widget: QTextEdit) -> int: '\n \n ' assert isinstance(widget, QTextEdit) pos = widget.textCursor().position() text = widget.toPlainText() return len(text[:pos].split('\n')[(- 1)])
def get_cursor_column_number(widget: QTextEdit) -> int: '\n \n ' assert isinstance(widget, QTextEdit) pos = widget.textCursor().position() text = widget.toPlainText() return len(text[:pos].split('\n')[(- 1)])<|docstring|>Get the cursor column number from the QTextEdit widget<|endoftext|>
0ef626053e120fca6ec44ba60510a205ebd86638fab9ce863049d3f91ff0cd76
def get_cursor_position(widget: QTextEdit) -> Tuple[(int, int)]: '\n Returns the position of a cursor like (nb_row, nb_col) from a textedit widget\n ' return (get_cursor_row_number(widget), get_cursor_column_number(widget))
Returns the position of a cursor like (nb_row, nb_col) from a textedit widget
cemu/utils.py
get_cursor_position
hugsy/cemu
823
python
def get_cursor_position(widget: QTextEdit) -> Tuple[(int, int)]: '\n \n ' return (get_cursor_row_number(widget), get_cursor_column_number(widget))
def get_cursor_position(widget: QTextEdit) -> Tuple[(int, int)]: '\n \n ' return (get_cursor_row_number(widget), get_cursor_column_number(widget))<|docstring|>Returns the position of a cursor like (nb_row, nb_col) from a textedit widget<|endoftext|>
fcab64d38b7d6ed9a9564bd705b1d2af789bcd5b746850d59d01f686f1950fe3
def generate_random_string(length: int) -> str: '\n Returns a random string\n ' charset = (string.ascii_letters + string.digits) return ''.join((random.choice(charset) for i in range(length)))
Returns a random string
cemu/utils.py
generate_random_string
hugsy/cemu
823
python
def generate_random_string(length: int) -> str: '\n \n ' charset = (string.ascii_letters + string.digits) return .join((random.choice(charset) for i in range(length)))
def generate_random_string(length: int) -> str: '\n \n ' charset = (string.ascii_letters + string.digits) return .join((random.choice(charset) for i in range(length)))<|docstring|>Returns a random string<|endoftext|>
17e48ff9acb1fa95ae0898cbc858d614259bfcbbaf0edb8d3e2eff70fc641629
def get_num_lines(file: Union[(str, TextIO)]) -> int: '\n Gets the number of lines in a file. Uses ``wc -l`` which seems to be the fastest method for larger files.\n Specifically counts the number of newline characters (thus no trailing newline may reduce count by 1)\n\n :param file: the file to get line counts for. Either a string or an open text file (returned by ``open()``)\n :raises CalledProcessError: if there was an error with the subprocess\n :return: the number of lines in the file as an int\n ' out = subprocess.run(args=['wc', '-l', (file if isinstance(file, str) else file.name)], check=True, encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE) return int(out.stdout.split()[0])
Gets the number of lines in a file. Uses ``wc -l`` which seems to be the fastest method for larger files. Specifically counts the number of newline characters (thus no trailing newline may reduce count by 1) :param file: the file to get line counts for. Either a string or an open text file (returned by ``open()``) :raises CalledProcessError: if there was an error with the subprocess :return: the number of lines in the file as an int
shared/fileutils.py
get_num_lines
byu-imaal/dns-cookies-pam21
0
python
def get_num_lines(file: Union[(str, TextIO)]) -> int: '\n Gets the number of lines in a file. Uses ``wc -l`` which seems to be the fastest method for larger files.\n Specifically counts the number of newline characters (thus no trailing newline may reduce count by 1)\n\n :param file: the file to get line counts for. Either a string or an open text file (returned by ``open()``)\n :raises CalledProcessError: if there was an error with the subprocess\n :return: the number of lines in the file as an int\n ' out = subprocess.run(args=['wc', '-l', (file if isinstance(file, str) else file.name)], check=True, encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE) return int(out.stdout.split()[0])
def get_num_lines(file: Union[(str, TextIO)]) -> int: '\n Gets the number of lines in a file. Uses ``wc -l`` which seems to be the fastest method for larger files.\n Specifically counts the number of newline characters (thus no trailing newline may reduce count by 1)\n\n :param file: the file to get line counts for. Either a string or an open text file (returned by ``open()``)\n :raises CalledProcessError: if there was an error with the subprocess\n :return: the number of lines in the file as an int\n ' out = subprocess.run(args=['wc', '-l', (file if isinstance(file, str) else file.name)], check=True, encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE) return int(out.stdout.split()[0])<|docstring|>Gets the number of lines in a file. Uses ``wc -l`` which seems to be the fastest method for larger files. Specifically counts the number of newline characters (thus no trailing newline may reduce count by 1) :param file: the file to get line counts for. Either a string or an open text file (returned by ``open()``) :raises CalledProcessError: if there was an error with the subprocess :return: the number of lines in the file as an int<|endoftext|>
849a256fd3de55b3a98f6a65a6419113b63d5fbf338dcedcd0e13fbe143608a5
def get_free_filename(filename: str, format_str: str='{file}_{uniq}.{ext}'): '\n Get a filename that\'s free so we don\'t overwrite anything. Appends a consecutive digit to the end of the name\n until it is unique. Note that this method respects file extensions.\n\n Example: if there is already ``example.txt`` in a directory,\n\n calling ``get_free_filename("example".txt, format_str="{uniq:02d}__{file}")`` will return: ``01__example.txt``\n\n calling ``get_free_filename("example".txt, format_str="{file}.{uniq}.{ext}")`` will return: ``example.1.txt``\n\n :param filename: the desired filename\n :param format_str: The string to format the filename with. Can include ``{file}`` for the filename, ``{uniq}``\n for the unique id and optionally ``{ext}`` to include the file extension separately\n :return: the orginal filename if it doesn\'t exist, otherwise a filename following the ``format_str`` pattern\n ' fmt_keys = [t[1] for t in string.Formatter().parse(format_str) if (t[1] is not None)] if any(((key not in ['file', 'uniq', 'ext']) for key in fmt_keys)): raise KeyError('Invalid key for file format. Allowed keys are {file, uniq, ext}') n = 1 if (('.' in filename) and ('ext' in format_str)): parts = filename.split('.') (original, ext) = ('.'.join(parts[:(- 1)]), parts[(- 1)]) else: (original, ext) = (filename, '') while os.path.isfile(filename): filename = format_str.format(file=original, uniq=n, ext=ext) n += 1 return filename
Get a filename that's free so we don't overwrite anything. Appends a consecutive digit to the end of the name until it is unique. Note that this method respects file extensions. Example: if there is already ``example.txt`` in a directory, calling ``get_free_filename("example".txt, format_str="{uniq:02d}__{file}")`` will return: ``01__example.txt`` calling ``get_free_filename("example".txt, format_str="{file}.{uniq}.{ext}")`` will return: ``example.1.txt`` :param filename: the desired filename :param format_str: The string to format the filename with. Can include ``{file}`` for the filename, ``{uniq}`` for the unique id and optionally ``{ext}`` to include the file extension separately :return: the orginal filename if it doesn't exist, otherwise a filename following the ``format_str`` pattern
shared/fileutils.py
get_free_filename
byu-imaal/dns-cookies-pam21
0
python
def get_free_filename(filename: str, format_str: str='{file}_{uniq}.{ext}'): '\n Get a filename that\'s free so we don\'t overwrite anything. Appends a consecutive digit to the end of the name\n until it is unique. Note that this method respects file extensions.\n\n Example: if there is already ``example.txt`` in a directory,\n\n calling ``get_free_filename("example".txt, format_str="{uniq:02d}__{file}")`` will return: ``01__example.txt``\n\n calling ``get_free_filename("example".txt, format_str="{file}.{uniq}.{ext}")`` will return: ``example.1.txt``\n\n :param filename: the desired filename\n :param format_str: The string to format the filename with. Can include ``{file}`` for the filename, ``{uniq}``\n for the unique id and optionally ``{ext}`` to include the file extension separately\n :return: the orginal filename if it doesn\'t exist, otherwise a filename following the ``format_str`` pattern\n ' fmt_keys = [t[1] for t in string.Formatter().parse(format_str) if (t[1] is not None)] if any(((key not in ['file', 'uniq', 'ext']) for key in fmt_keys)): raise KeyError('Invalid key for file format. Allowed keys are {file, uniq, ext}') n = 1 if (('.' in filename) and ('ext' in format_str)): parts = filename.split('.') (original, ext) = ('.'.join(parts[:(- 1)]), parts[(- 1)]) else: (original, ext) = (filename, ) while os.path.isfile(filename): filename = format_str.format(file=original, uniq=n, ext=ext) n += 1 return filename
def get_free_filename(filename: str, format_str: str='{file}_{uniq}.{ext}'): '\n Get a filename that\'s free so we don\'t overwrite anything. Appends a consecutive digit to the end of the name\n until it is unique. Note that this method respects file extensions.\n\n Example: if there is already ``example.txt`` in a directory,\n\n calling ``get_free_filename("example".txt, format_str="{uniq:02d}__{file}")`` will return: ``01__example.txt``\n\n calling ``get_free_filename("example".txt, format_str="{file}.{uniq}.{ext}")`` will return: ``example.1.txt``\n\n :param filename: the desired filename\n :param format_str: The string to format the filename with. Can include ``{file}`` for the filename, ``{uniq}``\n for the unique id and optionally ``{ext}`` to include the file extension separately\n :return: the orginal filename if it doesn\'t exist, otherwise a filename following the ``format_str`` pattern\n ' fmt_keys = [t[1] for t in string.Formatter().parse(format_str) if (t[1] is not None)] if any(((key not in ['file', 'uniq', 'ext']) for key in fmt_keys)): raise KeyError('Invalid key for file format. Allowed keys are {file, uniq, ext}') n = 1 if (('.' in filename) and ('ext' in format_str)): parts = filename.split('.') (original, ext) = ('.'.join(parts[:(- 1)]), parts[(- 1)]) else: (original, ext) = (filename, ) while os.path.isfile(filename): filename = format_str.format(file=original, uniq=n, ext=ext) n += 1 return filename<|docstring|>Get a filename that's free so we don't overwrite anything. Appends a consecutive digit to the end of the name until it is unique. Note that this method respects file extensions. 
Example: if there is already ``example.txt`` in a directory, calling ``get_free_filename("example".txt, format_str="{uniq:02d}__{file}")`` will return: ``01__example.txt`` calling ``get_free_filename("example".txt, format_str="{file}.{uniq}.{ext}")`` will return: ``example.1.txt`` :param filename: the desired filename :param format_str: The string to format the filename with. Can include ``{file}`` for the filename, ``{uniq}`` for the unique id and optionally ``{ext}`` to include the file extension separately :return: the orginal filename if it doesn't exist, otherwise a filename following the ``format_str`` pattern<|endoftext|>
8dc2fa7d2033db42699bd3dc17054984bef133cc8d84742b223a58abad62bad3
def jl_iter(file: TextIO, with_tqdm=True) -> iter: "\n Create a json lines iterator from an open file.\n\n Example usage:\n with open('file.jsonl', 'r') as in_file:\n for line in jl_iter(in_file):\n print(line.keys())\n\n :param file: the open file to be used\n :param with_tqdm: if set, wraps iterator in tqdm progress bar\n :return: an iterator of dictionaries\n " if with_tqdm: return tqdm(map(json.loads, file), total=get_num_lines(file)) else: return map(json.loads, file)
Create a json lines iterator from an open file. Example usage: with open('file.jsonl', 'r') as in_file: for line in jl_iter(in_file): print(line.keys()) :param file: the open file to be used :param with_tqdm: if set, wraps iterator in tqdm progress bar :return: an iterator of dictionaries
shared/fileutils.py
jl_iter
byu-imaal/dns-cookies-pam21
0
python
def jl_iter(file: TextIO, with_tqdm=True) -> iter: "\n Create a json lines iterator from an open file.\n\n Example usage:\n with open('file.jsonl', 'r') as in_file:\n for line in jl_iter(in_file):\n print(line.keys())\n\n :param file: the open file to be used\n :param with_tqdm: if set, wraps iterator in tqdm progress bar\n :return: an iterator of dictionaries\n " if with_tqdm: return tqdm(map(json.loads, file), total=get_num_lines(file)) else: return map(json.loads, file)
def jl_iter(file: TextIO, with_tqdm=True) -> iter: "\n Create a json lines iterator from an open file.\n\n Example usage:\n with open('file.jsonl', 'r') as in_file:\n for line in jl_iter(in_file):\n print(line.keys())\n\n :param file: the open file to be used\n :param with_tqdm: if set, wraps iterator in tqdm progress bar\n :return: an iterator of dictionaries\n " if with_tqdm: return tqdm(map(json.loads, file), total=get_num_lines(file)) else: return map(json.loads, file)<|docstring|>Create a json lines iterator from an open file. Example usage: with open('file.jsonl', 'r') as in_file: for line in jl_iter(in_file): print(line.keys()) :param file: the open file to be used :param with_tqdm: if set, wraps iterator in tqdm progress bar :return: an iterator of dictionaries<|endoftext|>
3b0530d90a1f2634612f86006359b562fdcc154a735286b16977203f4bd1eca3
def __init__(self, **kwargs): " Initializes a ExternalService instance\n\n Notes:\n You can specify all parameters while calling this methods.\n A special argument named `data` will enable you to load the\n object from a Python dictionary\n\n Examples:\n >>> externalservice = NUExternalService(id=u'xxxx-xxx-xxx-xxx', name=u'ExternalService')\n >>> externalservice = NUExternalService(data=my_dict)\n " super(NUExternalService, self).__init__() self._description = None self._direction = None self._entity_scope = None self._external_id = None self._last_updated_by = None self._name = None self._service_type = None self._stage = None self.expose_attribute(local_name='description', remote_name='description', attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name='direction', remote_name='direction', attribute_type=str, is_required=False, is_unique=False, choices=[u'INGRESS']) self.expose_attribute(local_name='entity_scope', remote_name='entityScope', attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL']) self.expose_attribute(local_name='external_id', remote_name='externalID', attribute_type=str, is_required=False, is_unique=True) self.expose_attribute(local_name='last_updated_by', remote_name='lastUpdatedBy', attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name='name', remote_name='name', attribute_type=str, is_required=True, is_unique=False) self.expose_attribute(local_name='service_type', remote_name='serviceType', attribute_type=str, is_required=True, is_unique=False, choices=[u'L2', u'L3']) self.expose_attribute(local_name='stage', remote_name='stage', attribute_type=str, is_required=False, is_unique=False, choices=[u'START']) self.end_points = NUEndPointsFetcher.fetcher_with_object(parent_object=self, relationship='child') self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship='child') self.global_metadatas = 
NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship='child') self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship='child') self.metadata_tags = NUMetadataTagsFetcher.fetcher_with_object(parent_object=self, relationship='child') self._compute_args(**kwargs)
Initializes a ExternalService instance Notes: You can specify all parameters while calling this methods. A special argument named `data` will enable you to load the object from a Python dictionary Examples: >>> externalservice = NUExternalService(id=u'xxxx-xxx-xxx-xxx', name=u'ExternalService') >>> externalservice = NUExternalService(data=my_dict)
vspk/v4_0/nuexternalservice.py
__init__
cldelcourt/vspk-python
0
python
def __init__(self, **kwargs): " Initializes a ExternalService instance\n\n Notes:\n You can specify all parameters while calling this methods.\n A special argument named `data` will enable you to load the\n object from a Python dictionary\n\n Examples:\n >>> externalservice = NUExternalService(id=u'xxxx-xxx-xxx-xxx', name=u'ExternalService')\n >>> externalservice = NUExternalService(data=my_dict)\n " super(NUExternalService, self).__init__() self._description = None self._direction = None self._entity_scope = None self._external_id = None self._last_updated_by = None self._name = None self._service_type = None self._stage = None self.expose_attribute(local_name='description', remote_name='description', attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name='direction', remote_name='direction', attribute_type=str, is_required=False, is_unique=False, choices=[u'INGRESS']) self.expose_attribute(local_name='entity_scope', remote_name='entityScope', attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL']) self.expose_attribute(local_name='external_id', remote_name='externalID', attribute_type=str, is_required=False, is_unique=True) self.expose_attribute(local_name='last_updated_by', remote_name='lastUpdatedBy', attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name='name', remote_name='name', attribute_type=str, is_required=True, is_unique=False) self.expose_attribute(local_name='service_type', remote_name='serviceType', attribute_type=str, is_required=True, is_unique=False, choices=[u'L2', u'L3']) self.expose_attribute(local_name='stage', remote_name='stage', attribute_type=str, is_required=False, is_unique=False, choices=[u'START']) self.end_points = NUEndPointsFetcher.fetcher_with_object(parent_object=self, relationship='child') self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship='child') self.global_metadatas = 
NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship='child') self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship='child') self.metadata_tags = NUMetadataTagsFetcher.fetcher_with_object(parent_object=self, relationship='child') self._compute_args(**kwargs)
def __init__(self, **kwargs): " Initializes a ExternalService instance\n\n Notes:\n You can specify all parameters while calling this methods.\n A special argument named `data` will enable you to load the\n object from a Python dictionary\n\n Examples:\n >>> externalservice = NUExternalService(id=u'xxxx-xxx-xxx-xxx', name=u'ExternalService')\n >>> externalservice = NUExternalService(data=my_dict)\n " super(NUExternalService, self).__init__() self._description = None self._direction = None self._entity_scope = None self._external_id = None self._last_updated_by = None self._name = None self._service_type = None self._stage = None self.expose_attribute(local_name='description', remote_name='description', attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name='direction', remote_name='direction', attribute_type=str, is_required=False, is_unique=False, choices=[u'INGRESS']) self.expose_attribute(local_name='entity_scope', remote_name='entityScope', attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL']) self.expose_attribute(local_name='external_id', remote_name='externalID', attribute_type=str, is_required=False, is_unique=True) self.expose_attribute(local_name='last_updated_by', remote_name='lastUpdatedBy', attribute_type=str, is_required=False, is_unique=False) self.expose_attribute(local_name='name', remote_name='name', attribute_type=str, is_required=True, is_unique=False) self.expose_attribute(local_name='service_type', remote_name='serviceType', attribute_type=str, is_required=True, is_unique=False, choices=[u'L2', u'L3']) self.expose_attribute(local_name='stage', remote_name='stage', attribute_type=str, is_required=False, is_unique=False, choices=[u'START']) self.end_points = NUEndPointsFetcher.fetcher_with_object(parent_object=self, relationship='child') self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship='child') self.global_metadatas = 
NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship='child') self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship='child') self.metadata_tags = NUMetadataTagsFetcher.fetcher_with_object(parent_object=self, relationship='child') self._compute_args(**kwargs)<|docstring|>Initializes a ExternalService instance Notes: You can specify all parameters while calling this methods. A special argument named `data` will enable you to load the object from a Python dictionary Examples: >>> externalservice = NUExternalService(id=u'xxxx-xxx-xxx-xxx', name=u'ExternalService') >>> externalservice = NUExternalService(data=my_dict)<|endoftext|>
02dfe5a92b4dfda89c6a1ec4d29e6934e9a90b05b76032e4cceb8e945498f532
@property def description(self): ' Get description value.\n\n Notes:\n Description of the External Service.\n\n \n ' return self._description
Get description value. Notes: Description of the External Service.
vspk/v4_0/nuexternalservice.py
description
cldelcourt/vspk-python
0
python
@property def description(self): ' Get description value.\n\n Notes:\n Description of the External Service.\n\n \n ' return self._description
@property def description(self): ' Get description value.\n\n Notes:\n Description of the External Service.\n\n \n ' return self._description<|docstring|>Get description value. Notes: Description of the External Service.<|endoftext|>
348e4ae1803823fe13e9439a89d695465e61240312059675c64f23ca185d3f42
@description.setter def description(self, value): ' Set description value.\n\n Notes:\n Description of the External Service.\n\n \n ' self._description = value
Set description value. Notes: Description of the External Service.
vspk/v4_0/nuexternalservice.py
description
cldelcourt/vspk-python
0
python
@description.setter def description(self, value): ' Set description value.\n\n Notes:\n Description of the External Service.\n\n \n ' self._description = value
@description.setter def description(self, value): ' Set description value.\n\n Notes:\n Description of the External Service.\n\n \n ' self._description = value<|docstring|>Set description value. Notes: Description of the External Service.<|endoftext|>
dc6ba2acd18afdeb4ef2c97d9a1c430025e170646259760fa7a15f91a9cdcd74
@property def direction(self): ' Get direction value.\n\n Notes:\n Direction\n\n \n ' return self._direction
Get direction value. Notes: Direction
vspk/v4_0/nuexternalservice.py
direction
cldelcourt/vspk-python
0
python
@property def direction(self): ' Get direction value.\n\n Notes:\n Direction\n\n \n ' return self._direction
@property def direction(self): ' Get direction value.\n\n Notes:\n Direction\n\n \n ' return self._direction<|docstring|>Get direction value. Notes: Direction<|endoftext|>
28d23189071bd5b2cbf824fb8c4be02ab8fb1b7610d4c3059688a8ea723f2c3b
@direction.setter def direction(self, value): ' Set direction value.\n\n Notes:\n Direction\n\n \n ' self._direction = value
Set direction value. Notes: Direction
vspk/v4_0/nuexternalservice.py
direction
cldelcourt/vspk-python
0
python
@direction.setter def direction(self, value): ' Set direction value.\n\n Notes:\n Direction\n\n \n ' self._direction = value
@direction.setter def direction(self, value): ' Set direction value.\n\n Notes:\n Direction\n\n \n ' self._direction = value<|docstring|>Set direction value. Notes: Direction<|endoftext|>
086d663638fd09f513fc8a722129cf6ab6e132daed46e0aea683343015122cba
@property def entity_scope(self): ' Get entity_scope value.\n\n Notes:\n Specify if scope of entity is Data center or Enterprise level\n\n \n This attribute is named `entityScope` in VSD API.\n \n ' return self._entity_scope
Get entity_scope value. Notes: Specify if scope of entity is Data center or Enterprise level This attribute is named `entityScope` in VSD API.
vspk/v4_0/nuexternalservice.py
entity_scope
cldelcourt/vspk-python
0
python
@property def entity_scope(self): ' Get entity_scope value.\n\n Notes:\n Specify if scope of entity is Data center or Enterprise level\n\n \n This attribute is named `entityScope` in VSD API.\n \n ' return self._entity_scope
@property def entity_scope(self): ' Get entity_scope value.\n\n Notes:\n Specify if scope of entity is Data center or Enterprise level\n\n \n This attribute is named `entityScope` in VSD API.\n \n ' return self._entity_scope<|docstring|>Get entity_scope value. Notes: Specify if scope of entity is Data center or Enterprise level This attribute is named `entityScope` in VSD API.<|endoftext|>
1589f43a7c02e6c81157f9297d716e8f62ca20ff4f1c21245114f8ff0875dbbb
@entity_scope.setter def entity_scope(self, value): ' Set entity_scope value.\n\n Notes:\n Specify if scope of entity is Data center or Enterprise level\n\n \n This attribute is named `entityScope` in VSD API.\n \n ' self._entity_scope = value
Set entity_scope value. Notes: Specify if scope of entity is Data center or Enterprise level This attribute is named `entityScope` in VSD API.
vspk/v4_0/nuexternalservice.py
entity_scope
cldelcourt/vspk-python
0
python
@entity_scope.setter def entity_scope(self, value): ' Set entity_scope value.\n\n Notes:\n Specify if scope of entity is Data center or Enterprise level\n\n \n This attribute is named `entityScope` in VSD API.\n \n ' self._entity_scope = value
@entity_scope.setter def entity_scope(self, value): ' Set entity_scope value.\n\n Notes:\n Specify if scope of entity is Data center or Enterprise level\n\n \n This attribute is named `entityScope` in VSD API.\n \n ' self._entity_scope = value<|docstring|>Set entity_scope value. Notes: Specify if scope of entity is Data center or Enterprise level This attribute is named `entityScope` in VSD API.<|endoftext|>
81e1284abf6dbac3ca65e453824711d4cd8d94e4befe4635a8e5a9601cb7c453
@property def external_id(self): ' Get external_id value.\n\n Notes:\n External object ID. Used for integration with third party systems\n\n \n This attribute is named `externalID` in VSD API.\n \n ' return self._external_id
Get external_id value. Notes: External object ID. Used for integration with third party systems This attribute is named `externalID` in VSD API.
vspk/v4_0/nuexternalservice.py
external_id
cldelcourt/vspk-python
0
python
@property def external_id(self): ' Get external_id value.\n\n Notes:\n External object ID. Used for integration with third party systems\n\n \n This attribute is named `externalID` in VSD API.\n \n ' return self._external_id
@property def external_id(self): ' Get external_id value.\n\n Notes:\n External object ID. Used for integration with third party systems\n\n \n This attribute is named `externalID` in VSD API.\n \n ' return self._external_id<|docstring|>Get external_id value. Notes: External object ID. Used for integration with third party systems This attribute is named `externalID` in VSD API.<|endoftext|>
61e254ddf0aaecbedeb7e09b0dd5fa779d32936e7edb2e664aaaa566c1fe53c4
@external_id.setter def external_id(self, value): ' Set external_id value.\n\n Notes:\n External object ID. Used for integration with third party systems\n\n \n This attribute is named `externalID` in VSD API.\n \n ' self._external_id = value
Set external_id value. Notes: External object ID. Used for integration with third party systems This attribute is named `externalID` in VSD API.
vspk/v4_0/nuexternalservice.py
external_id
cldelcourt/vspk-python
0
python
@external_id.setter def external_id(self, value): ' Set external_id value.\n\n Notes:\n External object ID. Used for integration with third party systems\n\n \n This attribute is named `externalID` in VSD API.\n \n ' self._external_id = value
@external_id.setter def external_id(self, value): ' Set external_id value.\n\n Notes:\n External object ID. Used for integration with third party systems\n\n \n This attribute is named `externalID` in VSD API.\n \n ' self._external_id = value<|docstring|>Set external_id value. Notes: External object ID. Used for integration with third party systems This attribute is named `externalID` in VSD API.<|endoftext|>
493733db598520ef368fe6552be5863dbc3631fde735dc3c68076f99aa5ce8b2
@property def last_updated_by(self): ' Get last_updated_by value.\n\n Notes:\n ID of the user who last updated the object.\n\n \n This attribute is named `lastUpdatedBy` in VSD API.\n \n ' return self._last_updated_by
Get last_updated_by value. Notes: ID of the user who last updated the object. This attribute is named `lastUpdatedBy` in VSD API.
vspk/v4_0/nuexternalservice.py
last_updated_by
cldelcourt/vspk-python
0
python
@property def last_updated_by(self): ' Get last_updated_by value.\n\n Notes:\n ID of the user who last updated the object.\n\n \n This attribute is named `lastUpdatedBy` in VSD API.\n \n ' return self._last_updated_by
@property def last_updated_by(self): ' Get last_updated_by value.\n\n Notes:\n ID of the user who last updated the object.\n\n \n This attribute is named `lastUpdatedBy` in VSD API.\n \n ' return self._last_updated_by<|docstring|>Get last_updated_by value. Notes: ID of the user who last updated the object. This attribute is named `lastUpdatedBy` in VSD API.<|endoftext|>
4b7c8fd8d3238dbf1ceae62c487828a281407066cc499227b53e0ddf465e9d88
@last_updated_by.setter def last_updated_by(self, value): ' Set last_updated_by value.\n\n Notes:\n ID of the user who last updated the object.\n\n \n This attribute is named `lastUpdatedBy` in VSD API.\n \n ' self._last_updated_by = value
Set last_updated_by value. Notes: ID of the user who last updated the object. This attribute is named `lastUpdatedBy` in VSD API.
vspk/v4_0/nuexternalservice.py
last_updated_by
cldelcourt/vspk-python
0
python
@last_updated_by.setter def last_updated_by(self, value): ' Set last_updated_by value.\n\n Notes:\n ID of the user who last updated the object.\n\n \n This attribute is named `lastUpdatedBy` in VSD API.\n \n ' self._last_updated_by = value
@last_updated_by.setter def last_updated_by(self, value): ' Set last_updated_by value.\n\n Notes:\n ID of the user who last updated the object.\n\n \n This attribute is named `lastUpdatedBy` in VSD API.\n \n ' self._last_updated_by = value<|docstring|>Set last_updated_by value. Notes: ID of the user who last updated the object. This attribute is named `lastUpdatedBy` in VSD API.<|endoftext|>
11808b2c7b6006d5be294730b169c3002b45865902d72c7b2e4997579a9c2b25
@property def name(self): ' Get name value.\n\n Notes:\n unique name of the External Service. \n\n \n ' return self._name
Get name value. Notes: unique name of the External Service.
vspk/v4_0/nuexternalservice.py
name
cldelcourt/vspk-python
0
python
@property def name(self): ' Get name value.\n\n Notes:\n unique name of the External Service. \n\n \n ' return self._name
@property def name(self): ' Get name value.\n\n Notes:\n unique name of the External Service. \n\n \n ' return self._name<|docstring|>Get name value. Notes: unique name of the External Service.<|endoftext|>
c0d45abb6ba0257c44dfd9cebee0276ce2d21f453aabcd93bb9b00ce3c481229
@name.setter def name(self, value): ' Set name value.\n\n Notes:\n unique name of the External Service. \n\n \n ' self._name = value
Set name value. Notes: unique name of the External Service.
vspk/v4_0/nuexternalservice.py
name
cldelcourt/vspk-python
0
python
@name.setter def name(self, value): ' Set name value.\n\n Notes:\n unique name of the External Service. \n\n \n ' self._name = value
@name.setter def name(self, value): ' Set name value.\n\n Notes:\n unique name of the External Service. \n\n \n ' self._name = value<|docstring|>Set name value. Notes: unique name of the External Service.<|endoftext|>
b6a9da3e0c4ea5fc78ad975acff2b6b5e180119f76efacb7159af7b0076f48b2
@property def service_type(self): ' Get service_type value.\n\n Notes:\n Type of the service.\n\n \n This attribute is named `serviceType` in VSD API.\n \n ' return self._service_type
Get service_type value. Notes: Type of the service. This attribute is named `serviceType` in VSD API.
vspk/v4_0/nuexternalservice.py
service_type
cldelcourt/vspk-python
0
python
@property def service_type(self): ' Get service_type value.\n\n Notes:\n Type of the service.\n\n \n This attribute is named `serviceType` in VSD API.\n \n ' return self._service_type
@property def service_type(self): ' Get service_type value.\n\n Notes:\n Type of the service.\n\n \n This attribute is named `serviceType` in VSD API.\n \n ' return self._service_type<|docstring|>Get service_type value. Notes: Type of the service. This attribute is named `serviceType` in VSD API.<|endoftext|>
968e7978e57bab8b6d2615abf0bfcb4a1dee5eca0d1141382332712c767197b0
@service_type.setter def service_type(self, value): ' Set service_type value.\n\n Notes:\n Type of the service.\n\n \n This attribute is named `serviceType` in VSD API.\n \n ' self._service_type = value
Set service_type value. Notes: Type of the service. This attribute is named `serviceType` in VSD API.
vspk/v4_0/nuexternalservice.py
service_type
cldelcourt/vspk-python
0
python
@service_type.setter def service_type(self, value): ' Set service_type value.\n\n Notes:\n Type of the service.\n\n \n This attribute is named `serviceType` in VSD API.\n \n ' self._service_type = value
@service_type.setter def service_type(self, value): ' Set service_type value.\n\n Notes:\n Type of the service.\n\n \n This attribute is named `serviceType` in VSD API.\n \n ' self._service_type = value<|docstring|>Set service_type value. Notes: Type of the service. This attribute is named `serviceType` in VSD API.<|endoftext|>
e3f8b94bfe94e5943182b740d9107b6c5289b9a0709f6f95787274c4a6b95c5f
@property def stage(self): ' Get stage value.\n\n Notes:\n Stage - START,END Possible values are START, .\n\n \n ' return self._stage
Get stage value. Notes: Stage - START,END Possible values are START, .
vspk/v4_0/nuexternalservice.py
stage
cldelcourt/vspk-python
0
python
@property def stage(self): ' Get stage value.\n\n Notes:\n Stage - START,END Possible values are START, .\n\n \n ' return self._stage
@property def stage(self): ' Get stage value.\n\n Notes:\n Stage - START,END Possible values are START, .\n\n \n ' return self._stage<|docstring|>Get stage value. Notes: Stage - START,END Possible values are START, .<|endoftext|>
023604ed038a800302b046ac91a40aec4e05c5ffd5f55fc2af3d8606192ac932
@stage.setter def stage(self, value): ' Set stage value.\n\n Notes:\n Stage - START,END Possible values are START, .\n\n \n ' self._stage = value
Set stage value. Notes: Stage - START,END Possible values are START, .
vspk/v4_0/nuexternalservice.py
stage
cldelcourt/vspk-python
0
python
@stage.setter def stage(self, value): ' Set stage value.\n\n Notes:\n Stage - START,END Possible values are START, .\n\n \n ' self._stage = value
@stage.setter def stage(self, value): ' Set stage value.\n\n Notes:\n Stage - START,END Possible values are START, .\n\n \n ' self._stage = value<|docstring|>Set stage value. Notes: Stage - START,END Possible values are START, .<|endoftext|>
6013000e19fbffd19e810f9441111a48a04a0ede2d468f8a1c9abcdf888f50cc
def discovery_documents(filepath, preferred=False, skip=None): 'Returns a map of API IDs to Discovery document filenames.\n\n Args:\n filepath (str): the directory to work in. Discovery documents are\n downloaded to this directory.\n preferred (bool, optional): if true, only APIs marked as preferred are\n returned.\n skip (list, optional): a list of API IDs to skip.\n\n Returns:\n dict(string, string): a map of API IDs to Discovery document\n filenames.\n ' repo = _git.clone_from_github(_repo_path(), join(filepath, _repo_name())) filenames = glob.glob(join(repo.filepath, 'discoveries/*.json')) filenames = [x for x in filenames if (os.path.basename(x) != 'index.json')] ddocs = {} for filename in filenames: id_ = None with open(filename) as file_: id_ = json.load(file_)['id'] if (id_ in ddocs): continue ddocs[id_] = filename if skip: _ = [ddocs.pop(id_, None) for id_ in skip] if (not preferred): return ddocs index = {} with open(join(repo.filepath, 'discoveries/index.json')) as file_: index = json.load(file_) for api in index['items']: id_ = api['id'] if (id_ in _ACTUALLY_PREFERRED): continue if api['preferred']: continue ddocs.pop(id_, None) return ddocs
Returns a map of API IDs to Discovery document filenames. Args: filepath (str): the directory to work in. Discovery documents are downloaded to this directory. preferred (bool, optional): if true, only APIs marked as preferred are returned. skip (list, optional): a list of API IDs to skip. Returns: dict(string, string): a map of API IDs to Discovery document filenames.
server/tasks/discovery_artifact_manager.py
discovery_documents
codyoss/discovery-artifact-manager
38
python
def discovery_documents(filepath, preferred=False, skip=None): 'Returns a map of API IDs to Discovery document filenames.\n\n Args:\n filepath (str): the directory to work in. Discovery documents are\n downloaded to this directory.\n preferred (bool, optional): if true, only APIs marked as preferred are\n returned.\n skip (list, optional): a list of API IDs to skip.\n\n Returns:\n dict(string, string): a map of API IDs to Discovery document\n filenames.\n ' repo = _git.clone_from_github(_repo_path(), join(filepath, _repo_name())) filenames = glob.glob(join(repo.filepath, 'discoveries/*.json')) filenames = [x for x in filenames if (os.path.basename(x) != 'index.json')] ddocs = {} for filename in filenames: id_ = None with open(filename) as file_: id_ = json.load(file_)['id'] if (id_ in ddocs): continue ddocs[id_] = filename if skip: _ = [ddocs.pop(id_, None) for id_ in skip] if (not preferred): return ddocs index = {} with open(join(repo.filepath, 'discoveries/index.json')) as file_: index = json.load(file_) for api in index['items']: id_ = api['id'] if (id_ in _ACTUALLY_PREFERRED): continue if api['preferred']: continue ddocs.pop(id_, None) return ddocs
def discovery_documents(filepath, preferred=False, skip=None): 'Returns a map of API IDs to Discovery document filenames.\n\n Args:\n filepath (str): the directory to work in. Discovery documents are\n downloaded to this directory.\n preferred (bool, optional): if true, only APIs marked as preferred are\n returned.\n skip (list, optional): a list of API IDs to skip.\n\n Returns:\n dict(string, string): a map of API IDs to Discovery document\n filenames.\n ' repo = _git.clone_from_github(_repo_path(), join(filepath, _repo_name())) filenames = glob.glob(join(repo.filepath, 'discoveries/*.json')) filenames = [x for x in filenames if (os.path.basename(x) != 'index.json')] ddocs = {} for filename in filenames: id_ = None with open(filename) as file_: id_ = json.load(file_)['id'] if (id_ in ddocs): continue ddocs[id_] = filename if skip: _ = [ddocs.pop(id_, None) for id_ in skip] if (not preferred): return ddocs index = {} with open(join(repo.filepath, 'discoveries/index.json')) as file_: index = json.load(file_) for api in index['items']: id_ = api['id'] if (id_ in _ACTUALLY_PREFERRED): continue if api['preferred']: continue ddocs.pop(id_, None) return ddocs<|docstring|>Returns a map of API IDs to Discovery document filenames. Args: filepath (str): the directory to work in. Discovery documents are downloaded to this directory. preferred (bool, optional): if true, only APIs marked as preferred are returned. skip (list, optional): a list of API IDs to skip. Returns: dict(string, string): a map of API IDs to Discovery document filenames.<|endoftext|>
f2990483d9734ec2c46808d821962e6215651557b9e7b9bee56b71284c9c228a
def update(filepath, github_account): 'Updates the discovery-artifact-manager repository.\n\n Args:\n filepath (str): the directory to work in.\n github_account (GitHubAccount): the GitHub account to commit and push\n with.\n ' repo = _git.clone_from_github(_repo_path(), join(filepath, _repo_name()), github_account=github_account) if (_update_disco(repo, github_account) > 0): repo.push()
Updates the discovery-artifact-manager repository. Args: filepath (str): the directory to work in. github_account (GitHubAccount): the GitHub account to commit and push with.
server/tasks/discovery_artifact_manager.py
update
codyoss/discovery-artifact-manager
38
python
def update(filepath, github_account): 'Updates the discovery-artifact-manager repository.\n\n Args:\n filepath (str): the directory to work in.\n github_account (GitHubAccount): the GitHub account to commit and push\n with.\n ' repo = _git.clone_from_github(_repo_path(), join(filepath, _repo_name()), github_account=github_account) if (_update_disco(repo, github_account) > 0): repo.push()
def update(filepath, github_account): 'Updates the discovery-artifact-manager repository.\n\n Args:\n filepath (str): the directory to work in.\n github_account (GitHubAccount): the GitHub account to commit and push\n with.\n ' repo = _git.clone_from_github(_repo_path(), join(filepath, _repo_name()), github_account=github_account) if (_update_disco(repo, github_account) > 0): repo.push()<|docstring|>Updates the discovery-artifact-manager repository. Args: filepath (str): the directory to work in. github_account (GitHubAccount): the GitHub account to commit and push with.<|endoftext|>
1601e2ddc6ebb0b23257aa13ae1206aac2516de7b5f34379f46c9476e48f72b1
def _update_disco(repo: _git.Repository, github_account: accounts.GitHubAccount) -> int: 'Invokes updatedisco on the repo. Returns the number of commits.' with TemporaryDirectory() as gopath: os.makedirs(join(gopath, 'src')) check_output(['ln', '-s', join(repo.filepath, 'src'), join(gopath, 'src/discovery-artifact-manager')]) env = os.environ.copy() env['GOPATH'] = gopath check_output(['go', 'run', 'src/main/updatedisco/main.go'], cwd=repo.filepath, env=env) repo.add(['discoveries']) if (not repo.diff_name_status()): return 0 repo.commit('Autogenerated Discovery document update', github_account.name, github_account.email) return 1
Invokes updatedisco on the repo. Returns the number of commits.
server/tasks/discovery_artifact_manager.py
_update_disco
codyoss/discovery-artifact-manager
38
python
def _update_disco(repo: _git.Repository, github_account: accounts.GitHubAccount) -> int: with TemporaryDirectory() as gopath: os.makedirs(join(gopath, 'src')) check_output(['ln', '-s', join(repo.filepath, 'src'), join(gopath, 'src/discovery-artifact-manager')]) env = os.environ.copy() env['GOPATH'] = gopath check_output(['go', 'run', 'src/main/updatedisco/main.go'], cwd=repo.filepath, env=env) repo.add(['discoveries']) if (not repo.diff_name_status()): return 0 repo.commit('Autogenerated Discovery document update', github_account.name, github_account.email) return 1
def _update_disco(repo: _git.Repository, github_account: accounts.GitHubAccount) -> int: with TemporaryDirectory() as gopath: os.makedirs(join(gopath, 'src')) check_output(['ln', '-s', join(repo.filepath, 'src'), join(gopath, 'src/discovery-artifact-manager')]) env = os.environ.copy() env['GOPATH'] = gopath check_output(['go', 'run', 'src/main/updatedisco/main.go'], cwd=repo.filepath, env=env) repo.add(['discoveries']) if (not repo.diff_name_status()): return 0 repo.commit('Autogenerated Discovery document update', github_account.name, github_account.email) return 1<|docstring|>Invokes updatedisco on the repo. Returns the number of commits.<|endoftext|>
365a1355c2c3f5c4ff6da1cc6d9b69a094eb11b86308c062b9f628357dcb7f77
def create_pull_request(filepath, github_account): 'Creates a pull request on the discovery-artifact-manager repository.\n\n Args:\n filepath (str): the directory to work in.\n github_account (GitHubAccount): the GitHub account to commit with.\n ' repo = _git.clone_from_github(_repo_path(), join(filepath, _repo_name()), github_account=github_account) branch = ('update-discovery-artifacts-' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')) repo.checkout_new(branch) if (_update_disco(repo, github_account) > 0): repo.push(branch=branch) gh = Github(github_account.personal_access_token) gh_repo = gh.get_repo(_repo_path()) pr = gh_repo.create_pull(title='chore: autogenerated discovery document update', body='', base='master', head=branch)
Creates a pull request on the discovery-artifact-manager repository. Args: filepath (str): the directory to work in. github_account (GitHubAccount): the GitHub account to commit with.
server/tasks/discovery_artifact_manager.py
create_pull_request
codyoss/discovery-artifact-manager
38
python
def create_pull_request(filepath, github_account): 'Creates a pull request on the discovery-artifact-manager repository.\n\n Args:\n filepath (str): the directory to work in.\n github_account (GitHubAccount): the GitHub account to commit with.\n ' repo = _git.clone_from_github(_repo_path(), join(filepath, _repo_name()), github_account=github_account) branch = ('update-discovery-artifacts-' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')) repo.checkout_new(branch) if (_update_disco(repo, github_account) > 0): repo.push(branch=branch) gh = Github(github_account.personal_access_token) gh_repo = gh.get_repo(_repo_path()) pr = gh_repo.create_pull(title='chore: autogenerated discovery document update', body=, base='master', head=branch)
def create_pull_request(filepath, github_account): 'Creates a pull request on the discovery-artifact-manager repository.\n\n Args:\n filepath (str): the directory to work in.\n github_account (GitHubAccount): the GitHub account to commit with.\n ' repo = _git.clone_from_github(_repo_path(), join(filepath, _repo_name()), github_account=github_account) branch = ('update-discovery-artifacts-' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')) repo.checkout_new(branch) if (_update_disco(repo, github_account) > 0): repo.push(branch=branch) gh = Github(github_account.personal_access_token) gh_repo = gh.get_repo(_repo_path()) pr = gh_repo.create_pull(title='chore: autogenerated discovery document update', body=, base='master', head=branch)<|docstring|>Creates a pull request on the discovery-artifact-manager repository. Args: filepath (str): the directory to work in. github_account (GitHubAccount): the GitHub account to commit with.<|endoftext|>
034d28cb1760ef9317b7d3b33f1c05e9f897737d94f25993b9903944205117d2
def do_form_control_schemas(self): '\n function returns object type control schemas used in building form controls\n :return:\n ' copo_schemas = dict() for (k, v) in d_utils.object_type_control_map().items(): copo_schemas[k] = d_utils.get_copo_schema(v) self.context['copo_schemas'] = copo_schemas return self.context
function returns object type control schemas used in building form controls :return:
dal/broker_da.py
do_form_control_schemas
collaborative-open-plant-omics/COPO
16
python
def do_form_control_schemas(self): '\n function returns object type control schemas used in building form controls\n :return:\n ' copo_schemas = dict() for (k, v) in d_utils.object_type_control_map().items(): copo_schemas[k] = d_utils.get_copo_schema(v) self.context['copo_schemas'] = copo_schemas return self.context
def do_form_control_schemas(self): '\n function returns object type control schemas used in building form controls\n :return:\n ' copo_schemas = dict() for (k, v) in d_utils.object_type_control_map().items(): copo_schemas[k] = d_utils.get_copo_schema(v) self.context['copo_schemas'] = copo_schemas return self.context<|docstring|>function returns object type control schemas used in building form controls :return:<|endoftext|>
7edf5cff024283b66c78473ea1f4ff4ad6adff762d80df09de8b7c7e3c462ff5
def validate_and_delete(self): '\n function handles the delete of a record for those components\n that have provided a way of first validating (dependencies checks etc.) this action\n :return:\n ' validate_delete_method = getattr(self.da_object, 'validate_and_delete', None) if (validate_delete_method is None): return self.context if (not callable(validate_delete_method)): return self.context return self.da_object.validate_and_delete(target_id=self.param_dict.get('target_id', str()))
function handles the delete of a record for those components that have provided a way of first validating (dependencies checks etc.) this action :return:
dal/broker_da.py
validate_and_delete
collaborative-open-plant-omics/COPO
16
python
def validate_and_delete(self): '\n function handles the delete of a record for those components\n that have provided a way of first validating (dependencies checks etc.) this action\n :return:\n ' validate_delete_method = getattr(self.da_object, 'validate_and_delete', None) if (validate_delete_method is None): return self.context if (not callable(validate_delete_method)): return self.context return self.da_object.validate_and_delete(target_id=self.param_dict.get('target_id', str()))
def validate_and_delete(self): '\n function handles the delete of a record for those components\n that have provided a way of first validating (dependencies checks etc.) this action\n :return:\n ' validate_delete_method = getattr(self.da_object, 'validate_and_delete', None) if (validate_delete_method is None): return self.context if (not callable(validate_delete_method)): return self.context return self.da_object.validate_and_delete(target_id=self.param_dict.get('target_id', str()))<|docstring|>function handles the delete of a record for those components that have provided a way of first validating (dependencies checks etc.) this action :return:<|endoftext|>
adc26fb930f0334f342834bbf34ee00a23994b218f07fc29c0b92018e636e7df
def do_lift_submission_embargo(self): '\n function brokers the release of a submission\n :return:\n ' return Submission().lift_embargo(submission_id=self.param_dict.get('target_id', str()))
function brokers the release of a submission :return:
dal/broker_da.py
do_lift_submission_embargo
collaborative-open-plant-omics/COPO
16
python
def do_lift_submission_embargo(self): '\n function brokers the release of a submission\n :return:\n ' return Submission().lift_embargo(submission_id=self.param_dict.get('target_id', str()))
def do_lift_submission_embargo(self): '\n function brokers the release of a submission\n :return:\n ' return Submission().lift_embargo(submission_id=self.param_dict.get('target_id', str()))<|docstring|>function brokers the release of a submission :return:<|endoftext|>
dd909ab627300f444c1977d755160ea50ccec488f8a38c05bcaad50ff2389ea7
def do_clone_description_bundle(self): '\n function creates a new description by cloning an existing (specified) bundle\n :return:\n ' target_id = self.param_dict.get('target_id', str()) bundle_name = self.param_dict.get('bundle_name', str()) result = dict(status='success', message='') if (Description().get_description_handle().find({'name': {'$regex': (('^' + bundle_name) + '$'), '$options': 'i'}}).count() >= 1): result['status'] = 'error' result['message'] = 'Bundle name must be unique' self.context['result'] = result return self.context description = Description().GET(target_id) try: bundle = Description().create_description(profile_id=self.profile_id, component=self.component, name=bundle_name, stages=description.get('stages', list()), attributes=description.get('attributes', dict()), meta=description.get('meta', dict())) result['data'] = dict(id=str(bundle['_id']), name=bundle['name']) except Exception as e: message = ((("Couldn't create bundle: " + bundle_name) + ' ') + str(e)) result['status'] = 'error' result['message'] = message self.context['result'] = result return self.context
function creates a new description by cloning an existing (specified) bundle :return:
dal/broker_da.py
do_clone_description_bundle
collaborative-open-plant-omics/COPO
16
python
def do_clone_description_bundle(self): '\n function creates a new description by cloning an existing (specified) bundle\n :return:\n ' target_id = self.param_dict.get('target_id', str()) bundle_name = self.param_dict.get('bundle_name', str()) result = dict(status='success', message=) if (Description().get_description_handle().find({'name': {'$regex': (('^' + bundle_name) + '$'), '$options': 'i'}}).count() >= 1): result['status'] = 'error' result['message'] = 'Bundle name must be unique' self.context['result'] = result return self.context description = Description().GET(target_id) try: bundle = Description().create_description(profile_id=self.profile_id, component=self.component, name=bundle_name, stages=description.get('stages', list()), attributes=description.get('attributes', dict()), meta=description.get('meta', dict())) result['data'] = dict(id=str(bundle['_id']), name=bundle['name']) except Exception as e: message = ((("Couldn't create bundle: " + bundle_name) + ' ') + str(e)) result['status'] = 'error' result['message'] = message self.context['result'] = result return self.context
def do_clone_description_bundle(self): '\n function creates a new description by cloning an existing (specified) bundle\n :return:\n ' target_id = self.param_dict.get('target_id', str()) bundle_name = self.param_dict.get('bundle_name', str()) result = dict(status='success', message=) if (Description().get_description_handle().find({'name': {'$regex': (('^' + bundle_name) + '$'), '$options': 'i'}}).count() >= 1): result['status'] = 'error' result['message'] = 'Bundle name must be unique' self.context['result'] = result return self.context description = Description().GET(target_id) try: bundle = Description().create_description(profile_id=self.profile_id, component=self.component, name=bundle_name, stages=description.get('stages', list()), attributes=description.get('attributes', dict()), meta=description.get('meta', dict())) result['data'] = dict(id=str(bundle['_id']), name=bundle['name']) except Exception as e: message = ((("Couldn't create bundle: " + bundle_name) + ' ') + str(e)) result['status'] = 'error' result['message'] = message self.context['result'] = result return self.context<|docstring|>function creates a new description by cloning an existing (specified) bundle :return:<|endoftext|>
1d72bd42c9b7da966390aaa32df1feecaef27f7e250c503aad3b3d2f20273e47
def create_rename_description_bundle(self): '\n function creates a new description bundle or renames an existing one\n :return:\n ' target_id = self.param_dict.get('target_id', str()) bundle_name = self.param_dict.get('bundle_name', str()) result = dict(status='success', message='') if (Description().get_description_handle().find({'name': {'$regex': (('^' + bundle_name) + '$'), '$options': 'i'}}).count() >= 1): result['status'] = 'error' result['message'] = 'Bundle name must be unique' elif target_id: Description().edit_description(target_id, {'name': bundle_name}) try: Description().edit_description(target_id, {'name': bundle_name}) except Exception as e: message = ((("Couldn't update bundle: " + bundle_name) + ' ') + str(e)) result['status'] = 'error' result['message'] = message else: try: bundle = Description().create_description(profile_id=self.profile_id, component=self.component, name=bundle_name) result['data'] = dict(id=str(bundle['_id']), name=bundle['name']) except Exception as e: message = ((("Couldn't create bundle: " + bundle_name) + ' ') + str(e)) result['status'] = 'error' result['message'] = message self.context['result'] = result return self.context
function creates a new description bundle or renames an existing one :return:
dal/broker_da.py
create_rename_description_bundle
collaborative-open-plant-omics/COPO
16
python
def create_rename_description_bundle(self): '\n function creates a new description bundle or renames an existing one\n :return:\n ' target_id = self.param_dict.get('target_id', str()) bundle_name = self.param_dict.get('bundle_name', str()) result = dict(status='success', message=) if (Description().get_description_handle().find({'name': {'$regex': (('^' + bundle_name) + '$'), '$options': 'i'}}).count() >= 1): result['status'] = 'error' result['message'] = 'Bundle name must be unique' elif target_id: Description().edit_description(target_id, {'name': bundle_name}) try: Description().edit_description(target_id, {'name': bundle_name}) except Exception as e: message = ((("Couldn't update bundle: " + bundle_name) + ' ') + str(e)) result['status'] = 'error' result['message'] = message else: try: bundle = Description().create_description(profile_id=self.profile_id, component=self.component, name=bundle_name) result['data'] = dict(id=str(bundle['_id']), name=bundle['name']) except Exception as e: message = ((("Couldn't create bundle: " + bundle_name) + ' ') + str(e)) result['status'] = 'error' result['message'] = message self.context['result'] = result return self.context
def create_rename_description_bundle(self): '\n function creates a new description bundle or renames an existing one\n :return:\n ' target_id = self.param_dict.get('target_id', str()) bundle_name = self.param_dict.get('bundle_name', str()) result = dict(status='success', message=) if (Description().get_description_handle().find({'name': {'$regex': (('^' + bundle_name) + '$'), '$options': 'i'}}).count() >= 1): result['status'] = 'error' result['message'] = 'Bundle name must be unique' elif target_id: Description().edit_description(target_id, {'name': bundle_name}) try: Description().edit_description(target_id, {'name': bundle_name}) except Exception as e: message = ((("Couldn't update bundle: " + bundle_name) + ' ') + str(e)) result['status'] = 'error' result['message'] = message else: try: bundle = Description().create_description(profile_id=self.profile_id, component=self.component, name=bundle_name) result['data'] = dict(id=str(bundle['_id']), name=bundle['name']) except Exception as e: message = ((("Couldn't create bundle: " + bundle_name) + ' ') + str(e)) result['status'] = 'error' result['message'] = message self.context['result'] = result return self.context<|docstring|>function creates a new description bundle or renames an existing one :return:<|endoftext|>
c5de6bc02c87c495de6af9444d0470870bb8b7e4ef0b9ba6bdfef898098e97db
def do_managed_repositories(self): '\n function returns repositories for which the request user is a manager\n :return:\n ' self.context['table_data'] = htags.generate_managed_repositories(component=self.component, user_id=self.user_id) self.context['component'] = self.component return self.context
function returns repositories for which the request user is a manager :return:
dal/broker_da.py
do_managed_repositories
collaborative-open-plant-omics/COPO
16
python
def do_managed_repositories(self): '\n function returns repositories for which the request user is a manager\n :return:\n ' self.context['table_data'] = htags.generate_managed_repositories(component=self.component, user_id=self.user_id) self.context['component'] = self.component return self.context
def do_managed_repositories(self): '\n function returns repositories for which the request user is a manager\n :return:\n ' self.context['table_data'] = htags.generate_managed_repositories(component=self.component, user_id=self.user_id) self.context['component'] = self.component return self.context<|docstring|>function returns repositories for which the request user is a manager :return:<|endoftext|>
6489443f021d89689d19ce09a3d5348142a8ac2ae1bc1510f0abb5760496ec18
def do_get_submission_meta_repo(self): '\n function brokers metadata and repository details for a submission\n :return:\n ' target_id = self.param_dict.get('target_id', str()) self.context['result'] = htags.get_submission_meta_repo(submission_id=target_id, user_id=self.user_id) return self.context
function brokers metadata and repository details for a submission :return:
dal/broker_da.py
do_get_submission_meta_repo
collaborative-open-plant-omics/COPO
16
python
def do_get_submission_meta_repo(self): '\n function brokers metadata and repository details for a submission\n :return:\n ' target_id = self.param_dict.get('target_id', str()) self.context['result'] = htags.get_submission_meta_repo(submission_id=target_id, user_id=self.user_id) return self.context
def do_get_submission_meta_repo(self): '\n function brokers metadata and repository details for a submission\n :return:\n ' target_id = self.param_dict.get('target_id', str()) self.context['result'] = htags.get_submission_meta_repo(submission_id=target_id, user_id=self.user_id) return self.context<|docstring|>function brokers metadata and repository details for a submission :return:<|endoftext|>
89212fe861423d91291094d752692a40f081fa21159ce082554b33a730de4ce7
def do_view_submission_remote(self): '\n function brokers the generation of resource url/identifier to a submission in its remote location\n :return:\n ' self.context = htags.get_submission_remote_url(submission_id=self.param_dict.get('target_id', str())) return self.context
function brokers the generation of resource url/identifier to a submission in its remote location :return:
dal/broker_da.py
do_view_submission_remote
collaborative-open-plant-omics/COPO
16
python
def do_view_submission_remote(self): '\n function brokers the generation of resource url/identifier to a submission in its remote location\n :return:\n ' self.context = htags.get_submission_remote_url(submission_id=self.param_dict.get('target_id', str())) return self.context
def do_view_submission_remote(self): '\n function brokers the generation of resource url/identifier to a submission in its remote location\n :return:\n ' self.context = htags.get_submission_remote_url(submission_id=self.param_dict.get('target_id', str())) return self.context<|docstring|>function brokers the generation of resource url/identifier to a submission in its remote location :return:<|endoftext|>
57b41a1280a1468d87d3d79157b8955fda8fef56eaa99278a734e0f8289eb48f
def do_get_destination_repo(self): '\n function brokers submission destination repository details\n :return:\n ' target_id = self.param_dict.get('target_id', str()) self.context['result'] = htags.get_destination_repo(submission_id=target_id) return self.context
function brokers submission destination repository details :return:
dal/broker_da.py
do_get_destination_repo
collaborative-open-plant-omics/COPO
16
python
def do_get_destination_repo(self): '\n function brokers submission destination repository details\n :return:\n ' target_id = self.param_dict.get('target_id', str()) self.context['result'] = htags.get_destination_repo(submission_id=target_id) return self.context
def do_get_destination_repo(self): '\n function brokers submission destination repository details\n :return:\n ' target_id = self.param_dict.get('target_id', str()) self.context['result'] = htags.get_destination_repo(submission_id=target_id) return self.context<|docstring|>function brokers submission destination repository details :return:<|endoftext|>
392b9092e42c57b9c8896fa79c01a2ad323433012e439c1e5064616031e83359
def do_get_repo_stats(self): '\n function brokers statistics for the target repository\n :return:\n ' self.context['result'] = htags.get_repo_stats(repository_id=self.param_dict.get('target_id', str())) return self.context
function brokers statistics for the target repository :return:
dal/broker_da.py
do_get_repo_stats
collaborative-open-plant-omics/COPO
16
python
def do_get_repo_stats(self): '\n function brokers statistics for the target repository\n :return:\n ' self.context['result'] = htags.get_repo_stats(repository_id=self.param_dict.get('target_id', str())) return self.context
def do_get_repo_stats(self): '\n function brokers statistics for the target repository\n :return:\n ' self.context['result'] = htags.get_repo_stats(repository_id=self.param_dict.get('target_id', str())) return self.context<|docstring|>function brokers statistics for the target repository :return:<|endoftext|>
4d0b7ee0e0d27c9f3f528cf81104fb026a3f654b20fdd9dd2c873731d61e868a
def main(): 'Build the snippets from their source repos' parser = argparse.ArgumentParser(description=main.__doc__) parser.add_argument('-u', '--update', help="repo to update, or 'all' to update all source repos") parser.add_argument('-p', type=int, help='Number of threads (default %(default)s)', default=5) args = parser.parse_args() os.makedirs(build_dir, exist_ok=True) shutil.rmtree(snippet_dir) os.makedirs(snippet_dir) with open(join(here, 'sources.json'), 'r') as ifile: sources = json.load(ifile) lockfile_path = join(here, 'sources.lock') if os.path.isfile(lockfile_path): with open(lockfile_path, 'r') as ifile: lock_data = json.load(ifile) else: lock_data = {} def execute(config): should_update = ((args.update is not None) and ((args.update == 'all') or config['url'].endswith(args.update))) rev = (None if should_update else lock_data.get(config['url'])) return build_source(config, rev) with concurrent.futures.ThreadPoolExecutor(max_workers=args.p) as executor: results = executor.map(execute, sources) all_snippets = [] packages_by_language = defaultdict(set) lock_data = {} for (config, (git_rev, snippets)) in zip(sources, results): config = cast(Config, config) all_snippets.extend(snippets) lock_data[config['url']] = git_rev for snip in snippets: packages_by_language[snip['language']].add(config['url']) package_data = {'contributes': {'snippets': all_snippets}} with open(join(here, 'package.json'), 'w') as package_file: json.dump(package_data, package_file, indent=2) with open(lockfile_path, 'w') as ofile: json.dump(lock_data, ofile, indent=2) update_readme(packages_by_language)
Build the snippets from their source repos
build.py
main
stevearc/vim-vsnip-snippets
5
python
def main(): parser = argparse.ArgumentParser(description=main.__doc__) parser.add_argument('-u', '--update', help="repo to update, or 'all' to update all source repos") parser.add_argument('-p', type=int, help='Number of threads (default %(default)s)', default=5) args = parser.parse_args() os.makedirs(build_dir, exist_ok=True) shutil.rmtree(snippet_dir) os.makedirs(snippet_dir) with open(join(here, 'sources.json'), 'r') as ifile: sources = json.load(ifile) lockfile_path = join(here, 'sources.lock') if os.path.isfile(lockfile_path): with open(lockfile_path, 'r') as ifile: lock_data = json.load(ifile) else: lock_data = {} def execute(config): should_update = ((args.update is not None) and ((args.update == 'all') or config['url'].endswith(args.update))) rev = (None if should_update else lock_data.get(config['url'])) return build_source(config, rev) with concurrent.futures.ThreadPoolExecutor(max_workers=args.p) as executor: results = executor.map(execute, sources) all_snippets = [] packages_by_language = defaultdict(set) lock_data = {} for (config, (git_rev, snippets)) in zip(sources, results): config = cast(Config, config) all_snippets.extend(snippets) lock_data[config['url']] = git_rev for snip in snippets: packages_by_language[snip['language']].add(config['url']) package_data = {'contributes': {'snippets': all_snippets}} with open(join(here, 'package.json'), 'w') as package_file: json.dump(package_data, package_file, indent=2) with open(lockfile_path, 'w') as ofile: json.dump(lock_data, ofile, indent=2) update_readme(packages_by_language)
def main(): parser = argparse.ArgumentParser(description=main.__doc__) parser.add_argument('-u', '--update', help="repo to update, or 'all' to update all source repos") parser.add_argument('-p', type=int, help='Number of threads (default %(default)s)', default=5) args = parser.parse_args() os.makedirs(build_dir, exist_ok=True) shutil.rmtree(snippet_dir) os.makedirs(snippet_dir) with open(join(here, 'sources.json'), 'r') as ifile: sources = json.load(ifile) lockfile_path = join(here, 'sources.lock') if os.path.isfile(lockfile_path): with open(lockfile_path, 'r') as ifile: lock_data = json.load(ifile) else: lock_data = {} def execute(config): should_update = ((args.update is not None) and ((args.update == 'all') or config['url'].endswith(args.update))) rev = (None if should_update else lock_data.get(config['url'])) return build_source(config, rev) with concurrent.futures.ThreadPoolExecutor(max_workers=args.p) as executor: results = executor.map(execute, sources) all_snippets = [] packages_by_language = defaultdict(set) lock_data = {} for (config, (git_rev, snippets)) in zip(sources, results): config = cast(Config, config) all_snippets.extend(snippets) lock_data[config['url']] = git_rev for snip in snippets: packages_by_language[snip['language']].add(config['url']) package_data = {'contributes': {'snippets': all_snippets}} with open(join(here, 'package.json'), 'w') as package_file: json.dump(package_data, package_file, indent=2) with open(lockfile_path, 'w') as ofile: json.dump(lock_data, ofile, indent=2) update_readme(packages_by_language)<|docstring|>Build the snippets from their source repos<|endoftext|>
e709ea42ada7547c6cc2279f37c63b2dccab53dcdc8f6391e49c7145ca4f49e1
def __init__(self, factory): 'Constructor.\n\n Args:\n factory (factory.FactoryCreate)\n ' self.create = factory.create_create() self.time = factory.create_time_helper() self.servo = factory.create_servo() self.sonar = factory.create_sonar() self.virtual_create = factory.create_virtual_create() self.map = lab10_map.Map('lab10.map') self.pf = particle_filter.ParticleFilter() self.odometry = odometry.Odometry()
Constructor. Args: factory (factory.FactoryCreate)
particle_sampler/lab10.py
__init__
yesitsreallyme/Robotics
0
python
def __init__(self, factory): 'Constructor.\n\n Args:\n factory (factory.FactoryCreate)\n ' self.create = factory.create_create() self.time = factory.create_time_helper() self.servo = factory.create_servo() self.sonar = factory.create_sonar() self.virtual_create = factory.create_virtual_create() self.map = lab10_map.Map('lab10.map') self.pf = particle_filter.ParticleFilter() self.odometry = odometry.Odometry()
def __init__(self, factory): 'Constructor.\n\n Args:\n factory (factory.FactoryCreate)\n ' self.create = factory.create_create() self.time = factory.create_time_helper() self.servo = factory.create_servo() self.sonar = factory.create_sonar() self.virtual_create = factory.create_virtual_create() self.map = lab10_map.Map('lab10.map') self.pf = particle_filter.ParticleFilter() self.odometry = odometry.Odometry()<|docstring|>Constructor. Args: factory (factory.FactoryCreate)<|endoftext|>
6bf5bde467e18c7d3a33a43eac0cac3bb1954f3e62f43a92de52fa2c2d06474c
def rolling_mean(arr, window_size=7, axis=0, weights=None, mean_type='arithmetic'): 'Calculate a rolling mean over a numpy/cupy ndarray.' reimport_numerical_libs('util.rolling_mean.rolling_mean') if (mean_type == 'arithmetic'): return _rolling_arithmetic_mean(arr, window_size, axis, weights) elif (mean_type == 'geometric'): return _rolling_geometric_mean(arr, window_size, axis, weights) elif (mean_type == 'harmonic'): return _rolling_harmonic_mean(arr, window_size, axis, weights) else: raise RuntimeError
Calculate a rolling mean over a numpy/cupy ndarray.
bucky/util/rolling_mean.py
rolling_mean
ragram88/bucky
1
python
def rolling_mean(arr, window_size=7, axis=0, weights=None, mean_type='arithmetic'): reimport_numerical_libs('util.rolling_mean.rolling_mean') if (mean_type == 'arithmetic'): return _rolling_arithmetic_mean(arr, window_size, axis, weights) elif (mean_type == 'geometric'): return _rolling_geometric_mean(arr, window_size, axis, weights) elif (mean_type == 'harmonic'): return _rolling_harmonic_mean(arr, window_size, axis, weights) else: raise RuntimeError
def rolling_mean(arr, window_size=7, axis=0, weights=None, mean_type='arithmetic'): reimport_numerical_libs('util.rolling_mean.rolling_mean') if (mean_type == 'arithmetic'): return _rolling_arithmetic_mean(arr, window_size, axis, weights) elif (mean_type == 'geometric'): return _rolling_geometric_mean(arr, window_size, axis, weights) elif (mean_type == 'harmonic'): return _rolling_harmonic_mean(arr, window_size, axis, weights) else: raise RuntimeError<|docstring|>Calculate a rolling mean over a numpy/cupy ndarray.<|endoftext|>
9a70b347560214a26ed7087e80a50665aa76c2a74ec45f2c4fc49b75c60e15a1
def rolling_window(a, window_size): 'Use stride_tricks to add an extra dim on the end of an ndarray for each elements window' reimport_numerical_libs('util.rolling_mean.rolling_window') pad = xp.zeros(len(a.shape), dtype=xp.int32) pad[(- 1)] = (window_size - 1) pad = list(zip(list(xp.to_cpu(pad)), list(xp.to_cpu(xp.zeros(len(a.shape), dtype=xp.int32))))) a = xp.pad(a, pad, mode='reflect') shape = (a.shape[:(- 1)] + (((a.shape[(- 1)] - window_size) + 1), window_size)) strides = (a.strides + (a.strides[(- 1)],)) return xp.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
Use stride_tricks to add an extra dim on the end of an ndarray for each elements window
bucky/util/rolling_mean.py
rolling_window
ragram88/bucky
1
python
def rolling_window(a, window_size): reimport_numerical_libs('util.rolling_mean.rolling_window') pad = xp.zeros(len(a.shape), dtype=xp.int32) pad[(- 1)] = (window_size - 1) pad = list(zip(list(xp.to_cpu(pad)), list(xp.to_cpu(xp.zeros(len(a.shape), dtype=xp.int32))))) a = xp.pad(a, pad, mode='reflect') shape = (a.shape[:(- 1)] + (((a.shape[(- 1)] - window_size) + 1), window_size)) strides = (a.strides + (a.strides[(- 1)],)) return xp.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def rolling_window(a, window_size): reimport_numerical_libs('util.rolling_mean.rolling_window') pad = xp.zeros(len(a.shape), dtype=xp.int32) pad[(- 1)] = (window_size - 1) pad = list(zip(list(xp.to_cpu(pad)), list(xp.to_cpu(xp.zeros(len(a.shape), dtype=xp.int32))))) a = xp.pad(a, pad, mode='reflect') shape = (a.shape[:(- 1)] + (((a.shape[(- 1)] - window_size) + 1), window_size)) strides = (a.strides + (a.strides[(- 1)],)) return xp.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)<|docstring|>Use stride_tricks to add an extra dim on the end of an ndarray for each elements window<|endoftext|>
d35c78de351def423d4841922e8fcf8d347c7966f7da2a080b406fe5d7442925
def _rolling_arithmetic_mean(arr, window_size=7, axis=0, weights=None): 'Compute a rolling arithmetic mean' arr = xp.swapaxes(arr, axis, (- 1)) if (weights is None): rolling_arr = xp.mean(rolling_window(arr, window_size), axis=(- 1)) else: window = ((weights / xp.sum(weights)) * window_size) window = xp.broadcast_to(window, (arr.shape + (window_size,))) rolling_arr = xp.mean((window * rolling_window(arr, window_size)), axis=(- 1)) rolling_arr = xp.swapaxes(rolling_arr, axis, (- 1)) return rolling_arr
Compute a rolling arithmetic mean
bucky/util/rolling_mean.py
_rolling_arithmetic_mean
ragram88/bucky
1
python
def _rolling_arithmetic_mean(arr, window_size=7, axis=0, weights=None): arr = xp.swapaxes(arr, axis, (- 1)) if (weights is None): rolling_arr = xp.mean(rolling_window(arr, window_size), axis=(- 1)) else: window = ((weights / xp.sum(weights)) * window_size) window = xp.broadcast_to(window, (arr.shape + (window_size,))) rolling_arr = xp.mean((window * rolling_window(arr, window_size)), axis=(- 1)) rolling_arr = xp.swapaxes(rolling_arr, axis, (- 1)) return rolling_arr
def _rolling_arithmetic_mean(arr, window_size=7, axis=0, weights=None): arr = xp.swapaxes(arr, axis, (- 1)) if (weights is None): rolling_arr = xp.mean(rolling_window(arr, window_size), axis=(- 1)) else: window = ((weights / xp.sum(weights)) * window_size) window = xp.broadcast_to(window, (arr.shape + (window_size,))) rolling_arr = xp.mean((window * rolling_window(arr, window_size)), axis=(- 1)) rolling_arr = xp.swapaxes(rolling_arr, axis, (- 1)) return rolling_arr<|docstring|>Compute a rolling arithmetic mean<|endoftext|>
9f8413fd8f32dfd1d45bb2c556948374c53f19a70433e3c1e902904792fa4907
def _rolling_geometric_mean(arr, window_size, axis=0, weights=None): 'Compute a rolling geometric mean' arr = xp.swapaxes(arr, axis, (- 1)) if (weights is None): rolling_arr = xp.exp(xp.nanmean(rolling_window(xp.log(arr), window_size), axis=(- 1))) else: window = ((weights / xp.sum(weights)) * window_size) window = xp.broadcast_to(window, (arr.shape + (window_size,))) rolling_arr = xp.exp(xp.nanmean((window * rolling_window(xp.log(arr), window_size)), axis=(- 1))) rolling_arr = xp.swapaxes(rolling_arr, axis, (- 1)) return rolling_arr
Compute a rolling geometric mean
bucky/util/rolling_mean.py
_rolling_geometric_mean
ragram88/bucky
1
python
def _rolling_geometric_mean(arr, window_size, axis=0, weights=None): arr = xp.swapaxes(arr, axis, (- 1)) if (weights is None): rolling_arr = xp.exp(xp.nanmean(rolling_window(xp.log(arr), window_size), axis=(- 1))) else: window = ((weights / xp.sum(weights)) * window_size) window = xp.broadcast_to(window, (arr.shape + (window_size,))) rolling_arr = xp.exp(xp.nanmean((window * rolling_window(xp.log(arr), window_size)), axis=(- 1))) rolling_arr = xp.swapaxes(rolling_arr, axis, (- 1)) return rolling_arr
def _rolling_geometric_mean(arr, window_size, axis=0, weights=None): arr = xp.swapaxes(arr, axis, (- 1)) if (weights is None): rolling_arr = xp.exp(xp.nanmean(rolling_window(xp.log(arr), window_size), axis=(- 1))) else: window = ((weights / xp.sum(weights)) * window_size) window = xp.broadcast_to(window, (arr.shape + (window_size,))) rolling_arr = xp.exp(xp.nanmean((window * rolling_window(xp.log(arr), window_size)), axis=(- 1))) rolling_arr = xp.swapaxes(rolling_arr, axis, (- 1)) return rolling_arr<|docstring|>Compute a rolling geometric mean<|endoftext|>
e8bebe7b31372ab84e1abf5f1becf683ea2614d007eebc1761ede5013ee98c1d
def _rolling_harmonic_mean(arr, window_size, axis=0, weights=None): 'Compute a rolling harmonic mean' arr = xp.swapaxes(arr, axis, (- 1)).astype(float) if (weights is None): rolling_arr = xp.reciprocal(xp.nanmean(rolling_window(xp.reciprocal(arr), window_size), axis=(- 1))) else: window = ((weights / xp.sum(weights)) * window_size) window = xp.broadcast_to(window, (arr.shape + (window_size,))) rolling_arr = xp.reciprocal(xp.nanmean((window * rolling_window(xp.reciprocal(arr), window_size)), axis=(- 1))) rolling_arr = xp.swapaxes(rolling_arr, axis, (- 1)) return rolling_arr
Compute a rolling harmonic mean
bucky/util/rolling_mean.py
_rolling_harmonic_mean
ragram88/bucky
1
python
def _rolling_harmonic_mean(arr, window_size, axis=0, weights=None): arr = xp.swapaxes(arr, axis, (- 1)).astype(float) if (weights is None): rolling_arr = xp.reciprocal(xp.nanmean(rolling_window(xp.reciprocal(arr), window_size), axis=(- 1))) else: window = ((weights / xp.sum(weights)) * window_size) window = xp.broadcast_to(window, (arr.shape + (window_size,))) rolling_arr = xp.reciprocal(xp.nanmean((window * rolling_window(xp.reciprocal(arr), window_size)), axis=(- 1))) rolling_arr = xp.swapaxes(rolling_arr, axis, (- 1)) return rolling_arr
def _rolling_harmonic_mean(arr, window_size, axis=0, weights=None): arr = xp.swapaxes(arr, axis, (- 1)).astype(float) if (weights is None): rolling_arr = xp.reciprocal(xp.nanmean(rolling_window(xp.reciprocal(arr), window_size), axis=(- 1))) else: window = ((weights / xp.sum(weights)) * window_size) window = xp.broadcast_to(window, (arr.shape + (window_size,))) rolling_arr = xp.reciprocal(xp.nanmean((window * rolling_window(xp.reciprocal(arr), window_size)), axis=(- 1))) rolling_arr = xp.swapaxes(rolling_arr, axis, (- 1)) return rolling_arr<|docstring|>Compute a rolling harmonic mean<|endoftext|>
f57b84f22abd493002601ac2f3e639eb06e9b40d05ac5773883fb5548adcfd93
@project_to_simple @not_compatible_for('arrow_property', 'dynamic_property') def bfs(graph, src=0): 'Breadth first search from the src on projected simple graph.\n\n Args:\n graph (:class:`Graph`): A simple graph.\n src (optional): Source vertex of breadth first search. The type should be consistent\n with the id type of the `graph`, that is, it\'s `int` or `str` depending\n on the `oid_type` is `int64_t` or `string` of the `graph`. Defaults to 0.\n\n Returns:\n :class:`graphscope.framework.context.VertexDataContextDAGNode`:\n A context with each vertex with a distance from the source, will be evaluated in eager mode.\n\n Examples:\n\n .. code:: python\n\n import graphscope as gs\n g = gs.g()\n # Load some data, then project to a simple graph (if needed).\n pg = g.project(vertices={"vlabel": []}, edges={"elabel": []})\n r = gs.bfs(pg, 6) # use 6 as source vertex\n s.close()\n\n ' return AppAssets(algo='bfs', context='vertex_data')(graph, src)
Breadth first search from the src on projected simple graph. Args: graph (:class:`Graph`): A simple graph. src (optional): Source vertex of breadth first search. The type should be consistent with the id type of the `graph`, that is, it's `int` or `str` depending on the `oid_type` is `int64_t` or `string` of the `graph`. Defaults to 0. Returns: :class:`graphscope.framework.context.VertexDataContextDAGNode`: A context with each vertex with a distance from the source, will be evaluated in eager mode. Examples: .. code:: python import graphscope as gs g = gs.g() # Load some data, then project to a simple graph (if needed). pg = g.project(vertices={"vlabel": []}, edges={"elabel": []}) r = gs.bfs(pg, 6) # use 6 as source vertex s.close()
python/graphscope/analytical/app/bfs.py
bfs
haoxins/GraphScope
2
python
@project_to_simple @not_compatible_for('arrow_property', 'dynamic_property') def bfs(graph, src=0): 'Breadth first search from the src on projected simple graph.\n\n Args:\n graph (:class:`Graph`): A simple graph.\n src (optional): Source vertex of breadth first search. The type should be consistent\n with the id type of the `graph`, that is, it\'s `int` or `str` depending\n on the `oid_type` is `int64_t` or `string` of the `graph`. Defaults to 0.\n\n Returns:\n :class:`graphscope.framework.context.VertexDataContextDAGNode`:\n A context with each vertex with a distance from the source, will be evaluated in eager mode.\n\n Examples:\n\n .. code:: python\n\n import graphscope as gs\n g = gs.g()\n # Load some data, then project to a simple graph (if needed).\n pg = g.project(vertices={"vlabel": []}, edges={"elabel": []})\n r = gs.bfs(pg, 6) # use 6 as source vertex\n s.close()\n\n ' return AppAssets(algo='bfs', context='vertex_data')(graph, src)
@project_to_simple @not_compatible_for('arrow_property', 'dynamic_property') def bfs(graph, src=0): 'Breadth first search from the src on projected simple graph.\n\n Args:\n graph (:class:`Graph`): A simple graph.\n src (optional): Source vertex of breadth first search. The type should be consistent\n with the id type of the `graph`, that is, it\'s `int` or `str` depending\n on the `oid_type` is `int64_t` or `string` of the `graph`. Defaults to 0.\n\n Returns:\n :class:`graphscope.framework.context.VertexDataContextDAGNode`:\n A context with each vertex with a distance from the source, will be evaluated in eager mode.\n\n Examples:\n\n .. code:: python\n\n import graphscope as gs\n g = gs.g()\n # Load some data, then project to a simple graph (if needed).\n pg = g.project(vertices={"vlabel": []}, edges={"elabel": []})\n r = gs.bfs(pg, 6) # use 6 as source vertex\n s.close()\n\n ' return AppAssets(algo='bfs', context='vertex_data')(graph, src)<|docstring|>Breadth first search from the src on projected simple graph. Args: graph (:class:`Graph`): A simple graph. src (optional): Source vertex of breadth first search. The type should be consistent with the id type of the `graph`, that is, it's `int` or `str` depending on the `oid_type` is `int64_t` or `string` of the `graph`. Defaults to 0. Returns: :class:`graphscope.framework.context.VertexDataContextDAGNode`: A context with each vertex with a distance from the source, will be evaluated in eager mode. Examples: .. code:: python import graphscope as gs g = gs.g() # Load some data, then project to a simple graph (if needed). pg = g.project(vertices={"vlabel": []}, edges={"elabel": []}) r = gs.bfs(pg, 6) # use 6 as source vertex s.close()<|endoftext|>
059d016d8385aeff73902fe5542942a7570a5905a7e3963e09f0738f62bdd8d0
def run(self): 'Run the Force module\n ' if (self.parent is None): raise InputError('ERROR: The Force object must be in a Simulation object to run') if (self.parent.parent is None): raise InputError('ERROR: The Force object must be in an Output object to run') output = self.parent.parent self.comp_time_angle(output) self.comp_force(output)
Run the Force module
pyleecan/Methods/Simulation/Force/run.py
run
helene-t/pyleecan
2
python
def run(self): '\n ' if (self.parent is None): raise InputError('ERROR: The Force object must be in a Simulation object to run') if (self.parent.parent is None): raise InputError('ERROR: The Force object must be in an Output object to run') output = self.parent.parent self.comp_time_angle(output) self.comp_force(output)
def run(self): '\n ' if (self.parent is None): raise InputError('ERROR: The Force object must be in a Simulation object to run') if (self.parent.parent is None): raise InputError('ERROR: The Force object must be in an Output object to run') output = self.parent.parent self.comp_time_angle(output) self.comp_force(output)<|docstring|>Run the Force module<|endoftext|>
42125eb29bb58265e2a8076d416979fd6b8dfe4127cc15dac550d6b1f8721fda
def __init__(self, ipaddr, sshport=22, name=''): '\n is host running the hypervisor\n ' NodeBase.__init__(self, ipaddr=ipaddr, sshport=sshport, role='host', name=name) self.startMonitor()
is host running the hypervisor
lib/JumpScale/tools/perftesttools/NodeHost.py
__init__
Jumpscale/jumpscale_core8
8
python
def __init__(self, ipaddr, sshport=22, name=): '\n \n ' NodeBase.__init__(self, ipaddr=ipaddr, sshport=sshport, role='host', name=name) self.startMonitor()
def __init__(self, ipaddr, sshport=22, name=): '\n \n ' NodeBase.__init__(self, ipaddr=ipaddr, sshport=sshport, role='host', name=name) self.startMonitor()<|docstring|>is host running the hypervisor<|endoftext|>
a5935fccb4045b1d610f3590121895bc4de05974481272cfc5d0102efa9bdbfc
def __init__(self, new_password=None, is_check_password=None): 'ResetServerPasswordOption - a model defined in huaweicloud sdk' self._new_password = None self._is_check_password = None self.discriminator = None self.new_password = new_password if (is_check_password is not None): self.is_check_password = is_check_password
ResetServerPasswordOption - a model defined in huaweicloud sdk
huaweicloud-sdk-ecs/huaweicloudsdkecs/v2/model/reset_server_password_option.py
__init__
huaweicloud/huaweicloud-sdk-python-v3
64
python
def __init__(self, new_password=None, is_check_password=None): self._new_password = None self._is_check_password = None self.discriminator = None self.new_password = new_password if (is_check_password is not None): self.is_check_password = is_check_password
def __init__(self, new_password=None, is_check_password=None): self._new_password = None self._is_check_password = None self.discriminator = None self.new_password = new_password if (is_check_password is not None): self.is_check_password = is_check_password<|docstring|>ResetServerPasswordOption - a model defined in huaweicloud sdk<|endoftext|>
3d48ab7de8f5784ab5a33e3c8a32038ba7c70af447e0a69c2fc3c09e11d6e089
@property def new_password(self): 'Gets the new_password of this ResetServerPasswordOption.\n\n 弹性云服务器新密码。 该接口默认不做密码安全性校验;如需校验,请指定字段“is_check_password”为true。 新密码的校验规则: - 密码长度范围为8到26位。 - 允许输入的字符包括:!@%-_=+[]:./? - 禁止输入的字符包括:汉字及【】:;“”‘’、,。《》?¥…()—— ·!~`#&^,{}*();"\'<>|\\ $ - 复杂度上必须包含大写字母(A-Z)、小写字母(a-z)、数字(0-9)、以及允许的特殊字符中的3种以上搭配 - 不能包含用户名 "Administrator" 和“root”及逆序字符 - 不能包含用户名 "Administrator" 中连续3个字符\n\n :return: The new_password of this ResetServerPasswordOption.\n :rtype: str\n ' return self._new_password
Gets the new_password of this ResetServerPasswordOption. 弹性云服务器新密码。 该接口默认不做密码安全性校验;如需校验,请指定字段“is_check_password”为true。 新密码的校验规则: - 密码长度范围为8到26位。 - 允许输入的字符包括:!@%-_=+[]:./? - 禁止输入的字符包括:汉字及【】:;“”‘’、,。《》?¥…()—— ·!~`#&^,{}*();"'<>|\ $ - 复杂度上必须包含大写字母(A-Z)、小写字母(a-z)、数字(0-9)、以及允许的特殊字符中的3种以上搭配 - 不能包含用户名 "Administrator" 和“root”及逆序字符 - 不能包含用户名 "Administrator" 中连续3个字符 :return: The new_password of this ResetServerPasswordOption. :rtype: str
huaweicloud-sdk-ecs/huaweicloudsdkecs/v2/model/reset_server_password_option.py
new_password
huaweicloud/huaweicloud-sdk-python-v3
64
python
@property def new_password(self): 'Gets the new_password of this ResetServerPasswordOption.\n\n 弹性云服务器新密码。 该接口默认不做密码安全性校验;如需校验,请指定字段“is_check_password”为true。 新密码的校验规则: - 密码长度范围为8到26位。 - 允许输入的字符包括:!@%-_=+[]:./? - 禁止输入的字符包括:汉字及【】:;“”‘’、,。《》?¥…()—— ·!~`#&^,{}*();"\'<>|\\ $ - 复杂度上必须包含大写字母(A-Z)、小写字母(a-z)、数字(0-9)、以及允许的特殊字符中的3种以上搭配 - 不能包含用户名 "Administrator" 和“root”及逆序字符 - 不能包含用户名 "Administrator" 中连续3个字符\n\n :return: The new_password of this ResetServerPasswordOption.\n :rtype: str\n ' return self._new_password
@property def new_password(self): 'Gets the new_password of this ResetServerPasswordOption.\n\n 弹性云服务器新密码。 该接口默认不做密码安全性校验;如需校验,请指定字段“is_check_password”为true。 新密码的校验规则: - 密码长度范围为8到26位。 - 允许输入的字符包括:!@%-_=+[]:./? - 禁止输入的字符包括:汉字及【】:;“”‘’、,。《》?¥…()—— ·!~`#&^,{}*();"\'<>|\\ $ - 复杂度上必须包含大写字母(A-Z)、小写字母(a-z)、数字(0-9)、以及允许的特殊字符中的3种以上搭配 - 不能包含用户名 "Administrator" 和“root”及逆序字符 - 不能包含用户名 "Administrator" 中连续3个字符\n\n :return: The new_password of this ResetServerPasswordOption.\n :rtype: str\n ' return self._new_password<|docstring|>Gets the new_password of this ResetServerPasswordOption. 弹性云服务器新密码。 该接口默认不做密码安全性校验;如需校验,请指定字段“is_check_password”为true。 新密码的校验规则: - 密码长度范围为8到26位。 - 允许输入的字符包括:!@%-_=+[]:./? - 禁止输入的字符包括:汉字及【】:;“”‘’、,。《》?¥…()—— ·!~`#&^,{}*();"'<>|\ $ - 复杂度上必须包含大写字母(A-Z)、小写字母(a-z)、数字(0-9)、以及允许的特殊字符中的3种以上搭配 - 不能包含用户名 "Administrator" 和“root”及逆序字符 - 不能包含用户名 "Administrator" 中连续3个字符 :return: The new_password of this ResetServerPasswordOption. :rtype: str<|endoftext|>
59dc2a4761268897e3528fa23f07e84122f0f5528537e56afb9045e0870cd90a
@new_password.setter def new_password(self, new_password): 'Sets the new_password of this ResetServerPasswordOption.\n\n 弹性云服务器新密码。 该接口默认不做密码安全性校验;如需校验,请指定字段“is_check_password”为true。 新密码的校验规则: - 密码长度范围为8到26位。 - 允许输入的字符包括:!@%-_=+[]:./? - 禁止输入的字符包括:汉字及【】:;“”‘’、,。《》?¥…()—— ·!~`#&^,{}*();"\'<>|\\ $ - 复杂度上必须包含大写字母(A-Z)、小写字母(a-z)、数字(0-9)、以及允许的特殊字符中的3种以上搭配 - 不能包含用户名 "Administrator" 和“root”及逆序字符 - 不能包含用户名 "Administrator" 中连续3个字符\n\n :param new_password: The new_password of this ResetServerPasswordOption.\n :type: str\n ' self._new_password = new_password
Sets the new_password of this ResetServerPasswordOption. 弹性云服务器新密码。 该接口默认不做密码安全性校验;如需校验,请指定字段“is_check_password”为true。 新密码的校验规则: - 密码长度范围为8到26位。 - 允许输入的字符包括:!@%-_=+[]:./? - 禁止输入的字符包括:汉字及【】:;“”‘’、,。《》?¥…()—— ·!~`#&^,{}*();"'<>|\ $ - 复杂度上必须包含大写字母(A-Z)、小写字母(a-z)、数字(0-9)、以及允许的特殊字符中的3种以上搭配 - 不能包含用户名 "Administrator" 和“root”及逆序字符 - 不能包含用户名 "Administrator" 中连续3个字符 :param new_password: The new_password of this ResetServerPasswordOption. :type: str
huaweicloud-sdk-ecs/huaweicloudsdkecs/v2/model/reset_server_password_option.py
new_password
huaweicloud/huaweicloud-sdk-python-v3
64
python
@new_password.setter def new_password(self, new_password): 'Sets the new_password of this ResetServerPasswordOption.\n\n 弹性云服务器新密码。 该接口默认不做密码安全性校验;如需校验,请指定字段“is_check_password”为true。 新密码的校验规则: - 密码长度范围为8到26位。 - 允许输入的字符包括:!@%-_=+[]:./? - 禁止输入的字符包括:汉字及【】:;“”‘’、,。《》?¥…()—— ·!~`#&^,{}*();"\'<>|\\ $ - 复杂度上必须包含大写字母(A-Z)、小写字母(a-z)、数字(0-9)、以及允许的特殊字符中的3种以上搭配 - 不能包含用户名 "Administrator" 和“root”及逆序字符 - 不能包含用户名 "Administrator" 中连续3个字符\n\n :param new_password: The new_password of this ResetServerPasswordOption.\n :type: str\n ' self._new_password = new_password
@new_password.setter def new_password(self, new_password): 'Sets the new_password of this ResetServerPasswordOption.\n\n 弹性云服务器新密码。 该接口默认不做密码安全性校验;如需校验,请指定字段“is_check_password”为true。 新密码的校验规则: - 密码长度范围为8到26位。 - 允许输入的字符包括:!@%-_=+[]:./? - 禁止输入的字符包括:汉字及【】:;“”‘’、,。《》?¥…()—— ·!~`#&^,{}*();"\'<>|\\ $ - 复杂度上必须包含大写字母(A-Z)、小写字母(a-z)、数字(0-9)、以及允许的特殊字符中的3种以上搭配 - 不能包含用户名 "Administrator" 和“root”及逆序字符 - 不能包含用户名 "Administrator" 中连续3个字符\n\n :param new_password: The new_password of this ResetServerPasswordOption.\n :type: str\n ' self._new_password = new_password<|docstring|>Sets the new_password of this ResetServerPasswordOption. 弹性云服务器新密码。 该接口默认不做密码安全性校验;如需校验,请指定字段“is_check_password”为true。 新密码的校验规则: - 密码长度范围为8到26位。 - 允许输入的字符包括:!@%-_=+[]:./? - 禁止输入的字符包括:汉字及【】:;“”‘’、,。《》?¥…()—— ·!~`#&^,{}*();"'<>|\ $ - 复杂度上必须包含大写字母(A-Z)、小写字母(a-z)、数字(0-9)、以及允许的特殊字符中的3种以上搭配 - 不能包含用户名 "Administrator" 和“root”及逆序字符 - 不能包含用户名 "Administrator" 中连续3个字符 :param new_password: The new_password of this ResetServerPasswordOption. :type: str<|endoftext|>
be9eec3f23db8c5d3a8298312b920bebc0c86f5b7166b150b352c00e6d94dfac
@property def is_check_password(self): 'Gets the is_check_password of this ResetServerPasswordOption.\n\n 是否检查密码的复杂度。\n\n :return: The is_check_password of this ResetServerPasswordOption.\n :rtype: bool\n ' return self._is_check_password
Gets the is_check_password of this ResetServerPasswordOption. 是否检查密码的复杂度。 :return: The is_check_password of this ResetServerPasswordOption. :rtype: bool
huaweicloud-sdk-ecs/huaweicloudsdkecs/v2/model/reset_server_password_option.py
is_check_password
huaweicloud/huaweicloud-sdk-python-v3
64
python
@property def is_check_password(self): 'Gets the is_check_password of this ResetServerPasswordOption.\n\n 是否检查密码的复杂度。\n\n :return: The is_check_password of this ResetServerPasswordOption.\n :rtype: bool\n ' return self._is_check_password
@property def is_check_password(self): 'Gets the is_check_password of this ResetServerPasswordOption.\n\n 是否检查密码的复杂度。\n\n :return: The is_check_password of this ResetServerPasswordOption.\n :rtype: bool\n ' return self._is_check_password<|docstring|>Gets the is_check_password of this ResetServerPasswordOption. 是否检查密码的复杂度。 :return: The is_check_password of this ResetServerPasswordOption. :rtype: bool<|endoftext|>