code
stringlengths
17
6.64M
def run_task(task):
    """Run one task as a shell command on this worker's GPU; return its exit status as a string."""
    log.info(f'Task name: {task.name}')
    task_args = task.args if ('args' in task) else ''
    # Keep '$\' sequences from being mangled by the shell.
    task_args = task_args.replace('$\\', '\\$')
    command = (
        f'CUDA_VISIBLE_DEVICES={utils.WORKER_CUDA_DEVICE} '
        f'HYDRA_CONFIG_PATH={task.config_path} '
        f'{task.environ} python {task.command} repeat={task.repeat} {task_args}'
    )
    log.info(f'Command: {command}')
    exit_status = str(os.system(command))
    log.info(f'Task "{task.name}" finished with return code: {exit_status}.')
    return exit_status
@hydra.main(config_path=os.environ['HYDRA_CONFIG_PATH'])
def main(configs):
    """Hydra entry point: run every configured task from the original working directory."""
    hydra_run_dir = os.getcwd()
    # Hydra chdirs into its auto-generated run dir; go back to the project root.
    os.chdir(hydra.utils.get_original_cwd())
    utils.run_tasks(configs, run_task)
def run_tasks(config_path, cuda_devices):
    """Launch run_tasks_on_multiple_gpus.py for the given config/GPUs; raise on non-zero exit."""
    command = f'HYDRA_CONFIG_PATH={config_path} python run_tasks_on_multiple_gpus.py cuda_devices={cuda_devices}'
    log.info(f'Command: {command}')
    exit_status = os.system(command)
    if exit_status != 0:
        raise RuntimeError(exit_status)
    return exit_status
def train_models(config, work_dir):
    """Build one training task per (task config, seed) pair, dump them to YAML, and run them."""
    tasks = []
    for task_cfg in config.task_configs.split(','):
        task_config_path = Path(config.config_dir) / task_cfg
        task_cfg_name = os.path.splitext(task_cfg)[0]
        for seed in config.seeds.split(','):
            output_dir = str(Path(work_dir) / 'models' / task_cfg_name / seed)
            # Same argument string as before: train-only run with a fixed seed
            # and hydra output redirected into the per-seed model dir.
            args_str = ' '.join([
                config.args + ' do_train=True do_eval=False',
                f'seed={seed}',
                f'output_dir={output_dir}',
                f'hydra.run.dir={output_dir}',
            ])
            tasks.append({
                'config_path': str(task_config_path),
                'environ': '',
                'command': 'run_glue.py',
                'name': f'model_{task_cfg_name}_{seed}',
                'args': args_str,
            })
    config_path = Path(work_dir) / 'config.yaml'
    config_structure = {
        'cuda_devices': '',
        'tasks': tasks,
        'hydra': {'run': {'dir': work_dir}},
    }
    with open(config_path, 'w') as f:
        yaml.dump(config_structure, f)
    run_tasks(config_path, config.cuda_devices)
@hydra.main(config_path=os.environ['HYDRA_CONFIG_PATH'])
def main(config):
    """Hydra entry point: train all models, writing outputs under the auto-generated dir."""
    auto_generated_dir = os.getcwd()
    log.info(f'Work dir: {auto_generated_dir}')
    # Return to the project root; Hydra has chdir'd into its run dir.
    os.chdir(hydra.utils.get_original_cwd())
    train_models(config, auto_generated_dir)
class DropoutDPP(DropoutMC):
    """MC dropout whose inference-time masks come from a DPP-style mask sampler."""

    # Class-level counter handing each instance a unique layer id.
    dropout_id = (- 1)

    def __init__(self, p: float, activate=False, mask_name='dpp', max_n=100, max_frac=0.4, coef=1.0):
        super().__init__(p=p, activate=activate)
        self.mask = build_mask(mask_name)
        self.reset_mask = False
        self.max_n = max_n        # hard cap on the number of masks accumulated per forward
        self.max_frac = max_frac  # stop once this fraction of neurons has been covered
        self.coef = coef
        self.curr_dropout_id = DropoutDPP.update()
        log.debug(f'Dropout id: {self.curr_dropout_id}')

    @classmethod
    def update(cls):
        # Advance the shared counter and return a fresh id for this layer.
        cls.dropout_id += 1
        return cls.dropout_id

    def calc_mask(self, x: torch.Tensor):
        # NOTE(review): identical to get_mask — presumably kept for API compatibility.
        return self.mask(x, dropout_rate=self.p, layer_num=self.curr_dropout_id).float()

    def get_mask(self, x: torch.Tensor):
        """Sample one dropout mask for x from the DPP mask builder."""
        return self.mask(x, dropout_rate=self.p, layer_num=self.curr_dropout_id).float()

    def calc_non_zero_neurons(self, sum_mask):
        # Fraction of positions along the last axis touched by at least one mask so far.
        frac_nonzero = ((sum_mask != 0).sum(axis=(- 1)).item() / sum_mask.shape[(- 1)])
        return frac_nonzero

    def forward(self, x: torch.Tensor):
        if self.training:
            # Plain dropout while training; DPP masks are used only at inference.
            return torch.nn.functional.dropout(x, self.p, training=True)
        else:
            if (not self.activate):
                return x
            # Accumulate masks until enough neurons are covered or max_n is hit.
            sum_mask = self.get_mask(x)
            norm = 1.0
            i = 1
            frac_nonzero = self.calc_non_zero_neurons(sum_mask)
            while ((i < self.max_n) and (frac_nonzero < self.max_frac)):
                mask = self.get_mask(x)
                sum_mask += mask
                i += 1
                frac_nonzero = self.calc_non_zero_neurons(sum_mask)
            print('Number of masks:', i)
            # Average the accumulated masks and apply them to the input.
            res = ((x * sum_mask) / i)
            return res
class DropoutMC(torch.nn.Module):
    """Dropout that can stay active at eval time, enabling MC-dropout sampling."""

    def __init__(self, p: float, activate=False):
        super().__init__()
        self.activate = activate
        self.p = p
        self.p_init = p  # remember the initial rate so it can be restored later

    def forward(self, x: torch.Tensor):
        # Drop whenever the module is in train mode OR MC sampling is switched on.
        apply_dropout = self.training or self.activate
        return torch.nn.functional.dropout(x, self.p, training=apply_dropout)
class LockedDropoutMC(DropoutMC):
    """
    Implementation of locked (or variational) dropout. Randomly drops out entire parameters in embedding space.
    """

    def __init__(self, p: float, activate: bool=False, batch_first: bool=True):
        super().__init__(p, activate)
        self.batch_first = batch_first

    def forward(self, x):
        if self.training:
            self.activate = True
        if not self.activate or not self.p:
            return x
        # One Bernoulli draw per (batch element, feature), shared across the
        # sequence dimension — the "locked" part of locked dropout.
        if self.batch_first:
            m = x.data.new(x.size(0), 1, x.size(2)).bernoulli_(1 - self.p)
        else:
            m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - self.p)
        # Inverted-dropout scaling keeps expectations unchanged.
        mask = torch.autograd.Variable(m, requires_grad=False) / (1 - self.p)
        return mask.expand_as(x) * x
class WordDropoutMC(DropoutMC):
    """
    Implementation of word dropout. Randomly drops out entire words (or characters) in embedding space.
    """

    def forward(self, x):
        if self.training:
            self.activate = True
        if not self.activate or not self.p:
            return x
        # One Bernoulli draw per (batch, position), shared across embedding dims,
        # so whole words are zeroed at once.
        m = x.data.new(x.size(0), x.size(1), 1).bernoulli_(1 - self.p)
        mask = torch.autograd.Variable(m, requires_grad=False)
        return mask * x
def convert_to_mc_dropout(model: torch.nn.Module, substitution_dict: Dict[str, torch.nn.Module] = None):
    """Recursively replace dropout layers named in substitution_dict with MC counterparts."""
    for idx, child in enumerate(list(model.children())):
        # flair layers store the dropout rate under 'dropout_rate' instead of 'p'.
        proba_field_name = 'dropout_rate' if 'flair' in str(type(child)) else 'p'
        module_name = list(model._modules.items())[idx][0]
        layer_name = child._get_name()
        if layer_name in substitution_dict.keys():
            replacement = substitution_dict[layer_name](
                p=getattr(child, proba_field_name), activate=False
            )
            model._modules[module_name] = replacement
        else:
            convert_to_mc_dropout(model=child, substitution_dict=substitution_dict)
def activate_mc_dropout(model: torch.nn.Module, activate: bool, random: float = 0.0, verbose: bool = False):
    """Recursively toggle MC-dropout layers on/off, optionally overriding their rate."""
    for child in model.children():
        if not isinstance(child, DropoutMC):
            activate_mc_dropout(model=child, activate=activate, random=random, verbose=verbose)
            continue
        if verbose:
            print(child)
            print(f'Current DO state: {child.activate}')
            print(f'Switching state to: {activate}')
        child.activate = activate
        if activate and random:
            child.p = random
        if not activate:
            # Deactivation restores the layer's original dropout rate.
            child.p = child.p_init
class TextClassifier():
    """Thin wrapper around a HF model + trainer exposing a simple predict() API."""

    def __init__(self, auto_model, bpe_tokenizer, max_len=192, pred_loader_args=None,
                 pred_batch_size=100, training_args=None, trainer=None):
        super().__init__()
        # Bug fix: the default used to be a shared mutable dict ({'num_workers': 1}),
        # which every instance aliased; build a fresh one per instance instead.
        if pred_loader_args is None:
            pred_loader_args = {'num_workers': 1}
        self._auto_model = auto_model
        self._bpe_tokenizer = bpe_tokenizer
        self._pred_loader_args = pred_loader_args
        self._pred_batch_size = pred_batch_size
        self._training_args = training_args
        self._trainer = trainer
        self._named_parameters = auto_model.named_parameters

    def predict(self, eval_dataset, evaluate=False, metrics=None):
        """Run the trainer on eval_dataset; return (predicted labels, class probabilities)."""
        if metrics is None:
            metrics = []
        self._auto_model.eval()
        (logits, _, metrics) = self._trainer.predict(eval_dataset)
        probs = F.softmax(torch.tensor(logits), dim=1).numpy()
        preds = np.argmax(probs, axis=1)
        print(metrics)
        return (preds, probs)
def entropy(x):
    """Shannon entropy (nats) along the last axis; probabilities clipped to avoid log(0)."""
    clipped = np.clip(x, 1e-08, 1)
    return np.sum(-x * np.log(clipped), axis=-1)
def mean_entropy(sampled_probabilities):
    """Entropy of the mean predictive distribution over MC samples (axis 1)."""
    mean_probs = np.mean(sampled_probabilities, axis=1)
    return entropy(mean_probs)
def bald(sampled_probabilities):
    """BALD score: predictive entropy minus the expected per-sample entropy."""
    predictive = entropy(np.mean(sampled_probabilities, axis=1))
    expected = np.mean(entropy(sampled_probabilities), axis=1)
    return predictive - expected
def var_ratio(sampled_probabilities):
    """Variation ratio: 1 - frequency of the modal predicted class across MC samples."""
    top_classes = np.argmax(sampled_probabilities, axis=-1)
    modal_counts = np.array([np.max(np.bincount(point)) for point in top_classes])
    return 1.0 - modal_counts / sampled_probabilities.shape[1]
def sampled_max_prob(sampled_probabilities):
    """1 minus the max of the MC-averaged class probabilities (higher = more uncertain)."""
    mean_probabilities = np.mean(sampled_probabilities, axis=1)
    return 1 - np.max(mean_probabilities, axis=-1)
def probability_variance(sampled_probabilities, mean_probabilities=None):
    """Mean variance of class probabilities across MC samples, summed over classes."""
    if mean_probabilities is None:
        mean_probabilities = np.mean(sampled_probabilities, axis=1)
    mean_probabilities = np.expand_dims(mean_probabilities, axis=1)
    squared_dev = (sampled_probabilities - mean_probabilities) ** 2
    return squared_dev.mean(1).sum(-1)
def find_most_common(row: Iterable[str], mode: str):
    """Given an iterable of words, return either the most common element or its count.

    Args:
        row: iterable of hashable items (typically strings).
        mode: 'elem' for the modal element, 'count' for its frequency.

    Raises:
        ValueError: if mode is neither 'elem' nor 'count'.
    """
    # Fix: the original annotated mode as Union[('elem', 'count')] — string
    # literals are not types — and silently returned None for unknown modes.
    element, count = Counter(row).most_common(1)[0]
    if mode == 'elem':
        return element
    if mode == 'count':
        return count
    raise ValueError(f"mode must be 'elem' or 'count', got {mode!r}")
def ue_variation_ratio(answers):
    """Variation-ratio uncertainty for textual answers (one list per MC sample)."""
    stacked = np.stack([np.array(a, dtype=object) for a in answers], -1)
    n_samples = stacked.shape[1]
    # One row per data point; its modal-answer count drives the score.
    modal_counts = [find_most_common(row, 'count') for row in stacked]
    return 1.0 - np.array(modal_counts) / n_samples
def get_last_dropout(model):
    """Return the dropout module sitting immediately before the classification head."""
    if not isinstance(model, ElectraForSequenceClassification):
        return model.dropout
    head = model.classifier
    if isinstance(head, ElectraClassificationHeadCustom):
        return head.dropout2
    return head.dropout
def set_last_dropout(model, dropout):
    """Replace the dropout module sitting immediately before the classification head.

    Bug fix: for a plain Electra classifier head, the original evaluated
    ``model.classifier.dropout`` without assigning to it, so the supplied
    dropout was silently discarded.
    """
    if isinstance(model, ElectraForSequenceClassification):
        if isinstance(model.classifier, ElectraClassificationHeadCustom):
            model.classifier.dropout2 = dropout
        else:
            model.classifier.dropout = dropout
    else:
        model.dropout = dropout
def initialize_worker():
    """Pool initializer: claim a dedicated CUDA device from the shared queue."""
    global CUDA_DEVICES, WORKER_CUDA_DEVICE
    WORKER_CUDA_DEVICE = CUDA_DEVICES.get()
    log.info(f'Worker cuda device: {WORKER_CUDA_DEVICE}')
def repeat_tasks(tasks):
    """Expand each task into n_repeats independent copies suffixed with '_rep{i}'."""
    expanded = []
    for task in tasks:
        n_repeats = task.n_repeats if ('n_repeats' in task and task.n_repeats) else 1
        log.info(f'N repeats: {n_repeats}')
        for rep in range(n_repeats):
            clone = copy.deepcopy(task)
            clone.name = f'{clone.name}_rep{rep}'
            clone.repeat = f'rep{rep}'
            expanded.append(clone)
    return expanded
def run_tasks(config, f_task):
    """Fan config.tasks out over the configured CUDA devices with a process pool."""
    # NOTE(review): CUDA_QUEUE is declared global but never referenced below;
    # the queue actually used is the module-level CUDA_DEVICES — confirm intent.
    global CUDA_QUEUE
    # A scalar cuda_devices means a single device; otherwise a 'a,b,c' string.
    if (not isinstance(config.cuda_devices, Iterable)):
        cuda_devices = [config.cuda_devices]
    else:
        cuda_devices = config.cuda_devices.split(',')
    log.info(f'Cuda devices: {cuda_devices}')
    for cuda_device in cuda_devices:
        CUDA_DEVICES.put(cuda_device)
    log.info('All tasks: {}'.format(str([t.name for t in config.tasks])))
    if (('task_names' in config) and config.task_names):
        # Optional filter: entries of the form 'name' or 'name@N' (N overrides n_repeats).
        task_names = config.task_names.split(',')
        task_index = {t.name: t for t in config.tasks}
        tasks = []
        for task_name in task_names:
            task_name = task_name.split('@')
            if (task_name[0] not in task_index):
                raise ValueError(f'Task name: {task_name[0]} is not in config file.')
            task = task_index[task_name[0]]
            if (len(task_name) == 2):
                task.n_repeats = int(task_name[1])
            tasks.append(task)
    else:
        tasks = config.tasks
    log.info('Running tasks: {}'.format(str([t.name for t in tasks])))
    tasks = repeat_tasks(tasks)
    # One worker per device; each worker claims its device in initialize_worker().
    pool = mp.Pool(len(cuda_devices), initializer=initialize_worker)
    try:
        pool.map(f_task, tasks)
        pool.close()
        pool.join()
    except KeyboardInterrupt:
        pool.terminate()
        pool.join()
def init_random_seed_for_repeat(task_config):
    """Derive and install a per-repeat random seed (time-based unless fixed_seed is set)."""
    import time
    from .utils_exps import initialize_seeds
    log.info(f'Repeat: {task_config.repeat} ====================================')
    if ('fixed_seed' in task_config) and task_config.fixed_seed:
        base = task_config.random_seed + 1
    else:
        base = time.time_ns() // 100000
    # task_config.repeat looks like 'rep3'; strip the prefix for the index.
    rep_index = int(task_config.repeat[3:]) + 1
    seed = (base * rep_index) % 1000000000
    log.info(f'Random seed: {seed}')
    initialize_seeds(seed)
    task_config.random_seed = seed
class CachedInferenceMixin():
    """Mixin that caches transformer-body outputs keyed by the input_ids tensor."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.use_cache = False
        self.cache_size = None  # None = unbounded cache
        self.cache = dict()

    def empty_cache(self):
        self.cache.clear()

    def enable_cache(self):
        self.use_cache = True

    def disable_cache(self):
        self.use_cache = False
        self.empty_cache()

    def set_cache_size(self, size: Optional[int] = 25):
        self.cache_size = size

    @staticmethod
    def create_cache_key(tensor: torch.Tensor) -> int:
        """Deterministic cache key for a tensor of token ids.

        Bug fix: the previous frozenset-based key ignored element order,
        multiplicity and shape, so different inputs sharing a value set
        collided and returned another example's cached hidden states.
        """
        values = tensor.detach().cpu().numpy().ravel().tolist()
        return hash((tuple(tensor.shape), tuple(values)))

    def inference_body(self, body, input_ids, attention_mask, token_type_ids, position_ids,
                       head_mask, inputs_embeds, output_attentions, output_hidden_states, return_dict):
        """Run *body* (the transformer) unless a cached result exists for input_ids."""
        cache_key = self.create_cache_key(input_ids)
        if (not self.use_cache) or (cache_key not in self.cache):
            hidden_states = body(input_ids, attention_mask, token_type_ids, position_ids,
                                 head_mask, inputs_embeds, output_attentions,
                                 output_hidden_states, return_dict)
            if self.use_cache and ((self.cache_size is None) or (len(self.cache) < self.cache_size)):
                # Store on CPU so the cache does not pin GPU memory.
                self.cache[cache_key] = tuple(o.detach().cpu() for o in hidden_states)
        else:
            # Cached tensors live on CPU; move them back to the GPU for the caller.
            hidden_states = tuple(o.cuda() for o in self.cache[cache_key])
        return hidden_states
class ElectraForSequenceClassificationCached(CachedInferenceMixin, ElectraForSequenceClassification):
    """Electra sequence classifier whose transformer-body outputs can be cached."""

    def __init__(self, config):
        super().__init__(config)

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        """Standard HF forward, routing the electra body through inference_body() for caching."""
        return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
        # NOTE(review): on a cache hit inference_body returns a plain tuple, so the
        # attribute access in the return_dict branch below may need care — confirm.
        discriminator_hidden_states = self.inference_body(self.electra, input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = discriminator_hidden_states[0]
        logits = self.classifier(sequence_output)
        loss = None
        if (labels is not None):
            # num_labels == 1 means a regression head (MSE); otherwise cross-entropy.
            if (self.num_labels == 1):
                loss_fct = MSELoss()
                loss = loss_fct(logits.view((- 1)), labels.view((- 1)))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view((- 1), self.num_labels), labels.view((- 1)))
        if (not return_dict):
            output = ((logits,) + discriminator_hidden_states[1:])
            return (((loss,) + output) if (loss is not None) else output)
        return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions)
class BertForSequenceClassificationCached(CachedInferenceMixin, BertForSequenceClassification):
    """BERT sequence classifier whose transformer-body outputs can be cached."""

    def __init__(self, config):
        super().__init__(config)

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        """Standard HF forward, routing the bert body through inference_body() for caching."""
        return_dict = (return_dict if (return_dict is not None) else self.config.use_return_dict)
        outputs = self.inference_body(self.bert, input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        # outputs[1] is BERT's pooled [CLS] representation.
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        loss = None
        if (labels is not None):
            # num_labels == 1 means a regression head (MSE); otherwise cross-entropy.
            if (self.num_labels == 1):
                loss_fct = MSELoss()
                loss = loss_fct(logits.view((- 1)), labels.view((- 1)))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view((- 1), self.num_labels), labels.view((- 1)))
        if (not return_dict):
            output = ((logits,) + outputs[2:])
            return (((loss,) + output) if (loss is not None) else output)
        return SequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
def _wandb_log(_dict):
    """Send metrics to the active wandb run, or fall back to the local logger."""
    if wandb.run is None:
        log.info(repr(_dict))
    else:
        wandb.log(_dict)
def init_wandb(directory, config):
    """Initialise a wandb run whose group/tags are parsed from the run directory path."""
    if ('NO_WANDB' in os.environ) and (os.environ['NO_WANDB'] == 'true'):
        log.info('== Working without wandb')
        return None
    # Expected layout: .../task/model_name/strat_name/date/run_name
    parts = directory.split('/')
    run_name = parts[-1]
    date = parts[-2]
    strat_name = parts[-3]
    model_name = parts[-4]
    task = parts[-5]
    group_name = f'{task}|{model_name}|{strat_name}|{date}'
    run_name = f'{run_name}'
    return wandb.init(group=group_name, name=run_name, config=config,
                      job_type='train', force=True,
                      tags=[strat_name, model_name, task])
def fn(batch):
    """Collate ASR samples: MFCC features + tokenized transcripts, padded to batch max."""
    (X, _, Y, _, _, _) = list(zip(*batch))
    feats = [mfcc(x[0]).T for x in X]
    targets = [torch.tensor(tokenizer.encode(y.lower())) for y in Y]
    x_len = torch.tensor([f.shape[0] for f in feats])
    y_len = torch.tensor([len(t) for t in targets])
    return {
        # (batch, features, time) after the transpose.
        'x': nn.utils.rnn.pad_sequence(feats, batch_first=True).transpose(1, 2),
        'x_len': x_len,
        'y': nn.utils.rnn.pad_sequence(targets, batch_first=True),
        'y_len': y_len,
    }
def wer(s1, s2):
    """
    Computes the Word Error Rate, defined as the edit distance between the
    two provided sentences after tokenizing to words.
    Arguments:
        s1 (string): space-separated sentence
        s2 (string): space-separated sentence
    """
    vocab = set(s1.split() + s2.split())
    word2char = dict(zip(vocab, range(len(vocab))))
    # Map every word to a unique single character so the Levenshtein
    # distance operates at word granularity.
    encoded1 = ''.join(chr(word2char[w]) for w in s1.split())
    encoded2 = ''.join(chr(word2char[w]) for w in s2.split())
    return Lev.distance(encoded1, encoded2)
def cer(s1, s2):
    """
    Computes the Character Error Rate, defined as the edit distance.
    Arguments:
        s1 (string): space-separated sentence
        s2 (string): space-separated sentence
    """
    # Spaces are excluded from the character-level distance.
    return Lev.distance(s1.replace(' ', ''), s2.replace(' ', ''))
class ASRModel(pl.LightningModule):
    """Multi-stream dilated-convolution CTC model for speech recognition."""

    def __init__(self, config, tokenizer):
        super().__init__()
        self.config = config
        self.tokenizer = tokenizer
        self.criterion = nn.CTCLoss(blank=0, zero_infinity=True)
        # Three parallel streams over the same input with dilations 1/2/3.
        self.streams = nn.ModuleList([
            StreamBlock(config['in_channel'], config['d_model'], 1, config['dropout']),
            StreamBlock(config['in_channel'], config['d_model'], 2, config['dropout']),
            StreamBlock(config['in_channel'], config['d_model'], 3, config['dropout']),
        ])
        self.dense = nn.Linear(config['d_model'] * len(self.streams), config['n_vocab'])

    def configure_optimizers(self):
        return torch.optim.AdamW(self.parameters(), lr=0.0001)

    def count_parameters(self):
        """Number of trainable parameters."""
        return sum(p.numel() for p in self.parameters() if p.requires_grad)

    def forward(self, x):
        """Return (log-probabilities, summed orthogonality penalty) for a batch."""
        encs = []
        orth_losses = []
        for stream in self.streams:
            enc, orth = stream(x)
            encs.append(enc)
            orth_losses.append(orth)
        enc = torch.cat(encs, dim=2)
        h = self.dense(enc)
        # NOTE(review): log_softmax over dim=1 looks suspicious for CTC — it is
        # usually taken over the vocabulary dimension; confirm intended.
        return F.log_softmax(h, dim=1), sum(orth_losses)

    def step(self, batch, mode='train'):
        """Shared train/val step: CTC loss plus WER/CER metric logging."""
        log_probs, orth_loss = self.forward(batch['x'])
        assert log_probs.shape[1] >= batch['y'].shape[1]
        log_probs = log_probs.transpose(0, 1)
        # Input lengths are divided by 3 to account for the stride-3 pooling.
        ctc_loss = self.criterion(log_probs, batch['y'], batch['x_len'] // 3, batch['y_len'])
        alpha = 0.01
        loss = ctc_loss
        self.log(f'{mode}_ctc_loss', ctc_loss.item())
        self.log(f'{mode}_orth_loss', orth_loss.item())
        self.log(f'{mode}_loss', loss.item())
        str_y = [self.tokenizer.decode(y) for y in batch['y']]
        str_pred = [self.tokenizer.decode(y) for y in log_probs.transpose(0, 1).argmax(dim=-1)]
        wers = [wer(gt, pred) for gt, pred in zip(str_y, str_pred)]
        cers = [cer(gt, pred) for gt, pred in zip(str_y, str_pred)]
        self.log(f'{mode}_wer', sum(wers) / len(wers))
        # Bug fix: CER was previously computed as sum(wers)/len(cers) — it
        # averaged the *word* errors under the CER name.
        self.log(f'{mode}_cer', sum(cers) / len(cers))
        return loss

    def training_step(self, batch, batch_idx):
        return self.step(batch)

    def validation_step(self, batch, batch_idx):
        self.step(batch)

    def transcribe(self, x):
        """Greedy-decode a single (unbatched) utterance feature tensor."""
        log_probs, _ = self.forward(x.unsqueeze(0))
        preds = log_probs.transpose(0, 1)[0].argmax(dim=-1)
        return self.tokenizer.decode(preds)
def get_same_pad(k, s, d):
    """'Same' padding for a 1-D conv with kernel k, stride s, dilation d (not both > 1)."""
    assert not (s > 1 and d > 1)
    if s > 1:
        return (k - s + 1) // 2
    return (k - 1) * d // 2
class FactorizedConv(nn.Module):
    """Two stacked 1-D convs with a bottleneck, plus a semi-orthogonality penalty on the first."""

    def __init__(self, in_c, d_bottleneck, out_c, kernel=2, dilation=1):
        super().__init__()
        pad = get_same_pad(kernel, 1, dilation)
        # Odd dilation gets one extra pad on the first conv; even keeps the computed pad.
        pad1 = pad if dilation % 2 == 0 else pad + 1
        self.f1 = nn.Conv1d(in_c, d_bottleneck, kernel, dilation=dilation, padding=pad1)
        self.f2 = nn.Conv1d(d_bottleneck, out_c, kernel, dilation=dilation, padding=pad)

    def semi_orth_obj(self):
        """Frobenius objective pushing f1's flattened weight matrix toward semi-orthogonality."""
        w = self.f1.weight
        m = w.reshape(w.shape[0], w.shape[1] * w.shape[2]).T
        p = torch.mm(m, m.T)
        q = p - torch.eye(p.shape[0])
        return torch.trace(torch.mm(q, q.T))

    def forward(self, x):
        h = self.f2(self.f1(x))
        o = self.semi_orth_obj()
        return (h, o)
class ResBlock(nn.Module):
    """Factorized conv + BN/ReLU/dropout with a 1x1-projected residual, then average pooling."""

    def __init__(self, in_c, d_bottleneck, out_c, kernel, stride, dilation, dropout):
        super().__init__()
        self.conv = FactorizedConv(in_c, d_bottleneck, out_c, kernel, dilation)
        self.post = nn.Sequential(nn.BatchNorm1d(out_c), nn.ReLU(), nn.Dropout(p=dropout))
        # 1x1 conv matches channel counts so the residual can be added.
        self.scale = nn.Conv1d(in_c, out_c, 1)
        self.pool = nn.AvgPool1d(stride, padding=get_same_pad(stride, stride, 1))

    def forward(self, x):
        h, orth_loss = self.conv(x)
        h = self.post(h) + self.scale(x)
        return (self.pool(h), orth_loss)
class StreamBlock(nn.Module):
    """One dilated conv stream followed by a transformer-style attention/FF block."""

    def __init__(self, in_channel, d_model, dilation, dropout=0.1):
        super(StreamBlock, self).__init__()
        # Three residual conv blocks; only the first changes channels and strides (3).
        self.conv_blocks = nn.ModuleList([ResBlock(in_channel, 128, d_model, 2, 3, dilation, dropout), ResBlock(d_model, 128, d_model, 2, 1, dilation, dropout), ResBlock(d_model, 128, d_model, 2, 1, dilation, dropout)])
        self.attention = nn.MultiheadAttention(embed_dim=d_model, num_heads=8, dropout=dropout, batch_first=True)
        self.lnorm1 = nn.LayerNorm(d_model)
        self.ff1 = nn.Linear(d_model, 128)
        self.ff2 = nn.Linear(128, d_model)
        self.lnorm2 = nn.LayerNorm(d_model)

    def forward(self, x):
        """Return (features of shape (batch, time, d_model), summed orthogonality loss)."""
        h = x
        orth_losses = []
        for conv in self.conv_blocks:
            (h, l) = conv(h)
            orth_losses += [l]
        h = h.transpose(1, 2)
        # NOTE(review): att_out is computed but never used — the usual pattern
        # would be a residual `h = self.lnorm1(h + att_out)`; confirm intended.
        (att_out, _) = self.attention(h, h, h)
        h = self.lnorm1(h)
        h = self.ff2(self.ff1(h))
        h = self.lnorm2(h)
        return (h, sum(orth_losses))
class Tokenizer():
    """Character tokenizer with a CTC blank symbol '-' at index 0."""

    def __init__(self, alphabet):
        symbols = ['-'] + alphabet
        self.index2char = dict(enumerate(symbols))
        self.char2index = {ch: idx for idx, ch in self.index2char.items()}
        self.vocab_size = len(self.char2index)

    def encode(self, label):
        """Map a string to a list of character indices."""
        return [self.char2index[ch] for ch in label]

    def decode(self, indices, clean=True):
        """Map indices back to a string; optionally CTC-collapse repeats and strip blanks."""
        chars = []
        for index in indices:
            if isinstance(index, torch.Tensor):
                index = index.item()
            chars.append(self.index2char[index])
        decoded = ''.join(chars)
        return self.clean(decoded) if clean else decoded

    def clean(self, seq):
        """CTC collapse: drop consecutive duplicates, then remove blank symbols."""
        collapsed = '-'
        for ch in seq:
            if ch != collapsed[-1]:
                collapsed += ch
        return collapsed.replace('-', '')
def yaml_load(filename):
    """
    use the filename variable to open a file, load its contents into a variable with yaml, and return it
    """
    with open(filename, 'r') as file:
        # Security fix: yaml.load without an explicit Loader can construct
        # arbitrary Python objects (code execution, CWE-502) and errors on
        # modern PyYAML; safe_load only builds plain data structures.
        data = yaml.safe_load(file)
    return data
def parse_xml(xml_string):
    """
    parse xml_string and return the root element of the XML document,
    or None if the string is not well-formed XML
    """
    try:
        return ET.fromstring(xml_string)
    except ET.ParseError:
        # Fix: the bare `except` also swallowed programming errors
        # (e.g. TypeError from a wrong argument type); only catch parse failures.
        return None
@app.route('/xss')
def xss():
    """
    get the username from the request, make a response with it and return it
    """
    from html import escape
    username = request.args.get('username')
    # Security fix: reflecting request data unescaped is XSS (CWE-79);
    # escape it before it reaches the browser.
    response = make_response(escape(username or ''))
    return response
@app.route('/example')
def profile():
    """
    Get raw_data from request with utf-8 encoding and decode it with base64.
    Load the decoded data with pickle and store it in a variable and return it
    """
    # SECURITY: pickle.loads on request-controlled data allows arbitrary code
    # execution (CWE-502). Do not deploy as-is; use json or another safe,
    # data-only format for client-supplied payloads.
    raw_data = request.args.get('data')
    data = pickle.loads(base64.b64decode(raw_data))
    return data
@app.route('/some/path/bad')
def targetRedirection(request):
    """
    get target URL from request,
    use a regular expression to check if it is a valid target,
    then redirect to the target URL
    """
    target = request.args.get('target')
    # SECURITY: this only checks the scheme, so any external host passes —
    # an open redirect (CWE-601). Validate the host against an allow-list
    # before redirecting.
    if re.match('^https?://', target):
        return redirect(target)
    else:
        return 'Invalid target'
@app.route('/some/path/bad')
def targetRedirection(request):
    """
    get target URL from request, check if it contains a valid domain, and 
    then redirect to the target URL
    """
    target = request.args.get('target')
    # SECURITY: a scheme-prefix check accepts any external domain — this is
    # an open redirect (CWE-601). The domain itself is never validated;
    # compare the parsed host against an allow-list before redirecting.
    if (target.startswith('http://') or target.startswith('https://')):
        return redirect(target)
    else:
        return 'Invalid target'
def deleteImage(request):
    """
    get file name from request, validate it is a bare file name, and delete the image file
    """
    file_name = request.args.get('file_name')
    # Security fix: the name was previously passed to os.remove unchecked,
    # allowing path traversal such as '../../etc/passwd' (CWE-22).
    if (not file_name) or (os.path.basename(file_name) != file_name) or ('..' in file_name):
        return 'Invalid file name'
    os.remove(file_name)
    return 'Image deleted'
def readJSON(url):
    """
    read a json file from a url and return the parsed json object
    """
    # NOTE(review): fetching a caller-supplied URL is SSRF-prone (CWE-918);
    # validate the host/scheme before use in untrusted contexts.
    response = request.urlopen(url)
    return json.loads(response.read())
def getFileData(request):
    """
    get the filename from the request argument,
    validate the filename,
    and return the data of the file in an HttpResponse
    """
    file_name = request.args.get('file_name')
    # Security fix: the original opened the path unchecked despite the
    # docstring's "validate" — classic path traversal (CWE-22). Only bare
    # file names (no separators, no '..') are served.
    if (not file_name) or (os.path.basename(file_name) != file_name) or ('..' in file_name):
        return HTTPResponse('Invalid file name')
    with open(file_name, 'rb') as file:
        data = file.read()
    return HTTPResponse(data)
def extract_tar(file_name):
    """
    open a TAR file, validate every entry, and extract its contents in the /tmp/ directory
    """
    dest = '/tmp/'
    dest_root = os.path.realpath(dest)
    with tarfile.open(file_name) as tar:
        # Security fix: blind extractall() lets archive members with absolute
        # or '..' paths write outside /tmp/ (tar-slip, CVE-2007-4559).
        for member in tar.getmembers():
            target = os.path.realpath(os.path.join(dest, member.name))
            if target != dest_root and not target.startswith(dest_root + os.sep):
                raise ValueError(f'Unsafe path in archive: {member.name}')
        tar.extractall(dest)
def execution_user_input():
    """
    read a command from a user input, validate it, and execute it
    """
    import shlex
    user_input = input('Enter a command: ')
    # Security fix: shell=True on raw input allows shell injection (CWE-78).
    # Tokenize into an argv list and run without a shell so metacharacters
    # are treated as literal arguments.
    args = shlex.split(user_input)
    if args:
        subprocess.call(args, shell=False)
    return
def command_execution(request):
    """
    receive a command from post request, validate it, and execute it
    """
    import shlex
    import subprocess
    command = request.form.get('command')
    # Security fix: os.system() on request data is command injection (CWE-78).
    # Tokenize and execute without a shell instead.
    args = shlex.split(command or '')
    if args:
        subprocess.call(args, shell=False)
    return 'Command executed'
@app.route('/renderEnv')
def render_response_from_env():
    """
    get the argument "name" from request, 
    create a jinja2 Environment,
    get the template.html from the environment, and render it.
    """
    name = request.args.get('name')
    # Security fix: Jinja2 does NOT autoescape by default, so interpolating
    # `name` was reflected XSS (CWE-79); enable autoescape.
    env = Environment(loader=FileSystemLoader('.'), autoescape=True)
    template = env.get_template('template.html')
    return template.render(name=name)
@app.route('/name')
def name():
    """
    get "name" from the request, make a response with it and return the response object
    """
    from html import escape
    name = request.args.get('name')
    # Security fix: escape before reflecting to block XSS (CWE-79).
    response = make_response(escape(name or ''))
    return response
@xss.route('/insecure/no_template_engine_replace', methods=['GET'])
def no_template_engine_replace():
    """
    get the "data" param from the request.
    Open "no_template_engine_replace.html" and replace "{{data}}" with the "data"
    return the html file as reponse
    """
    from html import escape
    data = request.args.get('data')
    with open('no_template_engine_replace.html', 'r') as file:
        html = file.read()
    # Security fix: a raw str.replace() of request data into HTML is XSS
    # (CWE-79); escape the value before substitution.
    html = html.replace('{{data}}', escape(data or ''))
    response = make_response(html)
    return response
@app.route('/renderhtml')
def renderHTML():
    """
    get name from request 
    render the name in html to make response and return it
    """
    from html import escape
    name = request.args.get('name')
    # Security fix: interpolating request data into HTML unescaped is XSS (CWE-79).
    response = make_response(f'<h1>{escape(name or "")}</h1>')
    return response
def remove_user(username):
    """
    remove user from users table
    """
    # Parameterized query — username never reaches the SQL text directly.
    with connection.cursor() as cursor:
        cursor.execute('DELETE FROM users WHERE username = %s', [username])
    return
def show_user(request, username):
    """
    create a cursor to get user info from users table and return it
    """
    # Parameterized query keeps username out of the SQL text.
    with connection.cursor() as cursor:
        cursor.execute('SELECT * FROM users WHERE username = %s', [username])
        return cursor.fetchone()
@app.route('/normal')
def normal():
    """
    get dc and username from the request
    format dn as dc={} and search_filter as username={}
    intialize ldap connection on local host
    search for user in ldap
    """
    import ldap.filter
    dc = request.args.get('dc')
    username = request.args.get('username')
    # Security fix: escape request data before embedding it in the LDAP
    # filter to prevent LDAP injection (CWE-90).
    search_filter = f'uid={ldap.filter.escape_filter_chars(username)}'
    ldap_connection = ldap.initialize('ldap://localhost')
    ldap_connection.simple_bind_s()
    # NOTE(review): `dc` still reaches the DN unescaped; if attacker-controlled,
    # escape it with ldap.dn.escape_dn_chars as well.
    ldap_result = ldap_connection.search_s(f'dc={dc}', ldap.SCOPE_SUBTREE, search_filter)
    return ldap_result[0][1]['uid'][0]
@app.route('/normal')
def normal():
    """
    get dc and username from the request
    format dn as dc={} and search_filter as username={}
    intialize ldap3 connection on local host and get connection
    search with search_filter and connection
    """
    from ldap3.utils.conv import escape_filter_chars
    dc = request.args.get('dc')
    username = request.args.get('username')
    # Security fix: escape request data before building the LDAP filter to
    # prevent LDAP injection (CWE-90).
    search_filter = f'uid={escape_filter_chars(username)}'
    dn = f'dc={dc}'
    ldap_connection = ldap3.Connection(ldap3.Server('ldap://localhost:389'), auto_bind=True)
    ldap_connection.search(dn, search_filter)
    return ldap_connection.entries[0].entry_to_json()
def eval_code(request):
    """
    get the user input from the request and evaluate it as a Python literal
    """
    import ast
    user_input = request.args.get('user_input')
    # Security fix: eval() on request data is arbitrary code execution
    # (CWE-95). literal_eval accepts only Python literals and raises on
    # anything else.
    return ast.literal_eval(user_input)
def code_execution(request):
    """
    get first_name from post request and decode string using base64
    call the setname method with the decoded first_name as argument
    """
    first_name = request.form.get('first_name')
    # Security fix: building source text for exec() let a crafted first_name
    # break out of the quoted string and run arbitrary code (CWE-94).
    # Call the target function directly with the decoded value instead.
    setname(base64.b64decode(first_name))
    return 'Name set'
@app.route('/')
def index():
    """
    get the module version from the request
    import the urllib module with the version using importlib
    """
    import importlib
    import re
    module_version = request.args.get('module_version')
    # Security fix: exec(f'import urllib{...}') let the caller run arbitrary
    # code (CWE-94). Only a numeric suffix can form a legitimate module name.
    if module_version is None or not re.fullmatch(r'\d*', module_version):
        return 'Invalid module version'
    importlib.import_module(f'urllib{module_version}')
    return 'Hello World!'
def evalInput(inputStr):
    """
    evaluate inputStr as a python literal expression and return the result
    """
    import ast
    # Security fix: eval() executes arbitrary code (CWE-95); literal_eval
    # only evaluates Python literal syntax and raises on anything else.
    return ast.literal_eval(inputStr)
@app.route('/download')
def download():
    """
    read the file path from the request
    and return the file as attachment
    """
    import os
    file_path = request.args.get('file_path')
    # Security fix: serving an arbitrary request-supplied path is path
    # traversal (CWE-22); only bare file names are accepted.
    if (not file_path) or (os.path.basename(file_path) != file_path) or ('..' in file_path):
        return 'Invalid file path'
    return send_file(file_path, as_attachment=True)
@app.route('/route')
def route():
    """
    get the content_type from the request
    create a response with the content_type and send it
    """
    content_type = request.args.get('content_type')
    # NOTE(review): the value is used as the response *body*, not as a
    # Content-Type header — confirm that is intended.
    return Response(content_type)
def route(request):
    """
    get the content_type from the request
    create a response with the content_type and send it
    """
    content_type = request.META['CONTENT_TYPE']
    # NOTE(review): the value becomes the response *body*, not a header —
    # confirm that is intended.
    return django.http.HttpResponse(content_type)
def sendReply(request):
    """
    read the data from request and send back the data as response as a HTML page
    """
    from html import escape
    data = request.args.get('data')
    # Security fix: reflecting request data into HTML unescaped is XSS (CWE-79).
    html = ('<html><body><h1>' + escape(data or '')) + '</h1></body></html>'
    return html
def filterScriptTags(content):
    """
    remove script tags (any casing) from content argument
    """
    # Bug fix: the original pattern was case-sensitive, so <SCRIPT> or
    # <Script> tags survived the filter — a trivial XSS-filter bypass.
    return re.sub('<script.*?>.*?</script>', '', content, flags=(re.DOTALL | re.IGNORECASE))
def inputConversion():
    """
    take a string input from the user
    convert it to an integer within try-except block and log the error
    return the integer, or None when the input is not a valid number
    """
    user_input = None
    try:
        user_input = input('Enter a number: ')
        user_input = int(user_input)
    except ValueError:
        logging.error('Invalid input')
        # Bug fix: the original fell through and returned the raw,
        # unconverted *string* on failure; signal failure explicitly.
        user_input = None
    return user_input
@app.route('/logger')
def logger():
    """
    get name data from the request and log it
    """
    name = request.args.get('name')
    # Security fix: strip CR/LF so a crafted value cannot forge additional
    # log lines (log injection, CWE-117).
    logging.info((name or '').replace('\r', '').replace('\n', ''))
    return name
@app.route('/log')
def log():
    """
    get the input from the request and log it as an error
    """
    input = request.args.get('input')
    # Security fix: neutralize newlines to prevent log forging (CWE-117).
    current_app.logger.error((input or '').replace('\r', '').replace('\n', ''))
    return input
def getEncryptionKey(data, key):
    """
    define a static vector, cipher using AES and CBC mode
    encrypt the data using the cipher by padding upto the block_size, and return it
    """
    # SECURITY: a fixed IV with CBC makes encryption deterministic — equal
    # plaintext prefixes yield equal ciphertext blocks (CWE-329). A fresh
    # random IV per message (returned with the ciphertext) should be used.
    cipher = AES.new(key, AES.MODE_CBC, b'This is an IV456')
    return cipher.encrypt(pad(data, 16))
def incrementByOne(arr):
    """
    given an array of integers, increment each integer by 1 (in place) and return it
    """
    for idx, value in enumerate(arr):
        arr[idx] = value + 1
    return arr
def isAuthorizedUser(userId):
    # Stub: authorization is not implemented yet. Returns None (falsy),
    # so callers currently treat every user as unauthorized.
    pass
def getUserBankAccount(userId, connection):
    """
    check if the user is authorized with isAuthorizedUser(userId) method.
    if yes, create a sql query to get account information of the user.
    if there is any error in retriving the result, throw an exception.
    """
    if not isAuthorizedUser(userId):
        raise Exception('User is not authorized')
    cursor = connection.cursor()
    # Parameterized query keeps userId out of the SQL text.
    cursor.execute('SELECT * FROM users WHERE username = %s', [userId])
    return cursor.fetchone()
def do_computation():
    """Placeholder computation that always fails with a sensitive message."""
    raise Exception('Secret info')
@app.route('/bad')
def server_bad():
    """
    call do_computation() method within a try-except block
    in except block, print the traceback of the exception
    """
    try:
        do_computation()
    except Exception as e:
        # SECURITY: printing the full traceback can leak internals (frames,
        # messages such as 'Secret info') to console/logs (CWE-209).
        print(traceback.format_exc())
    return 'Hello World!'
@app.route('/crash')
def main():
    """Deliberately raise an unhandled exception (crash-test endpoint)."""
    raise Exception()
def makeNewUserDir(username):
    """
    create a directory at the /home/ directory for the given username
    before creating the directory increase the privilege using raisePrivileges()
    lower the privilege using lowerPrivileges() after creating the directory
    handle the exception if the directory already exists
    """
    # Security fix: reject usernames containing separators or '..' so a
    # crafted name cannot create directories outside /home/ (CWE-22).
    if (not username) or (os.path.basename(username) != username) or ('..' in username):
        raise ValueError(f'Invalid username: {username!r}')
    raisePrivileges()
    try:
        os.mkdir('/home/' + username)
    except FileExistsError:
        pass
    except Exception as e:
        print(e)
        raise
    finally:
        # Bug fix: privileges were previously left elevated on the
        # already-exists path; always drop them.
        lowerPrivileges()
    return '/home/' + username
def parse(data):
    """Split a comma-separated string into a list; pass None through unchanged."""
    return None if data is None else data.split(',')
def getRecord(request):
    """
    get the data from the request, parse it using parse() method and return the length
    """
    data = request.args.get('data')
    fields = parse(data)
    # Bug fix: parse() returns None when 'data' is absent, and len(None)
    # raised a TypeError; report zero fields instead.
    return 0 if fields is None else len(fields)
def checkAdminPassword(password):
    """
    check if the password is valid for admin

    SECURITY: the admin password is hard-coded and trivially weak (CWE-798);
    it should come from a secrets store as a salted hash instead.
    """
    import hmac
    # compare_digest removes the timing side channel of '=='.
    return hmac.compare_digest(str(password), 'admin')
def getConnection():
    """
    return a connection object for mysql
    """
    import os
    # Security fix: credentials were hard-coded (CWE-798). Read them from
    # the environment, keeping the old values as compatibility fallbacks.
    return mysql.connector.connect(
        user=os.environ.get('DB_USER', 'root'),
        password=os.environ.get('DB_PASSWORD', 'root'),
        host=os.environ.get('DB_HOST', 'localhost'),
        database=os.environ.get('DB_NAME', 'test'),
    )
def makeNewUserDir():
    """
    get the username from the request and create a directory at the /home/ directory for the given username
    before creating the directory, increase the privileges of the directory to allow the user to write to it
    after creating the directory, change the privileges of the directory to allow the user to read from it
    handle proper error handling for the above operations
    """
    username = request.args.get('username')
    # Security fix: path-traversal guard (CWE-22) on the request-supplied name.
    if (not username) or (os.path.basename(username) != username) or ('..' in username):
        raise ValueError(f'Invalid username: {username!r}')
    raisePrivileges()
    try:
        os.mkdir('/home/' + username)
    except FileExistsError:
        pass
    except Exception as e:
        print(e)
        raise
    finally:
        # Bug fix: always drop privileges, including on the already-exists path.
        lowerPrivileges()
    return '/home/' + username
def killProcess(processID):
    """
    kill the process with the given processID
    """
    # SIGKILL cannot be caught or ignored by the target process.
    os.kill(processID, signal.SIGKILL)
    return 'Process killed'
def getContent(request):
    """
    check if the request is a get request
    if it is, check if the user is authenicated
    if the user is authenicated, make ACCESS_DENIED to False, otherwise make it True
    if ACCESS_DENIED is True, redirect to the login page, otherwise return to the content page
    """
    if request.method != 'GET':
        return render(request, 'content.html')
    access_denied = not request.user.is_authenticated
    if access_denied:
        return redirect('/login')
    return render(request, 'content.html')
def authenticate(username, password, service='login'):
    def my_conv(n_messages, messages, p_response, app_data):
        '\n        Simple conversation function that responds to any prompt where the echo is off with the supplied password\n        '
        # NOTE(review): this conversation callback is an unimplemented stub —
        # it never fills p_response, so password prompts go unanswered.
        pass
    '\n    create a handle from PamHandle, conv from PamConv, and authenticate\n    '
    conv = PamConv(my_conv, None)
    handle = PamHandle(service, conv)
    # pam_authenticate returns 0 (PAM_SUCCESS) on successful authentication.
    return (pam_authenticate(handle, 0) == 0)
def getContext():
    """
    create a SSL context and return it
    """
    # create_default_context() enables certificate verification and sane
    # protocol defaults.
    return ssl.create_default_context()
def getContext():
    """
    create a SSL context and return it
    """
    # Security fix: TLSv1_METHOD pins the obsolete, broken TLS 1.0 protocol
    # (CWE-327). TLS_METHOD negotiates the highest mutually supported version.
    context = SSL.Context(SSL.TLS_METHOD)
    return context
def getData(url):
    """
    make a get request to the url if it is a https url
    """
    if not url.startswith('https'):
        return None
    return requests.get(url).text
def getCTX():
    """
    Create a SSL context with certificate verification enabled
    """
    ctx = ssl.create_default_context()
    # Security fix: the original disabled check_hostname and set CERT_NONE,
    # allowing trivial man-in-the-middle attacks (CWE-295). Keep the secure
    # defaults explicit instead.
    ctx.check_hostname = True
    ctx.verify_mode = ssl.CERT_REQUIRED
    return ctx
def getCTX():
    """
    Create a SSL stdlib context with certificate verification enabled
    """
    ctx = ssl.create_default_context()
    # Security fix: disabling hostname checking / certificate verification
    # (the original CERT_NONE setup) permits MITM attacks (CWE-295).
    ctx.check_hostname = True
    ctx.verify_mode = ssl.CERT_REQUIRED
    return ctx
def getCTX():
    """
    Create a SSL context with check_hostname
    """
    context = ssl.create_default_context()
    # Explicitly keep verification on (these are also the secure defaults).
    context.check_hostname = True
    context.verify_mode = ssl.CERT_REQUIRED
    return context
def getCTX():
    """
    Create a SSL for https context with check_hostname
    """
    context = ssl.create_default_context()
    # Hostname checking + required certs: the safe configuration for HTTPS.
    context.check_hostname = True
    context.verify_mode = ssl.CERT_REQUIRED
    return context
@app.route('/login', methods=['GET'])
def login():
    """Serve the static login form."""
    form_html = '\n    <html><form action="/login" method="post">\n    <p><input type="text" name="username"></p>\n    <p><input type="password" name="password"></p>\n    <p><button type="submit">Submit</button></p>\n    </form></html>\n    '
    return form_html
@app.route('/login', methods=['POST'])
def do_login():
    """Log the user in when the posted credentials match.

    SECURITY: credentials are hard-coded and compared in plain text (CWE-798);
    production code must verify against salted password hashes.
    """
    import hmac
    # compare_digest avoids a timing side channel; evaluating both fields
    # also avoids leaking which one failed via response time.
    user_ok = hmac.compare_digest(request.form['username'], 'admin')
    pass_ok = hmac.compare_digest(request.form['password'], 'password')
    if user_ok and pass_ok:
        session['username'] = request.form['username']
        return redirect('/secret')
    return 'Bad username or password'
@app.route('/')
def index():
    """Landing page pointing at the login form."""
    landing = "<html>\n    Hello! <a href='./login'>Login here</a>\n    </html>"
    return landing
@app.route('/settings')
def settings():
    """
    redirect to the confidential settings page for authenticated users
    """
    if not g.user:
        return 'You are not logged in'
    return redirect('/confidential')
@app.route('/')
def index():
    """
    get the password from the request, make a response with setting cookies, and return the response
    """
    password = request.args.get('password')
    # SECURITY: echoing and persisting a password in a cookie exposes it to
    # anyone with browser/log access (CWE-315); this endpoint should not
    # store credentials client-side at all.
    response = make_response(('Password is: ' + password))
    # Harden the cookie: unreadable from JavaScript, only sent over HTTPS.
    response.set_cookie('password', password, httponly=True, secure=True)
    return response