code stringlengths 17 6.64M |
|---|
def generate_instruction_following_data(output_dir='./', seed_tasks_path='./seed_tasks.jsonl', num_instructions_to_generate=20000, model_name='text-davinci-003', num_prompt_instructions=3, request_batch_size=5, temperature=1.0, top_p=1.0, num_cpus=16):
    """Bootstrap machine-generated instructions from human seed tasks (Self-Instruct style).

    Repeatedly prompts the completion API with randomly sampled seed
    instructions, post-processes each response, and keeps only generations
    whose ROUGE-L similarity to every known instruction is <= 0.7. Results
    are persisted to ``<output_dir>/regen.json`` after every batch so an
    interrupted run can resume.

    Args:
        output_dir: Directory where ``regen.json`` is written.
        seed_tasks_path: JSONL file of human-written seed tasks.
        num_instructions_to_generate: Stop once this many machine-written
            instructions have been kept.
        model_name: Completion model to query.
        num_prompt_instructions: Seed instructions packed into each prompt.
        request_batch_size: Prompts sent per API request.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling parameter.
        num_cpus: Worker processes for the parallel ROUGE similarity scan.
    """
    seed_tasks = [json.loads(l) for l in open(seed_tasks_path, 'r')]
    seed_instruction_data = [{'instruction': t['instruction'], 'input': t['instances'][0]['input'], 'output': t['instances'][0]['output']} for t in seed_tasks]
    print(f'Loaded {len(seed_instruction_data)} human-written seed instructions')
    os.makedirs(output_dir, exist_ok=True)
    request_idx = 0
    # Resume from a previous run if partial results exist on disk.
    machine_instruction_data = []
    if os.path.exists(os.path.join(output_dir, 'regen.json')):
        machine_instruction_data = utils.jload(os.path.join(output_dir, 'regen.json'))
        print(f'Loaded {len(machine_instruction_data)} machine-generated instructions')
    scorer = rouge_scorer.RougeScorer(['rougeL'], use_stemmer=False)
    progress_bar = tqdm.tqdm(total=num_instructions_to_generate)
    if machine_instruction_data:
        progress_bar.update(len(machine_instruction_data))
    # Pre-tokenize every known instruction once; new keeps are appended below.
    all_instructions = ([d['instruction'] for d in seed_instruction_data] + [d['instruction'] for d in machine_instruction_data])
    all_instruction_tokens = [scorer._tokenizer.tokenize(inst) for inst in all_instructions]
    while len(machine_instruction_data) < num_instructions_to_generate:
        request_idx += 1
        batch_inputs = []
        for _ in range(request_batch_size):
            prompt_instructions = random.sample(seed_instruction_data, num_prompt_instructions)
            batch_inputs.append(encode_prompt(prompt_instructions))
        # BUG FIX: the stop list previously contained '20.' twice; the
        # duplicate entry is removed.
        decoding_args = utils.OpenAIDecodingArguments(temperature=temperature, n=1, max_tokens=3072, top_p=top_p, stop=['\n20', '20.'])
        request_start = time.time()
        print('Calling openai...')
        # logit_bias suppresses the <|endoftext|> token (id 50256).
        results = utils.openai_completion(prompts=batch_inputs, model_name=model_name, batch_size=request_batch_size, decoding_args=decoding_args, logit_bias={'50256': (- 100)})
        request_duration = time.time() - request_start
        print(f'request took - {request_duration}')
        process_start = time.time()
        instruction_data = []
        for result in results:
            instruction_data += post_process_gpt3_response(num_prompt_instructions, result)
        total = len(instruction_data)
        keep = 0
        # PERF FIX: create the worker pool once per batch instead of once per
        # candidate instruction (pool startup is expensive).
        with Pool(num_cpus) as p:
            for instruction_data_entry in instruction_data:
                new_instruction_tokens = scorer._tokenizer.tokenize(instruction_data_entry['instruction'])
                rouge_scores = p.map(partial(rouge_scorer._score_lcs, new_instruction_tokens), all_instruction_tokens)
                rouge_scores = [score.fmeasure for score in rouge_scores]
                # Reject near-duplicates of anything already collected.
                if max(rouge_scores) > 0.7:
                    continue
                keep += 1
                # Only kept entries need the top-10 most-similar report.
                most_similar_instructions = {all_instructions[i]: rouge_scores[i] for i in np.argsort(rouge_scores)[-10:][::-1]}
                instruction_data_entry['most_similar_instructions'] = most_similar_instructions
                instruction_data_entry['avg_similarity_score'] = float(np.mean(rouge_scores))
                machine_instruction_data.append(instruction_data_entry)
                all_instructions.append(instruction_data_entry['instruction'])
                all_instruction_tokens.append(new_instruction_tokens)
                progress_bar.update(1)
        process_duration = time.time() - process_start
        print(f'Request {request_idx} took {request_duration:.2f}s, processing took {process_duration:.2f}s')
        print(f'Generated {total} instructions, kept {keep} instructions')
        utils.jdump(machine_instruction_data, os.path.join(output_dir, 'regen.json'))
|
def main(task, **kwargs):
    """Dispatch *task* to the module-level function of the same name."""
    handler = globals()[task]
    handler(**kwargs)
|
def train(base_model: str='', data_path: str='yahma/alpaca-cleaned', output_dir: str='./lora-alpaca', batch_size: int=128, micro_batch_size: int=8, num_epochs: int=1, learning_rate: float=0.0003, cutoff_len: int=2048, val_set_size: int=2000, lora_r: int=8, lora_alpha: int=16, lora_dropout: float=0.05, lora_target_modules: List[str]=['q_proj', 'v_proj'], train_on_inputs: bool=True, group_by_length: bool=False, wandb_project: str='', wandb_run_name: str='', wandb_watch: str='', wandb_log_model: str='', resume_from_checkpoint: str=None, prompt_template_name: str='alpaca'):
    """Fine-tune *base_model* on instruction-following data.

    Args:
        base_model: HF model id or path to fine-tune (required).
        data_path: JSON/JSONL file or HF dataset name with instruction data.
        output_dir: Where checkpoints and the final model are saved.
        batch_size: Effective global batch size.
        micro_batch_size: Per-device batch size; gradient accumulation makes
            up the difference to ``batch_size``.
        num_epochs / learning_rate / cutoff_len / val_set_size: Usual
            training hyper-parameters.
        lora_r / lora_alpha / lora_dropout / lora_target_modules:
            NOTE(review): accepted but unused in this version — the PEFT/LoRA
            wrapping appears to have been removed; confirm intended.
        train_on_inputs: When False, prompt tokens are masked out of the loss.
        group_by_length: Accepted for CLI compatibility; unused here.
        wandb_*: Weights & Biases configuration (env-var driven).
        resume_from_checkpoint: Checkpoint directory to resume from.
        prompt_template_name: Template used by ``Prompter``.
    """
    if int(os.environ.get('LOCAL_RANK', 0)) == 0:
        # Only rank 0 prints the configuration banner.
        print(f'''Training Alpaca-LoRA model with params:
base_model: {base_model}
data_path: {data_path}
output_dir: {output_dir}
batch_size: {batch_size}
micro_batch_size: {micro_batch_size}
num_epochs: {num_epochs}
learning_rate: {learning_rate}
cutoff_len: {cutoff_len}
val_set_size: {val_set_size}
lora_r: {lora_r}
lora_alpha: {lora_alpha}
lora_dropout: {lora_dropout}
lora_target_modules: {lora_target_modules}
train_on_inputs: {train_on_inputs}
group_by_length: {group_by_length}
wandb_project: {wandb_project}
wandb_run_name: {wandb_run_name}
wandb_watch: {wandb_watch}
wandb_log_model: {wandb_log_model}
resume_from_checkpoint: {(resume_from_checkpoint or False)}
prompt template: {prompt_template_name}
''')
    assert base_model, "Please specify a --base_model, e.g. --base_model='huggyllama/llama-7b'"
    # BUG FIX: gradient accumulation was hard-coded to 16 and micro_batch_size
    # was ignored entirely; derive it so `batch_size` is the effective global
    # batch size, matching the upstream alpaca-lora recipe.
    gradient_accumulation_steps = batch_size // micro_batch_size
    prompter = Prompter(prompt_template_name)
    device_map = 'auto'
    world_size = int(os.environ.get('WORLD_SIZE', 1))
    ddp = world_size != 1
    if ddp:
        # Under DDP each process owns one device; split accumulation across ranks.
        device_map = {'': int(os.environ.get('LOCAL_RANK') or 0)}
        gradient_accumulation_steps = gradient_accumulation_steps // world_size
    # W&B is enabled either by argument or by a pre-set environment variable.
    use_wandb = (len(wandb_project) > 0) or (('WANDB_PROJECT' in os.environ) and (len(os.environ['WANDB_PROJECT']) > 0))
    if len(wandb_project) > 0:
        os.environ['WANDB_PROJECT'] = wandb_project
    if len(wandb_watch) > 0:
        os.environ['WANDB_WATCH'] = wandb_watch
    if len(wandb_log_model) > 0:
        os.environ['WANDB_LOG_MODEL'] = wandb_log_model
    model = AutoModelForCausalLM.from_pretrained(base_model, load_in_8bit=False, torch_dtype=torch.float16, device_map=device_map)
    tokenizer = AutoTokenizer.from_pretrained(base_model)
    tokenizer.pad_token_id = 0  # pad with a token distinct from eos
    tokenizer.padding_side = 'left'  # left-pad so generation works with batches

    def tokenize(prompt, add_eos_token=True):
        # Tokenize up to cutoff_len; append EOS only when there is room for it.
        result = tokenizer(prompt, truncation=True, max_length=cutoff_len, padding=False, return_tensors=None)
        if result['input_ids'][-1] != tokenizer.eos_token_id and len(result['input_ids']) < cutoff_len and add_eos_token:
            result['input_ids'].append(tokenizer.eos_token_id)
            result['attention_mask'].append(1)
        result['labels'] = result['input_ids'].copy()
        return result

    def generate_and_tokenize_prompt(data_point):
        # Build the full prompt+response sequence, then optionally mask the
        # prompt portion so the loss covers only the response.
        full_prompt = prompter.generate_prompt(data_point['instruction'], data_point['input'], data_point['output'])
        tokenized_full_prompt = tokenize(full_prompt)
        if not train_on_inputs:
            user_prompt = prompter.generate_prompt(data_point['instruction'], data_point['input'])
            tokenized_user_prompt = tokenize(user_prompt, add_eos_token=False)
            user_prompt_len = len(tokenized_user_prompt['input_ids'])
            # -100 is the ignore index for the HF loss.
            tokenized_full_prompt['labels'] = [-100] * user_prompt_len + tokenized_full_prompt['labels'][user_prompt_len:]
        return tokenized_full_prompt

    if data_path.endswith('.json') or data_path.endswith('.jsonl'):
        data = load_dataset('json', data_files=data_path)
    else:
        data = load_dataset(data_path)
    if resume_from_checkpoint:
        # Prefer a full checkpoint; fall back to a bare adapter file.
        checkpoint_name = os.path.join(resume_from_checkpoint, 'pytorch_model.bin')
        if not os.path.exists(checkpoint_name):
            checkpoint_name = os.path.join(resume_from_checkpoint, 'adapter_model.bin')
            resume_from_checkpoint = False  # trainer cannot resume from an adapter-only file
        if os.path.exists(checkpoint_name):
            print(f'Restarting from {checkpoint_name}')
            # NOTE(review): the loaded weights are never applied to the model
            # (upstream called set_peft_model_state_dict here) — confirm
            # whether resuming is expected to restore these weights.
            adapters_weights = torch.load(checkpoint_name)
        else:
            print(f'Checkpoint {checkpoint_name} not found')
    if val_set_size > 0:
        # Fixed seed keeps the train/val split stable across resumed runs.
        train_val = data['train'].train_test_split(test_size=val_set_size, shuffle=True, seed=42)
        train_data = train_val['train'].shuffle().map(generate_and_tokenize_prompt)
        val_data = train_val['test'].shuffle().map(generate_and_tokenize_prompt)
    else:
        train_data = data['train'].shuffle().map(generate_and_tokenize_prompt)
        val_data = None
    if not ddp and torch.cuda.device_count() > 1:
        # Single-process multi-GPU: let transformers shard the model instead of DDP.
        model.is_parallelizable = True
        model.model_parallel = True
    trainer = transformers.Trainer(
        model=model,
        train_dataset=train_data,
        eval_dataset=val_data,
        args=transformers.TrainingArguments(
            # BUG FIX: this previously received `batch_size`, inflating the
            # effective batch to batch_size * 16; the per-device batch is the
            # micro batch.
            per_device_train_batch_size=micro_batch_size,
            gradient_accumulation_steps=gradient_accumulation_steps,
            warmup_steps=100,
            num_train_epochs=num_epochs,
            learning_rate=learning_rate,
            fp16=True,
            logging_steps=10,
            optim='adamw_torch',
            evaluation_strategy=('steps' if val_set_size > 0 else 'no'),
            save_strategy='steps',
            eval_steps=(200 if val_set_size > 0 else None),
            save_steps=200,
            output_dir=output_dir,
            save_total_limit=3,
            load_best_model_at_end=(True if val_set_size > 0 else False),
            ddp_find_unused_parameters=(False if ddp else None),
            report_to=('wandb' if use_wandb else None),
            run_name=(wandb_run_name if use_wandb else None),
        ),
        data_collator=transformers.DataCollatorForSeq2Seq(tokenizer, pad_to_multiple_of=8, return_tensors='pt', padding=True),
    )
    model.config.use_cache = False  # incompatible with gradient checkpointing/training
    if torch.__version__ >= '2' and sys.platform != 'win32':
        model = torch.compile(model)
    trainer.train(resume_from_checkpoint=resume_from_checkpoint)
    model.save_pretrained(output_dir)
    print("\n If there's a warning about missing keys above, please disregard :)")
|
@dataclass
class ModelArguments:
    """Arguments selecting which pretrained model to fine-tune."""

    # HF hub id or local path of the base model.
    model_name_or_path: Optional[str] = field(default='facebook/opt-125m')
|
@dataclass
class DataArguments:
    """Arguments describing where the training data lives."""

    data_path: str = field(default=None, metadata={'help': 'Path to the training data.'})
|
@dataclass
class TrainingArguments(transformers.TrainingArguments):
    """HF TrainingArguments extended with cache, optimizer, and length knobs."""

    # Where downloaded model weights are cached.
    cache_dir: Optional[str] = field(default=None)
    # Optimizer name understood by transformers.
    optim: str = field(default='adamw_torch')
    model_max_length: int = field(default=512, metadata={'help': 'Maximum sequence length. Sequences will be right padded (and possibly truncated).'})
|
def smart_tokenizer_and_embedding_resize(special_tokens_dict: Dict, tokenizer: transformers.PreTrainedTokenizer, model: transformers.PreTrainedModel):
    """Resize tokenizer and embedding.

    Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
    """
    num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
    model.resize_token_embeddings(len(tokenizer))
    if num_new_tokens == 0:
        return
    # Initialize each newly added row with the mean of the pre-existing rows,
    # for both the input and output embedding matrices.
    for embeddings in (model.get_input_embeddings().weight.data, model.get_output_embeddings().weight.data):
        existing_avg = embeddings[:-num_new_tokens].mean(dim=0, keepdim=True)
        embeddings[-num_new_tokens:] = existing_avg
|
def _tokenize_fn(strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> Dict:
    """Tokenize a list of strings."""
    tokenized_list = []
    for text in strings:
        tokenized_list.append(tokenizer(text, return_tensors='pt', padding='longest', max_length=tokenizer.model_max_length, truncation=True))
    # For plain language modeling, labels are the input ids themselves.
    input_ids = labels = [tokenized.input_ids[0] for tokenized in tokenized_list]
    # Count non-pad tokens so callers can recover the true sequence lengths.
    input_ids_lens = labels_lens = [tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() for tokenized in tokenized_list]
    return dict(input_ids=input_ids, labels=labels, input_ids_lens=input_ids_lens, labels_lens=labels_lens)
|
def preprocess(sources: Sequence[str], targets: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> Dict:
    """Preprocess the data by tokenizing."""
    examples = [source + target for source, target in zip(sources, targets)]
    examples_tokenized = _tokenize_fn(examples, tokenizer)
    sources_tokenized = _tokenize_fn(sources, tokenizer)
    input_ids = examples_tokenized['input_ids']
    labels = copy.deepcopy(input_ids)
    # Mask out the prompt portion so the loss covers only the response tokens.
    for label, source_len in zip(labels, sources_tokenized['input_ids_lens']):
        label[:source_len] = IGNORE_INDEX
    return dict(input_ids=input_ids, labels=labels)
|
class SupervisedDataset(Dataset):
    """Dataset for supervised fine-tuning."""

    def __init__(self, data_path: str, tokenizer: transformers.PreTrainedTokenizer):
        super(SupervisedDataset, self).__init__()
        logging.warning('Loading data...')
        list_data_dict = utils.jload(data_path)
        logging.warning('Formatting inputs...')
        prompt_input = PROMPT_DICT['prompt_input']
        prompt_no_input = PROMPT_DICT['prompt_no_input']
        # Pick the with-input or no-input template per example.
        sources = []
        for example in list_data_dict:
            template = prompt_input if example.get('input', '') != '' else prompt_no_input
            sources.append(template.format_map(example))
        targets = [f"{example['output']}{tokenizer.eos_token}" for example in list_data_dict]
        logging.warning('Tokenizing inputs... This may take some time...')
        data_dict = preprocess(sources, targets, tokenizer)
        self.input_ids = data_dict['input_ids']
        self.labels = data_dict['labels']

    def __len__(self):
        return len(self.input_ids)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        return dict(input_ids=self.input_ids[i], labels=self.labels[i])
|
@dataclass
class DataCollatorForSupervisedDataset(object):
    """Collate examples for supervised fine-tuning."""

    tokenizer: transformers.PreTrainedTokenizer

    def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]:
        input_ids = [instance['input_ids'] for instance in instances]
        labels = [instance['labels'] for instance in instances]
        # Right-pad to the longest sequence in the batch.
        input_ids = torch.nn.utils.rnn.pad_sequence(input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id)
        labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX)
        # The attention mask marks every non-pad position.
        return dict(input_ids=input_ids, labels=labels, attention_mask=input_ids.ne(self.tokenizer.pad_token_id))
|
def make_supervised_data_module(tokenizer: transformers.PreTrainedTokenizer, data_args) -> Dict:
    """Make dataset and collator for supervised fine-tuning."""
    collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
    dataset = SupervisedDataset(tokenizer=tokenizer, data_path=data_args.data_path)
    # No eval split is built here; callers get train-only fine-tuning.
    return dict(train_dataset=dataset, eval_dataset=None, data_collator=collator)
|
def train():
    """Parse CLI args, build model/tokenizer/data, and run supervised fine-tuning.

    NOTE(review): a second top-level ``train`` is defined earlier in this file;
    this definition shadows it at import time — confirm intended.
    """
    parser = transformers.HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    model = transformers.AutoModelForCausalLM.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir)
    tokenizer = transformers.AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=training_args.cache_dir, model_max_length=training_args.model_max_length, padding_side='right', use_fast=False)
    if tokenizer.pad_token is None:
        # Adding a pad token requires resizing the embedding matrices too.
        smart_tokenizer_and_embedding_resize(special_tokens_dict=dict(pad_token=DEFAULT_PAD_TOKEN), tokenizer=tokenizer, model=model)
    if 'llama' in model_args.model_name_or_path:
        # LLaMA tokenizers ship without these special tokens defined.
        tokenizer.add_special_tokens({'eos_token': DEFAULT_EOS_TOKEN, 'bos_token': DEFAULT_BOS_TOKEN, 'unk_token': DEFAULT_UNK_TOKEN})
    data_module = make_supervised_data_module(tokenizer=tokenizer, data_args=data_args)
    trainer = Trainer(model=model, tokenizer=tokenizer, args=training_args, **data_module)
    trainer.train()
    trainer.save_model(training_args.output_dir)
|
@dataclasses.dataclass
class OpenAIDecodingArguments(object):
    """Decoding parameters forwarded to the OpenAI completion API."""

    max_tokens: int = 1800
    temperature: float = 0.2
    top_p: float = 1.0
    # Number of completions to request per prompt.
    n: int = 1
    stream: bool = False
    stop: Optional[Sequence[str]] = None
    presence_penalty: float = 0.0
    frequency_penalty: float = 0.0
    suffix: Optional[str] = None
    logprobs: Optional[int] = None
    echo: bool = False
|
def openai_completion(prompts: Union[(str, Sequence[str], Sequence[dict[(str, str)]], dict[(str, str)])], decoding_args: OpenAIDecodingArguments, model_name='text-davinci-003', sleep_time=2, batch_size=1, max_instances=sys.maxsize, max_batches=sys.maxsize, return_text=False, **decoding_kwargs) -> Union[(Union[StrOrOpenAIObject], Sequence[StrOrOpenAIObject], Sequence[Sequence[StrOrOpenAIObject]])]:
    """Decode with OpenAI API.

    Args:
        prompts: A string or a list of strings to complete. For chat models the
            strings (or dicts, or lists thereof) must follow the chat input format.
        decoding_args: Decoding arguments.
        model_name: Model name. Can be either in the format of "org/model" or just "model".
        sleep_time: Time to sleep once the rate-limit is hit.
        batch_size: Number of prompts to send in a single request. Only for non chat model.
        max_instances: Maximum number of prompts to decode.
        max_batches: Maximum number of batches to decode. This argument will be deprecated in the future.
        return_text: If True, return text instead of full completion object (which contains things like logprob).
        decoding_kwargs: Additional decoding arguments. Pass in `best_of` and `logit_bias` if you need them.

    Returns:
        A completion or a list of completions. Depending on return_text and
        decoding_args.n, each completion is a string or an OpenAIObject, and
        completions are grouped into per-prompt lists when n > 1.
    """
    is_single_prompt = isinstance(prompts, (str, dict))
    if is_single_prompt:
        prompts = [prompts]
    if max_batches < sys.maxsize:
        # BUG FIX: the warning message was missing the space between sentences
        # ("instead.Setting").
        logging.warning('`max_batches` will be deprecated in the future, please use `max_instances` instead. Setting `max_instances` to `max_batches * batch_size` for now.')
        max_instances = max_batches * batch_size
    prompts = prompts[:max_instances]
    num_prompts = len(prompts)
    # Chunk the prompts into fixed-size request batches.
    prompt_batches = [prompts[batch_id * batch_size:(batch_id + 1) * batch_size] for batch_id in range(int(math.ceil(num_prompts / batch_size)))]
    completions = []
    for batch_id, prompt_batch in tqdm.tqdm(enumerate(prompt_batches), desc='prompt_batches', total=len(prompt_batches)):
        # Copy per batch: max_tokens may be shrunk below on context-overflow retries.
        batch_decoding_args = copy.deepcopy(decoding_args)
        while True:
            try:
                shared_kwargs = dict(model=model_name, **batch_decoding_args.__dict__, **decoding_kwargs)
                completion_batch = openai.Completion.create(prompt=prompt_batch, **shared_kwargs)
                choices = completion_batch.choices
                for choice in choices:
                    # Attach usage so callers can account for token spend.
                    choice['total_tokens'] = completion_batch.usage.total_tokens
                completions.extend(choices)
                break
            except openai.error.OpenAIError as e:
                logging.warning(f'OpenAIError: {e}.')
                if 'Please reduce your prompt' in str(e):
                    # Context overflow: shrink the target length and retry immediately.
                    batch_decoding_args.max_tokens = int(batch_decoding_args.max_tokens * 0.8)
                    logging.warning(f'Reducing target length to {batch_decoding_args.max_tokens}, Retrying...')
                else:
                    logging.warning('Hit request rate limit; retrying...')
                    time.sleep(sleep_time)
    if return_text:
        completions = [completion.text for completion in completions]
    if decoding_args.n > 1:
        # Regroup the flat choice list into per-prompt lists of n completions.
        completions = [completions[i:(i + decoding_args.n)] for i in range(0, len(completions), decoding_args.n)]
    if is_single_prompt:
        (completions,) = completions
    return completions
|
def _make_w_io_base(f, mode: str):
if (not isinstance(f, io.IOBase)):
f_dirname = os.path.dirname(f)
if (f_dirname != ''):
os.makedirs(f_dirname, exist_ok=True)
f = open(f, mode=mode)
return f
|
def _make_r_io_base(f, mode: str):
if (not isinstance(f, io.IOBase)):
f = open(f, mode=mode)
return f
|
def jdump(obj, f, mode='w', indent=4, default=str):
    """Dump a str or dictionary to a file in json format.

    Args:
        obj: An object to be written.
        f: A string path to the location on disk, or an open writable stream.
        mode: Mode for opening the file.
        indent: Indent for storing json dictionaries.
        default: A function to handle non-serializable entries; defaults to `str`.

    Raises:
        ValueError: If *obj* is neither a dict/list nor a str.
    """
    out = _make_w_io_base(f, mode)
    # BUG FIX: the handle was previously leaked when ValueError was raised;
    # close it on every path.
    try:
        if isinstance(obj, (dict, list)):
            json.dump(obj, out, indent=indent, default=default)
        elif isinstance(obj, str):
            out.write(obj)
        else:
            raise ValueError(f'Unexpected type: {type(obj)}')
    finally:
        out.close()
|
def jload(f, mode='r'):
    """Load a .json file into a dictionary.

    Args:
        f: A string path to the file on disk, or an open readable stream.
        mode: Mode for opening the file.
    """
    fin = _make_r_io_base(f, mode)
    # BUG FIX: the handle was previously leaked when json.load raised;
    # close it on every path.
    try:
        return json.load(fin)
    finally:
        fin.close()
|
@register_model
def mvit_tiny(pretrained=False, **kwargs):
    """Build the MViT-Tiny model from its SlowFast config file.

    NOTE(review): *pretrained* and **kwargs are accepted for registry
    compatibility but are currently ignored — confirm intended.
    """
    cfg = get_cfg()
    cfg.merge_from_file('../SlowFast_dev/configs/ImageNet/MVIT_T_10_CONV.yaml')
    return MViT(cfg)
|
class INatDataset(ImageFolder):
    """iNaturalist dataset built from the official split/category JSON files.

    NOTE(review): ImageFolder.__init__ is never invoked; all attributes that
    the parent's __getitem__ relies on are assigned by hand — confirm intended.
    """

    def __init__(self, root, train=True, year=2018, transform=None, target_transform=None, category='name', loader=default_loader):
        self.transform = transform
        self.loader = loader
        self.target_transform = target_transform
        self.year = year
        # Split annotations for the requested train/val partition and year.
        path_json = os.path.join(root, f"{('train' if train else 'val')}{year}.json")
        with open(path_json) as json_file:
            data = json.load(json_file)
        with open(os.path.join(root, 'categories.json')) as json_file:
            data_catg = json.load(json_file)
        # Class indices are always derived from the TRAIN annotations so that
        # train and val share one label space.
        path_json_for_targeter = os.path.join(root, f'train{year}.json')
        with open(path_json_for_targeter) as json_file:
            data_for_targeter = json.load(json_file)
        targeter = {}
        indexer = 0
        for elem in data_for_targeter['annotations']:
            catg_name = data_catg[int(elem['category_id'])][category]
            if catg_name not in targeter:
                targeter[catg_name] = indexer
                indexer += 1
        self.nb_classes = len(targeter)
        # Map each image file to its contiguous class index.
        self.samples = []
        for elem in data['images']:
            cut = elem['file_name'].split('/')
            target_current = int(cut[2])
            path_current = os.path.join(root, cut[0], cut[2], cut[3])
            target_current_true = targeter[data_catg[target_current][category]]
            self.samples.append((path_current, target_current_true))
|
def build_dataset(is_train, args):
    """Build the dataset selected by ``args.data_set``.

    Args:
        is_train: Whether to build the training split (else validation).
        args: Namespace with at least ``data_set``, ``data_path``,
            ``input_size`` and (for iNat) ``inat_category``.

    Returns:
        Tuple ``(dataset, nb_classes)``.

    Raises:
        ValueError: If ``args.data_set`` names an unknown dataset.
    """
    transform = build_transform(is_train, args)
    if args.data_set == 'CIFAR':
        dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform)
        nb_classes = 100
    elif args.data_set == 'IMNET':
        root = os.path.join(args.data_path, 'train' if is_train else 'val')
        dataset = datasets.ImageFolder(root, transform=transform)
        nb_classes = 1000
    elif args.data_set == 'INAT':
        dataset = INatDataset(args.data_path, train=is_train, year=2018, category=args.inat_category, transform=transform)
        nb_classes = dataset.nb_classes
    elif args.data_set == 'INAT19':
        dataset = INatDataset(args.data_path, train=is_train, year=2019, category=args.inat_category, transform=transform)
        nb_classes = dataset.nb_classes
    elif args.data_set == 'IMNETH5':
        dataset = Imagenet(args.data_path, train=is_train, transform=transform)
        nb_classes = 1000
    else:
        # BUG FIX: an unknown data_set previously fell through and crashed with
        # UnboundLocalError on the return statement; fail explicitly instead.
        raise ValueError(f'Unknown dataset: {args.data_set}')
    return (dataset, nb_classes)
|
def build_transform(is_train, args):
    """Build the train-time augmentation or eval-time resize/normalize pipeline."""
    resize_im = args.input_size > 32
    if is_train:
        transform = create_transform(input_size=args.input_size, is_training=True, color_jitter=args.color_jitter, auto_augment=args.aa, interpolation=args.train_interpolation, re_prob=args.reprob, re_mode=args.remode, re_count=args.recount)
        if not resize_im:
            # Tiny inputs (e.g. CIFAR) use random crop with padding instead of RRC.
            transform.transforms[0] = transforms.RandomCrop(args.input_size, padding=4)
        return transform
    # Evaluation pipeline: resize (keeping the 256/224 crop ratio), center
    # crop, tensor conversion, and ImageNet normalization.
    t = []
    if resize_im:
        size = int((256 / 224) * args.input_size)
        t.append(transforms.Resize(size, interpolation=3))
        t.append(transforms.CenterCrop(args.input_size))
    t.extend([transforms.ToTensor(), transforms.Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD)])
    return transforms.Compose(t)
|
class Mlp(nn.Module):
    """Two-layer token MLP (Linear -> activation -> dropout -> Linear -> dropout)."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0):
        super().__init__()
        # Hidden/output widths default to the input width.
        out_features = out_features if out_features else in_features
        hidden_features = hidden_features if hidden_features else in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
|
class CMlp(nn.Module):
    """Convolutional MLP: two 1x1 convs with activation and dropout in between."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0):
        super().__init__()
        # Hidden/output widths default to the input width.
        out_features = out_features if out_features else in_features
        hidden_features = hidden_features if hidden_features else in_features
        self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
        self.act = act_layer()
        self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
|
class GlobalSparseAttn(nn.Module):
    """Multi-head self-attention over a (optionally subsampled) token grid.

    With ``sr_ratio > 1`` the token grid is average-pooled before attention and
    upsampled back afterwards via a grouped transposed convolution.
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0, sr_ratio=1):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Default attention scale is 1/sqrt(head_dim) unless overridden.
        self.scale = qk_scale if qk_scale else head_dim ** (-0.5)
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.sr = sr_ratio
        if self.sr > 1:
            self.sampler = nn.AvgPool2d(1, sr_ratio)
            kernel_size = sr_ratio
            self.LocalProp = nn.ConvTranspose2d(dim, dim, kernel_size, stride=sr_ratio, groups=dim)
            self.norm = nn.LayerNorm(dim)
        else:
            self.sampler = nn.Identity()
            self.upsample = nn.Identity()  # kept for attribute compatibility; unused
            self.norm = nn.Identity()

    def forward(self, x, H: int, W: int):
        B, N, C = x.shape
        if self.sr > 1.0:
            # Downsample the token grid in 2-D before attention.
            grid = x.transpose(1, 2).reshape(B, C, H, W)
            x = self.sampler(grid).flatten(2).transpose(1, 2)
        qkv = self.qkv(x).reshape(B, -1, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)
        scores = (q @ k.transpose(-2, -1)) * self.scale
        attn = self.attn_drop(scores.softmax(dim=-1))
        x = (attn @ v).transpose(1, 2).reshape(B, -1, C)
        if self.sr > 1:
            # Upsample back to the original grid and normalize.
            small_h = int(H / self.sr)
            small_w = int(W / self.sr)
            x = self.LocalProp(x.permute(0, 2, 1).reshape(B, C, small_h, small_w))
            x = self.norm(x.reshape(B, C, -1).permute(0, 2, 1))
        return self.proj_drop(self.proj(x))
|
class LocalAgg(nn.Module):
    """Local aggregation block: depthwise positional conv, a conv-based token
    mixer (1x1 -> depthwise 5x5 -> 1x1), and a convolutional MLP, each with a
    residual connection and optional stochastic depth."""

    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        # Depthwise 3x3 conv acts as a conditional positional encoding.
        self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
        self.norm1 = nn.BatchNorm2d(dim)
        self.conv1 = nn.Conv2d(dim, dim, 1)
        self.conv2 = nn.Conv2d(dim, dim, 1)
        # Depthwise 5x5 conv mixes tokens within a local neighbourhood.
        self.attn = nn.Conv2d(dim, dim, 5, padding=2, groups=dim)
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = nn.BatchNorm2d(dim)
        hidden_dim = int(dim * mlp_ratio)
        self.mlp = CMlp(in_features=dim, hidden_features=hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x):
        x = x + self.pos_embed(x)
        x = x + self.drop_path(self.conv2(self.attn(self.conv1(self.norm1(x)))))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
|
class SelfAttn(nn.Module):
    """Transformer block over a 2-D feature map: depthwise positional conv,
    sparse global attention, and an MLP, each with a residual connection."""

    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1.0):
        super().__init__()
        # Depthwise 3x3 conv acts as a conditional positional encoding.
        self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
        self.norm1 = norm_layer(dim)
        self.attn = GlobalSparseAttn(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=hidden_dim, act_layer=act_layer, drop=drop)

    def forward(self, x):
        x = x + self.pos_embed(x)
        B, N, H, W = x.shape
        # Flatten (B, C, H, W) -> (B, H*W, C) token sequence for attention.
        tokens = x.flatten(2).transpose(1, 2)
        tokens = tokens + self.drop_path(self.attn(self.norm1(tokens), H, W))
        tokens = tokens + self.drop_path(self.mlp(self.norm2(tokens)))
        # Restore the 2-D feature-map layout.
        return tokens.transpose(1, 2).reshape(B, N, H, W)
|
class LGLBlock(nn.Module):
    """Local-Global-Local block: local aggregation followed by (sparse)
    global self-attention."""

    def __init__(self, dim, num_heads, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop=0.0, attn_drop=0.0, drop_path=0.0, act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1.0):
        super().__init__()
        # Local aggregation only pays off when attention subsamples (sr > 1);
        # otherwise it is skipped via Identity.
        if sr_ratio > 1:
            self.LocalAgg = LocalAgg(dim, num_heads, mlp_ratio, qkv_bias, qk_scale, drop, attn_drop, drop_path, act_layer, norm_layer)
        else:
            self.LocalAgg = nn.Identity()
        self.SelfAttn = SelfAttn(dim, num_heads, mlp_ratio, qkv_bias, qk_scale, drop, attn_drop, drop_path, act_layer, norm_layer, sr_ratio)

    def forward(self, x):
        return self.SelfAttn(self.LocalAgg(x))
|
class PatchEmbed(nn.Module):
    """ Image to Patch Embedding
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        self.norm = nn.LayerNorm(embed_dim)
        # Non-overlapping patch projection (stride == kernel == patch size).
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        B, C, H, W = x.shape
        assert H == self.img_size[0] and W == self.img_size[1], f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        x = self.proj(x)
        B, C, H, W = x.shape
        # LayerNorm runs over the channel dim, so flatten to (B, HW, C) first…
        x = self.norm(x.flatten(2).transpose(1, 2))
        # …then restore the (B, C, H, W) feature-map layout.
        return x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
|
class EdgeVit(nn.Module):
' Vision Transformer\n A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -\n https://arxiv.org/abs/2010.11929\n '
def __init__(self, depth=[1, 2, 5, 3], img_size=224, in_chans=3, num_classes=1000, embed_dim=[48, 96, 240, 384], head_dim=64, mlp_ratio=4.0, qkv_bias=True, qk_scale=None, representation_size=None, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, norm_layer=None, sr_ratios=[4, 2, 2, 1], **kwargs):
'\n Args:\n depth (list): depth of each stage\n img_size (int, tuple): input image size\n in_chans (int): number of input channels\n num_classes (int): number of classes for classification head\n embed_dim (list): embedding dimension of each stage\n head_dim (int): head dimension\n mlp_ratio (int): ratio of mlp hidden dim to embedding dim\n qkv_bias (bool): enable bias for qkv if True\n qk_scale (float): override default qk scale of head_dim ** -0.5 if set\n representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set\n drop_rate (float): dropout rate\n attn_drop_rate (float): attention dropout rate\n drop_path_rate (float): stochastic depth rate\n norm_layer (nn.Module): normalization layer\n '
super().__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim
norm_layer = (norm_layer or partial(nn.LayerNorm, eps=1e-06))
self.patch_embed1 = PatchEmbed(img_size=img_size, patch_size=4, in_chans=in_chans, embed_dim=embed_dim[0])
self.patch_embed2 = PatchEmbed(img_size=(img_size // 4), patch_size=2, in_chans=embed_dim[0], embed_dim=embed_dim[1])
self.patch_embed3 = PatchEmbed(img_size=(img_size // 8), patch_size=2, in_chans=embed_dim[1], embed_dim=embed_dim[2])
self.patch_embed4 = PatchEmbed(img_size=(img_size // 16), patch_size=2, in_chans=embed_dim[2], embed_dim=embed_dim[3])
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depth))]
num_heads = [(dim // head_dim) for dim in embed_dim]
self.blocks1 = nn.ModuleList([LGLBlock(dim=embed_dim[0], num_heads=num_heads[0], mlp_ratio=mlp_ratio[0], qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, sr_ratio=sr_ratios[0]) for i in range(depth[0])])
self.blocks2 = nn.ModuleList([LGLBlock(dim=embed_dim[1], num_heads=num_heads[1], mlp_ratio=mlp_ratio[1], qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[(i + depth[0])], norm_layer=norm_layer, sr_ratio=sr_ratios[1]) for i in range(depth[1])])
self.blocks3 = nn.ModuleList([LGLBlock(dim=embed_dim[2], num_heads=num_heads[2], mlp_ratio=mlp_ratio[2], qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[((i + depth[0]) + depth[1])], norm_layer=norm_layer, sr_ratio=sr_ratios[2]) for i in range(depth[2])])
self.blocks4 = nn.ModuleList([LGLBlock(dim=embed_dim[3], num_heads=num_heads[3], mlp_ratio=mlp_ratio[3], qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[(((i + depth[0]) + depth[1]) + depth[2])], norm_layer=norm_layer, sr_ratio=sr_ratios[3]) for i in range(depth[3])])
self.norm = nn.BatchNorm2d(embed_dim[(- 1)])
if representation_size:
self.num_features = representation_size
self.pre_logits = nn.Sequential(OrderedDict([('fc', nn.Linear(embed_dim, representation_size)), ('act', nn.Tanh())]))
else:
self.pre_logits = nn.Identity()
self.head = (nn.Linear(embed_dim[(- 1)], num_classes) if (num_classes > 0) else nn.Identity())
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if (isinstance(m, nn.Linear) and (m.bias is not None)):
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
    def get_classifier(self):
        # timm API hook: expose the classification head module.
        return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = (nn.Linear(self.embed_dim, num_classes) if (num_classes > 0) else nn.Identity())
def forward_features(self, x):
x = self.patch_embed1(x)
x = self.pos_drop(x)
for blk in self.blocks1:
x = blk(x)
x = self.patch_embed2(x)
for blk in self.blocks2:
x = blk(x)
x = self.patch_embed3(x)
for blk in self.blocks3:
x = blk(x)
x = self.patch_embed4(x)
for blk in self.blocks4:
x = blk(x)
x = self.norm(x)
x = self.pre_logits(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = x.flatten(2).mean((- 1))
x = self.head(x)
return x
|
@register_model
def edgevit_xxs(pretrained=True, **kwargs):
    """EdgeViT-XXS: depths 1/1/3/2, widths 36/72/144/288, head_dim 36.

    NOTE(review): `pretrained` is accepted but never used — no checkpoint is
    loaded here; confirm against the upstream weight-loading convention.
    """
    model = EdgeVit(depth=[1, 1, 3, 2], embed_dim=[36, 72, 144, 288], head_dim=36, mlp_ratio=([4] * 4), qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-06), sr_ratios=[4, 2, 2, 1], **kwargs)
    model.default_cfg = _cfg()
    return model
|
@register_model
def edgevit_xs(pretrained=True, **kwargs):
    """EdgeViT-XS: depths 1/1/3/1, widths 48/96/240/384, head_dim 48.

    NOTE(review): `pretrained` is accepted but never used — no checkpoint is
    loaded here; confirm against the upstream weight-loading convention.
    """
    model = EdgeVit(depth=[1, 1, 3, 1], embed_dim=[48, 96, 240, 384], head_dim=48, mlp_ratio=([4] * 4), qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-06), sr_ratios=[4, 2, 2, 1], **kwargs)
    model.default_cfg = _cfg()
    return model
|
@register_model
def edgevit_s(pretrained=True, **kwargs):
    """EdgeViT-S: depths 1/2/5/3, widths 48/96/240/384, head_dim 48.

    NOTE(review): `pretrained` is accepted but never used — no checkpoint is
    loaded here; confirm against the upstream weight-loading convention.
    """
    model = EdgeVit(depth=[1, 2, 5, 3], embed_dim=[48, 96, 240, 384], head_dim=48, mlp_ratio=([4] * 4), qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-06), sr_ratios=[4, 2, 2, 1], **kwargs)
    model.default_cfg = _cfg()
    return model
|
def train_one_epoch(model: torch.nn.Module, criterion: DistillationLoss, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, max_norm: float=0, model_ema: Optional[ModelEma]=None, mixup_fn: Optional[Mixup]=None, set_training_mode=True):
    """Train `model` for one epoch under CUDA AMP autocast.

    Per batch: move tensors to `device`, optionally apply mixup, compute
    `criterion(samples, outputs, targets)` (distillation-style signature),
    then delegate backward/unscale/clip/step to `loss_scaler` (clipping to
    `max_norm`).  Exits the process on a non-finite loss.  Returns
    {meter_name: global_avg} for all tracked meters.
    """
    model.train(set_training_mode)
    metric_logger = utils.MetricLogger(delimiter=' ')
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10
    for (samples, targets) in metric_logger.log_every(data_loader, print_freq, header):
        samples = samples.to(device, non_blocking=True)
        targets = targets.to(device, non_blocking=True)
        if (mixup_fn is not None):
            # Mixup/CutMix mixes samples and produces soft targets.
            (samples, targets) = mixup_fn(samples, targets)
        with torch.cuda.amp.autocast():
            outputs = model(samples)
            # DistillationLoss signature: (inputs, student outputs, labels).
            loss = criterion(samples, outputs, targets)
        loss_value = loss.item()
        if (not math.isfinite(loss_value)):
            print('Loss is {}, stopping training'.format(loss_value))
            sys.exit(1)
        optimizer.zero_grad()
        # Second-order optimizers advertise needing create_graph for backward.
        is_second_order = (hasattr(optimizer, 'is_second_order') and optimizer.is_second_order)
        loss_scaler(loss, optimizer, clip_grad=max_norm, parameters=model.parameters(), create_graph=is_second_order)
        torch.cuda.synchronize()
        if (model_ema is not None):
            # EMA shadow weights track the model after every optimizer step.
            model_ema.update(model)
        metric_logger.update(loss=loss_value)
        metric_logger.update(lr=optimizer.param_groups[0]['lr'])
    metric_logger.synchronize_between_processes()
    print('Averaged stats:', metric_logger)
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
|
@torch.no_grad()
def evaluate(data_loader, model, device):
    """Evaluate `model` on `data_loader`: top-1/top-5 accuracy and
    cross-entropy loss, averaged per sample.  Returns {name: global_avg}."""
    criterion = torch.nn.CrossEntropyLoss()
    metric_logger = utils.MetricLogger(delimiter=' ')
    header = 'Test:'
    model.eval()
    for (images, target) in metric_logger.log_every(data_loader, 10, header):
        images = images.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)
        with torch.cuda.amp.autocast():
            output = model(images)
            loss = criterion(output, target)
        (acc1, acc5) = accuracy(output, target, topk=(1, 5))
        batch_size = images.shape[0]
        metric_logger.update(loss=loss.item())
        # Weight accuracy meters by batch size so global_avg is per-sample.
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
    metric_logger.synchronize_between_processes()
    print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
|
class Imagenet(torch.utils.data.Dataset):
    """ImageNet dataset backed by one HDF5 file per split.

    Expects ``<root_path>/train.h5`` or ``<root_path>/val.h5`` laid out as
    ``file[class_name][image_name] -> encoded image bytes``.  Class indices
    are assigned by sorting the class-name keys.
    """

    def __init__(self, root_path, train=True, transform=None):
        self.mode = 'train' if train else 'val'
        self.data_path = root_path
        self._construct_imdb_h5()
        self.transform = transform

    def safe_record_loader(self, raw_frame, attempts=10, retry_delay=1):
        """Decode `raw_frame` bytes into an RGB PIL image, retrying on OSError.

        Re-raises on the final attempt so persistent corruption surfaces.
        """
        for j in range(attempts):
            try:
                return Image.open(io.BytesIO(raw_frame)).convert('RGB')
            except OSError as e:
                print(f'Attempt {j}/{attempts}: failed to load\n{e}', flush=True)
                if j == (attempts - 1):
                    raise
                time.sleep(retry_delay)

    def _construct_imdb_h5(self):
        """Index the HDF5 file into self._imdb.

        The handle used for indexing is closed immediately after scanning
        (previously it was left open — a file-handle leak); per-worker read
        handles are opened lazily in __load_h5__.
        """
        self.h5_fp = os.path.join(self.data_path, self.mode + '.h5')
        assert osp.isfile(self.h5_fp), 'File not found: {}'.format(self.h5_fp)
        self.h5_file = None  # opened lazily per worker in __load_h5__
        self._imdb = []
        with h5py.File(self.h5_fp, 'r') as h5_file:
            labels = sorted(list(h5_file.keys()))
            for key, value in h5_file.items():
                target = labels.index(key)
                for img_name in value.keys():
                    self._imdb.append({'image_name': img_name, 'class_name': key, 'class': target})
        self.num_videos = len(self._imdb)

    def __load_h5__(self, index):
        """Fetch and decode image `index`; returns None on any failure."""
        try:
            if self.h5_file is None:
                # Lazy open so each DataLoader worker gets its own handle.
                self.h5_file = h5py.File(self.h5_fp, 'r')
            record = self._imdb[index]
            raw_frame = self.h5_file[record['class_name']][record['image_name']][()]
            return self.safe_record_loader(raw_frame)
        except Exception:
            # NOTE(review): swallowing here means __getitem__ may hand None to
            # the transform and fail later — consider logging the index.
            return None

    def __getitem__(self, index):
        im = self.__load_h5__(index)
        if self.transform is not None:
            im = self.transform(im)
        label = self._imdb[index]['class']
        return (im, label)

    def __len__(self):
        return len(self._imdb)
|
class DistillationLoss(torch.nn.Module):
    """Wraps a base criterion and adds a knowledge-distillation term computed
    against a frozen teacher model's predictions."""

    def __init__(self, base_criterion: torch.nn.Module, teacher_model: torch.nn.Module, distillation_type: str, alpha: float, tau: float):
        super().__init__()
        self.base_criterion = base_criterion
        self.teacher_model = teacher_model
        assert (distillation_type in ['none', 'soft', 'hard'])
        self.distillation_type = distillation_type
        # alpha: weight of the distillation term; tau: softmax temperature.
        self.alpha = alpha
        self.tau = tau

    def forward(self, inputs, outputs, labels):
        """
        Args:
            inputs: The original inputs that are feed to the teacher model
            outputs: the outputs of the model to be trained. It is expected to be
                either a Tensor, or a Tuple[Tensor, Tensor], with the original output
                in the first position and the distillation predictions as the second output
            labels: the labels for the base criterion
        """
        outputs_kd = None
        if (not isinstance(outputs, torch.Tensor)):
            # Model returned (class_token_output, dist_token_output).
            (outputs, outputs_kd) = outputs
        base_loss = self.base_criterion(outputs, labels)
        if (self.distillation_type == 'none'):
            return base_loss
        if (outputs_kd is None):
            raise ValueError('When knowledge distillation is enabled, the model is expected to return a Tuple[Tensor, Tensor] with the output of the class_token and the dist_token')
        with torch.no_grad():
            # Teacher is inference-only; no gradients flow into it.
            teacher_outputs = self.teacher_model(inputs)
        if (self.distillation_type == 'soft'):
            T = self.tau
            # KL between temperature-softened distributions, scaled by T^2 and
            # normalized by total element count rather than batch size.
            distillation_loss = ((F.kl_div(F.log_softmax((outputs_kd / T), dim=1), F.log_softmax((teacher_outputs / T), dim=1), reduction='sum', log_target=True) * (T * T)) / outputs_kd.numel())
        elif (self.distillation_type == 'hard'):
            # Cross-entropy against the teacher's argmax labels.
            distillation_loss = F.cross_entropy(outputs_kd, teacher_outputs.argmax(dim=1))
        loss = ((base_loss * (1 - self.alpha)) + (distillation_loss * self.alpha))
        return loss
|
class RASampler(torch.utils.data.Sampler):
    """Sampler that restricts data loading to a subset of the dataset for distributed,
    with repeated augmentation.

    Each sample is repeated `num_repeats` times and the repeats are strided
    across ranks, so each augmented version of a sample is visible to a
    different process (GPU).  Heavily based on torch.utils.data.DistributedSampler.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True, num_repeats: int=3):
        if (num_replicas is None):
            if (not dist.is_available()):
                raise RuntimeError('Requires distributed package to be available')
            num_replicas = dist.get_world_size()
        if (rank is None):
            if (not dist.is_available()):
                raise RuntimeError('Requires distributed package to be available')
            rank = dist.get_rank()
        if (num_repeats < 1):
            raise ValueError('num_repeats should be greater than 0')
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.num_repeats = num_repeats
        self.epoch = 0
        # Per-rank share of the repeated dataset (ceil so every rank gets the
        # same count; the index list is padded to match in __iter__).
        self.num_samples = int(math.ceil(((len(self.dataset) * self.num_repeats) / self.num_replicas)))
        self.total_size = (self.num_samples * self.num_replicas)
        # Actually yielded per rank: dataset size floored to a multiple of 256,
        # split across replicas — keeps epoch length ~ one pass over the data.
        self.num_selected_samples = int(math.floor((((len(self.dataset) // 256) * 256) / self.num_replicas)))
        self.shuffle = shuffle

    def __iter__(self):
        if self.shuffle:
            # Seed by epoch so all ranks draw the same permutation.
            g = torch.Generator()
            g.manual_seed(self.epoch)
            indices = torch.randperm(len(self.dataset), generator=g)
        else:
            indices = torch.arange(start=0, end=len(self.dataset))
        # Repeat each index num_repeats times, pad to total_size, then take
        # this rank's strided slice so repeats land on different ranks.
        indices = torch.repeat_interleave(indices, repeats=self.num_repeats, dim=0).tolist()
        padding_size: int = (self.total_size - len(indices))
        if (padding_size > 0):
            indices += indices[:padding_size]
        assert (len(indices) == self.total_size)
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert (len(indices) == self.num_samples)
        return iter(indices[:self.num_selected_samples])

    def __len__(self):
        return self.num_selected_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
|
class SmoothedValue(object):
    """Track a stream of scalar values, exposing window-smoothed statistics
    (median/avg/max over the last ``window_size`` entries) alongside the
    global average over everything seen so far."""

    def __init__(self, window_size=20, fmt=None):
        self.deque = deque(maxlen=window_size)  # most recent raw values
        self.total = 0.0  # running sum over all updates
        self.count = 0    # number of samples represented
        self.fmt = fmt if fmt is not None else '{median:.4f} ({global_avg:.4f})'

    def update(self, value, n=1):
        # `value` is recorded once in the window but counted `n` times globally.
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(t)
        count, total = t.tolist()
        self.count = int(count)
        self.total = total

    @property
    def median(self):
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(median=self.median, avg=self.avg,
                               global_avg=self.global_avg, max=self.max,
                               value=self.value)
|
class MetricLogger(object):
    """Aggregates named SmoothedValue meters and pretty-prints periodic
    progress (ETA, iter/data timing, max GPU memory) while iterating."""

    def __init__(self, delimiter='\t'):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        # Accepts scalar tensors or plain numbers; one meter per keyword.
        for (k, v) in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Expose meters as attributes (e.g. metric_logger.loss).  Only invoked
        # when normal attribute lookup fails, so real attributes win.
        if (attr in self.meters):
            return self.meters[attr]
        if (attr in self.__dict__):
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for (name, meter) in self.meters.items():
            loss_str.append('{}: {}'.format(name, str(meter)))
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        """Generator: yields items from `iterable`, printing timing/meter stats
        every `print_freq` iterations and a total-time summary at the end.
        Requires `iterable` to support len()."""
        i = 0
        if (not header):
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # Width spec so iteration counters align, e.g. [  7/120].
        space_fmt = ((':' + str(len(str(len(iterable))))) + 'd')
        log_msg = [header, (('[{0' + space_fmt) + '}/{1}]'), 'eta: {eta}', '{meters}', 'time: {time}', 'data: {data}']
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = (1024.0 * 1024.0)
        for obj in iterable:
            # data_time: wait on the loader; iter_time: full iteration incl. caller work.
            data_time.update((time.time() - end))
            (yield obj)
            iter_time.update((time.time() - end))
            if (((i % print_freq) == 0) or (i == (len(iterable) - 1))):
                eta_seconds = (iter_time.global_avg * (len(iterable) - i))
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), memory=(torch.cuda.max_memory_allocated() / MB)))
                else:
                    print(log_msg.format(i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = (time.time() - start_time)
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(header, total_time_str, (total_time / len(iterable))))
|
def _load_checkpoint_for_ema(model_ema, checkpoint):
    """
    Workaround for ModelEma._load_checkpoint to accept an already-loaded object
    """
    # Round-trip the in-memory checkpoint through a BytesIO buffer so the EMA
    # helper can treat it as a file.
    buffer = io.BytesIO()
    torch.save(checkpoint, buffer)
    buffer.seek(0)
    model_ema._load_checkpoint(buffer)
|
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    (pass force=True to an individual print call to override).
    """
    import builtins
    original_print = builtins.print

    def print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if force or is_master:
            original_print(*args, **kwargs)

    builtins.print = print
|
def is_dist_avail_and_initialized():
    """Return True iff torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()
|
def get_world_size():
    """Number of distributed processes, or 1 when not running distributed."""
    if is_dist_avail_and_initialized():
        return dist.get_world_size()
    return 1
|
def get_rank():
    """This process's distributed rank, or 0 when not running distributed."""
    if is_dist_avail_and_initialized():
        return dist.get_rank()
    return 0
|
def is_main_process():
    """True on rank 0 (or when not running distributed)."""
    return get_rank() == 0
|
def save_on_master(*args, **kwargs):
    """torch.save, but only on the master process (avoids concurrent writes)."""
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
|
def init_distributed_mode(args):
    """Initialize torch.distributed from environment variables, mutating `args`.

    Detects torchrun-style env (RANK/WORLD_SIZE/LOCAL_RANK) or SLURM
    (SLURM_PROCID); otherwise disables distributed mode.  Sets args.rank,
    args.gpu, args.distributed, args.dist_backend (and args.world_size on the
    torchrun path), then joins the NCCL process group at args.dist_url.
    """
    if (('RANK' in os.environ) and ('WORLD_SIZE' in os.environ)):
        args.rank = int(os.environ['RANK'])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif ('SLURM_PROCID' in os.environ):
        # NOTE(review): this branch assumes args.world_size is already set by
        # the caller — it is not derived from the environment here.
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = (args.rank % torch.cuda.device_count())
    else:
        print('Not using distributed mode')
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    # Silence print() on non-master ranks.
    setup_for_distributed((args.rank == 0))
|
class BatchGraph():
    """Accumulates multiple DGLGraphs into one big disjoint graph, remembering
    which node ids belong to which subgraph so they can be un-batched later."""

    def __init__(self):
        self.graph = DGLGraph()
        self.number_of_nodes = 0
        self.graphid_to_nodeids = {}  # subgraph index -> LongTensor of node ids
        self.num_of_subgraphs = 0

    def add_subgraph(self, _g):
        """Append `_g`'s nodes/edges, offsetting its ids past existing nodes."""
        assert isinstance(_g, DGLGraph)
        num_new_nodes = _g.number_of_nodes()
        self.graphid_to_nodeids[self.num_of_subgraphs] = torch.LongTensor(list(range(self.number_of_nodes, (self.number_of_nodes + num_new_nodes))))
        self.graph.add_nodes(num_new_nodes, data=_g.ndata)
        (sources, dests) = _g.all_edges()
        # Shift edge endpoints into the batched graph's id space.
        sources += self.number_of_nodes
        dests += self.number_of_nodes
        self.graph.add_edges(sources, dests, data=_g.edata)
        self.number_of_nodes += num_new_nodes
        self.num_of_subgraphs += 1

    def cuda(self, device=None):
        # Only the per-subgraph index tensors are moved here; graph tensors are
        # moved by callers (see GGNNBatchGraph.get_network_inputs).
        for k in self.graphid_to_nodeids.keys():
            self.graphid_to_nodeids[k] = self.graphid_to_nodeids[k].cuda(device=device)

    def de_batchify_graphs(self, features=None):
        """Split batched node `features` back into per-graph tensors, zero-padded
        to the largest graph.  Returns ((batch, max_nodes, dim) tensor, lengths)."""
        if (features is None):
            features = self.graph.ndata['features']
        assert isinstance(features, torch.Tensor)
        vectors = [features.index_select(dim=0, index=self.graphid_to_nodeids[gid]) for gid in self.graphid_to_nodeids.keys()]
        lengths = [f.size(0) for f in vectors]
        max_len = max(lengths)
        for (i, v) in enumerate(vectors):
            # Zero-pad each graph's rows up to the longest graph in the batch.
            vectors[i] = torch.cat((v, torch.zeros(size=((max_len - v.size(0)), *v.shape[1:]), requires_grad=v.requires_grad, device=v.device)), dim=0)
        output_vectors = torch.stack(vectors)
        lengths = torch.LongTensor(lengths).to(device=output_vectors.device)
        return (output_vectors, lengths)

    def get_network_inputs(self, cuda=False):
        raise NotImplementedError('Must be implemented by subclasses.')
|
class GGNNBatchGraph(BatchGraph):
    """BatchGraph variant that exposes the inputs a GatedGraphConv expects."""

    def get_network_inputs(self, cuda=False, device=None):
        """Return (graph, node features, edge types), optionally on GPU."""
        features = self.graph.ndata['features']
        edge_types = self.graph.edata['etype']
        if not cuda:
            return (self.graph, features, edge_types)
        # Move the per-subgraph index tensors too, then the data tensors.
        self.cuda(device=device)
        return (self.graph, features.cuda(device=device), edge_types.cuda(device=device))
|
class DataEntry():
    """One sample: a DGLGraph with node 'features' and per-edge 'etype' ids,
    plus its scalar label `target`.  `datset` (sic — parameter name kept for
    keyword callers) supplies edge-type numbering via get_edge_type_number()."""

    def __init__(self, datset, num_nodes, features, edges, target):
        self.dataset = datset
        self.num_nodes = num_nodes
        self.target = target
        self.graph = DGLGraph()
        self.features = torch.FloatTensor(features)
        self.graph.add_nodes(self.num_nodes, data={'features': self.features})
        for (s, _type, t) in edges:
            # Edge types are mapped to dense integer ids by the owning dataset.
            etype_number = self.dataset.get_edge_type_number(_type)
            self.graph.add_edge(s, t, data={'etype': torch.LongTensor([etype_number])})
|
class DataSet():
    """Train/valid/test splits of a graph dataset for GGNN-style models.

    Splits are read from JSON files whose entries are keyed by the
    node/graph/label identifiers (n_ident/g_ident/l_ident).  Batch index lists
    are stored reversed so get_next_*_batch() can pop() from the end.
    """

    def __init__(self, train_src, valid_src=None, test_src=None, batch_size=32, n_ident=None, g_ident=None, l_ident=None):
        self.train_examples = []
        self.valid_examples = []
        self.test_examples = []
        self.train_batches = []
        self.valid_batches = []
        self.test_batches = []
        self.batch_size = batch_size
        self.edge_types = {}  # edge-type name -> dense integer id
        self.max_etype = 0
        self.feature_size = 0
        (self.n_ident, self.g_ident, self.l_ident) = load_default_identifiers(n_ident, g_ident, l_ident)
        self.read_dataset(test_src, train_src, valid_src)
        self.initialize_dataset()

    def initialize_dataset(self):
        # Build the initial batch index lists for every split.
        self.initialize_train_batch()
        self.initialize_valid_batch()
        self.initialize_test_batch()

    def read_dataset(self, test_src, train_src, valid_src):
        """Parse the JSON split files into DataEntry objects.

        feature_size is inferred from the first training example; valid/test
        files are optional.
        """
        debug('Reading Train File!')
        with open(train_src) as fp:
            train_data = json.load(fp)
            for entry in tqdm(train_data):
                example = DataEntry(datset=self, num_nodes=len(entry[self.n_ident]), features=entry[self.n_ident], edges=entry[self.g_ident], target=entry[self.l_ident][0][0])
                if (self.feature_size == 0):
                    self.feature_size = example.features.size(1)
                    debug(('Feature Size %d' % self.feature_size))
                self.train_examples.append(example)
        if (valid_src is not None):
            debug('Reading Validation File!')
            with open(valid_src) as fp:
                valid_data = json.load(fp)
                for entry in tqdm(valid_data):
                    example = DataEntry(datset=self, num_nodes=len(entry[self.n_ident]), features=entry[self.n_ident], edges=entry[self.g_ident], target=entry[self.l_ident][0][0])
                    self.valid_examples.append(example)
        if (test_src is not None):
            debug('Reading Test File!')
            with open(test_src) as fp:
                test_data = json.load(fp)
                for entry in tqdm(test_data):
                    example = DataEntry(datset=self, num_nodes=len(entry[self.n_ident]), features=entry[self.n_ident], edges=entry[self.g_ident], target=entry[self.l_ident][0][0])
                    self.test_examples.append(example)

    def get_edge_type_number(self, _type):
        # Assign dense integer ids to edge-type names on first sight.
        if (_type not in self.edge_types):
            self.edge_types[_type] = self.max_etype
            self.max_etype += 1
        return self.edge_types[_type]

    @property
    def max_edge_type(self):
        return self.max_etype

    def initialize_train_batch(self, batch_size=(- 1)):
        """(Re)shuffle and rebuild train batches; returns the batch count."""
        if (batch_size == (- 1)):
            batch_size = self.batch_size
        self.train_batches = initialize_batch(self.train_examples, batch_size, shuffle=True)
        return len(self.train_batches)
        pass

    def initialize_valid_batch(self, batch_size=(- 1)):
        """Rebuild valid batches (no shuffling); returns the batch count."""
        if (batch_size == (- 1)):
            batch_size = self.batch_size
        self.valid_batches = initialize_batch(self.valid_examples, batch_size)
        return len(self.valid_batches)
        pass

    def initialize_test_batch(self, batch_size=(- 1)):
        """Rebuild test batches (no shuffling); returns the batch count."""
        if (batch_size == (- 1)):
            batch_size = self.batch_size
        self.test_batches = initialize_batch(self.test_examples, batch_size)
        return len(self.test_batches)
        pass

    def get_dataset_by_ids_for_GGNN(self, entries, ids):
        """Assemble the entries at `ids` into one GGNNBatchGraph + label tensor."""
        taken_entries = [entries[i] for i in ids]
        labels = [e.target for e in taken_entries]
        batch_graph = GGNNBatchGraph()
        for entry in taken_entries:
            # deepcopy so add_subgraph's id shifting never mutates the source graph.
            batch_graph.add_subgraph(copy.deepcopy(entry.graph))
        return (batch_graph, torch.FloatTensor(labels))

    def get_next_train_batch(self):
        # Refill (and reshuffle) automatically when the split is exhausted.
        if (len(self.train_batches) == 0):
            self.initialize_train_batch()
        ids = self.train_batches.pop()
        return self.get_dataset_by_ids_for_GGNN(self.train_examples, ids)

    def get_next_valid_batch(self):
        if (len(self.valid_batches) == 0):
            self.initialize_valid_batch()
        ids = self.valid_batches.pop()
        return self.get_dataset_by_ids_for_GGNN(self.valid_examples, ids)

    def get_next_test_batch(self):
        if (len(self.test_batches) == 0):
            self.initialize_test_batch()
        ids = self.test_batches.pop()
        return self.get_dataset_by_ids_for_GGNN(self.test_examples, ids)
|
class DevignModel(nn.Module):
    """Devign vulnerability-detection model: GGNN message passing followed by
    two parallel Conv1d/MaxPool pipelines — one over node states (Y), one over
    node states concatenated with raw inputs (Z) — combined multiplicatively
    and averaged into a single sigmoid probability per graph."""

    def __init__(self, input_dim, output_dim, max_edge_types, num_steps=8):
        super(DevignModel, self).__init__()
        self.inp_dim = input_dim
        self.out_dim = output_dim
        self.max_edge_types = max_edge_types
        self.num_timesteps = num_steps
        self.ggnn = GatedGraphConv(in_feats=input_dim, out_feats=output_dim, n_steps=num_steps, n_etypes=max_edge_types)
        # Pipeline over GGNN outputs (h_i).
        self.conv_l1 = torch.nn.Conv1d(output_dim, output_dim, 3)
        self.maxpool1 = torch.nn.MaxPool1d(3, stride=2)
        self.conv_l2 = torch.nn.Conv1d(output_dim, output_dim, 1)
        self.maxpool2 = torch.nn.MaxPool1d(2, stride=2)
        # Pipeline over the [h_i ; x_i] concatenation.
        self.concat_dim = (input_dim + output_dim)
        self.conv_l1_for_concat = torch.nn.Conv1d(self.concat_dim, self.concat_dim, 3)
        self.maxpool1_for_concat = torch.nn.MaxPool1d(3, stride=2)
        self.conv_l2_for_concat = torch.nn.Conv1d(self.concat_dim, self.concat_dim, 1)
        self.maxpool2_for_concat = torch.nn.MaxPool1d(2, stride=2)
        self.mlp_z = nn.Linear(in_features=self.concat_dim, out_features=1)
        self.mlp_y = nn.Linear(in_features=output_dim, out_features=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, batch, cuda=False):
        """`batch` is a GGNNBatchGraph; returns per-graph probabilities, shape (batch,)."""
        (graph, features, edge_types) = batch.get_network_inputs(cuda=cuda)
        outputs = self.ggnn(graph, features, edge_types)
        # De-batch into dense zero-padded (batch, max_nodes, dim) tensors.
        (x_i, _) = batch.de_batchify_graphs(features)
        (h_i, _) = batch.de_batchify_graphs(outputs)
        c_i = torch.cat((h_i, x_i), dim=(- 1))
        (batch_size, num_node, _) = c_i.size()
        # Conv1d expects (batch, channels, length) -> transpose node/feature dims.
        Y_1 = self.maxpool1(f.relu(self.conv_l1(h_i.transpose(1, 2))))
        Y_2 = self.maxpool2(f.relu(self.conv_l2(Y_1))).transpose(1, 2)
        Z_1 = self.maxpool1_for_concat(f.relu(self.conv_l1_for_concat(c_i.transpose(1, 2))))
        Z_2 = self.maxpool2_for_concat(f.relu(self.conv_l2_for_concat(Z_1))).transpose(1, 2)
        # Element-wise gating of the two pipelines, averaged over positions.
        before_avg = torch.mul(self.mlp_y(Y_2), self.mlp_z(Z_2))
        avg = before_avg.mean(dim=1)
        result = self.sigmoid(avg).squeeze(dim=(- 1))
        return result
|
class GGNNSum(nn.Module):
    """Simpler Devign baseline: GGNN message passing, sum-pool node states per
    graph, then a single linear + sigmoid binary classifier."""

    def __init__(self, input_dim, output_dim, max_edge_types, num_steps=8):
        super(GGNNSum, self).__init__()
        self.inp_dim = input_dim
        self.out_dim = output_dim
        self.max_edge_types = max_edge_types
        self.num_timesteps = num_steps
        self.ggnn = GatedGraphConv(in_feats=input_dim, out_feats=output_dim, n_steps=num_steps, n_etypes=max_edge_types)
        self.classifier = nn.Linear(in_features=output_dim, out_features=1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, batch, cuda=False):
        """`batch` is a GGNNBatchGraph; returns per-graph probabilities, shape (batch,)."""
        (graph, features, edge_types) = batch.get_network_inputs(cuda=cuda)
        outputs = self.ggnn(graph, features, edge_types)
        (h_i, _) = batch.de_batchify_graphs(outputs)
        # Sum over nodes; zero padding rows do not contribute to the sum.
        ggnn_sum = self.classifier(h_i.sum(dim=1))
        result = self.sigmoid(ggnn_sum).squeeze(dim=(- 1))
        return result
|
def evaluate_loss(model, loss_function, num_batches, data_iter, cuda=False):
    """Average loss and accuracy over `num_batches` drawn from `data_iter`.

    `data_iter` is a zero-argument callable returning (batch_graph, targets).
    Returns (mean_loss, accuracy_percent) — note: accuracy, not f1.  Leaves
    the model back in train() mode.  NOTE(review): the `cuda` parameter is
    ignored; data and model calls always use GPU.
    """
    model.eval()
    with torch.no_grad():
        _loss = []
        (all_predictions, all_targets) = ([], [])
        for _ in range(num_batches):
            (graph, targets) = data_iter()
            targets = targets.cuda()
            predictions = model(graph, cuda=True)
            batch_loss = loss_function(predictions, targets)
            _loss.append(batch_loss.detach().cpu().item())
            predictions = predictions.detach().cpu()
            if (predictions.ndim == 2):
                # Multi-class scores: predicted class is the argmax.
                all_predictions.extend(np.argmax(predictions.numpy(), axis=(- 1)).tolist())
            else:
                # Binary probabilities: threshold at 0.5.
                all_predictions.extend(predictions.ge(torch.ones(size=predictions.size()).fill_(0.5)).to(dtype=torch.int32).numpy().tolist())
            all_targets.extend(targets.detach().cpu().numpy().tolist())
        model.train()
        return (np.mean(_loss).item(), (accuracy_score(all_targets, all_predictions) * 100))
    pass
|
def evaluate_metrics(model, loss_function, num_batches, data_iter):
    """Score `model` over `num_batches` from `data_iter`.

    Returns (accuracy, precision, recall, f1), each as a percentage.  Largely
    duplicates evaluate_loss but reports classification metrics instead of the
    mean loss.  Leaves the model back in train() mode.
    """
    model.eval()
    with torch.no_grad():
        _loss = []
        (all_predictions, all_targets) = ([], [])
        for _ in range(num_batches):
            (graph, targets) = data_iter()
            targets = targets.cuda()
            predictions = model(graph, cuda=True)
            batch_loss = loss_function(predictions, targets)
            _loss.append(batch_loss.detach().cpu().item())
            predictions = predictions.detach().cpu()
            if (predictions.ndim == 2):
                # Multi-class scores: predicted class is the argmax.
                all_predictions.extend(np.argmax(predictions.numpy(), axis=(- 1)).tolist())
            else:
                # Binary probabilities: threshold at 0.5.
                all_predictions.extend(predictions.ge(torch.ones(size=predictions.size()).fill_(0.5)).to(dtype=torch.int32).numpy().tolist())
            all_targets.extend(targets.detach().cpu().numpy().tolist())
        model.train()
        return ((accuracy_score(all_targets, all_predictions) * 100), (precision_score(all_targets, all_predictions) * 100), (recall_score(all_targets, all_predictions) * 100), (f1_score(all_targets, all_predictions) * 100))
    pass
|
def train(model, dataset, max_steps, dev_every, loss_function, optimizer, save_path, log_every=50, max_patience=5):
    """Step-based training loop with early stopping.

    Every `dev_every` steps the model is scored and the best state dict is
    checkpointed to `<save_path>-model.bin`; stops after `max_patience`
    non-improving evaluations or `max_steps` steps.  Ctrl-C is caught so the
    best model is still restored, saved and evaluated.

    NOTE(review): both the periodic evaluation and the final metrics draw from
    dataset.initialize_train_batch()/get_next_train_batch — the TRAIN split —
    although they are logged as "Valid"/"Test".  Also, `valid_f1` actually
    receives the *accuracy* returned by evaluate_loss.  Confirm whether the
    valid/test iterators and f1 were intended here.
    """
    debug('Start Training')
    train_losses = []
    best_model = None
    patience_counter = 0
    best_f1 = 0
    try:
        for step_count in range(max_steps):
            model.train()
            model.zero_grad()
            (graph, targets) = dataset.get_next_train_batch()
            targets = targets.cuda()
            predictions = model(graph, cuda=True)
            batch_loss = loss_function(predictions, targets)
            if ((log_every is not None) and ((step_count % log_every) == (log_every - 1))):
                debug(('Step %d\t\tTrain Loss %10.3f' % (step_count, batch_loss.detach().cpu().item())))
            train_losses.append(batch_loss.detach().cpu().item())
            batch_loss.backward()
            optimizer.step()
            if ((step_count % dev_every) == (dev_every - 1)):
                # Periodic evaluation + best-checkpoint tracking.
                (valid_loss, valid_f1) = evaluate_loss(model, loss_function, dataset.initialize_train_batch(), dataset.get_next_train_batch)
                if (valid_f1 > best_f1):
                    patience_counter = 0
                    best_f1 = valid_f1
                    best_model = copy.deepcopy(model.state_dict())
                    _save_file = open((save_path + '-model.bin'), 'wb')
                    torch.save(model.state_dict(), _save_file)
                    _save_file.close()
                else:
                    patience_counter += 1
                debug(('Step %d\t\tTrain Loss %10.3f\tValid Loss%10.3f\tf1: %5.2f\tPatience %d' % (step_count, np.mean(train_losses).item(), valid_loss, valid_f1, patience_counter)))
                debug(('=' * 100))
                train_losses = []
                if (patience_counter == max_patience):
                    break
    except KeyboardInterrupt:
        debug('Training Interrupted by user!')
    # Restore the best snapshot (if any), persist it, and report final metrics.
    if (best_model is not None):
        model.load_state_dict(best_model)
    _save_file = open((save_path + '-model.bin'), 'wb')
    torch.save(model.state_dict(), _save_file)
    _save_file.close()
    (acc, pr, rc, f1) = evaluate_metrics(model, loss_function, dataset.initialize_train_batch(), dataset.get_next_train_batch)
    debug(('%s\tTest Accuracy: %0.2f\tPrecision: %0.2f\tRecall: %0.2f\tF1: %0.2f' % (save_path, acc, pr, rc, f1)))
    debug(('=' * 100))
|
def load_default_identifiers(n, g, l):
    """Fill any missing identifier with the module-level defaults
    (n_identifier / g_identifier / l_identifier) and return (n, g, l)."""
    n = n_identifier if n is None else n
    g = g_identifier if g is None else g
    l = l_identifier if l is None else l
    return (n, g, l)
|
def initialize_batch(entries, batch_size, shuffle=False):
    """Partition the indices of `entries` into batches of `batch_size`.

    Returns the list of index arrays reversed, so callers can pop() batches
    off the end.  The last batch may be smaller than `batch_size`.

    Bug fix: the previous np.arange(0, total - 1, 1) excluded the final index,
    so the last entry of every split was silently never batched.
    """
    total = len(entries)
    indices = np.arange(total)  # was np.arange(0, total - 1, 1): dropped the last entry
    if shuffle:
        np.random.shuffle(indices)
    batch_indices = [indices[start:start + batch_size] for start in range(0, total, batch_size)]
    return batch_indices[::(- 1)]
|
def tally_param(model):
    """Return the total number of elements across all parameters of `model`."""
    return sum(param.data.nelement() for param in model.parameters())
|
def debug(*msg, sep='\t'):
    """Print `msg` items separated by `sep`, prefixed with a timestamp and the
    calling frame's file name and line number."""
    caller = inspect.stack()[1]
    timestamp = datetime.now().strftime('%m/%d/%Y - %H:%M:%S')
    prefix = ('[' + str(timestamp) + '] File "' + caller.filename
              + '", line ' + str(caller.lineno) + ' ')
    print(prefix, end='\t')
    for item in msg:
        print(item, end=sep)
    print('')
|
class CSharpProcessor():
    """Source-level transformations for C#-like code: dead-code injection and
    for/while loop conversions over tree-sitter parse trees."""

    @classmethod
    def create_dead_for_loop(cls, body):
        """Wrap `body` in a for-loop that can never execute (dead-code injection).

        Randomly picks between a counted loop with an unsatisfiable condition
        and a `for ( ; false ; )` header.
        """
        control_variable = '_i_' + str(np.random.choice(list(range(10))))
        if np.random.uniform(0, 1) < 0.5:
            header = ('for ( int ' + control_variable + ' = 0 ; '
                      + control_variable + ' > 0 ; ' + control_variable + ' ++ ) { ')
            return header + body + ' } '
        return 'for ( ; false ; ) { ' + body + '}'
@classmethod
def create_dead_while_loop(cls, body):
p = np.random.uniform(0, 1)
control_variable = ('_i_' + str(np.random.choice(list(range(10)))))
if (p < 0.33):
return (('while ( false ) { ' + body) + ' }')
elif (p < 0.66):
return (((((('while ( ' + control_variable) + ' < ') + control_variable) + ' ) { ') + body) + ' } ')
else:
return (((((('while ( ' + control_variable) + ' > ') + control_variable) + ' ) { ') + body) + ' } ')
@classmethod
def create_dead_if(cls, body):
p = np.random.uniform(0, 1)
control_variable = ('_i_' + str(np.random.choice(list(range(10)))))
if (p < 0.33):
return (('if ( false ) { ' + body) + ' }')
elif (p < 0.66):
return (((((('if ( ' + control_variable) + ' < ') + control_variable) + ' ) { ') + body) + ' } ')
else:
return (((((('if ( ' + control_variable) + ' > ') + control_variable) + ' ) { ') + body) + ' } ')
@classmethod
def for_to_while_random(cls, code_string, parser):
root = parser.parse_code(code_string)
loops = cls.extract_for_loops(root)
success = False
try:
while ((not success) and (len(loops) > 0)):
selected_loop = np.random.choice(loops)
loops.remove(selected_loop)
(modified_root, modified_code_string, success) = cls.for_to_while(code_string, root, selected_loop, parser)
if success:
root = modified_root
code_string = modified_code_string
except:
pass
if (not success):
code_string = cls.beautify_java_code(get_tokens(code_string, root))
return (root, code_string, success)
@classmethod
def while_to_for_random(cls, code_string, parser):
root = parser.parse_code(code_string)
loops = cls.extract_while_loops(root)
success = False
try:
while ((not success) and (len(loops) > 0)):
selected_loop = np.random.choice(loops)
loops.remove(selected_loop)
(modified_root, modified_code_string, success) = cls.while_to_for(code_string, root, selected_loop, parser)
if success:
root = modified_root
code_string = modified_code_string
if (not success):
code_string = cls.beautify_java_code(get_tokens(code_string, root))
except:
pass
return (root, code_string, success)
@classmethod
def extract_for_loops(cls, root):
loops = []
queue = [root]
while (len(queue) > 0):
current_node = queue[0]
queue = queue[1:]
if (str(current_node.type) == 'for_statement'):
loops.append(current_node)
for child in current_node.children:
queue.append(child)
return loops
@classmethod
def beautify_java_code(cls, tokens):
code = ' '.join(tokens)
code = re.sub(' \\. ', '', code)
code = re.sub(' \\+\\+', '++', code)
return code
    @classmethod
    def get_tokens_replace_for(cls, code_str, for_node, root, init, cond, update, body):
        """Recursively flatten `root` into tokens, substituting the subtree
        `for_node` with `init ; while ( cond ) { body update }` tokens.
        Comments are dropped; string-literal nodes are emitted verbatim.
        """
        if isinstance(code_str, str):
            code_str = code_str.encode()  # tree-sitter start/end offsets index bytes
        assert isinstance(root, Node)
        tokens = []
        if (root.type == 'comment'):
            return tokens
        if ('string' in str(root.type)):
            # Emit the whole literal as one token so its internal spaces survive.
            return [code_str[root.start_byte:root.end_byte].decode()]
        children = root.children
        if (len(children) == 0):
            # Leaf node: its source span is a single token.
            tokens.append(code_str[root.start_byte:root.end_byte].decode())
        for child in children:
            if (child == for_node):
                # Replace the for-loop with its while-loop equivalent; the
                # update clause runs at the end of each iteration.
                tokens.extend(((((((init + [';', 'while', '(']) + cond) + [')', '{']) + body) + update) + ['}']))
            else:
                tokens += cls.get_tokens_replace_for(code_str, for_node, child, init, cond, update, body)
        return tokens
    @classmethod
    def for_to_while(cls, code_string, root, fl, parser):
        """Rewrite the for-loop node `fl` as an equivalent while-loop.

        Relies on the grammar's child layout: children[2] = init clause,
        children[4] = condition, children[6] = update (or ')' when absent),
        with the body one position past the closing paren.  Returns
        (new_root, new_code, True) on success, or the inputs unchanged with
        False when the loop's condition clause is empty.
        """
        children = fl.children
        init = children[2]
        init_tokens = get_tokens(code_string, init)
        comparison = children[4]
        if (str(comparison.type) != ';'):
            comp_tokens = get_tokens(code_string, comparison)
            update = children[6]
            if (str(update.type) == ')'):
                # No update clause: `for ( init ; cond ; ) { ... }`.
                update_tokens = []
                body = children[7]
            else:
                update_tokens = (get_tokens(code_string, update) + [';'])
                body = children[8]
            # The update must also run before any statement that exits the loop
            # body, to preserve for-loop semantics after conversion.
            breaking_statements = cls.get_breaking_statements(body)
            body_tokens = cls.get_tokens_insert_before(code_string, body, ' '.join(update_tokens), breaking_statements)
            if ((len(body_tokens) >= 2) and ((body_tokens[0] == '{') and (body_tokens[(- 1)] == '}'))):
                # Strip the outer braces; get_tokens_replace_for adds its own.
                body_tokens = body_tokens[1:(- 1)]
            tokens = cls.get_tokens_replace_for(code_str=code_string, for_node=fl, root=root, init=init_tokens, cond=comp_tokens, update=update_tokens, body=body_tokens)
            code = cls.beautify_java_code(tokens)
            return (parser.parse_code(code), code, True)
        return (root, code_string, False)
@classmethod
def get_tokens_insert_before(cls, code_str, root, insertion_code, insert_before_node):
    """Tokenize the subtree at `root`, splicing `insertion_code` tokens
    immediately before each node listed in `insert_before_node`.

    Comments are dropped.  A string literal that is its parent's only child
    is also dropped; otherwise it is kept as one opaque token.
    """
    if (not isinstance(insert_before_node, list)):
        insert_before_node = [insert_before_node]
    if isinstance(code_str, str):
        code_str = code_str.encode()  # byte offsets index the encoded source
    assert isinstance(root, Node)
    tokens = []
    if (root.type == 'comment'):
        return tokens
    if ('string' in str(root.type)):
        parent = root.parent
        if (len(parent.children) == 1):
            return tokens
        else:
            return [code_str[root.start_byte:root.end_byte].decode()]
    if (root in insert_before_node):
        tokens += insertion_code.split()
    children = root.children
    if (len(children) == 0):
        tokens.append(code_str[root.start_byte:root.end_byte].decode())
    for child in children:
        ts = cls.get_tokens_insert_before(code_str, child, insertion_code, insert_before_node)
        tokens += ts
    return tokens
@classmethod
def extract_while_loops(cls, root):
    """Breadth-first collect every 'while_statement' node reachable from `root`."""
    matches = []
    frontier = [root]
    cursor = 0
    while cursor < len(frontier):
        node = frontier[cursor]
        cursor += 1
        if str(node.type) == 'while_statement':
            matches.append(node)
        frontier.extend(node.children)
    return matches
@classmethod
def while_to_for(cls, code_string, root, wl, parser):
    """Rewrite one while-loop node `wl` as ``for ( ; cond ; ) { body }``.

    Only loops whose condition (children[2]) is a binary_expression are
    converted; otherwise the inputs are returned unchanged with False.
    """
    children = wl.children
    condition = children[2]
    body = children[4]
    if (str(condition.type) == 'binary_expression'):
        expr_tokens = get_tokens(code_string, condition)
        body_tokens = get_tokens(code_string, body)
        if ((len(body_tokens) >= 2) and ((body_tokens[0] == '{') and (body_tokens[(- 1)] == '}'))):
            body_tokens = body_tokens[1:(- 1)]  # replacement supplies its own braces
        tokens = cls.get_tokens_replace_while(code_str=code_string, while_node=wl, root=root, cond=expr_tokens, body=body_tokens)
        code = cls.beautify_java_code(tokens)
        return (parser.parse_code(code), code, True)
    return (root, code_string, False)
@classmethod
def get_tokens_replace_while(cls, code_str, while_node, root, cond, body):
    """Re-tokenize the tree under `root`, emitting ``for ( ; cond ; ) { body }``
    in place of the subtree `while_node`.

    Comments are dropped; string-like nodes are kept as one opaque token.
    """
    if isinstance(code_str, str):
        code_str = code_str.encode()  # byte offsets index the encoded source
    assert isinstance(root, Node)
    tokens = []
    if (root.type == 'comment'):
        return tokens
    if ('string' in str(root.type)):
        return [code_str[root.start_byte:root.end_byte].decode()]
    children = root.children
    if (len(children) == 0):
        tokens.append(code_str[root.start_byte:root.end_byte].decode())
    for child in children:
        if (child == while_node):
            tokens.extend(((((['for', '(', ';'] + cond) + [';', ')', '{']) + body) + ['}']))
        else:
            tokens += cls.get_tokens_replace_while(code_str, while_node, child, cond, body)
    return tokens
@classmethod
def extract_expression(cls, root, code):
    """BFS-collect binary_expression nodes holding exactly one comparison operator.

    `code` is accepted for interface compatibility but is not used.
    Fix: the first parameter of this @classmethod was named `self`;
    renamed to `cls` to match the decorator (no behavioral change).
    """
    comparison_ops = ['<', '>', '<=', '>=', '==', '!=']
    expressions = []
    queue = [root]
    while len(queue) > 0:
        current_node = queue[0]
        queue = queue[1:]
        if str(current_node.type) == 'binary_expression':
            # Count direct comparison-operator children; exactly one means a
            # simple, swappable comparison.
            hits = sum(1 for child in current_node.children if str(child.type) in comparison_ops)
            if hits == 1:
                expressions.append(current_node)
        queue.extend(current_node.children)
    return expressions
@classmethod
def get_tokens_for_opswap(cls, code, root, left_oprd, operator, right_oprd):
    """Re-tokenize the tree with the operands of one comparison swapped.

    When a child's byte range matches `left_oprd`, recursion descends into
    `right_oprd` instead (and vice versa), so the operands come out in
    swapped order; the operator leaf is mirrored ('<' -> '>', '<=' -> '>=').
    '==' and '!=' are symmetric and emitted unchanged.  Comments are
    dropped; string-like nodes are one opaque token.
    Returns (tokens, None); the second element is unused by callers.
    """
    if isinstance(code, str):
        code = code.encode()  # byte offsets index the encoded source
    assert isinstance(root, Node)
    tokens = []
    if (root.type == 'comment'):
        return (tokens, None)
    if ('string' in str(root.type)):
        return ([code[root.start_byte:root.end_byte].decode()], None)
    children = root.children
    if (len(children) == 0):
        if ((root.start_byte == operator.start_byte) and (root.end_byte == operator.end_byte)):
            # This leaf is the comparison operator: emit its mirror image.
            opt = code[operator.start_byte:operator.end_byte].decode()
            if (opt == '<'):
                tokens.append('>')
            elif (opt == '>'):
                tokens.append('<')
            elif (opt == '>='):
                tokens.append('<=')
            elif (opt == '<='):
                tokens.append('>=')
            elif (opt == '=='):
                tokens.append('==')
            elif (opt == '!='):
                tokens.append('!=')
        else:
            tokens.append(code[root.start_byte:root.end_byte].decode())
    for child in children:
        if ((child.start_byte == left_oprd.start_byte) and (child.end_byte == left_oprd.end_byte)):
            # Emit the right operand where the left one was.
            (ts, _) = cls.get_tokens_for_opswap(code, right_oprd, left_oprd, operator, right_oprd)
        elif ((child.start_byte == right_oprd.start_byte) and (child.end_byte == right_oprd.end_byte)):
            (ts, _) = cls.get_tokens_for_opswap(code, left_oprd, left_oprd, operator, right_oprd)
        else:
            (ts, _) = cls.get_tokens_for_opswap(code, child, left_oprd, operator, right_oprd)
        tokens += ts
    return (tokens, None)
@classmethod
def operand_swap(cls, code_str, parser):
    """Swap the operands of one randomly chosen comparison (a < b -> b > a).

    Returns (code_string, success); when no candidate works, the original
    code is returned re-tokenized/beautified with success == False.
    """
    code = code_str.encode()
    root = parser.parse_code(code)
    expressions = cls.extract_expression(root, code)
    success = False
    try:
        while ((not success) and (len(expressions) > 0)):
            selected_exp = np.random.choice(expressions)
            expressions.remove(selected_exp)
            bin_exp = selected_exp
            condition = code[bin_exp.start_byte:bin_exp.end_byte].decode()  # NOTE(review): unused
            bin_exp = bin_exp.children
            left_oprd = bin_exp[0]
            operator = bin_exp[1]
            right_oprd = bin_exp[2]
            try:
                code_list = cls.get_tokens_for_opswap(code, root, left_oprd, operator, right_oprd)[0]
                code_string = ''
                for w in code_list:
                    code_string = ((code_string + w) + ' ')
                code_string = code_string.strip()
                success = True
            except:
                # This candidate failed; try the next expression.
                success = False
                continue
    except:
        # Best-effort: fall through with success == False.
        pass
    if (not success):
        code_string = cls.beautify_java_code(get_tokens(code_str, root))
    return (code_string, success)
@classmethod
def extract_if_else(cls, root, code_str, operator_list):
    """Find if/else statements whose branches can be safely swapped.

    Returns a list of [if_node, condition_node] pairs for if-statements that
    (a) have an else branch but no else-if chain, and (b) whose condition
    holds exactly one operator from `operator_list` and no logical
    connectives (&&, &, ||, |).  Else-if statements are added to
    `not_consider` so their subtrees are excluded from the search.
    """
    ext_opt_list = ['&&', '&', '||', '|']
    expressions = []
    queue = [root]
    not_consider = []
    while (len(queue) > 0):
        current_node = queue[0]
        queue = queue[1:]
        if (str(current_node.type) == 'if_statement'):
            clause = code_str[current_node.start_byte:current_node.end_byte].decode()  # NOTE(review): unused
            des = current_node.children[2]  # condition node; presumably parenthesized — confirm grammar
            cond = code_str[des.start_byte:des.end_byte].decode()  # NOTE(review): unused
            # Collect the condition's leaves in source order.
            stack = [des]
            nodes = []
            while (len(stack) > 0):
                root1 = stack.pop()
                if (len(root1.children) == 0):
                    nodes.append(root1)
                for child in root1.children:
                    stack.append(child)
            nodes.reverse()
            counter = 0
            extra_counter = 0
            for w in nodes:
                if (str(w.type) in operator_list):
                    counter = (counter + 1)
                if (str(w.type) in ext_opt_list):
                    extra_counter = (extra_counter + 1)
            if (not ((counter == 1) and (extra_counter == 0))):
                # Condition too complex: skip (note: also skips enqueuing children).
                continue
            children_nodes = current_node.children
            flagx = 0
            flagy = 0
            for w in children_nodes:
                if (str(w.type) == 'else'):
                    flagx = 1  # has an else branch
                if (str(w.type) == 'if_statement'):
                    not_consider.append(w)  # else-if: exclude from search
                    flagy = 1
            if ((flagx == 1) and (flagy == 0)):
                expressions.append([current_node, des])
        for child in current_node.children:
            if (child not in not_consider):
                queue.append(child)
    return expressions
@classmethod
def get_tokens_for_blockswap(cls, code, root, first_block, opt_node, second_block, flagx, flagy):
    """Re-tokenize the tree with `first_block`/`second_block` swapped and the
    comparison operator leaf `opt_node` replaced by its negation
    ('<' -> '>=', '==' -> '!=', ...), preserving overall semantics.

    flagx/flagy record that each block has already been swapped once so a
    byte-range match deeper in the tree cannot trigger a second swap.
    Returns (tokens, None); the second element is unused by callers.
    """
    if isinstance(code, str):
        code = code.encode()  # byte offsets index the encoded source
    assert isinstance(root, Node)
    tokens = []
    if (root.type == 'comment'):
        return (tokens, None)
    if ('string' in str(root.type)):
        return ([code[root.start_byte:root.end_byte].decode()], None)
    children = root.children
    if (len(children) == 0):
        if ((root.start_byte == opt_node.start_byte) and (root.end_byte == opt_node.end_byte)):
            # Emit the logical negation of the comparison operator.
            op = code[root.start_byte:root.end_byte].decode()
            if (op == '<'):
                tokens.append('>=')
            elif (op == '>'):
                tokens.append('<=')
            elif (op == '>='):
                tokens.append('<')
            elif (op == '<='):
                tokens.append('>')
            elif (op == '!='):
                tokens.append('==')
            elif (op == '=='):
                tokens.append('!=')
        else:
            tokens.append(code[root.start_byte:root.end_byte].decode())
    for child in children:
        child_type = str(child.type)  # NOTE(review): unused
        if ((child.start_byte == first_block.start_byte) and (child.end_byte == first_block.end_byte) and (flagx == 0) and (str(child.type) == str(first_block.type))):
            flagx = 1
            # Descend into the *second* block where the first one was.
            (ts, _) = cls.get_tokens_for_blockswap(code, second_block, first_block, opt_node, second_block, flagx, flagy)
        elif ((child.start_byte == second_block.start_byte) and (child.end_byte == second_block.end_byte) and (flagy == 0) and (str(child.type) == str(second_block.type))):
            flagy = 1
            (ts, _) = cls.get_tokens_for_blockswap(code, first_block, first_block, opt_node, second_block, flagx, flagy)
        else:
            (ts, _) = cls.get_tokens_for_blockswap(code, child, first_block, opt_node, second_block, flagx, flagy)
        tokens += ts
    return (tokens, None)
@classmethod
def block_swap(cls, code_str, parser):
    """Swap the then/else blocks of one random if/else, negating its condition.

    Returns (code_string, success); on failure the original code is returned
    re-tokenized/beautified with success == False.
    """
    code = code_str.encode()
    root = parser.parse_code(code)
    operator_list = ['<', '>', '<=', '>=', '==', '!=']
    pair = cls.extract_if_else(root, code, operator_list)
    success = False
    lst = list(range(0, len(pair)))
    try:
        while ((not success) and (len(lst) > 0)):
            selected = np.random.choice(lst)
            lst.remove(selected)
            clause = pair[selected][0]
            des = pair[selected][1]
            # DFS the condition subtree to locate the comparison-operator leaf.
            st = [des]
            nodes = []
            while (len(st) > 0):
                root1 = st.pop()
                if (len(root1.children) == 0):
                    nodes.append(root1)
                    if (code[root1.start_byte:root1.end_byte].decode() in operator_list):
                        # NOTE(review): if no operator leaf exists, opt_node stays
                        # unbound and the inner except below absorbs the NameError.
                        opt_node = root1
                        break
                for child in root1.children:
                    st.append(child)
            nodes = clause.children  # NOTE(review): overwrites the leaf list collected above
            flag = 0
            for current_node in nodes:
                if (str(current_node.type) == 'block'):
                    if (flag == 0):
                        first_block = current_node
                        flag = 1
                    else:
                        second_block = current_node
            flagx = 0
            flagy = 0
            try:
                code_list = cls.get_tokens_for_blockswap(code, root, first_block, opt_node, second_block, flagx, flagy)[0]
                code_string = ''
                for w in code_list:
                    code_string = ((code_string + w) + ' ')
                code_string = code_string.strip()
                success = True
            except:
                # This candidate failed; try the next if/else pair.
                success = False
                continue
    except:
        # Best-effort: fall through with success == False.
        pass
    if (not success):
        code_string = cls.beautify_java_code(get_tokens(code_str, root))
    return (code_string, success)
@classmethod
def get_breaking_statements(cls, block):
    """DFS for continue/break/return nodes under `block`; does not descend into them."""
    halting = ('continue_statement', 'break_statement', 'return_statement')
    found = []
    todo = [block]
    while todo:
        node = todo.pop()
        if str(node.type) in halting:
            found.append(node)
        else:
            todo.extend(node.children)
    return found
|
class GoProcessor():
    """Source-to-source transformation helpers for Go programs.

    Provides dead-code wrappers (never-executed for/while/if blocks) and
    tree-sitter-driven refactorings (operand swap within comparisons,
    then/else block swap).  Mirrors the Java/C++ processors in this module.

    Review fixes: operand_swap's failure path returned a raw token *list*;
    it now returns a beautified string like every sibling processor.
    extract_expression's first parameter was `self` on a @classmethod;
    renamed to `cls`.
    """

    @classmethod
    def create_dead_for_loop(cls, body):
        """Wrap `body` in a counted for-loop whose condition is false at entry."""
        control_variable = ('_i_' + str(np.random.choice(list(range(10)))))
        return (((f'for {control_variable} := 0 ; {control_variable} < 0; {control_variable}++' + ' { ') + body) + ' } ')

    @classmethod
    def create_dead_while_loop(cls, body):
        """Wrap `body` in a never-entered condition-only for-loop (Go's 'while')."""
        p = np.random.uniform(0, 1)
        control_variable = ('_i_' + str(np.random.choice(list(range(10)))))
        if (p < 0.33):
            return ((('for false ' + ' { ') + body) + ' } ')
        elif (p < 0.66):
            return (((f'for {control_variable} > {control_variable} ' + ' { ') + body) + ' } ')
        else:
            return (((f'for {control_variable} < {control_variable} ' + ' { ') + body) + ' } ')

    @classmethod
    def create_dead_if(cls, body):
        """Wrap `body` in an if-statement whose condition can never hold."""
        p = np.random.uniform(0, 1)
        control_variable = ('_i_' + str(np.random.choice(list(range(10)))))
        if (p < 0.33):
            return ((('if false ' + ' { ') + body) + ' } ')
        elif (p < 0.66):
            return (((f'if {control_variable} > {control_variable} ' + ' { ') + body) + ' } ')
        else:
            return (((f'if {control_variable} < {control_variable} ' + ' { ') + body) + ' } ')

    @classmethod
    def extract_expression(cls, root, code):
        """BFS-collect binary_expression nodes with exactly one comparison operator.

        `code` is accepted for interface compatibility but is not used.
        """
        expressions = []
        queue = [root]
        while (len(queue) > 0):
            current_node = queue[0]
            queue = queue[1:]
            if (str(current_node.type) == 'binary_expression'):
                keep = ['<', '>', '<=', '>=', '==', '!=']
                counter = 0
                for w in current_node.children:
                    if (str(w.type) in keep):
                        counter = (counter + 1)
                if (counter == 1):
                    expressions.append(current_node)
            for child in current_node.children:
                queue.append(child)
        return expressions

    @classmethod
    def get_tokens_for_opswap(cls, code, root, left_oprd, operator, right_oprd):
        """Re-tokenize the tree with the comparison's operands swapped.

        Children matching `left_oprd`'s byte range are replaced by a walk of
        `right_oprd` (and vice versa); the operator leaf is mirrored
        ('<' -> '>', '<=' -> '>='); '=='/'!=' are symmetric and unchanged.
        Returns (tokens, None); the second element is unused.
        """
        if isinstance(code, str):
            code = code.encode()  # byte offsets index the encoded source
        assert isinstance(root, Node)
        tokens = []
        if (root.type == 'comment'):
            return (tokens, None)
        if ('string' in str(root.type)):
            return ([code[root.start_byte:root.end_byte].decode()], None)
        children = root.children
        if (len(children) == 0):
            if ((root.start_byte == operator.start_byte) and (root.end_byte == operator.end_byte)):
                opt = code[operator.start_byte:operator.end_byte].decode()
                if (opt == '<'):
                    tokens.append('>')
                elif (opt == '>'):
                    tokens.append('<')
                elif (opt == '>='):
                    tokens.append('<=')
                elif (opt == '<='):
                    tokens.append('>=')
                elif (opt == '=='):
                    tokens.append('==')
                elif (opt == '!='):
                    tokens.append('!=')
            else:
                tokens.append(code[root.start_byte:root.end_byte].decode())
        for child in children:
            if ((child.start_byte == left_oprd.start_byte) and (child.end_byte == left_oprd.end_byte)):
                (ts, _) = cls.get_tokens_for_opswap(code, right_oprd, left_oprd, operator, right_oprd)
            elif ((child.start_byte == right_oprd.start_byte) and (child.end_byte == right_oprd.end_byte)):
                (ts, _) = cls.get_tokens_for_opswap(code, left_oprd, left_oprd, operator, right_oprd)
            else:
                (ts, _) = cls.get_tokens_for_opswap(code, child, left_oprd, operator, right_oprd)
            tokens += ts
        return (tokens, None)

    @classmethod
    def operand_swap(cls, code_str, parser):
        """Swap the operands of one randomly chosen comparison (a < b -> b > a).

        Returns (code_string, success).  Fix: the failure path previously
        returned the raw token list from get_tokens; it now returns the
        beautified string, consistent with the other processors.
        """
        code = code_str.encode()
        root = parser.parse_code(code)
        expressions = cls.extract_expression(root, code)
        success = False
        try:
            while ((not success) and (len(expressions) > 0)):
                selected_exp = np.random.choice(expressions)
                expressions.remove(selected_exp)
                bin_exp = selected_exp
                condition = code[bin_exp.start_byte:bin_exp.end_byte].decode()  # unused; kept for parity
                bin_exp = bin_exp.children
                left_oprd = bin_exp[0]
                operator = bin_exp[1]
                right_oprd = bin_exp[2]
                try:
                    code_list = cls.get_tokens_for_opswap(code, root, left_oprd, operator, right_oprd)[0]
                    code_string = ' '.join(code_list).strip()
                    success = True
                except:
                    success = False
                    continue
        except:
            pass
        if (not success):
            # FIX: return a beautified string, not a token list.
            code_string = cls.beautify_java_code(get_tokens(code_str, root))
        return (code_string, success)

    @classmethod
    def extract_if_else(cls, root, code_str, operator_list):
        """Find if/else statements with swappable branches.

        Returns [if_node, condition_node] pairs for if-statements that have a
        plain else branch (no else-if), whose condition holds exactly one
        operator from `operator_list` and no logical connectives.  Go's if
        condition has no parentheses, hence children[1].
        """
        ext_opt_list = ['&&', '&', '||', '|']
        expressions = []
        queue = [root]
        not_consider = []
        while (len(queue) > 0):
            current_node = queue[0]
            queue = queue[1:]
            if (str(current_node.type) == 'if_statement'):
                clause = code_str[current_node.start_byte:current_node.end_byte].decode()  # unused; kept for parity
                des = current_node.children[1]  # condition node (no parens in Go)
                cond = code_str[des.start_byte:des.end_byte].decode()  # unused; kept for parity
                stack = [des]
                nodes = []
                while (len(stack) > 0):
                    root1 = stack.pop()
                    if (len(root1.children) == 0):
                        nodes.append(root1)
                    for child in root1.children:
                        stack.append(child)
                nodes.reverse()
                counter = 0
                extra_counter = 0
                for w in nodes:
                    if (str(w.type) in operator_list):
                        counter = (counter + 1)
                    if (str(w.type) in ext_opt_list):
                        extra_counter = (extra_counter + 1)
                if (not ((counter == 1) and (extra_counter == 0))):
                    continue
                children_nodes = current_node.children
                flagx = 0
                flagy = 0
                for w in children_nodes:
                    if (str(w.type) == 'else'):
                        flagx = 1
                    if (str(w.type) == 'if_statement'):
                        not_consider.append(w)
                        flagy = 1
                if ((flagx == 1) and (flagy == 0)):
                    expressions.append([current_node, des])
            for child in current_node.children:
                if (child not in not_consider):
                    queue.append(child)
        return expressions

    @classmethod
    def get_tokens_for_blockswap(cls, code, root, first_block, opt_node, second_block, flagx, flagy):
        """Re-tokenize with the two blocks swapped and `opt_node` negated.

        flagx/flagy ensure each block is swapped only once.  Returns
        (tokens, None); the second element is unused.
        """
        if isinstance(code, str):
            code = code.encode()
        assert isinstance(root, Node)
        tokens = []
        if (root.type == 'comment'):
            return (tokens, None)
        if ('string' in str(root.type)):
            return ([code[root.start_byte:root.end_byte].decode()], None)
        children = root.children
        if (len(children) == 0):
            if ((root.start_byte == opt_node.start_byte) and (root.end_byte == opt_node.end_byte)):
                # Emit the logical negation of the comparison operator.
                op = code[root.start_byte:root.end_byte].decode()
                if (op == '<'):
                    tokens.append('>=')
                elif (op == '>'):
                    tokens.append('<=')
                elif (op == '>='):
                    tokens.append('<')
                elif (op == '<='):
                    tokens.append('>')
                elif (op == '!='):
                    tokens.append('==')
                elif (op == '=='):
                    tokens.append('!=')
            else:
                tokens.append(code[root.start_byte:root.end_byte].decode())
        for child in children:
            if ((child.start_byte == first_block.start_byte) and (child.end_byte == first_block.end_byte) and (flagx == 0) and (str(child.type) == str(first_block.type))):
                flagx = 1
                (ts, _) = cls.get_tokens_for_blockswap(code, second_block, first_block, opt_node, second_block, flagx, flagy)
            elif ((child.start_byte == second_block.start_byte) and (child.end_byte == second_block.end_byte) and (flagy == 0) and (str(child.type) == str(second_block.type))):
                flagy = 1
                (ts, _) = cls.get_tokens_for_blockswap(code, first_block, first_block, opt_node, second_block, flagx, flagy)
            else:
                (ts, _) = cls.get_tokens_for_blockswap(code, child, first_block, opt_node, second_block, flagx, flagy)
            tokens += ts
        return (tokens, None)

    @classmethod
    def block_swap(cls, code_str, parser):
        """Swap the then/else blocks of one random if/else, negating its condition.

        Returns (code_string, success); on failure the original code is
        returned re-tokenized/beautified.
        """
        code = code_str.encode()
        root = parser.parse_code(code)
        operator_list = ['<', '>', '<=', '>=', '==', '!=']
        pair = cls.extract_if_else(root, code, operator_list)
        success = False
        lst = list(range(0, len(pair)))
        try:
            while ((not success) and (len(lst) > 0)):
                selected = np.random.choice(lst)
                lst.remove(selected)
                clause = pair[selected][0]
                des = pair[selected][1]
                # DFS the condition subtree for the comparison-operator leaf.
                st = [des]
                nodes = []
                while (len(st) > 0):
                    root1 = st.pop()
                    if (len(root1.children) == 0):
                        nodes.append(root1)
                        if (code[root1.start_byte:root1.end_byte].decode() in operator_list):
                            opt_node = root1
                            break
                    for child in root1.children:
                        st.append(child)
                nodes = clause.children
                flag = 0
                for current_node in nodes:
                    if (str(current_node.type) == 'block'):
                        if (flag == 0):
                            first_block = current_node
                            flag = 1
                        else:
                            second_block = current_node
                flagx = 0
                flagy = 0
                try:
                    code_list = cls.get_tokens_for_blockswap(code, root, first_block, opt_node, second_block, flagx, flagy)[0]
                    code_string = ' '.join(code_list).strip()
                    success = True
                except:
                    success = False
                    continue
        except:
            pass
        if (not success):
            code_string = cls.beautify_java_code(get_tokens(code_str, root))
        return (code_string, success)

    @classmethod
    def beautify_java_code(cls, tokens):
        """Join `tokens` with spaces, gluing '.' and '++' (name inherited from
        the Java processor for interface parity)."""
        code = ' '.join(tokens)
        code = re.sub(' \\. ', '', code)
        code = re.sub(' \\+\\+', '++', code)
        return code
|
class JavaAndCPPProcessor():
@classmethod
def create_dead_for_loop(cls, body):
control_variable = ('_i_' + str(np.random.choice(list(range(10)))))
p = np.random.uniform(0, 1)
if (p < 0.5):
prefix = (((((('for ( int ' + control_variable) + ' = 0 ; ') + control_variable) + ' > 0 ; ') + control_variable) + ' ++ ) { ')
loop = ((prefix + body) + ' } ')
return loop
else:
return (('for ( ; false ; ) { ' + body) + '}')
@classmethod
def create_dead_while_loop(cls, body):
p = np.random.uniform(0, 1)
control_variable = ('_i_' + str(np.random.choice(list(range(10)))))
if (p < 0.33):
return (('while ( false ) { ' + body) + ' }')
elif (p < 0.66):
return (((((('while ( ' + control_variable) + ' < ') + control_variable) + ' ) { ') + body) + ' } ')
else:
return (((((('while ( ' + control_variable) + ' > ') + control_variable) + ' ) { ') + body) + ' } ')
@classmethod
def create_dead_if(cls, body):
p = np.random.uniform(0, 1)
control_variable = ('_i_' + str(np.random.choice(list(range(10)))))
if (p < 0.33):
return (('if ( false ) { ' + body) + ' }')
elif (p < 0.66):
return (((((('if ( ' + control_variable) + ' < ') + control_variable) + ' ) { ') + body) + ' } ')
else:
return (((((('if ( ' + control_variable) + ' > ') + control_variable) + ' ) { ') + body) + ' } ')
@classmethod
def for_to_while_random(cls, code_string, parser):
    """Try random for-loops until one is rewritten as a while-loop.

    Returns (root, code_string, success); on failure the original code is
    returned re-tokenized/beautified.  Any error during the search is
    swallowed and treated as failure.
    """
    root = parser.parse_code(code_string)
    loops = cls.extract_for_loops(root)
    success = False
    try:
        while ((not success) and (len(loops) > 0)):
            selected_loop = np.random.choice(loops)
            loops.remove(selected_loop)
            (modified_root, modified_code_string, success) = JavaAndCPPProcessor.for_to_while(code_string, root, selected_loop, parser)
            if success:
                root = modified_root
                code_string = modified_code_string
    except:
        # Best-effort: fall through with success == False.
        pass
    if (not success):
        code_string = cls.beautify_java_code(get_tokens(code_string, root))
    return (root, code_string, success)
@classmethod
def while_to_for_random(cls, code_string, parser):
    """Try random while-loops until one is rewritten as a for-loop.

    Returns ``(root, code_string, success)``.

    Fix: the failure-path beautification used to sit *inside* the try block,
    so an exception raised during the search returned the code without the
    re-tokenization applied by the sibling `for_to_while_random`; it now
    runs unconditionally after the search, matching that method.
    """
    root = parser.parse_code(code_string)
    loops = cls.extract_while_loops(root)
    success = False
    try:
        while (not success) and (len(loops) > 0):
            selected_loop = np.random.choice(loops)
            loops.remove(selected_loop)
            (modified_root, modified_code_string, success) = JavaAndCPPProcessor.while_to_for(code_string, root, selected_loop, parser)
            if success:
                root = modified_root
                code_string = modified_code_string
    except Exception:
        # Best-effort transform: swallow errors, fall through with success == False.
        pass
    if not success:
        code_string = cls.beautify_java_code(get_tokens(code_string, root))
    return (root, code_string, success)
@classmethod
def extract_for_loops(cls, root):
loops = []
queue = [root]
while (len(queue) > 0):
current_node = queue[0]
queue = queue[1:]
if (str(current_node.type) == 'for_statement'):
loops.append(current_node)
for child in current_node.children:
queue.append(child)
return loops
@classmethod
def beautify_java_code(cls, tokens):
code = ' '.join(tokens)
code = re.sub(' \\. ', '', code)
code = re.sub(' \\+\\+', '++', code)
return code
@classmethod
def get_tokens_replace_for(cls, code_str, for_node, root, init, cond, update, body):
    """Re-tokenize the tree under `root`, emitting a while-loop in place of `for_node`.

    The subtree rooted at `for_node` is replaced by
    ``init while ( cond ) { body update }`` — `init` is expected to already
    carry its ';' (see extract_for_contents).  Comments are dropped;
    string-like nodes are kept as one opaque token.
    """
    if isinstance(code_str, str):
        code_str = code_str.encode()  # byte offsets index the encoded source
    assert isinstance(root, Node)
    tokens = []
    if (root.type == 'comment'):
        return tokens
    if ('string' in str(root.type)):
        return [code_str[root.start_byte:root.end_byte].decode()]
    children = root.children
    if (len(children) == 0):
        tokens.append(code_str[root.start_byte:root.end_byte].decode())
    for child in children:
        if (child == for_node):
            tokens.extend(((((((init + ['while', '(']) + cond) + [')', '{']) + body) + update) + ['}']))
        else:
            tokens += JavaAndCPPProcessor.get_tokens_replace_for(code_str, for_node, child, init, cond, update, body)
    return tokens
@classmethod
def extract_for_contents(cls, for_loop, code_string):
    """Split a for-loop node into (init, condition, update, body) token lists.

    Walks children positionally starting at index 2 (past 'for' and '('),
    tolerating missing clauses: a missing condition becomes ['true'], a
    missing init/update becomes [].  Update tokens are additionally inserted
    before any break/continue/return inside the body so the rewritten
    while-loop still runs the update before early exits.
    """
    children = for_loop.children
    init_part = children[2]
    if str(init_part.type).endswith('expression'):
        next_part_start = 4  # expression init: skip the following ';'
        init_tokens = (get_tokens(code_string, init_part) + [';'])
    elif (str(init_part.type).endswith('statement') or str(init_part.type).endswith('declaration')):
        next_part_start = 3  # statement/declaration already contains its ';'
        init_tokens = get_tokens(code_string, init_part)
    else:
        next_part_start = 3  # no init clause
        init_tokens = []
    comp_part = children[next_part_start]
    if str(comp_part.type).endswith('expression'):
        next_part_start += 2  # skip condition and its ';'
        comp_tokens = get_tokens(code_string, comp_part)
    else:
        comp_tokens = ['true']  # empty condition means loop forever
        next_part_start += 1
    update_part = children[next_part_start]
    if str(update_part.type).endswith('expression'):
        next_part_start += 2  # skip update and ')'
        update_tokens = (get_tokens(code_string, update_part) + [';'])
    else:
        update_tokens = []
        next_part_start += 1  # skip ')'
    block_part = children[next_part_start]
    breaking_statements = cls.get_breaking_statements(block_part)
    block_tokens = cls.get_tokens_insert_before(code_string, block_part, ' '.join(update_tokens), breaking_statements)
    return (init_tokens, comp_tokens, update_tokens, block_tokens)
@classmethod
def get_tokens_insert_before(cls, code_str, root, insertion_code, insert_before_node):
    """Tokenize the subtree at `root`, splicing `insertion_code` tokens
    immediately before each node listed in `insert_before_node`.

    Comments are dropped.  A string literal that is its parent's only child
    is also dropped; otherwise it is kept as one opaque token.
    """
    if (not isinstance(insert_before_node, list)):
        insert_before_node = [insert_before_node]
    if isinstance(code_str, str):
        code_str = code_str.encode()  # byte offsets index the encoded source
    assert isinstance(root, Node)
    tokens = []
    if (root.type == 'comment'):
        return tokens
    if ('string' in str(root.type)):
        parent = root.parent
        if (len(parent.children) == 1):
            return tokens
        else:
            return [code_str[root.start_byte:root.end_byte].decode()]
    if (root in insert_before_node):
        tokens += insertion_code.split()
    children = root.children
    if (len(children) == 0):
        tokens.append(code_str[root.start_byte:root.end_byte].decode())
    for child in children:
        ts = cls.get_tokens_insert_before(code_str, child, insertion_code, insert_before_node)
        tokens += ts
    return tokens
@classmethod
def get_breaking_statements(cls, block):
breakings = ['continue_statement', 'break_statement', 'return_statement']
statements = []
stack = [block]
while (len(stack) > 0):
top = stack.pop()
if (str(top.type) in breakings):
statements.append(top)
else:
for child in top.children:
stack.append(child)
return statements
@classmethod
def for_to_while(cls, code_string, root, fl, parser):
    """Rewrite for-loop node `fl` as an equivalent while-loop.

    Returns (new_root, new_code, True); if the rewrite is a token-level
    no-op, returns (root, tokenized_code, False) instead.
    """
    original_tokenized_code = ' '.join(get_tokens(code_string, root))
    (init_tokens, comp_tokens, update_tokens, body_tokens) = cls.extract_for_contents(fl, code_string)
    if ((len(body_tokens) >= 2) and ((body_tokens[0] == '{') and (body_tokens[(- 1)] == '}'))):
        body_tokens = body_tokens[1:(- 1)]  # strip braces; replacement adds its own
    tokens = cls.get_tokens_replace_for(code_str=code_string, for_node=fl, root=root, init=init_tokens, cond=comp_tokens, update=update_tokens, body=body_tokens)
    if (original_tokenized_code == ' '.join(tokens)):
        return (root, original_tokenized_code, False)  # nothing actually changed
    code = cls.beautify_java_code(tokens)
    return (parser.parse_code(code), code, True)
@classmethod
def extract_while_loops(cls, root):
loops = []
queue = [root]
while (len(queue) > 0):
current_node = queue[0]
queue = queue[1:]
if (str(current_node.type) == 'while_statement'):
loops.append(current_node)
for child in current_node.children:
queue.append(child)
return loops
@classmethod
def while_to_for(cls, code_string, root, wl, parser):
    """Rewrite while-loop node `wl` as ``for ( ; cond ; ) { body }``.

    Only loops whose condition (children[1]) is a parenthesized_expression
    are handled; otherwise the inputs come back unchanged with False.
    """
    children = wl.children
    condition = children[1]
    body = children[2]
    if (str(condition.type) == 'parenthesized_expression'):
        expr_tokens = get_tokens(code_string, condition.children[1])  # condition without its parens
        body_tokens = get_tokens(code_string, body)
        if ((len(body_tokens) >= 2) and ((body_tokens[0] == '{') and (body_tokens[(- 1)] == '}'))):
            body_tokens = body_tokens[1:(- 1)]  # replacement supplies its own braces
        tokens = cls.get_tokens_replace_while(code_str=code_string, while_node=wl, root=root, cond=expr_tokens, body=body_tokens)
        code = cls.beautify_java_code(tokens)
        return (parser.parse_code(code), code, True)
    return (root, code_string, False)
@classmethod
def get_tokens_replace_while(cls, code_str, while_node, root, cond, body):
    """Re-tokenize the tree under `root`, emitting ``for ( ; cond ; ) { body }``
    in place of the subtree `while_node`.

    Comments are dropped; string-like nodes are kept as one opaque token.
    """
    if isinstance(code_str, str):
        code_str = code_str.encode()  # byte offsets index the encoded source
    assert isinstance(root, Node)
    tokens = []
    if (root.type == 'comment'):
        return tokens
    if ('string' in str(root.type)):
        return [code_str[root.start_byte:root.end_byte].decode()]
    children = root.children
    if (len(children) == 0):
        tokens.append(code_str[root.start_byte:root.end_byte].decode())
    for child in children:
        if (child == while_node):
            tokens.extend(((((['for', '(', ';'] + cond) + [';', ')', '{']) + body) + ['}']))
        else:
            tokens += JavaAndCPPProcessor.get_tokens_replace_while(code_str, while_node, child, cond, body)
    return tokens
@classmethod
def conditional_removal(cls, code_string, parser):
    """Rewrite C/C++-style conditional expressions (?:) as if/else statements.

    Three passes, re-parsing after each so later passes see a fresh tree:
    (1) assignments containing a conditional, (2) declarations initialized
    by a conditional, (3) returns of a conditional.  Each pass is
    best-effort: a failure leaves the code as-is and later passes still run.
    Returns (root, code_string, success) — success means at least one pass
    completed.
    """
    root = parser.parse_code(code_string)
    (assi_cond_expr, varde_cond_expr, ret_cond_expr) = cls.extract_conditional_expression(root)
    success = False
    if (len(assi_cond_expr) > 0):
        try:
            modified_tokens = cls.assignment_conditional_removal(code_string, assi_cond_expr, root, parser)
            code_string = cls.beautify_java_code(modified_tokens)
            root = parser.parse_code(code_string)
            success = True
            # Re-scan: the rewrite invalidated the previously collected nodes.
            (_, varde_cond_expr, ret_cond_expr) = cls.extract_conditional_expression(root)
        except:
            pass
    if (len(varde_cond_expr) > 0):
        try:
            modified_tokens = cls.var_decl_ternary_removal(code_string, varde_cond_expr, root, parser)
            code_string = cls.beautify_java_code(modified_tokens)
            root = parser.parse_code(code_string)
            success = True
            (_, _, ret_cond_expr) = cls.extract_conditional_expression(root)
        except:
            pass
    if (len(ret_cond_expr) > 0):
        try:
            modified_tokens = cls.return_ternary_removal(code_string, ret_cond_expr, root, parser)
            code_string = cls.beautify_java_code(modified_tokens)
            root = parser.parse_code(code_string)
            success = True
        except:
            pass
    return (root, code_string, success)
@classmethod
def assignment_conditional_removal(cls, code_string, assi_tern_expr, root, parser):
    """Tokenize the tree, replacing each statement in `assi_tern_expr` of the
    form ``x = cond ? a : b ;`` with ``if ( cond ) { x = a ; } else { x = b ; }``.

    NOTE(review): leaf text comes from cls.handle_terminal_node, which is
    defined elsewhere in this file — presumably it mirrors the
    comment/string handling of the other token walkers; confirm.
    """
    if isinstance(code_string, str):
        code_string = code_string.encode()  # byte offsets index the encoded source
    assert isinstance(root, Node)
    tokens = []
    children = root.children
    if (len(children) == 0):
        tokens.append(cls.handle_terminal_node(root, code_string))
    for child in children:
        if (child in assi_tern_expr):
            if (str(child.children[0].type) == 'conditional_expression'):
                cond_children = child.children[0].children
                if (str(cond_children[0].type) == 'assignment_expression'):
                    # The grammar parses ``x = cond`` as one assignment child;
                    # split it back into assignee and condition.
                    assignee_token = get_tokens(code_string, cond_children[0].children[0])[0]
                    condition_tokens = get_tokens(code_string, cond_children[0].children[2])
                    if (str(cond_children[0].children[2].type) == 'parenthesized_expression'):
                        condition_tokens = condition_tokens[1:(- 1)]  # unwrap parens
                    br1_tokens = get_tokens(code_string, cond_children[2])
                    br2_tokens = get_tokens(code_string, cond_children[4])
                    tokens.extend(((((((['if', '('] + condition_tokens) + [')', '{', assignee_token, '=']) + br1_tokens) + [';', '}', 'else', '{', assignee_token, '=']) + br2_tokens) + [';', '}']))
        else:
            tokens += JavaAndCPPProcessor.assignment_conditional_removal(code_string, assi_tern_expr, child, parser)
    return tokens
@classmethod
def extract_conditional_expression(cls, root):
    """BFS-classify conditional_expression (?:) nodes by surrounding context.

    Returns three lists:
    - parents of conditionals whose first child is an assignment_expression
      (``x = cond ? a : b``),
    - grandparents of conditionals under an init_declarator
      (the whole declaration),
    - parents of conditionals under a return_statement.
    """
    assi_con_expr = []
    varde_con_expr = []
    ret_con_expr = []
    queue = [root]
    while (len(queue) > 0):
        current_node = queue[0]
        queue = queue[1:]
        if ((str(current_node.type) == 'conditional_expression') and (str(current_node.children[0].type) == 'assignment_expression')):
            assi_con_expr.append(current_node.parent)
        if ((str(current_node.type) == 'conditional_expression') and (str(current_node.parent.type) == 'init_declarator')):
            varde_con_expr.append(current_node.parent.parent)
        if ((str(current_node.type) == 'conditional_expression') and (str(current_node.parent.type) == 'return_statement')):
            ret_con_expr.append(current_node.parent)
        for child in current_node.children:
            queue.append(child)
    return (assi_con_expr, varde_con_expr, ret_con_expr)
@classmethod
def ternary_removal(cls, code_string, parser):
    """Rewrite Java-style ternary expressions (?:) as if/else statements.

    First strips package/import lines via cls.remove_package_and_import
    (defined elsewhere in this file), then runs three best-effort passes —
    assignments, declarations, returns — re-parsing after each so later
    passes see a fresh tree.  Returns (root, code_string, success) where
    success means at least one pass completed.
    """
    code_string = cls.remove_package_and_import(code_string)
    root = parser.parse_code(code_string)
    (assi_tern_expr, varde_tern_expr, ret_tern_expr) = cls.extract_ternary_expression(root)
    success = False
    if (len(assi_tern_expr) > 0):
        try:
            modified_tokens = cls.assignment_ternary_removal(code_string, assi_tern_expr, root, parser)
            code_string = cls.beautify_java_code(modified_tokens)
            root = parser.parse_code(code_string)
            success = True
            # Re-scan: the rewrite invalidated the previously collected nodes.
            (_, varde_tern_expr, ret_tern_expr) = cls.extract_ternary_expression(root)
        except:
            pass
    if (len(varde_tern_expr) > 0):
        try:
            modified_tokens = cls.var_decl_ternary_removal(code_string, varde_tern_expr, root, parser)
            code_string = cls.beautify_java_code(modified_tokens)
            root = parser.parse_code(code_string)
            success = True
            (_, _, ret_tern_expr) = cls.extract_ternary_expression(root)
        except:
            pass
    if (len(ret_tern_expr) > 0):
        try:
            modified_tokens = cls.return_ternary_removal(code_string, ret_tern_expr, root, parser)
            code_string = cls.beautify_java_code(modified_tokens)
            root = parser.parse_code(code_string)
            success = True
        except:
            pass
    return (root, code_string, success)
@classmethod
def ternary_body_write(cls, body, code_string, assignee, tokens, ret=False):
    """Append if/else tokens that replace ternary `body` (``cond ? br1 : br2``).

    With ret=False emits ``if ( cond ) { assignee = br1 ; } else { assignee = br2 ; }``;
    with ret=True the '=' is omitted (the assignee token is e.g. 'return').
    Parenthesized sub-expressions are unwrapped.  Mutates and returns `tokens`.
    """
    body_children = body.children
    condition_tokens = get_tokens(code_string, body_children[0])
    if (str(body_children[0].type) == 'parenthesized_expression'):
        condition_tokens = condition_tokens[1:(- 1)]  # unwrap parens
    br1_tokens = get_tokens(code_string, body_children[2])
    if (str(body_children[2].type) == 'parenthesized_expression'):
        br1_tokens = br1_tokens[1:(- 1)]
    br2_tokens = get_tokens(code_string, body_children[4])
    if (str(body_children[4].type) == 'parenthesized_expression'):
        br2_tokens = br2_tokens[1:(- 1)]
    assignee_token = get_tokens(code_string, assignee)[0]
    if ret:
        tokens.extend(((((((['if', '('] + condition_tokens) + [')', '{', assignee_token]) + br1_tokens) + [';', '}', 'else', '{', assignee_token]) + br2_tokens) + [';', '}']))
    else:
        tokens.extend(((((((['if', '('] + condition_tokens) + [')', '{', assignee_token, '=']) + br1_tokens) + [';', '}', 'else', '{', assignee_token, '=']) + br2_tokens) + [';', '}']))
    return tokens
@classmethod
def assignment_ternary_removal(cls, code_string, assi_tern_expr, root, parser):
    """Recursively re-tokenize the tree, rewriting `x = c ? a : b` nodes.

    Nodes present in `assi_tern_expr` (assignment_expression nodes that
    contain a ternary) are replaced by an equivalent if/else token
    sequence; all other subtrees are linearized unchanged.
    Returns the token list for the subtree rooted at `root`.
    """
    if isinstance(code_string, str):
        code_string = code_string.encode()
    assert isinstance(root, Node)
    tokens = []
    children = root.children
    if (len(children) == 0):
        tokens.append(cls.handle_terminal_node(root, code_string))
    for child in children:
        if (child in assi_tern_expr):
            te_children = child.children
            # assignment_expression layout: [lhs, '=', ternary_expression]
            assignee = te_children[0]
            body = te_children[2]
            tokens = cls.ternary_body_write(body, code_string, assignee, tokens)
            # NOTE(review): `break` drops any siblings after the matched
            # child (typically just the trailing ';', which the rewrite
            # re-emits itself) -- confirm no other sibling can follow.
            break
        else:
            tokens += JavaAndCPPProcessor.assignment_ternary_removal(code_string, assi_tern_expr, child, parser)
    return tokens
@classmethod
def var_decl_ternary_removal(cls, code_string, var_decl_tern_expr, root, parser):
    """Recursively re-tokenize the tree, rewriting `T x = c ? a : b ;`.

    Nodes in `var_decl_tern_expr` are declaration statements containing a
    ternary initializer; each is rewritten as a plain declaration
    (`T x ;`) followed by an if/else assignment.  Other subtrees are
    linearized unchanged.  Returns the token list for `root`'s subtree.
    """
    if isinstance(code_string, str):
        code_string = code_string.encode()
    assert isinstance(root, Node)
    tokens = []
    children = root.children
    if (len(children) == 0):
        tokens.append(cls.handle_terminal_node(root, code_string))
    for child in children:
        if (child in var_decl_tern_expr):
            for c in child.children:
                # The statement's trailing ';' is dropped; the rewrite emits its own.
                if (str(c.type) == ';'):
                    continue
                elif ((str(c.type) == 'variable_declarator') or (str(c.type) == 'init_declarator')):
                    # declarator layout: [name, '=', ternary_expression]
                    te_children = c.children
                    assignee = te_children[0]
                    assignee_token = get_tokens(code_string, assignee)[0]
                    # Emit the bare declaration first ("x ;"), then the if/else.
                    tokens.extend([assignee_token, ';'])
                    body = te_children[2]
                    tokens = cls.ternary_body_write(body, code_string, assignee, tokens)
                else:
                    # Type name and other parts of the declaration pass through.
                    tokens += get_tokens(code_string, c)
        else:
            tokens += JavaAndCPPProcessor.var_decl_ternary_removal(code_string, var_decl_tern_expr, child, parser)
    return tokens
@classmethod
def return_ternary_removal(cls, code_string, ret_tern_expr, root, parser):
    """Recursively re-tokenize the tree, rewriting `return c ? a : b ;`.

    Nodes in `ret_tern_expr` are return_statement nodes containing a
    ternary; each becomes `if (c) { return a ; } else { return b ; }`.
    Returns the token list for the subtree rooted at `root`.
    """
    if isinstance(code_string, str):
        code_string = code_string.encode()
    assert isinstance(root, Node)
    tokens = []
    children = root.children
    if (len(children) == 0):
        tokens.append(cls.handle_terminal_node(root, code_string))
    for child in children:
        if (child in ret_tern_expr):
            te_children = child.children
            # return_statement layout: ['return', ternary_expression, ';']
            assignee = te_children[0]
            body = te_children[1]
            # ret=True: the 'return' keyword is the assignee token, no '=' emitted.
            tokens = cls.ternary_body_write(body, code_string, assignee, tokens, ret=True)
            # NOTE(review): `break` drops siblings after the matched child
            # (normally just the ';' which the rewrite re-emits).
            break
        else:
            tokens += JavaAndCPPProcessor.return_ternary_removal(code_string, ret_tern_expr, child, parser)
    return tokens
@classmethod
def extract_ternary_expression(cls, root):
assi_ten_expr = []
varde_ten_expr = []
ret_ten_expr = []
queue = [root]
while (len(queue) > 0):
current_node = queue[0]
queue = queue[1:]
if ((str(current_node.type) == 'ternary_expression') and (str(current_node.parent.type) == 'assignment_expression')):
assi_ten_expr.append(current_node.parent)
if ((str(current_node.type) == 'ternary_expression') and (str(current_node.parent.type) == 'variable_declarator')):
varde_ten_expr.append(current_node.parent.parent)
if ((str(current_node.type) == 'ternary_expression') and (str(current_node.parent.type) == 'return_statement')):
ret_ten_expr.append(current_node.parent)
for child in current_node.children:
queue.append(child)
return (assi_ten_expr, varde_ten_expr, ret_ten_expr)
@classmethod
def incre_decre_removal(cls, code_string, parser):
    """Rewrite assignments containing pre/post increment or decrement.

    `x = ++y ;` becomes `y += 1 ; x = y ;` and `x = y-- ;` becomes
    `x = y ; y -= 1 ;`.  Pre-expressions are rewritten first; the tree is
    then re-scanned before attempting post-expressions.  Each rewrite is
    best-effort: on failure the current code is kept.

    Returns (root, code_string, success) where `success` is True if at
    least one rewrite was applied.
    """
    root = parser.parse_code(code_string)
    (pre_expr, post_expr) = cls.extract_incre_decre_expression(root, code_string)
    success = False
    if (len(pre_expr) > 0):
        try:
            modified_tokens = cls.pre_incre_decre_removal(code_string, pre_expr, root, parser)
            code_string = cls.beautify_java_code(modified_tokens)
            root = parser.parse_code(code_string)
            success = True
            (_, post_expr) = cls.extract_incre_decre_expression(root, code_string)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # propagate; the rewrite itself remains best-effort.
            pass
    if (len(post_expr) > 0):
        try:
            modified_tokens = cls.post_incre_decre_removal(code_string, post_expr, root, parser)
            code_string = cls.beautify_java_code(modified_tokens)
            root = parser.parse_code(code_string)
            success = True
        except Exception:
            pass
    return (root, code_string, success)
@classmethod
def pre_incre_decre_removal(cls, code_string, pre_expr, root, parser):
    """Recursively re-tokenize the tree, expanding `x = ++y` / `x = --y`.

    Nodes in `pre_expr` are statements whose assignment RHS is a prefix
    update expression; each is rewritten as
        y += 1 ; x = y ;   (resp. `-=` for `--`)
    preserving pre-increment semantics (update happens before the read).
    Returns the token list for the subtree rooted at `root`.
    """
    if isinstance(code_string, str):
        code_string = code_string.encode()
    assert isinstance(root, Node)
    tokens = []
    children = root.children
    if (len(children) == 0):
        tokens.append(cls.handle_terminal_node(root, code_string))
    for child in children:
        if (child in pre_expr):
            # statement layout: [assignment_expression, ';'];
            # assignment layout: [lhs, '=', update_expression]
            expr = child.children[0]
            assignee = expr.children[0]
            assignee_token = get_tokens(code_string, assignee)[0]
            op = ''
            # Prefix form: the '++'/'--' is the update expression's first child.
            if (str(expr.children[2].children[0].type) == '--'):
                op = '-='
            elif (str(expr.children[2].children[0].type) == '++'):
                op = '+='
            assigner = expr.children[2].children[(- 1)]
            assigner_token = get_tokens(code_string, assigner)[0]
            tokens.extend([assigner_token, op, '1', ';', assignee_token, '=', assigner_token, ';'])
        else:
            tokens += JavaAndCPPProcessor.pre_incre_decre_removal(code_string, pre_expr, child, parser)
    return tokens
@classmethod
def post_incre_decre_removal(cls, code_string, post_expr, root, parser):
    """Recursively re-tokenize the tree, expanding `x = y++` / `x = y--`.

    Nodes in `post_expr` are statements whose assignment RHS is a postfix
    update expression; each is rewritten as
        x = y ; y += 1 ;   (resp. `-=` for `--`)
    preserving post-increment semantics (read happens before the update).
    Returns the token list for the subtree rooted at `root`.
    """
    if isinstance(code_string, str):
        code_string = code_string.encode()
    assert isinstance(root, Node)
    tokens = []
    children = root.children
    if (len(children) == 0):
        tokens.append(cls.handle_terminal_node(root, code_string))
    for child in children:
        if (child in post_expr):
            # statement layout: [assignment_expression, ';'];
            # assignment layout: [lhs, '=', update_expression]
            expr = child.children[0]
            assignee = expr.children[0]
            assignee_token = get_tokens(code_string, assignee)[0]
            op = ''
            # Postfix form: the '++'/'--' is the update expression's last child.
            if (str(expr.children[2].children[(- 1)].type) == '--'):
                op = '-='
            elif (str(expr.children[2].children[(- 1)].type) == '++'):
                op = '+='
            assigner = expr.children[2].children[0]
            assigner_token = get_tokens(code_string, assigner)[0]
            tokens.extend([assignee_token, '=', assigner_token, ';', assigner_token, op, '1', ';'])
        else:
            tokens += JavaAndCPPProcessor.post_incre_decre_removal(code_string, post_expr, child, parser)
    return tokens
@classmethod
def extract_incre_decre_expression(cls, root, code_string):
    """Collect statements of the form `x = ++y` / `x = y--` etc.

    BFS for '++'/'--' leaves whose enclosing update expression sits on the
    RHS of a simple 3-child assignment (`identifier = update_expr`).  The
    appended node is the great-grandparent, i.e. the whole statement.
    Returns (pre_expr, post_expr) split on whether the operator leads the
    update expression.  `code_string` is unused but kept for interface
    compatibility with callers.
    """
    pre_expr = []
    post_expr = []
    queue = [root]
    while (len(queue) > 0):
        current_node = queue[0]
        queue = queue[1:]
        if (((str(current_node.type) == '++') or (str(current_node.type) == '--')) and ((str(current_node.parent.type) == 'update_expression') or (str(current_node.parent.type) == 'postfix_unary_expression') or (str(current_node.parent.type) == 'prefix_unary_expression')) and (str(current_node.parent.parent.type) == 'assignment_expression')):
            nodes = current_node.parent.parent.children
            # Only handle the plain `identifier = <update expr>` shape.
            if ((len(nodes) == 3) and (str(nodes[0].type) == 'identifier')):
                # Operator first => prefix (++y); otherwise postfix (y++).
                if ((str(nodes[2].children[0].type) == '++') or (str(nodes[2].children[0].type) == '--')):
                    pre_expr.append(current_node.parent.parent.parent)
                else:
                    post_expr.append(current_node.parent.parent.parent)
        for child in current_node.children:
            queue.append(child)
    return (pre_expr, post_expr)
@classmethod
def handle_terminal_node(cls, root_node, code_string):
if (root_node.type == 'comment'):
str_const = ''
else:
str_const = code_string[root_node.start_byte:root_node.end_byte].decode('utf-8')
return str_const
@classmethod
def remove_package_and_import(cls, code):
if isinstance(code, str):
code = code.encode()
code = code.decode().split('\n')
lines = [line.rstrip('\n') for line in code]
current_code_lines = []
for line in lines:
if (line.strip().startswith('import') or line.strip().startswith('package') or line.strip().startswith('#include')):
continue
current_code_lines.append(line)
code = ('\n'.join(current_code_lines) if len(current_code_lines) else '')
return code.encode()
@classmethod
def extract_expression(self, root, code):
expressions = []
queue = [root]
while (len(queue) > 0):
current_node = queue[0]
queue = queue[1:]
if (str(current_node.type) == 'binary_expression'):
children_nodes = current_node.children
keep = ['<', '>', '<=', '>=', '==', '!=']
counter = 0
for w in children_nodes:
if (str(w.type) in keep):
counter = (counter + 1)
if (counter == 1):
expressions.append(current_node)
for child in current_node.children:
queue.append(child)
return expressions
@classmethod
def get_tokens_for_opswap(cls, code, root, left_oprd, operator, right_oprd):
    """Linearize the tree with the two operands of `operator` swapped.

    When recursion reaches the left operand's position it emits the right
    operand instead (and vice versa); the operator leaf is mirrored
    (< <-> >, <= <-> >=) while '==' and '!=' are symmetric and kept.
    Comments are dropped; string-typed nodes are emitted verbatim.
    Returns (tokens, None); the second element is unused by callers.
    """
    if isinstance(code, str):
        code = code.encode()
    assert isinstance(root, Node)
    tokens = []
    if (root.type == 'comment'):
        return (tokens, None)
    if ('string' in str(root.type)):
        return ([code[root.start_byte:root.end_byte].decode()], None)
    children = root.children
    if (len(children) == 0):
        # Leaf: mirror the operator, pass everything else through.
        if ((root.start_byte == operator.start_byte) and (root.end_byte == operator.end_byte)):
            opt = code[operator.start_byte:operator.end_byte].decode()
            if (opt == '<'):
                tokens.append('>')
            elif (opt == '>'):
                tokens.append('<')
            elif (opt == '>='):
                tokens.append('<=')
            elif (opt == '<='):
                tokens.append('>=')
            elif (opt == '=='):
                tokens.append('==')
            elif (opt == '!='):
                tokens.append('!=')
        else:
            tokens.append(code[root.start_byte:root.end_byte].decode())
    for child in children:
        # Matching by byte span: descend into the *other* operand to swap.
        if ((child.start_byte == left_oprd.start_byte) and (child.end_byte == left_oprd.end_byte)):
            (ts, _) = cls.get_tokens_for_opswap(code, right_oprd, left_oprd, operator, right_oprd)
        elif ((child.start_byte == right_oprd.start_byte) and (child.end_byte == right_oprd.end_byte)):
            (ts, _) = cls.get_tokens_for_opswap(code, left_oprd, left_oprd, operator, right_oprd)
        else:
            (ts, _) = cls.get_tokens_for_opswap(code, child, left_oprd, operator, right_oprd)
        tokens += ts
    return (tokens, None)
@classmethod
def operand_swap(cls, code_str, parser):
    """Swap the operands of one randomly chosen relational expression.

    Candidate expressions (exactly one relational operator) are tried in
    random order until one rewrite succeeds.  Returns
    (code_string, success); on failure the original code is returned
    re-tokenized and beautified.
    """
    code = code_str.encode()
    root = parser.parse_code(code)
    expressions = cls.extract_expression(root, code)
    success = False
    try:
        while ((not success) and (len(expressions) > 0)):
            selected_exp = np.random.choice(expressions)
            expressions.remove(selected_exp)
            bin_exp = selected_exp
            # NOTE(review): `condition` is computed but never used.
            condition = code[bin_exp.start_byte:bin_exp.end_byte].decode()
            bin_exp = bin_exp.children
            # binary_expression layout: [left, operator, right]
            left_oprd = bin_exp[0]
            operator = bin_exp[1]
            right_oprd = bin_exp[2]
            try:
                code_list = cls.get_tokens_for_opswap(code, root, left_oprd, operator, right_oprd)[0]
                code_string = ''
                for w in code_list:
                    code_string = ((code_string + w) + ' ')
                code_string = code_string.strip()
                success = True
            except:
                # NOTE(review): bare except -- swallows all errors,
                # including KeyboardInterrupt; consider `except Exception`.
                success = False
                continue
    except:
        pass
    if (not success):
        # Fallback: return the original code, re-tokenized and joined.
        code_string = cls.beautify_java_code(get_tokens(code_str, root))
    return (code_string, success)
@classmethod
def extract_if_else(cls, root, code_str, operator_list):
    """Find if/else statements whose condition is a single simple comparison.

    A candidate if_statement must have exactly one operator from
    `operator_list` in its condition, no logical connectors (&&, &, ||, |),
    an `else` branch, and no `else if` chain.  Children of an `else if`
    are not descended into.  Returns a list of [if_statement, condition]
    pairs.
    """
    ext_opt_list = ['&&', '&', '||', '|']
    expressions = []
    queue = [root]
    not_consider = []
    while (len(queue) > 0):
        current_node = queue[0]
        queue = queue[1:]
        if (str(current_node.type) == 'if_statement'):
            # NOTE(review): `clause` and `cond` are computed but unused.
            clause = code_str[current_node.start_byte:current_node.end_byte].decode()
            des = current_node.children[1]
            cond = code_str[des.start_byte:des.end_byte].decode()
            # Collect the condition's leaf nodes (DFS, then reversed to
            # restore left-to-right order).
            stack = [des]
            nodes = []
            while (len(stack) > 0):
                root1 = stack.pop()
                if (len(root1.children) == 0):
                    nodes.append(root1)
                for child in root1.children:
                    stack.append(child)
            nodes.reverse()
            counter = 0
            extra_counter = 0
            for w in nodes:
                if (str(w.type) in operator_list):
                    counter = (counter + 1)
                if (str(w.type) in ext_opt_list):
                    extra_counter = (extra_counter + 1)
            # Require exactly one comparison and no logical connectors.
            if (not ((counter == 1) and (extra_counter == 0))):
                continue
            children_nodes = current_node.children
            flagx = 0
            flagy = 0
            for w in children_nodes:
                if (str(w.type) == 'else'):
                    flagx = 1
                if (str(w.type) == 'if_statement'):
                    # `else if`: exclude it and skip its subtree below.
                    not_consider.append(w)
                    flagy = 1
            if ((flagx == 1) and (flagy == 0)):
                expressions.append([current_node, des])
        for child in current_node.children:
            if (child not in not_consider):
                queue.append(child)
    return expressions
@classmethod
def get_tokens_for_blockswap(cls, code, root, first_block, opt_node, second_block, flagx, flagy):
    """Linearize the tree with the if/else blocks swapped and the
    comparison operator negated.

    When recursion reaches the first block's span it emits the second
    block instead (and vice versa); `flagx`/`flagy` ensure each swap
    happens only once.  The operator leaf is negated (< -> >=, etc.).
    Returns (tokens, None); the second element is unused by callers.
    """
    if isinstance(code, str):
        code = code.encode()
    assert isinstance(root, Node)
    tokens = []
    if (root.type == 'comment'):
        return (tokens, None)
    if ('string' in str(root.type)):
        return ([code[root.start_byte:root.end_byte].decode()], None)
    children = root.children
    if (len(children) == 0):
        # Leaf: negate the comparison so the swapped blocks stay equivalent.
        if ((root.start_byte == opt_node.start_byte) and (root.end_byte == opt_node.end_byte)):
            op = code[root.start_byte:root.end_byte].decode()
            if (op == '<'):
                tokens.append('>=')
            elif (op == '>'):
                tokens.append('<=')
            elif (op == '>='):
                tokens.append('<')
            elif (op == '<='):
                tokens.append('>')
            elif (op == '!='):
                tokens.append('==')
            elif (op == '=='):
                tokens.append('!=')
        else:
            tokens.append(code[root.start_byte:root.end_byte].decode())
    for child in children:
        # NOTE(review): `child_type` is computed but never used.
        child_type = str(child.type)
        if ((child.start_byte == first_block.start_byte) and (child.end_byte == first_block.end_byte) and (flagx == 0) and (str(child.type) == str(first_block.type))):
            flagx = 1
            (ts, _) = cls.get_tokens_for_blockswap(code, second_block, first_block, opt_node, second_block, flagx, flagy)
        elif ((child.start_byte == second_block.start_byte) and (child.end_byte == second_block.end_byte) and (flagy == 0) and (str(child.type) == str(second_block.type))):
            flagy = 1
            (ts, _) = cls.get_tokens_for_blockswap(code, first_block, first_block, opt_node, second_block, flagx, flagy)
        else:
            (ts, _) = cls.get_tokens_for_blockswap(code, child, first_block, opt_node, second_block, flagx, flagy)
        tokens += ts
    return (tokens, None)
@classmethod
def block_swap_java(cls, code_str, parser):
    """Swap the then/else blocks of one random simple if/else (Java).

    Candidates come from extract_if_else; the condition operator is
    negated so behavior is preserved.  Returns (code_string, success);
    on failure the original code is returned re-tokenized and joined.
    """
    code = code_str.encode()
    root = parser.parse_code(code)
    operator_list = ['<', '>', '<=', '>=', '==', '!=']
    pair = cls.extract_if_else(root, code, operator_list)
    success = False
    lst = list(range(0, len(pair)))
    try:
        while ((not success) and (len(lst) > 0)):
            selected = np.random.choice(lst)
            lst.remove(selected)
            clause = pair[selected][0]
            des = pair[selected][1]
            # DFS over the condition only to locate the operator leaf;
            # the `nodes` list built here is overwritten below.
            st = [des]
            nodes = []
            while (len(st) > 0):
                root1 = st.pop()
                if (len(root1.children) == 0):
                    nodes.append(root1)
                    if (code[root1.start_byte:root1.end_byte].decode() in operator_list):
                        opt_node = root1
                        break
                for child in root1.children:
                    st.append(child)
            nodes = clause.children
            flag = 0
            # First 'block' child is the then-branch, second the else-branch.
            for current_node in nodes:
                if (str(current_node.type) == 'block'):
                    if (flag == 0):
                        first_block = current_node
                        flag = 1
                    else:
                        second_block = current_node
            flagx = 0
            flagy = 0
            # NOTE(review): if opt_node/first_block/second_block were never
            # assigned, the NameError below is swallowed by the except.
            try:
                code_list = cls.get_tokens_for_blockswap(code, root, first_block, opt_node, second_block, flagx, flagy)[0]
                code_string = ''
                for w in code_list:
                    code_string = ((code_string + w) + ' ')
                code_string = code_string.strip()
                success = True
            except:
                success = False
                continue
    except:
        pass
    if (not success):
        code_string = cls.beautify_java_code(get_tokens(code_str, root))
    return (code_string, success)
@classmethod
def block_swap_c(cls, code_str, parser):
    """Swap the then/else blocks of one random simple if/else (C/C++).

    Identical to block_swap_java except that C grammars label branch
    bodies 'compound_statement' instead of 'block'.  Returns
    (code_string, success); on failure the original code is returned
    re-tokenized and joined.
    """
    code = code_str.encode()
    root = parser.parse_code(code)
    operator_list = ['<', '>', '<=', '>=', '==', '!=']
    pair = cls.extract_if_else(root, code, operator_list)
    success = False
    lst = list(range(0, len(pair)))
    try:
        while ((not success) and (len(lst) > 0)):
            selected = np.random.choice(lst)
            lst.remove(selected)
            clause = pair[selected][0]
            des = pair[selected][1]
            # DFS over the condition only to locate the operator leaf;
            # the `nodes` list built here is overwritten below.
            st = [des]
            nodes = []
            while (len(st) > 0):
                root1 = st.pop()
                if (len(root1.children) == 0):
                    nodes.append(root1)
                    if (code[root1.start_byte:root1.end_byte].decode() in operator_list):
                        opt_node = root1
                        break
                for child in root1.children:
                    st.append(child)
            nodes = clause.children
            flag = 0
            # First compound_statement is the then-branch, second the else-branch.
            for current_node in nodes:
                if (str(current_node.type) == 'compound_statement'):
                    if (flag == 0):
                        first_block = current_node
                        flag = 1
                    else:
                        second_block = current_node
            flagx = 0
            flagy = 0
            # NOTE(review): if opt_node/first_block/second_block were never
            # assigned, the NameError below is swallowed by the except.
            try:
                code_list = cls.get_tokens_for_blockswap(code, root, first_block, opt_node, second_block, flagx, flagy)[0]
                code_string = ''
                for w in code_list:
                    code_string = ((code_string + w) + ' ')
                code_string = code_string.strip()
                success = True
            except:
                success = False
                continue
    except:
        pass
    if (not success):
        code_string = cls.beautify_java_code(get_tokens(code_str, root))
    return (code_string, success)
|
class JavascriptProcessor():
    """Tree-sitter based semantic-preserving transformations for JavaScript.

    Mirrors the Java/C++ processor: dead-code wrappers (never-executing
    for/while/if), for<->while loop conversion, operand swapping in
    relational expressions, and if/else block swapping.  All top-level
    transformations are best-effort: failures fall back to returning the
    original code, re-tokenized and joined.
    """
    @classmethod
    def create_dead_for_loop(cls, body):
        """Wrap `body` in a for-loop that never executes its body."""
        control_variable = ('_i_' + str(np.random.choice(list(range(10)))))
        p = np.random.uniform(0, 1)
        if (p < 0.5):
            # Counter starts at 0 but the condition requires > 0: never entered.
            prefix = (((((('for ( let ' + control_variable) + ' = 0 ; ') + control_variable) + ' > 0 ; ') + control_variable) + ' ++ ) { ')
            loop = ((prefix + body) + ' } ')
            return loop
        else:
            return (('for ( ; false ; ) { ' + body) + '}')
    @classmethod
    def create_dead_while_loop(cls, body):
        """Wrap `body` in a while-loop whose condition is always false."""
        p = np.random.uniform(0, 1)
        control_variable = ('_i_' + str(np.random.choice(list(range(10)))))
        if (p < 0.33):
            return (('while ( false ) { ' + body) + ' }')
        elif (p < 0.66):
            # A value is never strictly less (or greater) than itself.
            return (((((('while ( ' + control_variable) + ' < ') + control_variable) + ' ) { ') + body) + ' } ')
        else:
            return (((((('while ( ' + control_variable) + ' > ') + control_variable) + ' ) { ') + body) + ' } ')
    @classmethod
    def create_dead_if(cls, body):
        """Wrap `body` in an if-statement whose condition is always false."""
        p = np.random.uniform(0, 1)
        control_variable = ('_i_' + str(np.random.choice(list(range(10)))))
        if (p < 0.33):
            return (('if ( false ) { ' + body) + ' }')
        elif (p < 0.66):
            return (((((('if ( ' + control_variable) + ' < ') + control_variable) + ' ) { ') + body) + ' } ')
        else:
            return (((((('if ( ' + control_variable) + ' > ') + control_variable) + ' ) { ') + body) + ' } ')
    @classmethod
    def get_tokens_insert_before(cls, code_str, root, insertion_code, insert_before_node):
        """Linearize the subtree at `root`, splicing `insertion_code` tokens
        immediately before each node in `insert_before_node`.

        Comments are dropped; string-typed nodes are emitted verbatim.
        """
        if (not isinstance(insert_before_node, list)):
            insert_before_node = [insert_before_node]
        if isinstance(code_str, str):
            code_str = code_str.encode()
        assert isinstance(root, Node)
        tokens = []
        if (root.type == 'comment'):
            return tokens
        if ('string' in str(root.type)):
            return [code_str[root.start_byte:root.end_byte].decode()]
        if (root in insert_before_node):
            tokens += insertion_code.split()
        children = root.children
        if ((len(children) == 0) or (str(root.type) in ['string'])):
            tokens.append(code_str[root.start_byte:root.end_byte].decode())
        else:
            for child in children:
                ts = cls.get_tokens_insert_before(code_str, child, insertion_code, insert_before_node)
                tokens += ts
        return tokens
    @classmethod
    def get_tokens(cls, code, root):
        """Linearize the subtree at `root` into source tokens.

        Comments are dropped; string-typed nodes become single tokens.
        """
        if isinstance(code, str):
            code = code.encode()
        assert isinstance(root, Node)
        tokens = []
        if (root.type == 'comment'):
            return tokens
        if ('string' in str(root.type)):
            return [code[root.start_byte:root.end_byte].decode()]
        children = root.children
        if ((len(children) == 0) or (str(root.type) in ['string'])):
            tokens.append(code[root.start_byte:root.end_byte].decode())
        else:
            for child in children:
                ts = cls.get_tokens(code, child)
                tokens += ts
        return tokens
    @classmethod
    def get_breaking_statements(cls, block):
        """Collect continue/break/return statements in `block` (DFS).

        Does not descend below a breaking statement itself.
        """
        breakings = ['continue_statement', 'break_statement', 'return_statement']
        statements = []
        stack = [block]
        while (len(stack) > 0):
            top = stack.pop()
            if (str(top.type) in breakings):
                statements.append(top)
            else:
                for child in top.children:
                    stack.append(child)
        return statements
    @classmethod
    def for_to_while_random(cls, code_string, parser):
        """Convert one randomly chosen for-loop to a while-loop.

        Tries candidates in random order until one succeeds.  Returns
        (root, code_string, success); on failure the original code is
        returned re-tokenized and joined.
        """
        root = parser.parse_code(code_string)
        loops = cls.extract_for_loops(root)
        success = False
        try:
            while ((not success) and (len(loops) > 0)):
                selected_loop = np.random.choice(loops)
                loops.remove(selected_loop)
                (modified_root, modified_code_string, success) = cls.for_to_while(code_string, root, selected_loop, parser)
                if success:
                    root = modified_root
                    code_string = modified_code_string
        except:
            # NOTE(review): bare except swallows all errors, including
            # KeyboardInterrupt; consider narrowing to Exception.
            pass
        if (not success):
            ts = cls.get_tokens(code_string, root)
            code_string = cls.beautify_java_code(ts)
        return (root, code_string, success)
    @classmethod
    def while_to_for_random(cls, code_string, parser):
        """Convert one randomly chosen while-loop to a for-loop.

        Returns (root, code_string, success).  NOTE(review): unlike
        for_to_while_random, the failure fallback runs *inside* the try,
        so an error there is swallowed and the raw code_string returned.
        """
        root = parser.parse_code(code_string)
        loops = cls.extract_while_loops(root)
        success = False
        try:
            while ((not success) and (len(loops) > 0)):
                selected_loop = np.random.choice(loops)
                loops.remove(selected_loop)
                (modified_root, modified_code_string, success) = cls.while_to_for(code_string, root, selected_loop, parser)
                if success:
                    root = modified_root
                    code_string = modified_code_string
            if (not success):
                ts = cls.get_tokens(code_string, root)
                code_string = cls.beautify_java_code(ts)
        except:
            pass
        return (root, code_string, success)
    @classmethod
    def extract_for_loops(cls, root):
        """Collect every for_statement node (BFS)."""
        loops = []
        queue = [root]
        while (len(queue) > 0):
            current_node = queue[0]
            queue = queue[1:]
            if (str(current_node.type) == 'for_statement'):
                loops.append(current_node)
            for child in current_node.children:
                queue.append(child)
        return loops
    @classmethod
    def beautify_java_code(cls, tokens):
        """Join tokens into one line, tightening member access and '++'.

        NOTE(review): the first substitution replaces ' . ' with the empty
        string, which deletes the dot itself ('a . b' -> 'ab'); the intent
        was presumably '.' ('a.b') -- confirm before relying on output.
        """
        code = ' '.join(tokens)
        code = re.sub(' \\. ', '', code)
        code = re.sub(' \\+\\+', '++', code)
        return code
    @classmethod
    def get_tokens_replace_for(cls, code_str, for_node, root, init, cond, update, body):
        """Linearize the tree, replacing `for_node` with an equivalent
        `init ; while (cond) { body update }` token sequence.
        """
        if isinstance(code_str, str):
            code_str = code_str.encode()
        assert isinstance(root, Node)
        tokens = []
        if (root.type == 'comment'):
            return tokens
        if ('string' in str(root.type)):
            # NOTE(review): `parent` is computed but never used.
            parent = root.parent
            return [code_str[root.start_byte:root.end_byte].decode()]
        children = root.children
        if ((len(children) == 0) or (str(root.type) in ['string'])):
            tokens.append(code_str[root.start_byte:root.end_byte].decode())
        else:
            for child in children:
                if (child == for_node):
                    tokens.extend(((((((init + ['while', '(']) + cond) + [')', '{']) + body) + update) + ['}']))
                else:
                    tokens += cls.get_tokens_replace_for(code_str, for_node, child, init, cond, update, body)
        return tokens
    @classmethod
    def for_to_while(cls, code_string, root, fl, parser):
        """Convert the for-loop node `fl` into a while-loop.

        for_statement layout: ['for', '(', init, cond, update?, ')', body].
        The update is re-inserted before each break/continue/return inside
        the body so loop semantics are preserved.  Loops with an empty
        condition are not converted.  Returns (root, code, success).
        """
        children = fl.children
        init = children[2]
        init_tokens = cls.get_tokens(code_string, init)
        comparison = children[3]
        if (str(comparison.type) != ';'):
            comp_tokens = cls.get_tokens(code_string, comparison)
            if (comp_tokens[(- 1)] == ';'):
                comp_tokens = comp_tokens[:(- 1)]
            update = children[4]
            if (str(update.type) == ')'):
                # No update clause: body follows immediately.
                update_tokens = []
                body = children[5]
            else:
                update_tokens = (cls.get_tokens(code_string, update) + [';'])
                body = children[6]
            breaking_statements = cls.get_breaking_statements(body)
            body_tokens = cls.get_tokens_insert_before(code_string, body, ' '.join(update_tokens), breaking_statements)
            # Drop the body's own braces; the while wrapper adds its own.
            if ((len(body_tokens) >= 2) and ((body_tokens[0] == '{') and (body_tokens[(- 1)] == '}'))):
                body_tokens = body_tokens[1:(- 1)]
            tokens = cls.get_tokens_replace_for(code_str=code_string, for_node=fl, root=root, init=init_tokens, cond=comp_tokens, update=update_tokens, body=body_tokens)
            code = cls.beautify_java_code(tokens)
            return (parser.parse_code(code), code, True)
        return (root, code_string, False)
    @classmethod
    def extract_while_loops(cls, root):
        """Collect every while_statement node (BFS)."""
        loops = []
        queue = [root]
        while (len(queue) > 0):
            current_node = queue[0]
            queue = queue[1:]
            if (str(current_node.type) == 'while_statement'):
                loops.append(current_node)
            for child in current_node.children:
                queue.append(child)
        return loops
    @classmethod
    def while_to_for(cls, code_string, root, wl, parser):
        """Convert the while-loop node `wl` into `for ( ; cond ; ) {...}`.

        Only converts when the condition is parenthesized.  Returns
        (root, code, success).
        """
        children = wl.children
        condition = children[1]
        body = children[2]
        if (str(condition.type) == 'parenthesized_expression'):
            expr_tokens = cls.get_tokens(code_string, condition.children[1])
            body_tokens = cls.get_tokens(code_string, body)
            # Drop the body's own braces; the for wrapper adds its own.
            if ((len(body_tokens) >= 2) and ((body_tokens[0] == '{') and (body_tokens[(- 1)] == '}'))):
                body_tokens = body_tokens[1:(- 1)]
            tokens = cls.get_tokens_replace_while(code_str=code_string, while_node=wl, root=root, cond=expr_tokens, body=body_tokens)
            code = cls.beautify_java_code(tokens)
            return (parser.parse_code(code), code, True)
        return (root, code_string, False)
    @classmethod
    def get_tokens_replace_while(cls, code_str, while_node, root, cond, body):
        """Linearize the tree, replacing `while_node` with
        `for ( ; cond ; ) { body }` tokens.
        """
        if isinstance(code_str, str):
            code_str = code_str.encode()
        assert isinstance(root, Node)
        tokens = []
        if (root.type == 'comment'):
            return tokens
        if ('string' in str(root.type)):
            return [code_str[root.start_byte:root.end_byte].decode()]
        children = root.children
        if ((len(children) == 0) or (str(root.type) in ['string'])):
            tokens.append(code_str[root.start_byte:root.end_byte].decode())
        else:
            for child in children:
                if (child == while_node):
                    tokens.extend(((((['for', '(', ';'] + cond) + [';', ')', '{']) + body) + ['}']))
                else:
                    tokens += cls.get_tokens_replace_while(code_str, while_node, child, cond, body)
        return tokens
    @classmethod
    def extract_expression(self, root, code):
        """Collect binary_expression nodes with exactly one comparison
        operator (including JS strict === / !==).

        NOTE(review): classmethod's first parameter is named `self`
        (binds the class); `code` is unused.
        """
        expressions = []
        queue = [root]
        while (len(queue) > 0):
            current_node = queue[0]
            queue = queue[1:]
            if (str(current_node.type) == 'binary_expression'):
                children_nodes = current_node.children
                keep = ['<', '>', '<=', '>=', '==', '!=', '===', '!==']
                counter = 0
                for w in children_nodes:
                    if (str(w.type) in keep):
                        counter = (counter + 1)
                if (counter == 1):
                    expressions.append(current_node)
            for child in current_node.children:
                queue.append(child)
        return expressions
    @classmethod
    def get_tokens_for_opswap(cls, code, root, left_oprd, operator, right_oprd):
        """Linearize the tree with the operands of `operator` swapped.

        Mirrors ordering operators (< <-> >, <= <-> >=); the symmetric
        ==, !=, ===, !== are kept as-is.  Returns (tokens, None).
        """
        if isinstance(code, str):
            code = code.encode()
        assert isinstance(root, Node)
        tokens = []
        if (root.type == 'comment'):
            return (tokens, None)
        if ('string' in str(root.type)):
            return ([code[root.start_byte:root.end_byte].decode()], None)
        children = root.children
        if (len(children) == 0):
            if ((root.start_byte == operator.start_byte) and (root.end_byte == operator.end_byte)):
                opt = code[operator.start_byte:operator.end_byte].decode()
                if (opt == '<'):
                    tokens.append('>')
                elif (opt == '>'):
                    tokens.append('<')
                elif (opt == '>='):
                    tokens.append('<=')
                elif (opt == '<='):
                    tokens.append('>=')
                elif (opt == '=='):
                    tokens.append('==')
                elif (opt == '!='):
                    tokens.append('!=')
                elif (opt == '==='):
                    tokens.append('===')
                elif (opt == '!=='):
                    tokens.append('!==')
            else:
                tokens.append(code[root.start_byte:root.end_byte].decode())
        for child in children:
            # Matching by byte span: descend into the *other* operand to swap.
            if ((child.start_byte == left_oprd.start_byte) and (child.end_byte == left_oprd.end_byte)):
                (ts, _) = cls.get_tokens_for_opswap(code, right_oprd, left_oprd, operator, right_oprd)
            elif ((child.start_byte == right_oprd.start_byte) and (child.end_byte == right_oprd.end_byte)):
                (ts, _) = cls.get_tokens_for_opswap(code, left_oprd, left_oprd, operator, right_oprd)
            else:
                (ts, _) = cls.get_tokens_for_opswap(code, child, left_oprd, operator, right_oprd)
            tokens += ts
        return (tokens, None)
    @classmethod
    def operand_swap(cls, code_str, parser):
        """Swap the operands of one randomly chosen comparison expression.

        Returns (code_string, success); on failure the original code is
        returned re-tokenized and joined (via the module-level get_tokens
        helper -- presumably equivalent to cls.get_tokens; verify).
        """
        code = code_str.encode()
        root = parser.parse_code(code)
        expressions = cls.extract_expression(root, code)
        success = False
        try:
            while ((not success) and (len(expressions) > 0)):
                selected_exp = np.random.choice(expressions)
                expressions.remove(selected_exp)
                bin_exp = selected_exp
                # NOTE(review): `condition` is computed but never used.
                condition = code[bin_exp.start_byte:bin_exp.end_byte].decode()
                bin_exp = bin_exp.children
                left_oprd = bin_exp[0]
                operator = bin_exp[1]
                right_oprd = bin_exp[2]
                try:
                    code_list = cls.get_tokens_for_opswap(code, root, left_oprd, operator, right_oprd)[0]
                    code_string = ''
                    for w in code_list:
                        code_string = ((code_string + w) + ' ')
                    code_string = code_string.strip()
                    success = True
                except:
                    success = False
                    continue
        except:
            pass
        if (not success):
            code_string = cls.beautify_java_code(get_tokens(code_str, root))
        return (code_string, success)
    @classmethod
    def extract_if_else(cls, root, code_str, operator_list):
        """Find if/else statements whose condition is one simple comparison.

        Like the Java version, but an `else if` hides inside an
        else_clause node, so that clause's children are inspected.
        Returns a list of [if_statement, condition] pairs.
        """
        ext_opt_list = ['&&', '&', '||', '|', 'and', 'or']
        expressions = []
        queue = [root]
        not_consider = []
        while (len(queue) > 0):
            current_node = queue[0]
            queue = queue[1:]
            if (str(current_node.type) == 'if_statement'):
                # NOTE(review): `clause` and `cond` are computed but unused.
                clause = code_str[current_node.start_byte:current_node.end_byte].decode()
                des = current_node.children[1]
                cond = code_str[des.start_byte:des.end_byte].decode()
                stack = [des]
                nodes = []
                while (len(stack) > 0):
                    root1 = stack.pop()
                    if (len(root1.children) == 0):
                        nodes.append(root1)
                    for child in root1.children:
                        stack.append(child)
                nodes.reverse()
                counter = 0
                extra_counter = 0
                for w in nodes:
                    if (str(w.type) in operator_list):
                        counter = (counter + 1)
                    if (str(w.type) in ext_opt_list):
                        extra_counter = (extra_counter + 1)
                # Require exactly one comparison and no logical connectors.
                if (not ((counter == 1) and (extra_counter == 0))):
                    continue
                children_nodes = current_node.children
                flagx = 0
                flagy = 0
                for w in children_nodes:
                    if ((str(w.type) == 'else') or (str(w.type) == 'else_clause')):
                        flagx = 1
                        m = w.children
                        for x in m:
                            if (str(x.type) == 'if_statement'):
                                # `else if`: exclude and skip its subtree below.
                                not_consider.append(x)
                                flagy = 1
                                break
                if ((flagx == 1) and (flagy == 0)):
                    expressions.append([current_node, des])
            for child in current_node.children:
                if (child not in not_consider):
                    queue.append(child)
        return expressions
    @classmethod
    def get_tokens_for_blockswap(cls, code, root, first_block, opt_node, second_block, flagx, flagy):
        """Linearize the tree with the if/else blocks swapped and the
        comparison operator negated.  Returns (tokens, None).
        """
        if isinstance(code, str):
            code = code.encode()
        assert isinstance(root, Node)
        tokens = []
        if (root.type == 'comment'):
            return (tokens, None)
        if ('string' in str(root.type)):
            return ([code[root.start_byte:root.end_byte].decode()], None)
        children = root.children
        if (len(children) == 0):
            # Leaf: negate the comparison so the swapped blocks stay equivalent.
            if ((root.start_byte == opt_node.start_byte) and (root.end_byte == opt_node.end_byte)):
                op = code[root.start_byte:root.end_byte].decode()
                if (op == '<'):
                    tokens.append('>=')
                elif (op == '>'):
                    tokens.append('<=')
                elif (op == '>='):
                    tokens.append('<')
                elif (op == '<='):
                    tokens.append('>')
                elif (op == '!='):
                    tokens.append('==')
                elif (op == '=='):
                    tokens.append('!=')
            else:
                tokens.append(code[root.start_byte:root.end_byte].decode())
        for child in children:
            # NOTE(review): `child_type` is computed but never used.
            child_type = str(child.type)
            if ((child.start_byte == first_block.start_byte) and (child.end_byte == first_block.end_byte) and (flagx == 0) and (str(child.type) == str(first_block.type))):
                flagx = 1
                (ts, _) = cls.get_tokens_for_blockswap(code, second_block, first_block, opt_node, second_block, flagx, flagy)
            elif ((child.start_byte == second_block.start_byte) and (child.end_byte == second_block.end_byte) and (flagy == 0) and (str(child.type) == str(second_block.type))):
                flagy = 1
                (ts, _) = cls.get_tokens_for_blockswap(code, first_block, first_block, opt_node, second_block, flagx, flagy)
            else:
                (ts, _) = cls.get_tokens_for_blockswap(code, child, first_block, opt_node, second_block, flagx, flagy)
            tokens += ts
        return (tokens, None)
    @classmethod
    def block_swap(cls, code_str, parser):
        """Swap the then/else blocks of one random simple if/else (JS).

        The then-branch is a statement_block child; the else-branch is the
        statement_block inside the else_clause.  Returns
        (code_string, success); on failure the original code is returned
        re-tokenized and joined.
        """
        code = code_str.encode()
        root = parser.parse_code(code)
        operator_list = ['<', '>', '<=', '>=', '==', '!=']
        pair = cls.extract_if_else(root, code, operator_list)
        success = False
        lst = list(range(0, len(pair)))
        try:
            while ((not success) and (len(lst) > 0)):
                selected = np.random.choice(lst)
                lst.remove(selected)
                clause = pair[selected][0]
                des = pair[selected][1]
                # DFS over the condition only to locate the operator leaf;
                # the `nodes` list built here is overwritten below.
                st = [des]
                nodes = []
                while (len(st) > 0):
                    root1 = st.pop()
                    if (len(root1.children) == 0):
                        nodes.append(root1)
                        if (code[root1.start_byte:root1.end_byte].decode() in operator_list):
                            opt_node = root1
                            break
                    for child in root1.children:
                        st.append(child)
                nodes = clause.children
                flag = 0
                for current_node in nodes:
                    if (str(current_node.type) == 'statement_block'):
                        first_block = current_node
                    elif (str(current_node.type) == 'else_clause'):
                        new_list = current_node.children
                        for w in new_list:
                            if (str(w.type) == 'statement_block'):
                                second_block = w
                                break
                flagx = 0
                flagy = 0
                # NOTE(review): if opt_node/first_block/second_block were
                # never assigned, the NameError below is swallowed.
                try:
                    code_list = cls.get_tokens_for_blockswap(code, root, first_block, opt_node, second_block, flagx, flagy)[0]
                    code_string = ''
                    for w in code_list:
                        code_string = ((code_string + w) + ' ')
                    code_string = code_string.strip()
                    success = True
                except:
                    success = False
                    continue
        except:
            pass
        if (not success):
            code_string = cls.beautify_java_code(get_tokens(code_str, root))
        return (code_string, success)
|
class PhpProcessor():
@classmethod
def create_dead_for_loop(cls, body):
control_variable = ('$_i_' + str(np.random.choice(list(range(10)))))
p = np.random.uniform(0, 1)
if (p < 0.5):
prefix = f'for ( {control_variable} = 0 ; {control_variable} > 0 ; {control_variable}++ )'
loop = (((prefix + '{ ') + body) + '}')
return loop
else:
return (('for ( ; FALSE ; ) { ' + body) + '}')
@classmethod
def create_dead_while_loop(cls, body):
p = np.random.uniform(0, 1)
control_variable = ('$_i_' + str(np.random.choice(list(range(10)))))
if (p < 0.33):
return (('while ( FALSE ) { ' + body) + ' }')
elif (p < 0.66):
return (((((('while ( ' + control_variable) + ' < ') + control_variable) + ' ) { ') + body) + ' } ')
else:
return (((((('while ( ' + control_variable) + ' > ') + control_variable) + ' ) { ') + body) + ' } ')
@classmethod
def create_dead_if(cls, body):
p = np.random.uniform(0, 1)
control_variable = ('$_i_' + str(np.random.choice(list(range(10)))))
if (p < 0.33):
return (('if ( FALSE ) { ' + body) + ' }')
elif (p < 0.66):
return (((((('if ( ' + control_variable) + ' < ') + control_variable) + ' ) { ') + body) + ' } ')
else:
return (((((('if ( ' + control_variable) + ' > ') + control_variable) + ' ) { ') + body) + ' } ')
@classmethod
def get_tokens_insert_before(cls, code_str, root, insertion_code, insert_before_node):
    """Linearize the subtree at `root`, splicing `insertion_code` tokens
    immediately before each node in `insert_before_node`.

    Comments are dropped; string-typed, variable_name and encapsed_string
    nodes are emitted as single verbatim tokens.
    """
    if (not isinstance(insert_before_node, list)):
        insert_before_node = [insert_before_node]
    if isinstance(code_str, str):
        code_str = code_str.encode()
    assert isinstance(root, Node)
    node_type = str(root.type)
    if root.type == 'comment':
        return []
    if 'string' in node_type:
        return [code_str[root.start_byte:root.end_byte].decode()]
    tokens = insertion_code.split() if root in insert_before_node else []
    kids = root.children
    if len(kids) == 0 or node_type in ['variable_name', 'encapsed_string']:
        tokens.append(code_str[root.start_byte:root.end_byte].decode())
    else:
        for kid in kids:
            tokens += cls.get_tokens_insert_before(code_str, kid, insertion_code, insert_before_node)
    return tokens
    @classmethod
    def get_tokens(cls, code, root):
        """Flatten the subtree at *root* into a list of source tokens.

        Comments are dropped; string-typed nodes and PHP variable_name /
        encapsed_string nodes are emitted as single tokens rather than
        descended into.
        """
        if isinstance(code, str):
            code = code.encode()
        assert isinstance(root, Node)
        tokens = []
        if (root.type == 'comment'):
            return tokens
        if ('string' in str(root.type)):
            return [code[root.start_byte:root.end_byte].decode()]
        children = root.children
        if ((len(children) == 0) or (str(root.type) in ['variable_name', 'encapsed_string'])):
            tokens.append(code[root.start_byte:root.end_byte].decode())
        else:
            for child in children:
                ts = cls.get_tokens(code, child)
                tokens += ts
        return tokens
    @classmethod
    def for_to_while_random(cls, code_string, parser):
        """Convert one randomly chosen for-loop in *code_string* to a while-loop.

        Candidate loops are tried until a conversion succeeds.  On failure the
        original code is re-tokenized and beautified instead.
        Returns (root, code_string, success).
        """
        root = parser.parse_code(code_string)
        loops = cls.extract_for_loops(root)
        success = False
        try:
            while ((not success) and (len(loops) > 0)):
                selected_loop = np.random.choice(loops)
                loops.remove(selected_loop)
                (modified_root, modified_code_string, success) = cls.for_to_while(code_string, root, selected_loop, parser)
                if success:
                    root = modified_root
                    code_string = modified_code_string
        except:
            # Best-effort transform: any parse/transform error leaves success False.
            pass
        if (not success):
            ts = cls.get_tokens(code_string, root)
            code_string = cls.beautify_java_code(ts)
        return (root, code_string, success)
@classmethod
def while_to_for_random(cls, code_string, parser):
root = parser.parse_code(code_string)
loops = cls.extract_while_loops(root)
success = False
try:
while ((not success) and (len(loops) > 0)):
selected_loop = np.random.choice(loops)
loops.remove(selected_loop)
(modified_root, modified_code_string, success) = cls.while_to_for(code_string, root, selected_loop, parser)
if success:
root = modified_root
code_string = modified_code_string
if (not success):
ts = cls.get_tokens(code_string, root)
code_string = cls.beautify_java_code(ts)
except:
pass
return (root, code_string, success)
@classmethod
def extract_for_loops(cls, root):
loops = []
queue = [root]
while (len(queue) > 0):
current_node = queue[0]
queue = queue[1:]
if (str(current_node.type) == 'for_statement'):
loops.append(current_node)
for child in current_node.children:
queue.append(child)
return loops
@classmethod
def beautify_java_code(cls, tokens):
code = ' '.join(tokens)
code = re.sub(' \\. ', '', code)
code = re.sub(' \\+\\+', '++', code)
return code
    @classmethod
    def get_tokens_replace_for(cls, code_str, for_node, root, init, cond, update, body):
        """Flatten *root* into tokens, replacing the *for_node* subtree with an
        equivalent while-loop: ``init ; while ( cond ) { body update }``.

        *init*, *cond*, *update* and *body* are pre-computed token lists.
        """
        if isinstance(code_str, str):
            code_str = code_str.encode()
        assert isinstance(root, Node)
        tokens = []
        if (root.type == 'comment'):
            return tokens
        if ('string' in str(root.type)):
            return [code_str[root.start_byte:root.end_byte].decode()]
        children = root.children
        if ((len(children) == 0) or (str(root.type) in ['variable_name', 'encapsed_string'])):
            tokens.append(code_str[root.start_byte:root.end_byte].decode())
        else:
            for child in children:
                if (child == for_node):
                    # Emit: init ; while ( cond ) { body update }
                    tokens.extend(((((((init + [';', 'while', '(']) + cond) + [')', '{']) + body) + update) + ['}']))
                else:
                    tokens += cls.get_tokens_replace_for(code_str, for_node, child, init, cond, update, body)
        return tokens
@classmethod
def get_breaking_statements(cls, block):
breakings = ['continue_statement', 'break_statement', 'return_statement']
statements = []
stack = [block]
while (len(stack) > 0):
top = stack.pop()
if (str(top.type) in breakings):
statements.append(top)
else:
for child in top.children:
stack.append(child)
return statements
    @classmethod
    def for_to_while(cls, code_string, root, fl, parser):
        """Convert the for-loop node *fl* into an equivalent while-loop.

        Assumed child layout: ``for ( init ; cond ; update ) body`` with init
        at index 2, condition at 4, update at 6 (or ')' when absent) and the
        body at 7/8 accordingly.  The update tokens are also spliced before any
        break/continue/return inside the body so the loop variable stays
        current.  Returns (new_root, new_code, True) on success, otherwise
        (root, code_string, False).
        """
        children = fl.children
        init = children[2]
        init_tokens = cls.get_tokens(code_string, init)
        comparison = children[4]
        if (str(comparison.type) != ';'):
            comp_tokens = cls.get_tokens(code_string, comparison)
            update = children[6]
            if (str(update.type) == ')'):
                # No update clause: the body follows immediately.
                update_tokens = []
                body = children[7]
            else:
                update_tokens = (cls.get_tokens(code_string, update) + [';'])
                body = children[8]
            breaking_statements = cls.get_breaking_statements(body)
            body_tokens = cls.get_tokens_insert_before(code_string, body, ' '.join(update_tokens), breaking_statements)
            if ((len(body_tokens) >= 2) and ((body_tokens[0] == '{') and (body_tokens[(- 1)] == '}'))):
                # Strip the braces; the while wrapper re-adds its own.
                body_tokens = body_tokens[1:(- 1)]
            tokens = cls.get_tokens_replace_for(code_str=code_string, for_node=fl, root=root, init=init_tokens, cond=comp_tokens, update=update_tokens, body=body_tokens)
            code = cls.beautify_java_code(tokens)
            return (parser.parse_code(code), code, True)
        return (root, code_string, False)
@classmethod
def extract_while_loops(cls, root):
loops = []
queue = [root]
while (len(queue) > 0):
current_node = queue[0]
queue = queue[1:]
if (str(current_node.type) == 'while_statement'):
loops.append(current_node)
for child in current_node.children:
queue.append(child)
return loops
    @classmethod
    def while_to_for(cls, code_string, root, wl, parser):
        """Convert the while-loop node *wl* into an equivalent for-loop with
        empty init/update clauses: ``while (cond) body -> for ( ; cond ; ) body``.

        Only parenthesized conditions are handled.  Returns
        (new_root, new_code, True) on success, otherwise (root, code_string, False).
        """
        children = wl.children
        condition = children[1]
        body = children[2]
        if (str(condition.type) == 'parenthesized_expression'):
            # children[1] of the parenthesized expression is the inner condition.
            expr_tokens = cls.get_tokens(code_string, condition.children[1])
            body_tokens = cls.get_tokens(code_string, body)
            if ((len(body_tokens) >= 2) and ((body_tokens[0] == '{') and (body_tokens[(- 1)] == '}'))):
                # Strip the braces; the for wrapper re-adds its own.
                body_tokens = body_tokens[1:(- 1)]
            tokens = cls.get_tokens_replace_while(code_str=code_string, while_node=wl, root=root, cond=expr_tokens, body=body_tokens)
            code = cls.beautify_java_code(tokens)
            return (parser.parse_code(code), code, True)
        return (root, code_string, False)
    @classmethod
    def get_tokens_replace_while(cls, code_str, while_node, root, cond, body):
        """Flatten *root* into tokens, replacing the *while_node* subtree with
        an equivalent for-loop: ``for ( ; cond ; ) { body }``.

        *cond* and *body* are pre-computed token lists.
        """
        if isinstance(code_str, str):
            code_str = code_str.encode()
        assert isinstance(root, Node)
        tokens = []
        if (root.type == 'comment'):
            return tokens
        if ('string' in str(root.type)):
            return [code_str[root.start_byte:root.end_byte].decode()]
        children = root.children
        if ((len(children) == 0) or (str(root.type) in ['variable_name', 'encapsed_string'])):
            tokens.append(code_str[root.start_byte:root.end_byte].decode())
        else:
            for child in children:
                if (child == while_node):
                    # Emit: for ( ; cond ; ) { body }
                    tokens.extend(((((['for', '(', ';'] + cond) + [';', ')', '{']) + body) + ['}']))
                else:
                    tokens += cls.get_tokens_replace_while(code_str, while_node, child, cond, body)
        return tokens
@classmethod
def extract_expression(self, root, code):
expressions = []
queue = [root]
while (len(queue) > 0):
current_node = queue[0]
queue = queue[1:]
if (str(current_node.type) == 'binary_expression'):
children_nodes = current_node.children
keep = ['<', '>', '<=', '>=', '==', '!=', '<>', '===', '!==']
counter = 0
for w in children_nodes:
if (str(w.type) in keep):
counter = (counter + 1)
if (counter == 1):
expressions.append(current_node)
for child in current_node.children:
queue.append(child)
return expressions
    @classmethod
    def get_tokens_for_opswap(cls, code, root, left_oprd, operator, right_oprd):
        """Flatten *root* into tokens while swapping *left_oprd* with
        *right_oprd* and mirroring *operator* (< <-> >, <= <-> >=), so the
        comparison's value is preserved.

        Implementation trick: when the walk reaches the left operand it
        recurses on the right operand's subtree instead (and vice versa),
        emitting the swapped operand text in place.  Returns (tokens, None);
        the second element exists only for interface symmetry.
        """
        if isinstance(code, str):
            code = code.encode()
        assert isinstance(root, Node)
        tokens = []
        if (root.type == 'comment'):
            return (tokens, None)
        if ('string' in str(root.type)):
            return ([code[root.start_byte:root.end_byte].decode()], None)
        children = root.children
        if (len(children) == 0):
            if ((root.start_byte == operator.start_byte) and (root.end_byte == operator.end_byte)):
                opt = code[operator.start_byte:operator.end_byte].decode()
                if (opt == '<'):
                    tokens.append('>')
                elif (opt == '>'):
                    tokens.append('<')
                elif (opt == '>='):
                    tokens.append('<=')
                elif (opt == '<='):
                    tokens.append('>=')
                elif (opt == '=='):
                    # Symmetric operators are unchanged by an operand swap.
                    tokens.append('==')
                elif (opt == '!='):
                    tokens.append('!=')
                elif (opt == '<>'):
                    tokens.append('<>')
                elif (opt == '==='):
                    tokens.append('===')
                elif (opt == '!=='):
                    tokens.append('!==')
            else:
                tokens.append(code[root.start_byte:root.end_byte].decode())
        for child in children:
            if ((child.start_byte == left_oprd.start_byte) and (child.end_byte == left_oprd.end_byte)):
                # Emit the RIGHT operand where the left one sits.
                (ts, _) = cls.get_tokens_for_opswap(code, right_oprd, left_oprd, operator, right_oprd)
            elif ((child.start_byte == right_oprd.start_byte) and (child.end_byte == right_oprd.end_byte)):
                # Emit the LEFT operand where the right one sits.
                (ts, _) = cls.get_tokens_for_opswap(code, left_oprd, left_oprd, operator, right_oprd)
            else:
                (ts, _) = cls.get_tokens_for_opswap(code, child, left_oprd, operator, right_oprd)
            tokens += ts
        return (tokens, None)
@classmethod
def operand_swap(cls, code_str, parser):
code = code_str.encode()
root = parser.parse_code(code)
expressions = cls.extract_expression(root, code)
success = False
try:
while ((not success) and (len(expressions) > 0)):
selected_exp = np.random.choice(expressions)
expressions.remove(selected_exp)
bin_exp = selected_exp
condition = code[bin_exp.start_byte:bin_exp.end_byte].decode()
bin_exp = bin_exp.children
left_oprd = bin_exp[0]
operator = bin_exp[1]
right_oprd = bin_exp[2]
try:
code_list = cls.get_tokens_for_opswap(code, root, left_oprd, operator, right_oprd)[0]
code_string = ''
for w in code_list:
code_string = ((code_string + w) + ' ')
code_string = code_string.strip()
success = True
except:
success = False
continue
except:
pass
if (not success):
code_string = cls.beautify_java_code(get_tokens(code_str, root))
return (code_string, success)
    @classmethod
    def extract_if_else(cls, root, code_str, operator_list):
        """Collect [if_statement, condition] pairs eligible for branch
        swapping: exactly one operator from *operator_list* in the condition,
        no boolean connectives, an else clause present and no else-if chain.

        else_if_clause subtrees are recorded in *not_consider* so nested
        if-statements inside them are never revisited.
        NOTE: the `continue` on a rejected condition also skips enqueueing that
        node's children, so ifs nested beneath a rejected if are not examined.
        """
        ext_opt_list = ['&&', '&', '||', '|']
        expressions = []
        queue = [root]
        not_consider = []
        while (len(queue) > 0):
            current_node = queue[0]
            queue = queue[1:]
            if (str(current_node.type) == 'if_statement'):
                clause = code_str[current_node.start_byte:current_node.end_byte].decode()
                des = current_node.children[1]
                cond = code_str[des.start_byte:des.end_byte].decode()
                # Gather the condition's leaf nodes (reversed back to source order).
                stack = [des]
                nodes = []
                while (len(stack) > 0):
                    root1 = stack.pop()
                    if (len(root1.children) == 0):
                        nodes.append(root1)
                    for child in root1.children:
                        stack.append(child)
                nodes.reverse()
                counter = 0
                extra_counter = 0
                for w in nodes:
                    if (str(w.type) in operator_list):
                        counter = (counter + 1)
                    if (str(w.type) in ext_opt_list):
                        extra_counter = (extra_counter + 1)
                if (not ((counter == 1) and (extra_counter == 0))):
                    continue
                children_nodes = current_node.children
                flagx = 0
                flagy = 0
                for w in children_nodes:
                    if (str(w.type) == 'else_clause'):
                        flagx = 1
                    if (str(w.type) == 'else_if_clause'):
                        not_consider.append(w)
                        flagy = 1
                if ((flagx == 1) and (flagy == 0)):
                    expressions.append([current_node, des])
            for child in current_node.children:
                if (child not in not_consider):
                    queue.append(child)
        return expressions
    @classmethod
    def get_tokens_for_blockswap(cls, code, root, first_block, opt_node, second_block, flagx, flagy):
        """Flatten *root* into tokens while exchanging *first_block* with
        *second_block* and negating *opt_node* (< -> >=, == -> != ...), which
        keeps the if/else semantics once the branch bodies are swapped.

        *flagx*/*flagy* guard each block from being swapped more than once.
        Returns (tokens, None); the second element exists only for interface
        symmetry.
        """
        if isinstance(code, str):
            code = code.encode()
        assert isinstance(root, Node)
        tokens = []
        if (root.type == 'comment'):
            return (tokens, None)
        if ('string' in str(root.type)):
            return ([code[root.start_byte:root.end_byte].decode()], None)
        children = root.children
        if (len(children) == 0):
            if ((root.start_byte == opt_node.start_byte) and (root.end_byte == opt_node.end_byte)):
                # Emit the logical negation of the comparison operator.
                op = code[root.start_byte:root.end_byte].decode()
                if (op == '<'):
                    tokens.append('>=')
                elif (op == '>'):
                    tokens.append('<=')
                elif (op == '>='):
                    tokens.append('<')
                elif (op == '<='):
                    tokens.append('>')
                elif (op == '!='):
                    tokens.append('==')
                elif (op == '=='):
                    tokens.append('!=')
            else:
                tokens.append(code[root.start_byte:root.end_byte].decode())
        for child in children:
            child_type = str(child.type)  # NOTE: computed but unused; kept as-is.
            if ((child.start_byte == first_block.start_byte) and (child.end_byte == first_block.end_byte) and (flagx == 0) and (str(child.type) == str(first_block.type))):
                flagx = 1
                # Emit the SECOND block where the first one sits.
                (ts, _) = cls.get_tokens_for_blockswap(code, second_block, first_block, opt_node, second_block, flagx, flagy)
            elif ((child.start_byte == second_block.start_byte) and (child.end_byte == second_block.end_byte) and (flagy == 0) and (str(child.type) == str(second_block.type))):
                flagy = 1
                # Emit the FIRST block where the second one sits.
                (ts, _) = cls.get_tokens_for_blockswap(code, first_block, first_block, opt_node, second_block, flagx, flagy)
            else:
                (ts, _) = cls.get_tokens_for_blockswap(code, child, first_block, opt_node, second_block, flagx, flagy)
            tokens += ts
        return (tokens, None)
@classmethod
def block_swap(cls, code_str, parser):
code = code_str.encode()
root = parser.parse_code(code)
operator_list = ['<', '>', '<=', '>=', '==', '!=']
pair = cls.extract_if_else(root, code, operator_list)
success = False
lst = list(range(0, len(pair)))
try:
while ((not success) and (len(lst) > 0)):
selected = np.random.choice(lst)
lst.remove(selected)
clause = pair[selected][0]
des = pair[selected][1]
st = [des]
nodes = []
while (len(st) > 0):
root1 = st.pop()
if (len(root1.children) == 0):
nodes.append(root1)
if (code[root1.start_byte:root1.end_byte].decode() in operator_list):
opt_node = root1
break
for child in root1.children:
st.append(child)
nodes = clause.children
flag = 0
for current_node in nodes:
if (str(current_node.type) == 'compound_statement'):
first_block = current_node
elif (str(current_node.type) == 'else_clause'):
second_block = current_node.children[1]
flagx = 0
flagy = 0
try:
code_list = cls.get_tokens_for_blockswap(code, root, first_block, opt_node, second_block, flagx, flagy)[0]
code_string = ''
for w in code_list:
code_string = ((code_string + w) + ' ')
code_string = code_string.strip()
success = True
except:
success = False
continue
except:
pass
if (not success):
code_string = cls.beautify_java_code(get_tokens(code_str, root))
return (code_string, success)
|
class PythonProcessor():
    """Tree-sitter based, semantics-preserving code transforms for Python.

    Token streams produced by these helpers use the sentinel tokens
    NEWLINE / INDENT / DEDENT to encode Python's layout, which
    beautify_python_code() later turns back into indented source text.
    `parser` arguments are expected to expose parse_code() returning a
    tree-sitter root Node.
    """

    @classmethod
    def create_dead_for_loop(cls, body):
        """Wrap *body* in a `for ... in range(0)` loop, which never executes."""
        control_variable = ('_i_' + str(np.random.choice(list(range(10)))))
        loop = f'NEWLINE for {control_variable} in range ( 0 ) : NEWLINE INDENT {body} NEWLINE DEDENT '
        return loop

    @classmethod
    def create_dead_while_loop(cls, body):
        """Wrap *body* in a while-loop whose condition can never be true."""
        p = np.random.uniform(0, 1)
        control_variable = ('_i_' + str(np.random.choice(list(range(10)))))
        if (p < 0.33):
            return f'while False : NEWLINE INDENT {body} NEWLINE DEDENT'
        elif (p < 0.66):
            # x < x and x > x are both always false.
            return f'while {control_variable} < {control_variable} : NEWLINE INDENT {body} NEWLINE DEDENT'
        else:
            return f'while {control_variable} > {control_variable} : NEWLINE INDENT {body} NEWLINE DEDENT'

    @classmethod
    def create_dead_if(cls, body):
        """Wrap *body* in an if-statement whose condition can never be true."""
        p = np.random.uniform(0, 1)
        control_variable = ('_i_' + str(np.random.choice(list(range(10)))))
        if (p < 0.33):
            return f'if False : NEWLINE INDENT {body} NEWLINE DEDENT'
        elif (p < 0.66):
            return f'if {control_variable} < {control_variable} : NEWLINE INDENT {body} NEWLINE DEDENT'
        else:
            return f'if {control_variable} > {control_variable} : NEWLINE INDENT {body} NEWLINE DEDENT'

    @classmethod
    def get_tokens_insert_before(cls, code_str, root, insertion_code, insert_before_node):
        """Flatten the subtree at *root* into tokens, splicing the tokens of
        *insertion_code* immediately before each node in *insert_before_node*.

        Comments are dropped.  A string that is its parent's only child
        (i.e. a docstring-style expression statement) is dropped too; other
        strings are kept verbatim as one token.
        """
        if (not isinstance(insert_before_node, list)):
            insert_before_node = [insert_before_node]
        if isinstance(code_str, str):
            code_str = code_str.encode()
        assert isinstance(root, Node)
        tokens = []
        if (root.type == 'comment'):
            return tokens
        if ('string' in str(root.type)):
            parent = root.parent
            if (len(parent.children) == 1):
                # Statement consisting solely of a string literal: docstring.
                return tokens
            else:
                return [code_str[root.start_byte:root.end_byte].decode()]
        if (root in insert_before_node):
            tokens += insertion_code.split()
        children = root.children
        if (len(children) == 0):
            tokens.append(code_str[root.start_byte:root.end_byte].decode())
        for child in children:
            child_type = str(child.type)
            if (child_type == 'block'):
                # Entering a suite: open an indentation level.
                tokens += ['NEWLINE', 'INDENT']
            ts = cls.get_tokens_insert_before(code_str, child, insertion_code, insert_before_node)
            tokens += ts
            if child_type.endswith('statement'):
                tokens.append('NEWLINE')
            elif (child_type == 'block'):
                tokens.append('DEDENT')
        return tokens

    @classmethod
    def get_tokens(cls, code, root):
        """Flatten the subtree at *root* into a token list, dropping comments
        and docstrings and encoding layout as NEWLINE/INDENT/DEDENT."""
        if isinstance(code, str):
            code = code.encode()
        assert isinstance(root, Node)
        tokens = []
        if (root.type == 'comment'):
            return tokens
        if ('string' in str(root.type)):
            parent = root.parent
            if (len(parent.children) == 1):
                # Docstring-style statement: drop it.
                return tokens
            else:
                return [code[root.start_byte:root.end_byte].decode()]
        children = root.children
        if (len(children) == 0):
            tokens.append(code[root.start_byte:root.end_byte].decode())
        for child in children:
            child_type = str(child.type)
            if (child_type == 'block'):
                tokens += ['NEWLINE', 'INDENT']
            ts = cls.get_tokens(code, child)
            tokens += ts
            if child_type.endswith('statement'):
                tokens.append('NEWLINE')
            elif (child_type == 'block'):
                tokens.append('DEDENT')
        return tokens

    @classmethod
    def for_to_while_random(cls, code_string, parser):
        """Rewrite one randomly chosen for-loop in *code_string* as a while-loop.

        Returns (root, code_string, success); on failure the original code is
        re-tokenized and beautified instead.
        """
        root = parser.parse_code(code_string)
        loops = cls.extract_for_loops(root, code_string)
        success = False
        try:
            while ((not success) and (len(loops) > 0)):
                selected_loop = np.random.choice(loops)
                loops.remove(selected_loop)
                (modified_root, modified_code_string, success) = cls.for_to_while(code_string, root, selected_loop, parser)
                if success:
                    root = modified_root
                    code_string = modified_code_string
        except:
            # Best-effort transform: swallow parse/transform errors.
            pass
        if (not success):
            code_string = cls.beautify_python_code(cls.get_tokens(code_string, root))
        else:
            code_string = cls.beautify_python_code(code_string.split())
        return (root, code_string, success)

    @classmethod
    def while_to_for_random(cls, code_string, parser):
        """Intentional no-op: while -> for is not implemented for Python."""
        root = parser.parse_code(code_string)
        return (root, code_string, False)

    @classmethod
    def extract_for_loops(cls, root, code_str):
        """Breadth-first collect every for_statement node under *root*.

        *code_str* is unused; kept for interface parity with other processors.
        """
        loops = []
        queue = [root]
        while (len(queue) > 0):
            current_node = queue[0]
            queue = queue[1:]
            if (str(current_node.type) == 'for_statement'):
                loops.append(current_node)
            for child in current_node.children:
                queue.append(child)
        return loops

    @classmethod
    def beautify_python_code(cls, tokens):
        """Render a NEWLINE/INDENT/DEDENT token stream back into tab-indented
        Python source text, dropping blank lines."""
        indent_count = 0
        code = ''
        i = 0
        while (i < len(tokens)):
            token = tokens[i]
            if (token == 'NEWLINE'):
                code += '\n'
                # Re-establish the current indentation after each newline.
                for _ in range(indent_count):
                    code += '\t'
            elif (token == 'INDENT'):
                indent_count += 1
                code += '\t'
            elif (token == 'DEDENT'):
                indent_count -= 1
                # Drop the tab emitted for the level just closed.
                # NOTE(review): assumes code is non-empty here; a leading
                # DEDENT token would raise IndexError -- confirm streams
                # always start with text or NEWLINE.
                if (code[(- 1)] == '\t'):
                    code = code[:(- 1)]
            else:
                code += (token + ' ')
            i += 1
        lines = code.split('\n')
        taken_lines = []
        for line in lines:
            if (len(line.strip()) > 0):
                taken_lines.append(line.rstrip())
        code = '\n'.join(taken_lines)
        return code

    @classmethod
    def get_tokens_replace_for(cls, code_str, for_node, root, while_node):
        """Flatten *root* into tokens, substituting the pre-built token list
        *while_node* wherever the *for_node* subtree appears."""
        if isinstance(code_str, str):
            code_str = code_str.encode()
        assert isinstance(root, Node)
        tokens = []
        if (root.type == 'comment'):
            return tokens
        if ('string' in str(root.type)):
            parent = root.parent
            if (len(parent.children) == 1):
                return tokens
            else:
                return [code_str[root.start_byte:root.end_byte].decode()]
        children = root.children
        if (len(children) == 0):
            tokens.append(code_str[root.start_byte:root.end_byte].decode())
        for child in children:
            if (child == for_node):
                # Replace the for-loop with the prepared while-loop tokens.
                tokens += while_node
            else:
                child_type = str(child.type)
                if (child_type == 'block'):
                    tokens += ['NEWLINE', 'INDENT']
                tokens += cls.get_tokens_replace_for(code_str, for_node, child, while_node)
                if child_type.endswith('statement'):
                    tokens.append('NEWLINE')
                elif (child_type == 'block'):
                    tokens.append('DEDENT')
        return tokens

    @classmethod
    def for_to_while(cls, code_string, root, fl, parser):
        """Convert the `for <identifier> in range(...)` loop node *fl* into an
        equivalent while-loop.

        Only loops of that exact shape are handled.  The increment is also
        spliced before any break/continue/return inside the body so the
        control variable stays current.  Returns (new_root, new_code, True) on
        success, otherwise (root, code_string, False).
        """
        try:
            identifier = fl.children[1]
            in_node = fl.children[2]
            range_node = fl.children[3]
            body_node = fl.children[5]
            range_function = range_node.children[0]
            range_function_name = cls.get_tokens(code_string, range_function)[0]
            if ((range_function_name == 'range') and ((str(identifier.type) == 'identifier') and (len(identifier.children) == 0)) and ((str(in_node.type) == 'in') and (len(in_node.children) == 0))):
                argument_list = range_node.children[1].children
                args = []
                for a in argument_list:
                    k = str(a.type)
                    if (k not in ['(', ',', ')']):
                        args.append(a)
                (start, stop, step) = (['0'], ['0'], ['1'])
                if (len(args) == 1):
                    stop = cls.get_tokens(code_string, args[0])
                elif (len(args) == 2):
                    start = cls.get_tokens(code_string, args[0])
                    stop = cls.get_tokens(code_string, args[1])
                else:
                    start = cls.get_tokens(code_string, args[0])
                    stop = cls.get_tokens(code_string, args[1])
                    step = cls.get_tokens(code_string, args[2])
                identifier_name = cls.get_tokens(code_string, identifier)[0]
                terminal_statements = cls.find_terminal_statement(body_node)
                body_tokens = cls.get_tokens_insert_before(code_string, body_node, ' '.join((([identifier_name, '+='] + step) + ['NEWLINE'])), terminal_statements)
                # NOTE(review): the guard is `id in list(range(stop))`, i.e. it
                # checks 0 <= id < stop with unit stride regardless of
                # start/step; presumably equivalent for the common
                # non-negative cases -- confirm.
                while_stmt = ((((((((([identifier_name, '='] + start) + ['NEWLINE']) + ['while', identifier_name, 'in', 'list', '(', 'range', '(']) + stop) + [')', ')', ':', 'NEWLINE', 'INDENT']) + body_tokens) + ['NEWLINE', identifier_name, '+=']) + step) + ['DEDENT', 'NEWLINE'])
                tokens = cls.get_tokens_replace_for(code_str=code_string, for_node=fl, while_node=while_stmt, root=root)
                code = cls.beautify_python_code(tokens)
                return (parser.parse_code(code), ' '.join(tokens), True)
        except:
            # Any unexpected tree shape falls through to the failure return.
            pass
        return (root, code_string, False)

    @classmethod
    def find_terminal_statement(cls, body_node):
        """Depth-first collect break/continue/return nodes; subtrees of a
        terminal statement are not descended into."""
        statements = ['continue_statement', 'break_statement', 'return_statement']
        terminals = []
        stack = [body_node]
        while (len(stack) > 0):
            top = stack.pop()
            if (str(top.type) in statements):
                terminals.append(top)
            else:
                for child in top.children:
                    stack.append(child)
        return terminals
        pass

    @classmethod
    def extract_while_loops(cls, root):
        """Breadth-first collect every while_statement node under *root*."""
        loops = []
        queue = [root]
        while (len(queue) > 0):
            current_node = queue[0]
            queue = queue[1:]
            if (str(current_node.type) == 'while_statement'):
                loops.append(current_node)
            for child in current_node.children:
                queue.append(child)
        return loops

    @classmethod
    def while_to_for(cls, code_string, root, wl, parser):
        """Intentional no-op: while -> for is not implemented for Python."""
        return (root, code_string, False)

    @classmethod
    def get_tokens_replace_while(cls, code_str, while_node, root, cond, body):
        """Unsupported for Python; present only to satisfy the shared API."""
        raise NotImplementedError

    @classmethod
    def extract_expression(self, root, code):
        """Breadth-first collect comparison_operator nodes containing exactly
        one comparison operator (simple binary comparisons).

        *code* is unused; kept for interface parity with other processors.
        NOTE: first parameter of this @classmethod is conventionally `cls`.
        """
        expressions = []
        queue = [root]
        while (len(queue) > 0):
            current_node = queue[0]
            queue = queue[1:]
            if (str(current_node.type) == 'comparison_operator'):
                children_nodes = current_node.children
                keep = ['<', '>', '<=', '>=', '==', '!=']
                counter = 0
                for w in children_nodes:
                    if (str(w.type) in keep):
                        counter = (counter + 1)
                if (counter == 1):
                    expressions.append(current_node)
            for child in current_node.children:
                queue.append(child)
        return expressions

    @classmethod
    def get_tokens_for_opswap(cls, code, root, left_oprd, operator, right_oprd):
        """Flatten *root* into tokens while swapping *left_oprd* with
        *right_oprd* and mirroring *operator* (< <-> >, <= <-> >=), so the
        comparison keeps its value.

        When the walk reaches the left operand it recurses on the right
        operand's subtree instead (and vice versa), emitting the swapped text
        in place.  Returns (tokens, None).
        """
        if isinstance(code, str):
            code = code.encode()
        assert isinstance(root, Node)
        tokens = []
        if (root.type == 'comment'):
            return (tokens, None)
        if ('string' in str(root.type)):
            parent = root.parent
            if (len(parent.children) == 1):
                # Docstring-style statement: drop it.
                return (tokens, None)
            else:
                return ([code[root.start_byte:root.end_byte].decode()], None)
        children = root.children
        if (len(children) == 0):
            if ((root.start_byte == operator.start_byte) and (root.end_byte == operator.end_byte)):
                opt = code[operator.start_byte:operator.end_byte].decode()
                if (opt == '<'):
                    tokens.append('>')
                elif (opt == '>'):
                    tokens.append('<')
                elif (opt == '>='):
                    tokens.append('<=')
                elif (opt == '<='):
                    tokens.append('>=')
                elif (opt == '=='):
                    # Symmetric operators are unchanged by an operand swap.
                    tokens.append('==')
                elif (opt == '!='):
                    tokens.append('!=')
            else:
                tokens.append(code[root.start_byte:root.end_byte].decode())
        for child in children:
            child_type = str(child.type)
            if (child_type == 'block'):
                tokens += ['NEWLINE', 'INDENT']
            if ((child.start_byte == left_oprd.start_byte) and (child.end_byte == left_oprd.end_byte)):
                # Emit the RIGHT operand where the left one sits.
                (ts, _) = cls.get_tokens_for_opswap(code, right_oprd, left_oprd, operator, right_oprd)
            elif ((child.start_byte == right_oprd.start_byte) and (child.end_byte == right_oprd.end_byte)):
                # Emit the LEFT operand where the right one sits.
                (ts, _) = cls.get_tokens_for_opswap(code, left_oprd, left_oprd, operator, right_oprd)
            else:
                (ts, _) = cls.get_tokens_for_opswap(code, child, left_oprd, operator, right_oprd)
            tokens += ts
            if child_type.endswith('statement'):
                tokens.append('NEWLINE')
            elif (child_type == 'block'):
                tokens.append('DEDENT')
        return (tokens, None)

    @classmethod
    def operand_swap(cls, code_str, parser):
        """Swap the operands of one randomly chosen comparison, mirroring the
        operator so semantics are preserved.  Returns (code_string, success)."""
        code = code_str.encode()
        root = parser.parse_code(code)
        expressions = cls.extract_expression(root, code)
        success = False
        try:
            while ((not success) and (len(expressions) > 0)):
                selected_exp = np.random.choice(expressions)
                expressions.remove(selected_exp)
                bin_exp = selected_exp
                condition = code[bin_exp.start_byte:bin_exp.end_byte].decode()
                bin_exp = bin_exp.children
                left_oprd = bin_exp[0]
                operator = bin_exp[1]
                right_oprd = bin_exp[2]
                try:
                    code_list = cls.get_tokens_for_opswap(code, root, left_oprd, operator, right_oprd)[0]
                    code_string = ''
                    for w in code_list:
                        code_string = ((code_string + w) + ' ')
                    code_string = code_string.strip()
                    success = True
                except:
                    # This candidate failed; try the next expression.
                    success = False
                    continue
        except:
            pass
        if (not success):
            code_string = cls.beautify_python_code(cls.get_tokens(code_str, root))
        else:
            code_string = cls.beautify_python_code(code_string.split())
        return (code_string, success)

    @classmethod
    def extract_if_else(cls, root, code_str, operator_list):
        """Collect [if_statement, condition] pairs eligible for branch
        swapping: exactly one comparison operator, no boolean connectives, an
        else clause present and no elif clause.

        NOTE: the `continue` on a rejected condition also skips enqueueing
        that node's children, so nested ifs beneath it are not examined.
        """
        ext_opt_list = ['&&', '&', '||', '|']
        expressions = []
        queue = [root]
        not_consider = []
        while (len(queue) > 0):
            current_node = queue[0]
            queue = queue[1:]
            if (str(current_node.type) == 'if_statement'):
                clause = code_str[current_node.start_byte:current_node.end_byte].decode()
                des = current_node.children[1]
                cond = code_str[des.start_byte:des.end_byte].decode()
                # Gather the condition's leaf nodes (reversed to source order).
                stack = [des]
                nodes = []
                while (len(stack) > 0):
                    root1 = stack.pop()
                    if (len(root1.children) == 0):
                        nodes.append(root1)
                    for child in root1.children:
                        stack.append(child)
                nodes.reverse()
                counter = 0
                extra_counter = 0
                for w in nodes:
                    if (str(w.type) in operator_list):
                        counter = (counter + 1)
                    if (str(w.type) in ext_opt_list):
                        extra_counter = (extra_counter + 1)
                if (not ((counter == 1) and (extra_counter == 0))):
                    continue
                children_nodes = current_node.children
                flagx = 0
                flagy = 0
                for w in children_nodes:
                    if (str(w.type) == 'else_clause'):
                        flagx = 1
                    if (str(w.type) == 'elif_clause'):
                        flagy = 1
                if ((flagx == 1) and (flagy == 0)):
                    expressions.append([current_node, des])
            for child in current_node.children:
                if (child not in not_consider):
                    queue.append(child)
        return expressions

    @classmethod
    def get_tokens_for_blockswap(cls, code, root, first_block, opt_node, second_block, flagx, flagy):
        """Flatten *root* into tokens while exchanging *first_block* with
        *second_block* and negating *opt_node* (< -> >=, == -> != ...), which
        preserves if/else semantics.  flagx/flagy guard against swapping
        either block more than once.  Returns (tokens, None)."""
        if isinstance(code, str):
            code = code.encode()
        assert isinstance(root, Node)
        tokens = []
        if (root.type == 'comment'):
            return (tokens, None)
        if ('string' in str(root.type)):
            parent = root.parent
            if (len(parent.children) == 1):
                # Docstring-style statement: drop it.
                return (tokens, None)
            else:
                return ([code[root.start_byte:root.end_byte].decode()], None)
        children = root.children
        if (len(children) == 0):
            if ((root.start_byte == opt_node.start_byte) and (root.end_byte == opt_node.end_byte)):
                # Emit the logical negation of the comparison operator.
                op = code[root.start_byte:root.end_byte].decode()
                if (op == '<'):
                    tokens.append('>=')
                elif (op == '>'):
                    tokens.append('<=')
                elif (op == '>='):
                    tokens.append('<')
                elif (op == '<='):
                    tokens.append('>')
                elif (op == '!='):
                    tokens.append('==')
                elif (op == '=='):
                    tokens.append('!=')
            else:
                tokens.append(code[root.start_byte:root.end_byte].decode())
        for child in children:
            child_type = str(child.type)
            if (child_type == 'block'):
                tokens += ['NEWLINE', 'INDENT']
            if ((child.start_byte == first_block.start_byte) and (child.end_byte == first_block.end_byte) and (flagx == 0) and (str(child.type) == str(first_block.type))):
                flagx = 1
                # Emit the SECOND block where the first one sits.
                (ts, _) = cls.get_tokens_for_blockswap(code, second_block, first_block, opt_node, second_block, flagx, flagy)
            elif ((child.start_byte == second_block.start_byte) and (child.end_byte == second_block.end_byte) and (flagy == 0) and (str(child.type) == str(second_block.type))):
                flagy = 1
                # Emit the FIRST block where the second one sits.
                (ts, _) = cls.get_tokens_for_blockswap(code, first_block, first_block, opt_node, second_block, flagx, flagy)
            else:
                (ts, _) = cls.get_tokens_for_blockswap(code, child, first_block, opt_node, second_block, flagx, flagy)
            tokens += ts
            if child_type.endswith('statement'):
                tokens.append('NEWLINE')
            elif (child_type == 'block'):
                tokens.append('DEDENT')
        return (tokens, None)

    @classmethod
    def block_swap(cls, code_str, parser):
        """Swap the then- and else-blocks of one randomly chosen if/else whose
        condition is a single comparison, negating the comparison operator.
        Returns (code_string, success)."""
        code = code_str.encode()
        root = parser.parse_code(code)
        operator_list = ['<', '>', '<=', '>=', '==', '!=']
        pair = cls.extract_if_else(root, code, operator_list)
        success = False
        lst = list(range(0, len(pair)))
        try:
            while ((not success) and (len(lst) > 0)):
                selected = np.random.choice(lst)
                lst.remove(selected)
                clause = pair[selected][0]
                des = pair[selected][1]
                # DFS the condition subtree for the comparison-operator leaf.
                st = [des]
                nodes = []
                while (len(st) > 0):
                    root1 = st.pop()
                    if (len(root1.children) == 0):
                        nodes.append(root1)
                        if (code[root1.start_byte:root1.end_byte].decode() in operator_list):
                            opt_node = root1
                            break
                    for child in root1.children:
                        st.append(child)
                # Locate the then-block and the else clause's block.
                nodes = clause.children
                flag = 0
                for current_node in nodes:
                    if (str(current_node.type) == 'block'):
                        first_block = current_node
                    elif (str(current_node.type) == 'else_clause'):
                        new_list = current_node.children
                        for w in new_list:
                            if (str(w.type) == 'block'):
                                second_block = w
                                break
                flagx = 0
                flagy = 0
                try:
                    # NameError here (operator/blocks not found above) is
                    # caught below and moves on to the next candidate.
                    code_list = cls.get_tokens_for_blockswap(code, root, first_block, opt_node, second_block, flagx, flagy)[0]
                    code_string = ''
                    for w in code_list:
                        code_string = ((code_string + w) + ' ')
                    code_string = code_string.strip()
                    success = True
                except:
                    success = False
                    continue
        except:
            pass
        if (not success):
            code_string = cls.beautify_python_code(cls.get_tokens(code_str, root))
        else:
            code_string = cls.beautify_python_code(code_string.split())
        return (code_string, success)
|
def get_python_tokens(code, root=None):
    """Tokenize Python source with the stdlib tokenizer.

    Layout tokens are replaced by the sentinels NEWLINE / INDENT / DEDENT;
    comments, logical-NL, encoding and end markers are dropped.  *root* is
    unused and kept only for interface parity with the tree-sitter based
    tokenizers.  Returns (tokens, None).

    Robustness fix: the previous implementation compared raw token numbers
    (0, 4, 5, 6, >= 58), which are fragile across Python versions; named
    tokenize constants are used instead.
    """
    if isinstance(code, bytes):
        code = code.decode()
    # Token kinds that carry no surface text we want to keep.
    skip = (tokenize.ENCODING, tokenize.ENDMARKER, tokenize.COMMENT, tokenize.NL)
    tokens = []
    for tok in tokenize.tokenize(BytesIO(code.encode('utf-8')).readline):
        if tok.type in skip:
            continue
        if tok.type == tokenize.NEWLINE:
            tokens.append('NEWLINE')
        elif tok.type == tokenize.INDENT:
            tokens.append('INDENT')
        elif tok.type == tokenize.DEDENT:
            tokens.append('DEDENT')
        else:
            tokens.append(tok.string)
    return (tokens, None)
|
class RubyProcessor():
@classmethod
def create_dead_for_loop(cls, body):
control_variable = ('_i_' + str(np.random.choice(list(range(10)))))
return f'for {control_variable} in 0..0 do {body} end '
@classmethod
def create_dead_while_loop(cls, body):
p = np.random.uniform(0, 1)
control_variable = ('_i_' + str(np.random.choice(list(range(10)))))
if (p < 0.33):
return (('until false do ' + body) + ' end ')
elif (p < 0.66):
return (((((('until ' + control_variable) + ' < ') + control_variable) + ' do ') + body) + ' end ')
else:
return (((((('until ' + control_variable) + ' > ') + control_variable) + ' do ') + body) + ' end ')
@classmethod
def create_dead_if(cls, body):
p = np.random.uniform(0, 1)
control_variable = ('_i_' + str(np.random.choice(list(range(10)))))
if (p < 0.33):
return (('if false then ' + body) + ' end ')
elif (p < 0.66):
return (((((('if ' + control_variable) + ' < ') + control_variable) + ' then ') + body) + ' end ')
else:
return (((((('if ' + control_variable) + ' > ') + control_variable) + ' then ') + body) + ' end ')
@classmethod
def extract_expression(self, root, code):
expressions = []
queue = [root]
while (len(queue) > 0):
current_node = queue[0]
queue = queue[1:]
if (str(current_node.type) == 'binary'):
children_nodes = current_node.children
keep = ['<', '>', '<=', '>=', '==', '!=', '===']
counter = 0
for w in children_nodes:
if (str(w.type) in keep):
counter = (counter + 1)
if (counter == 1):
expressions.append(current_node)
for child in current_node.children:
queue.append(child)
return expressions
    @classmethod
    def get_tokens_for_opswap(cls, code, root, left_oprd, operator, right_oprd):
        """Flatten *root* into tokens while swapping *left_oprd* with
        *right_oprd* and mirroring *operator* (< <-> >, <= <-> >=), preserving
        the comparison's value.

        When the walk reaches the left operand it recurses on the right
        operand's subtree instead (and vice versa), emitting the swapped text
        in place.  Returns (tokens, None).
        """
        if isinstance(code, str):
            code = code.encode()
        assert isinstance(root, Node)
        tokens = []
        if (root.type == 'comment'):
            return (tokens, None)
        if ('string' in str(root.type)):
            return ([code[root.start_byte:root.end_byte].decode()], None)
        children = root.children
        if (len(children) == 0):
            if ((root.start_byte == operator.start_byte) and (root.end_byte == operator.end_byte)):
                opt = code[operator.start_byte:operator.end_byte].decode()
                if (opt == '<'):
                    tokens.append('>')
                elif (opt == '>'):
                    tokens.append('<')
                elif (opt == '>='):
                    tokens.append('<=')
                elif (opt == '<='):
                    tokens.append('>=')
                elif (opt == '=='):
                    # Symmetric operators are unchanged by an operand swap.
                    tokens.append('==')
                elif (opt == '!='):
                    tokens.append('!=')
                elif (opt == '==='):
                    tokens.append('===')
            else:
                tokens.append(code[root.start_byte:root.end_byte].decode())
        for child in children:
            if ((child.start_byte == left_oprd.start_byte) and (child.end_byte == left_oprd.end_byte)):
                # Emit the RIGHT operand where the left one sits.
                (ts, _) = cls.get_tokens_for_opswap(code, right_oprd, left_oprd, operator, right_oprd)
            elif ((child.start_byte == right_oprd.start_byte) and (child.end_byte == right_oprd.end_byte)):
                # Emit the LEFT operand where the right one sits.
                (ts, _) = cls.get_tokens_for_opswap(code, left_oprd, left_oprd, operator, right_oprd)
            else:
                (ts, _) = cls.get_tokens_for_opswap(code, child, left_oprd, operator, right_oprd)
            tokens += ts
        return (tokens, None)
@classmethod
def operand_swap(cls, code_str, parser):
    """Swap the operands of one randomly chosen comparison expression.

    Picks random candidates from extract_expression until one can be
    re-tokenized with its operands swapped (e.g. "a > b" -> "b < a").
    Best-effort: any parse irregularity is swallowed and the next
    candidate is tried; on total failure the untransformed tokens are
    returned with ``success == False``.

    :param code_str: source code as a character string.
    :param parser: object exposing ``parse_code`` (a TransformationBase).
    :return: (code_string, success). NOTE(review): on failure the first
        member is the raw token *list* from get_tokens, not a joined
        string — confirm callers handle both shapes.
    """
    code = code_str.encode()
    root = parser.parse_code(code)
    expressions = cls.extract_expression(root, code)
    success = False
    try:
        while ((not success) and (len(expressions) > 0)):
            # Random candidate; remove it so failures make progress.
            selected_exp = np.random.choice(expressions)
            expressions.remove(selected_exp)
            bin_exp = selected_exp
            condition = code[bin_exp.start_byte:bin_exp.end_byte].decode()
            # A simple comparison parses as [left, operator, right].
            bin_exp = bin_exp.children
            left_oprd = bin_exp[0]
            operator = bin_exp[1]
            right_oprd = bin_exp[2]
            try:
                code_list = cls.get_tokens_for_opswap(code, root, left_oprd, operator, right_oprd)[0]
                code_string = ''
                for w in code_list:
                    code_string = ((code_string + w) + ' ')
                code_string = code_string.strip()
                success = True
            except:
                # This candidate failed; try the next one.
                success = False
                continue
    except:
        # Defensive catch-all: fall through to the untransformed path.
        pass
    if (not success):
        code_string = get_tokens(code_str, root)
    return (code_string, success)
@classmethod
def extract_if_else(cls, root, code_str, operator_list):
    """Find if/unless nodes with a plain condition and an else branch.

    BFS from ``root``. A node qualifies when:
    * its type is 'if' or 'unless' and its source text is longer than 6
      chars (filters trivial/degenerate matches);
    * its condition contains at most one operator from ``operator_list``
      and no logical connector ('&&', '&', '||', '|', 'and', 'or') —
      i.e. a single, safely invertible comparison;
    * it has an 'else' child but no 'elsif' child (elsif subtrees are
      also excluded from further traversal via ``not_consider``).

    :param root: root tree-sitter node.
    :param code_str: source bytes.
    :param operator_list: comparison operator strings considered swappable.
    :return: list of [if_node, condition_node] pairs.
    """
    ext_opt_list = ['&&', '&', '||', '|', 'and', 'or']
    expressions = []
    queue = [root]
    not_consider = []
    while (len(queue) > 0):
        current_node = queue[0]
        queue = queue[1:]
        if (((str(current_node.type) == 'if') or (str(current_node.type) == 'unless')) and (len(code_str[current_node.start_byte:current_node.end_byte].decode()) > 6)):
            clause = code_str[current_node.start_byte:current_node.end_byte].decode()
            # children[1] is taken as the condition subtree of the if/unless.
            des = current_node.children[1]
            cond = code_str[des.start_byte:des.end_byte].decode()
            # Collect the condition's leaves (DFS, then reversed to source order).
            stack = [des]
            nodes = []
            while (len(stack) > 0):
                root1 = stack.pop()
                if (len(root1.children) == 0):
                    nodes.append(root1)
                for child in root1.children:
                    stack.append(child)
            nodes.reverse()
            counter = 0
            extra_counter = 0
            for w in nodes:
                if (str(w.type) in operator_list):
                    counter = (counter + 1)
                if (str(w.type) in ext_opt_list):
                    extra_counter = (extra_counter + 1)
            # Skip compound conditions: more than one comparison, or any
            # logical connective, cannot be inverted by a single swap.
            if ((counter > 1) or (extra_counter > 0)):
                continue
            children_nodes = current_node.children
            flagx = 0
            flagy = 0
            for w in children_nodes:
                if (str(w.type) == 'else'):
                    flagx = 1
                if (str(w.type) == 'elsif'):
                    # elsif chains are not handled; prune them from the walk.
                    not_consider.append(w)
                    flagy = 1
            if ((flagx == 1) and (flagy == 0)):
                expressions.append([current_node, des])
        for child in current_node.children:
            if (child not in not_consider):
                queue.append(child)
    return expressions
@classmethod
def get_tokens_for_blockswap(cls, code, root, first_block, opt_node, second_block, flagx, flagy):
    """Re-tokenize the tree, negating one comparison and swapping two blocks.

    Tokens are emitted in source order, except that:
    * the leaf matching ``opt_node`` is emitted as the *negated*
      operator ('<' -> '>=', '>' -> '<=', '>=' -> '<', '<=' -> '>',
      '!=' -> '==', '==' -> '!='), inverting the condition;
    * the first occurrence of ``first_block`` is replaced by the tokens
      of ``second_block`` and vice versa — swapping the then/else bodies.

    ``flagx``/``flagy`` mark that each block has already been swapped
    once, so the re-entrant visit of the substituted block emits it
    verbatim instead of swapping again (types are also compared to avoid
    spurious byte-span collisions).

    :return: tuple ``(tokens, None)``; the second member is unused.
    """
    if isinstance(code, str):
        code = code.encode()
    assert isinstance(root, Node)
    tokens = []
    # Comments contribute no tokens.
    if (root.type == 'comment'):
        return (tokens, None)
    # String literals are emitted whole, never descended into.
    if ('string' in str(root.type)):
        return ([code[root.start_byte:root.end_byte].decode()], None)
    children = root.children
    if (len(children) == 0):
        if ((root.start_byte == opt_node.start_byte) and (root.end_byte == opt_node.end_byte)):
            # Negate the comparison operator.
            op = code[root.start_byte:root.end_byte].decode()
            if (op == '<'):
                tokens.append('>=')
            elif (op == '>'):
                tokens.append('<=')
            elif (op == '>='):
                tokens.append('<')
            elif (op == '<='):
                tokens.append('>')
            elif (op == '!='):
                tokens.append('==')
            elif (op == '=='):
                tokens.append('!=')
        else:
            tokens.append(code[root.start_byte:root.end_byte].decode())
    for child in children:
        child_type = str(child.type)
        if ((child.start_byte == first_block.start_byte) and (child.end_byte == first_block.end_byte) and (flagx == 0) and (str(child.type) == str(first_block.type))):
            # Emit the second block in place of the first (swap, once).
            flagx = 1
            (ts, _) = cls.get_tokens_for_blockswap(code, second_block, first_block, opt_node, second_block, flagx, flagy)
        elif ((child.start_byte == second_block.start_byte) and (child.end_byte == second_block.end_byte) and (flagy == 0) and (str(child.type) == str(second_block.type))):
            # Emit the first block in place of the second (swap, once).
            flagy = 1
            (ts, _) = cls.get_tokens_for_blockswap(code, first_block, first_block, opt_node, second_block, flagx, flagy)
        else:
            (ts, _) = cls.get_tokens_for_blockswap(code, child, first_block, opt_node, second_block, flagx, flagy)
        tokens += ts
    return (tokens, None)
@classmethod
def block_swap(cls, code_str, parser):
    """Swap the then/else blocks of one random if-else, negating its condition.

    Candidates come from extract_if_else (simple, single-comparison
    conditions with an else and no elsif). Best-effort: all failures are
    swallowed and the next candidate is tried; on total failure the
    untransformed (beautified) tokens are returned with success == False.

    :param code_str: source code as a character string.
    :param parser: object exposing ``parse_code`` (a TransformationBase).
    :return: (code_string, success).
    """
    code = code_str.encode()
    root = parser.parse_code(code)
    operator_list = ['<', '>', '<=', '>=', '==', '!=']
    pair = cls.extract_if_else(root, code, operator_list)
    success = False
    lst = list(range(0, len(pair)))
    try:
        while ((not success) and (len(lst) > 0)):
            selected = np.random.choice(lst)
            lst.remove(selected)
            clause = pair[selected][0]
            des = pair[selected][1]
            # DFS through the condition to locate its comparison operator.
            # NOTE(review): if no leaf matches, opt_node stays unbound and
            # the NameError below is absorbed by the outer/inner excepts.
            st = [des]
            nodes = []
            while (len(st) > 0):
                root1 = st.pop()
                if (len(root1.children) == 0):
                    nodes.append(root1)
                    if (code[root1.start_byte:root1.end_byte].decode() in operator_list):
                        opt_node = root1
                        break
                for child in root1.children:
                    st.append(child)
            # NOTE(review): the DFS 'nodes' list is discarded here
            # (overwritten with the clause's children) — looks vestigial.
            nodes = clause.children
            flag = 0
            for current_node in nodes:
                if (str(current_node.type) == 'then'):
                    first_block = current_node
                elif (str(current_node.type) == 'else'):
                    # children[1] skips the 'else' keyword to the body.
                    second_block = current_node.children[1]
            flagx = 0
            flagy = 0
            try:
                code_list = cls.get_tokens_for_blockswap(code, root, first_block, opt_node, second_block, flagx, flagy)[0]
                code_string = ''
                for w in code_list:
                    code_string = ((code_string + w) + ' ')
                code_string = code_string.strip()
                success = True
            except:
                # This candidate failed (possibly unbound opt_node /
                # first_block / second_block); try the next one.
                success = False
                continue
    except:
        pass
    if (not success):
        code_string = cls.beautify_java_code(get_tokens(code_str, root))
    return (code_string, success)
@classmethod
def beautify_java_code(cls, tokens):
    """Join *tokens* with spaces and tighten '.' and '++' spacing.

    ' . ' collapses entirely (member access); ' ++' loses only its
    leading space.
    """
    joined = ' '.join(tokens)
    for pattern, replacement in ((' \\. ', ''), (' \\+\\+', '++')):
        joined = re.sub(pattern, replacement, joined)
    return joined
|
def get_tokens(code_str, root):
    """Recursively collect the leaf tokens of the subtree at *root*.

    Comment nodes yield nothing; string nodes are returned whole so
    their contents are never split into sub-tokens. Leaf text is
    stripped of surrounding whitespace.
    """
    if isinstance(code_str, str):
        code_str = code_str.encode()
    assert isinstance(root, Node)
    if root.type == 'comment':
        return []
    if 'string' in str(root.type):
        return [code_str[root.start_byte:root.end_byte].decode()]
    if not root.children:
        return [code_str[root.start_byte:root.end_byte].decode().strip()]
    collected = []
    for child in root.children:
        collected.extend(get_tokens(code_str, child))
    return collected
|
def get_tokens_insert_before(code_str, root, insertion_code, insert_before_node):
    """Collect tokens like get_tokens, splicing *insertion_code* in first.

    The whitespace-split tokens of *insertion_code* are emitted
    immediately before the tokens of *insert_before_node*. Comment
    nodes yield nothing; string nodes are emitted whole (and, matching
    the original traversal order, are never checked as insertion
    points). Leaf text is NOT stripped here.
    """
    if isinstance(code_str, str):
        code_str = code_str.encode()
    assert isinstance(root, Node)
    if root.type == 'comment':
        return []
    if 'string' in str(root.type):
        return [code_str[root.start_byte:root.end_byte].decode()]
    tokens = []
    if root == insert_before_node:
        tokens.extend(insertion_code.split())
    if not root.children:
        tokens.append(code_str[root.start_byte:root.end_byte].decode())
    for child in root.children:
        tokens.extend(
            get_tokens_insert_before(code_str, child, insertion_code, insert_before_node)
        )
    return tokens
|
def dfs_print(root, level=0):
    """Print the subtree at *root* depth-first, one node per line,
    indented with one tab per level."""
    print('\t' * level, end='')
    print(root)
    for child in root.children:
        dfs_print(child, level + 1)
|
def count_nodes(root):
    """Return the number of nodes in the subtree rooted at *root*,
    counting *root* itself; None children are ignored."""
    return 1 + sum(
        count_nodes(child) for child in root.children if child is not None
    )
|
def extract_statement_within_size(root, max_node=10, endswith=None, code_string=None, tokenizer=None):
    """BFS for statement-like nodes of bounded subtree size.

    A node qualifies when its type ends with one of *endswith*
    (default: ['statement']), its subtree has strictly more than 1 and
    fewer than *max_node* nodes, and its tokenized text is non-empty.
    Without *code_string* and *tokenizer* the length check degrades to a
    constant placeholder string (always non-empty, so it never filters).
    """
    suffixes = ['statement'] if endswith is None else endswith
    statements = []
    queue = [root]
    while queue:
        node = queue.pop(0)
        subtree_size = count_nodes(node)
        if code_string is not None and tokenizer is not None:
            snippet = ' '.join(tokenizer(code_string, node)).strip()
        else:
            snippet = 'please provide code string and tokenizer to analyze code length'
        matches_type = any(str(node.type).endswith(s) for s in suffixes)
        if matches_type and 1 < subtree_size < max_node and len(snippet) > 0:
            statements.append(node)
        queue.extend(node.children)
    return statements
|
class BlockSwap(TransformationBase):
    """Transformation that swaps if/else blocks."""

    def __init__(self, parser_path, language):
        super().__init__(parser_path=parser_path, language=language)
        self.language = language
        # Language-specific transformation callables, tried at random.
        self.transformations = processor_function[language]
        token_extractors = {
            'java': self.get_tokens_with_node_type,
            'c': self.get_tokens_with_node_type,
            'cpp': self.get_tokens_with_node_type,
            'c_sharp': self.get_tokens_with_node_type,
            'javascript': JavascriptProcessor.get_tokens,
            'python': PythonProcessor.get_tokens,
            'php': PhpProcessor.get_tokens,
            'ruby': self.get_tokens_with_node_type,
            'go': self.get_tokens_with_node_type,
        }
        self.final_processor = token_extractors[self.language]

    def transform_code(self, code: Union[str, bytes]) -> Tuple[str, object]:
        """Try random transformations until one succeeds, then re-tokenize.

        :return: (whitespace-normalized token string,
                  {'types': ..., 'success': bool}).
        """
        success = False
        candidates = copy.deepcopy(self.transformations)
        while not success and candidates:
            chosen = np.random.choice(candidates)
            candidates.remove(chosen)
            modified_code, success = chosen(code, self)
            if success:
                code = modified_code
        root = self.parse_code(code=code)
        processed = self.final_processor(code=code.encode(), root=root)
        if isinstance(processed, tuple):
            tokens, types = processed
        else:
            tokens, types = processed, None
        flattened = re.sub('[ \t\n]+', ' ', ' '.join(tokens))
        return flattened, {'types': types, 'success': success}
|
class ConfusionRemover(TransformationBase):
    """Applies the language's registered source transformations.

    NOTE(review): the previous docstring ("Change the `for` loops with
    `while` loops and vice versa.") was copy-pasted from
    ForWhileTransformer; the actual behavior is whatever callables
    ``processor_function[language]`` supplies — confirm against that
    registry.
    """
    def __init__(self, parser_path, language):
        super(ConfusionRemover, self).__init__(parser_path=parser_path, language=language)
        self.language = language
        # Unlike sibling transformers, an unknown language degrades to an
        # empty transformation list instead of raising KeyError.
        if (language in processor_function):
            self.transformations = processor_function[language]
        else:
            self.transformations = []
        processor_map = {'java': self.get_tokens_with_node_type, 'c': self.get_tokens_with_node_type, 'cpp': self.get_tokens_with_node_type, 'c_sharp': self.get_tokens_with_node_type, 'javascript': JavascriptProcessor.get_tokens, 'python': PythonProcessor.get_tokens, 'php': PhpProcessor.get_tokens, 'ruby': self.get_tokens_with_node_type, 'go': self.get_tokens_with_node_type}
        self.final_processor = processor_map[self.language]
    def transform_code(self, code: Union[(str, bytes)]) -> Tuple[(str, object)]:
        """Try random transformations until one succeeds, then re-tokenize.

        :return: (space-joined tokens, {'types': ..., 'success': bool}).
        """
        success = False
        transform_functions = copy.deepcopy(self.transformations)
        while ((not success) and (len(transform_functions) > 0)):
            function = np.random.choice(transform_functions)
            transform_functions.remove(function)
            # Transformations here return (root, code, success); root unused.
            (modified_root, modified_code, success) = function(code, self)
            if success:
                code = modified_code
        root_node = self.parse_code(code=code)
        return_values = self.final_processor(code=code.encode(), root=root_node)
        if isinstance(return_values, tuple):
            (tokens, types) = return_values
        else:
            (tokens, types) = (return_values, None)
        # NOTE(review): unlike BlockSwap/OperandSwap this does not collapse
        # whitespace with re.sub — confirm whether intentional.
        return (' '.join(tokens), {'types': types, 'success': success})
|
class DeadCodeInserter(TransformationBase):
    """Inserts a never-executed ("dead") loop or if-block built from an
    existing statement of the program, preserving semantics."""

    def __init__(self, parser_path: str, language: str):
        super(DeadCodeInserter, self).__init__(parser_path=parser_path, language=language)
        self.language = language
        self.processor = processor_function[self.language]
        self.tokenizer_function = tokenizer_function[self.language]
        self.insertion_function = insertion_function[self.language]

    def insert_random_dead_code(self, code_string, max_node_in_statement=-1):
        """Pick a random statement, wrap a copy of it in a dead for/while/if
        construct, and insert that before another random statement.

        :param code_string: source code (str or bytes, as parse_code accepts).
        :param max_node_in_statement: max subtree size of candidate
            statements; -1 means half the node count of the whole tree.
        :return: (root, code, success) — parsed root and token string of the
            modified code on success, of the original code otherwise.
        """
        root = self.parse_code(code_string)
        original_node_count = count_nodes(root)
        if max_node_in_statement == -1:
            max_node_in_statement = int(original_node_count / 2)
        if self.language == 'ruby':
            statement_markers = ['assignment', 'until', 'call', 'if', 'for', 'while']
        else:
            statement_markers = None
        statements = extract_statement_within_size(
            root, max_node_in_statement, statement_markers,
            code_string=code_string, tokenizer=self.tokenizer_function)
        original_code = ' '.join(self.tokenizer_function(code_string, root))
        try:
            while len(statements) > 0:
                # Two draws (with replacement): the statement to copy and
                # the insertion point.
                (random_stmt, insert_before) = np.random.choice(statements, 2)
                statements.remove(random_stmt)
                dead_code_body = ' '.join(self.tokenizer_function(code_string, random_stmt)).strip()
                dead_code_builder = np.random.choice([
                    self.processor.create_dead_for_loop,
                    self.processor.create_dead_while_loop,
                    self.processor.create_dead_if,
                ])
                dead_code = dead_code_builder(dead_code_body)
                modified_code = ' '.join(self.insertion_function(
                    code_str=code_string, root=root,
                    insertion_code=dead_code, insert_before_node=insert_before))
                if modified_code != original_code:
                    # BUG FIX: modified_code is already a string here; the
                    # previous ' '.join(modified_code) iterated its
                    # characters and re-parsed "f o o ( )"-style garbage.
                    modified_root = self.parse_code(modified_code)
                    return (modified_root, modified_code, True)
        except Exception:
            # Best-effort: any failure falls through to the unchanged code.
            pass
        return (root, original_code, False)

    def transform_code(self, code: Union[(str, bytes)]) -> Tuple[(str, object)]:
        """Insert dead code and return the whitespace-normalized result."""
        (root, code, success) = self.insert_random_dead_code(code, -1)
        code = re.sub('[ \n\t]+', ' ', code)
        return (code, {'success': success})
|
class DemoTransformation(TransformationBase):
    """Minimal example: parse the code and return its tokens and types
    without transforming anything."""

    def __init__(self, parser, language):
        super().__init__(parser_path=parser, language=language)

    def transform_code(self, code: Union[str, bytes]) -> Tuple[str, object]:
        """Return (space-joined tokens, per-token ancestor-type chains)."""
        tree_root = self.parse_code(code=code)
        tokens, token_types = self.get_tokens_with_node_type(
            code=code.encode(), root=tree_root
        )
        return ' '.join(tokens), token_types
|
class ForWhileTransformer(TransformationBase):
    """Change the `for` loops with `while` loops and vice versa."""

    def __init__(self, parser_path, language):
        super().__init__(parser_path=parser_path, language=language)
        self.language = language
        # Language-specific transformation callables, tried at random.
        self.transformations = processor_function[language]
        token_extractors = {
            'java': self.get_tokens_with_node_type,
            'c': self.get_tokens_with_node_type,
            'cpp': self.get_tokens_with_node_type,
            'c_sharp': self.get_tokens_with_node_type,
            'javascript': JavascriptProcessor.get_tokens,
            'python': PythonProcessor.get_tokens,
            'php': PhpProcessor.get_tokens,
            'ruby': self.get_tokens_with_node_type,
            'go': self.get_tokens_with_node_type,
        }
        self.final_processor = token_extractors[self.language]

    def transform_code(self, code: Union[str, bytes]) -> Tuple[str, object]:
        """Try random transformations until one succeeds, then re-tokenize.

        :return: (whitespace-normalized token string,
                  {'types': ..., 'success': bool}).
        """
        success = False
        candidates = copy.deepcopy(self.transformations)
        while not success and candidates:
            chosen = np.random.choice(candidates)
            candidates.remove(chosen)
            # Transformations here return (root, code, success); root unused.
            modified_root, modified_code, success = chosen(code, self)
            if success:
                code = modified_code
        root = self.parse_code(code=code)
        processed = self.final_processor(code=code.encode(), root=root)
        if isinstance(processed, tuple):
            tokens, types = processed
        else:
            tokens, types = processed, None
        flattened = re.sub('[ \t\n]+', ' ', ' '.join(tokens))
        return flattened, {'types': types, 'success': success}
|
class NoTransformation(TransformationBase):
    """Identity 'transformation': tokenizes and whitespace-normalizes the
    code without changing it; 'success' is always False so callers never
    treat the result as a real transformation."""

    def __init__(self, parser_path: str, language: str) -> object:
        """
        :param parser_path: path to the compiled tree-sitter language bundle.
        :param language: language key used to pick the token processor.
        :raises ValueError: from the base class when the bundle is missing.
        """
        # The base class already validates parser_path and builds
        # self.lang_object / self.parser; the previous version duplicated
        # that exact setup here verbatim, so the duplicate was removed.
        super().__init__(parser_path, language)
        processor_map = {'java': self.get_tokens_with_node_type, 'c': self.get_tokens_with_node_type, 'cpp': self.get_tokens_with_node_type, 'c_sharp': self.get_tokens_with_node_type, 'javascript': JavascriptProcessor.get_tokens, 'python': PythonProcessor.get_tokens, 'php': PhpProcessor.get_tokens, 'ruby': self.get_tokens_with_node_type, 'go': self.get_tokens_with_node_type}
        self.processor = processor_map[language]

    def transform_code(self, code: Union[(str, bytes)]) -> Tuple[(str, object)]:
        """Tokenize *code* and return it space-joined, semantically unchanged.

        :return: (whitespace-normalized token string,
                  {'types': ..., 'success': False}).
        """
        root_node = self.parse_code(code=code)
        return_values = self.processor(code=code.encode(), root=root_node)
        if isinstance(return_values, tuple):
            (tokens, types) = return_values
        else:
            (tokens, types) = (return_values, None)
        return (re.sub('[ \t\n]+', ' ', ' '.join(tokens)), {'types': types, 'success': False})
|
class OperandSwap(TransformationBase):
    """Swapping Operand "a>b" becomes "b<a"."""

    def __init__(self, parser_path, language):
        super().__init__(parser_path=parser_path, language=language)
        self.language = language
        # Language-specific transformation callables, tried at random.
        self.transformations = processor_function[language]
        token_extractors = {
            'java': self.get_tokens_with_node_type,
            'c': self.get_tokens_with_node_type,
            'cpp': self.get_tokens_with_node_type,
            'c_sharp': self.get_tokens_with_node_type,
            'javascript': JavascriptProcessor.get_tokens,
            'python': PythonProcessor.get_tokens,
            'php': PhpProcessor.get_tokens,
            'ruby': self.get_tokens_with_node_type,
            'go': self.get_tokens_with_node_type,
        }
        self.final_processor = token_extractors[self.language]

    def transform_code(self, code: Union[str, bytes]) -> Tuple[str, object]:
        """Try random transformations until one succeeds, then re-tokenize.

        :return: (whitespace-normalized token string,
                  {'types': ..., 'success': bool}).
        """
        success = False
        candidates = copy.deepcopy(self.transformations)
        while not success and candidates:
            chosen = np.random.choice(candidates)
            candidates.remove(chosen)
            modified_code, success = chosen(code, self)
            if success:
                code = modified_code
        root = self.parse_code(code=code)
        processed = self.final_processor(code=code.encode(), root=root)
        if isinstance(processed, tuple):
            tokens, types = processed
        else:
            tokens, types = processed, None
        flattened = re.sub('[ \t\n]+', ' ', ' '.join(tokens))
        return flattened, {'types': types, 'success': success}
|
def masking(tokens, p):
    """Replace each token independently by '<mask>' with probability *p*.

    One uniform draw per token, in order; returns the space-joined result.
    """
    noised = ['<mask>' if np.random.uniform() < p else token for token in tokens]
    return ' '.join(noised)
|
def deletion(tokens, p):
    """Drop each token independently with probability *p*.

    One uniform draw per token, in order; returns the space-joined survivors.
    """
    survivors = [token for token in tokens if np.random.uniform() >= p]
    return ' '.join(survivors)
|
def token_infilling(tokens, p):
    """Remove one contiguous span of tokens sized by *p* and rejoin the rest.

    The span length is drawn uniformly from [1, max_infilling_len] where
    max_infilling_len = round(int(p * len(tokens)) / 2), and the start
    offset uniformly among valid positions; exactly that many tokens are
    removed.

    Fixes over the previous version:
    * it crashed with ValueError when max_infilling_len was 0 or 1,
      because np.random.randint(1, high) requires high > 1; sequences too
      short for the ratio are now returned unchanged;
    * the start index came from np.random.uniform (a float) and the
      removal condition was inclusive on both float bounds, so the number
      of removed tokens varied by one; integer indices now remove exactly
      ``infilling_len`` tokens.

    :param tokens: list of tokens.
    :param p: approximate fraction of the sequence used to size the span.
    :return: space-joined string of the surviving tokens.
    """
    max_infilling_len = round(int(p * len(tokens)) / 2.0)
    if max_infilling_len < 1:
        # Nothing can be removed at this ratio/length.
        return ' '.join(tokens)
    infilling_len = np.random.randint(1, max_infilling_len + 1)
    start_index = np.random.randint(0, len(tokens) - infilling_len + 1)
    end_index = start_index + infilling_len
    kept = [t for (i, t) in enumerate(tokens) if i < start_index or i >= end_index]
    return ' '.join(kept)
|
class SyntacticNoisingTransformation(TransformationBase):
    """Adds token-level noise: masking, deletion, or span infilling.

    NOTE(review): deliberately does NOT call TransformationBase.__init__ —
    for language 'nl' there is no parser bundle, and for code languages the
    wrapped NoTransformation builds its own parser. Confirm before changing.
    """
    def __init__(self, parser_path: str, language: str, noise_ratio=0.15):
        self.language = language
        if (self.language == 'nl'):
            # Natural language: plain NLTK word tokenization.
            self.tokenizer = nltk.word_tokenize
        else:
            # Code: reuse the identity transformation as a tokenizer.
            self.tokenizer = NoTransformation(parser_path, language)
        # Fraction of tokens affected by each noising operation.
        self.noise_ratio = noise_ratio
    def transform_code(self, code: Union[(str, bytes)]) -> Tuple[(str, object)]:
        """Tokenize *code* and apply one noising op chosen at random.

        :return: (noised token string, {'success': True}).
        """
        if (self.language == 'nl'):
            tokens = self.tokenizer(code)
        else:
            (tokenized_code, _) = self.tokenizer.transform_code(code)
            tokens = tokenized_code.split()
        # Roughly 1/3 probability each: masking, deletion, span infilling.
        p = np.random.uniform()
        if (p < 0.33):
            transformed_code = masking(tokens, self.noise_ratio)
        elif (p < 0.66):
            transformed_code = deletion(tokens, self.noise_ratio)
        else:
            transformed_code = token_infilling(tokens, self.noise_ratio)
        return (transformed_code, {'success': True})
|
def get_ancestor_type_chains(node: tree_sitter.Node) -> List[str]:
    """Return the node types from *node* up to the tree root, leaf first."""
    chain = [str(node.type)]
    current = node.parent
    while current is not None:
        chain.append(str(current.type))
        current = current.parent
    return chain
|
class TransformationBase():
    """Common base for all parser-backed code transformations.

    Owns a tree-sitter parser for one language and provides shared
    tokenization helpers; subclasses implement transform_code().
    """
    def __init__(self, parser_path: str, language: str):
        # Fail fast when the compiled tree-sitter bundle is missing.
        if (not os.path.exists(parser_path)):
            raise ValueError(f'Language parser does not exist at {parser_path}. Please run `setup.sh` to properly set the environment!')
        self.lang_object = Language(parser_path, language)
        self.parser = Parser()
        self.parser.set_language(self.lang_object)
        pass
    def parse_code(self, code: Union[(str, bytes)]) -> tree_sitter.Node:
        """Parse *code* and return the root node of the resulting tree.

        :param code: source code as str or bytes.
        :return: tree_sitter.Node, the root node of the parsed tree.
        :raises ValueError: if *code* is neither str nor bytes.
        """
        if isinstance(code, bytes):
            tree = self.parser.parse(code)
        elif isinstance(code, str):
            tree = self.parser.parse(code.encode())
        else:
            raise ValueError('Code must be character string or bytes string')
        return tree.root_node
    def get_tokens(self, code: bytes, root: tree_sitter.Node) -> List[str]:
        """Recursively collect source tokens below *root*.

        Comments are skipped. A string literal that is the only child of
        a non-'list' parent is also skipped — presumably to drop
        docstring-like expression statements; confirm for non-Python
        grammars. Other strings are emitted whole.

        :param code: the byte string corresponding to the code.
        :param root: the root node of the parsed tree.
        :return: list of tokens.
        """
        tokens = []
        if (root.type == 'comment'):
            return tokens
        if ('string' in str(root.type)):
            parent = root.parent
            if (('list' not in str(parent.type)) and (len(parent.children) == 1)):
                return tokens
            else:
                return [code[root.start_byte:root.end_byte].decode()]
        if (len(root.children) == 0):
            tokens.append(code[root.start_byte:root.end_byte].decode())
        else:
            for child in root.children:
                tokens += self.get_tokens(code, child)
        return tokens
    def get_token_string(self, code: str, root: tree_sitter.Node) -> str:
        """Return the tokens below *root* joined with single spaces.

        :param code: source code as a character string.
        :param root: the root node of the parsed tree.
        :return: the parsed code as a string of tokens.
        """
        tokens = self.get_tokens(code.encode(), root)
        return ' '.join(tokens)
    def get_tokens_with_node_type(self, code: bytes, root: tree_sitter.Node) -> Tuple[(List[str], List[List[str]])]:
        """Collect tokens plus, for each token, its ancestor-type chain.

        For every token, the chain lists AST node types from the token's
        node up to the tree root. Comments yield nothing; string nodes
        are emitted whole with the fixed chain [['string']].

        :param code: the byte string corresponding to the code.
        :param root: the root node of the parsed tree.
        :return: (tokens, types) with one type-chain list per token.
        """
        (tokens, types) = ([], [])
        if (root.type == 'comment'):
            return (tokens, types)
        if ('string' in str(root.type)):
            return ([code[root.start_byte:root.end_byte].decode()], [['string']])
        if (len(root.children) == 0):
            tokens.append(code[root.start_byte:root.end_byte].decode())
            types.append(get_ancestor_type_chains(root))
        else:
            for child in root.children:
                (_tokens, _types) = self.get_tokens_with_node_type(code, child)
                tokens += _tokens
                types += _types
        return (tokens, types)
    def transform_code(self, code: Union[(str, bytes)]) -> Tuple[(str, object)]:
        """Transform a piece of code; implemented by subclasses.

        :param code: the code to transform, str or bytes.
        :return: (transformed_code, metadata) where metadata may carry
            extra information (e.g. node types) or be None.
        """
        pass
|
class SemanticPreservingTransformation():
    """Applies one randomly chosen semantics-preserving transformation.

    Instantiates a pool of transformer objects (count per class given by
    *transform_functions*) and, per call, tries them in random order
    until one reports success.
    """

    def __init__(self, parser_path: str, language: str, transform_functions: Dict[(Callable, int)]=None):
        self.language = language
        if transform_functions is not None:
            self.transform_functions = transform_functions
        else:
            # Default: one instance of each known transformation.
            self.transform_functions = {
                BlockSwap: 1,
                ConfusionRemover: 1,
                DeadCodeInserter: 1,
                ForWhileTransformer: 1,
                OperandSwap: 1,
                SyntacticNoisingTransformation: 1,
            }
        self.transformations = []
        if self.language == 'nl':
            # Natural language only supports syntactic noising.
            self.transformations.append(
                SyntacticNoisingTransformation(parser_path=parser_path, language='nl')
            )
        else:
            for transformer_cls, count in self.transform_functions.items():
                for _ in range(count):
                    self.transformations.append(
                        transformer_cls(parser_path=parser_path, language=language)
                    )

    def transform_code(self, code: str):
        """Return (transformed_code, transformation_name), or (code, None)
        when every transformation fails."""
        transformed_code, transformation_name = None, None
        remaining = list(range(len(self.transformations)))
        np.random.shuffle(remaining)
        success = False
        while not success and remaining:
            picked = np.random.choice(remaining)
            remaining.remove(picked)
            transformer = self.transformations[picked]
            transformed_code, metadata = transformer.transform_code(code)
            success = metadata['success']
            if success:
                transformation_name = type(transformer).__name__
        if not success:
            return code, None
        return transformed_code, transformation_name
|
class VarRenamer(TransformationBase):
    """Renames ~20% of a program's variable names to VAR_0, VAR_1, ..."""
    def __init__(self, parser_path: str, language: str):
        super(VarRenamer, self).__init__(parser_path=parser_path, language=language)
        self.language = language
        self.processor = processor_function[self.language]
        self.tokenizer_function = tokenizer_function[self.language]
        # Parent node types whose identifier children are NOT variables
        # (function/class/method names, call targets, ...).
        self.not_var_ptype = ['function_declarator', 'class_declaration', 'method_declaration', 'function_definition', 'function_declaration', 'call', 'local_function_statement']
    def extract_var_names(self, root, code_string):
        """BFS for identifier tokens whose parent is not in not_var_ptype.

        :return: list of variable-name tokens (may contain duplicates).
        """
        var_names = []
        queue = [root]
        while (len(queue) > 0):
            current_node = queue[0]
            queue = queue[1:]
            if (((current_node.type == 'identifier') or (current_node.type == 'variable_name')) and (str(current_node.parent.type) not in self.not_var_ptype)):
                var_names.append(self.tokenizer_function(code_string, current_node)[0])
            for child in current_node.children:
                queue.append(child)
        return var_names
    def var_renaming(self, code_string):
        """Rename a random ~20% of distinct variable names.

        :return: (root, code, success) — parsed root and token string of
            the modified code on success; the original otherwise (e.g.
            when no variable was selected for renaming).
        """
        root = self.parse_code(code_string)
        original_code = self.tokenizer_function(code_string, root)
        var_names = self.extract_var_names(root, code_string)
        var_names = list(set(var_names))
        # ceil(0.2 * n) distinct names, chosen uniformly at random.
        num_to_rename = math.ceil((0.2 * len(var_names)))
        random.shuffle(var_names)
        var_names = var_names[:num_to_rename]
        var_map = {}
        for (idx, v) in enumerate(var_names):
            var_map[v] = f'VAR_{idx}'
        # Token-level substitution: every occurrence of a selected name
        # is replaced, wherever it appears in the token stream.
        modified_code = []
        for t in original_code:
            if (t in var_names):
                modified_code.append(var_map[t])
            else:
                modified_code.append(t)
        modified_code_string = ' '.join(modified_code)
        if (modified_code != original_code):
            modified_root = self.parse_code(modified_code_string)
            return (modified_root, modified_code_string, True)
        else:
            return (root, code_string, False)
    def transform_code(self, code: Union[(str, bytes)]) -> Tuple[(str, object)]:
        """Rename variables and return the whitespace-normalized result."""
        (root, code, success) = self.var_renaming(code)
        code = re.sub('[ \n\t]+', ' ', code)
        return (code, {'success': success})
|
def sentence_bleu(references, hypothesis, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=None, auto_reweigh=False):
    """Calculate the sentence-level BLEU score (Papineni et al., 2002).

    Thin wrapper: wraps the single hypothesis and its references into
    one-element lists and delegates to corpus_bleu. With no n-gram
    overlap at all the score is 0 unless a smoothing function is given.

    :param references: reference sentences, list(list(str)).
    :param hypothesis: a hypothesis sentence, list(str).
    :param weights: weights for unigrams, bigrams, trigrams and so on
        (default: uniform BLEU-4).
    :param smoothing_function: optional SmoothingFunction method.
    :param auto_reweigh: option to re-normalize the weights uniformly
        for hypotheses shorter than the highest n-gram order.
    :return: the sentence-level BLEU score (float).
    """
    return corpus_bleu([references], [hypothesis], weights, smoothing_function, auto_reweigh)
|
def corpus_bleu(list_of_references, hypotheses, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=None, auto_reweigh=False):
    """Calculate a single corpus-level (micro-averaged) BLEU score.

    Numerators and denominators of the modified n-gram precisions are
    summed over all hypothesis/reference pairs *before* division, per
    the original BLEU definition — this differs from averaging
    per-sentence sentence_bleu values.

    :param list_of_references: a corpus of lists of reference sentences,
        w.r.t. hypotheses, list(list(list(str))).
    :param hypotheses: a list of hypothesis sentences, list(list(str)).
    :param weights: weights for unigrams, bigrams, trigrams and so on.
    :param smoothing_function: optional SmoothingFunction method
        (defaults to SmoothingFunction().method1).
    :param auto_reweigh: option to re-normalize the weights uniformly
        when the corpus is shorter than the default 4-gram order.
    :return: the corpus-level BLEU score (float; 0 if no unigram overlap).
    """
    p_numerators = Counter()
    p_denominators = Counter()
    (hyp_lengths, ref_lengths) = (0, 0)
    assert (len(list_of_references) == len(hypotheses)), 'The number of hypotheses and their reference(s) should be the same '
    # Accumulate micro-averaged precision statistics per n-gram order.
    for (references, hypothesis) in zip(list_of_references, hypotheses):
        for (i, _) in enumerate(weights, start=1):
            p_i = modified_precision(references, hypothesis, i)
            p_numerators[i] += p_i.numerator
            p_denominators[i] += p_i.denominator
        hyp_len = len(hypothesis)
        hyp_lengths += hyp_len
        ref_lengths += closest_ref_length(references, hyp_len)
    bp = brevity_penalty(ref_lengths, hyp_lengths)
    if auto_reweigh:
        # Only re-weight when the caller kept the default BLEU-4 weights.
        if ((hyp_lengths < 4) and (weights == (0.25, 0.25, 0.25, 0.25))):
            weights = (((1 / hyp_lengths),) * hyp_lengths)
    p_n = [Fraction(p_numerators[i], p_denominators[i], _normalize=False) for (i, _) in enumerate(weights, start=1)]
    if (p_numerators[1] == 0):
        # No unigram overlap at all: BLEU is 0 by definition.
        return 0
    if (not smoothing_function):
        smoothing_function = SmoothingFunction().method1
    # NOTE(review): `references`/`hypothesis` here are the *last* pair from
    # the loop above (matches the NLTK implementation); they are only used
    # by smoothing methods that inspect the sentences.
    p_n = smoothing_function(p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths)
    s = ((w_i * math.log(p_i)) for (w_i, p_i) in zip(weights, p_n))
    s = (bp * math.exp(math.fsum(s)))
    return s
|
def modified_precision(references, hypothesis, n):
    """Compute BLEU's modified n-gram precision as an unreduced Fraction.

    Each hypothesis n-gram count is clipped by the maximum count of that
    same n-gram across all references, so repeating a high-frequency word
    (the famous "the the the ..." example) cannot inflate precision.  The
    Fraction keeps its raw numerator/denominator (``_normalize=False``)
    so corpus-level BLEU can sum numerators and denominators separately.

    :param references: reference translations, each a list of tokens
    :type references: list(list(str))
    :param hypothesis: candidate translation as a list of tokens
    :type hypothesis: list(str)
    :param n: the n-gram order
    :type n: int
    :return: clipped-count precision for order ``n``
    :rtype: Fraction
    """
    # N-gram counts of the hypothesis; empty when it is shorter than n.
    hyp_counts = Counter(ngrams(hypothesis, n)) if len(hypothesis) >= n else Counter()

    # For every hypothesis n-gram, the largest count observed in any reference.
    max_ref_counts = {}
    for reference in references:
        if len(reference) >= n:
            ref_counts = Counter(ngrams(reference, n))
        else:
            ref_counts = Counter()
        for gram in hyp_counts:
            max_ref_counts[gram] = max(max_ref_counts.get(gram, 0), ref_counts[gram])

    # Clip each hypothesis count by its reference maximum.
    clipped = {gram: min(cnt, max_ref_counts[gram]) for gram, cnt in hyp_counts.items()}

    numerator = sum(clipped.values())
    # max(1, ...) guards against division by zero when the hypothesis
    # contains no n-grams of this order at all.
    denominator = max(1, sum(hyp_counts.values()))
    return Fraction(numerator, denominator, _normalize=False)
|
def closest_ref_length(references, hyp_len):
    """Return the reference length closest to the hypothesis length.

    This is the *r* term of the brevity penalty in Papineni et al. (2002).
    When two references are equidistant, the shorter one wins.

    :param references: reference translations, each a list of tokens
    :type references: list(list(str))
    :param hyp_len: length of the hypothesis, in tokens
    :type hyp_len: int
    :return: the closest reference length
    :rtype: int
    """
    # Minimize absolute distance to hyp_len; the (distance, length) key
    # breaks ties toward the shorter reference.
    return min(
        (len(reference) for reference in references),
        key=lambda ref_len: (abs(ref_len - hyp_len), ref_len),
    )
|
def brevity_penalty(closest_ref_len, hyp_len):
    """Compute BLEU's brevity penalty.

    Hypotheses at least as long as the closest reference receive no
    penalty; shorter ones are penalized exponentially in the length
    ratio, per Papineni et al. (2002).

    :param closest_ref_len: closest reference length for one sentence, or
        the sum of closest reference lengths over a corpus
    :type closest_ref_len: int
    :param hyp_len: hypothesis length, or the corpus-level sum of lengths
    :type hyp_len: int
    :return: the brevity penalty factor
    :rtype: float
    """
    if hyp_len > closest_ref_len:
        # Hypothesis is longer than the reference: no penalty.
        return 1
    if hyp_len == 0:
        # Empty hypothesis: maximal penalty (and avoids division by zero).
        return 0
    length_ratio = closest_ref_len / hyp_len
    return math.exp(1 - length_ratio)
|
class SmoothingFunction():
    """
    Smoothing techniques for segment-level BLEU scores, from:

        Boxing Chen and Colin Cherry (2014) A Systematic Comparison of
        Smoothing Techniques for Sentence-Level BLEU. In WMT14.
        http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf

    Each ``methodN`` takes the list of per-order precisions ``p_n``
    (Fraction-like objects with ``.numerator``/``.denominator``) and
    returns a smoothed list.  NOTE(review): methods 3-7 mutate ``p_n``
    in place as well as returning it.
    """

    def __init__(self, epsilon=0.1, alpha=5, k=5):
        """
        Initialize smoothing parameters; defaults follow the experiments
        in Chen and Cherry (2014).

        :param epsilon: additive count used by method1 for zero precisions
        :type epsilon: float
        :param alpha: interpolation weight used by method6
        :type alpha: int
        :param k: scaling constant used by method4
        :type k: int
        """
        self.epsilon = epsilon
        self.alpha = alpha
        self.k = k

    def method0(self, p_n, *args, **kwargs):
        """
        No smoothing: zero precisions are replaced by the smallest
        positive float (so downstream ``log`` stays finite) and a
        warning is emitted.
        """
        p_n_new = []
        for (i, p_i) in enumerate(p_n):
            if (p_i.numerator != 0):
                p_n_new.append(p_i)
            else:
                _msg = str('\nThe hypothesis contains 0 counts of {}-gram overlaps.\nTherefore the BLEU score evaluates to 0, independently of\nhow many N-gram overlaps of lower order it contains.\nConsider using lower n-gram order or use SmoothingFunction()').format((i + 1))
                warnings.warn(_msg)
                # sys.float_info.min stands in for "effectively zero"
                # without making math.log() blow up later.
                p_n_new.append(sys.float_info.min)
        return p_n_new

    def method1(self, p_n, *args, **kwargs):
        """
        Smoothing method 1: add *epsilon* to the numerator of any
        precision whose count is 0.
        """
        return [(((p_i.numerator + self.epsilon) / p_i.denominator) if (p_i.numerator == 0) else p_i) for p_i in p_n]

    def method2(self, p_n, *args, **kwargs):
        """
        Smoothing method 2: add 1 to both numerator and denominator, from
        Chin-Yew Lin and Franz Josef Och (2004) "Automatic evaluation of
        machine translation quality using longest common subsequence and
        skip-bigram statistics", ACL04.
        """
        return [Fraction((p_i.numerator + 1), (p_i.denominator + 1), _normalize=False) for p_i in p_n]

    def method3(self, p_n, *args, **kwargs):
        """
        Smoothing method 3: NIST geometric sequence smoothing.

        Each zero precision is replaced by 1 / (2^k * denominator), where
        k counts how many zero-precision orders have been seen so far
        (k=1 for the first order with no n-gram match, k=2 for the next,
        and so on).  Mutates ``p_n`` in place.
        """
        incvnt = 1
        for (i, p_i) in enumerate(p_n):
            if (p_i.numerator == 0):
                p_n[i] = (1 / ((2 ** incvnt) * p_i.denominator))
                incvnt += 1
        return p_n

    def method4(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
        """
        Smoothing method 4: like method 3, but shorter translations get
        proportionally smaller smoothed counts — the smoothed numerator
        scales with k / ln(hyp_len) instead of 1 / 2^k.  Mutates ``p_n``
        in place.
        """
        hyp_len = (hyp_len if hyp_len else len(hypothesis))
        for (i, p_i) in enumerate(p_n):
            # hyp_len != 0 also guards math.log(0) below.
            if ((p_i.numerator == 0) and (hyp_len != 0)):
                incvnt = (i + ((1 * self.k) / math.log(hyp_len)))
                p_n[i] = (incvnt / p_i.denominator)
        return p_n

    def method5(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
        """
        Smoothing method 5: matched counts for neighbouring orders should
        be similar, so each p_n is replaced by the average of the
        (smoothed) n-1, n and n+1 gram precisions.  The n+1 entry comes
        from a 5-gram modified precision.  Mutates ``p_n`` in place.
        """
        hyp_len = (hyp_len if hyp_len else len(hypothesis))
        m = {}
        # Extend with the 5-gram precision so p_n[i+1] exists for the
        # last order.
        p_n_plus1 = (p_n + [modified_precision(references, hypothesis, 5)])
        # Seed value for the "n-1" neighbour of the first order.
        m[(- 1)] = (p_n[0] + 1)
        for (i, p_i) in enumerate(p_n):
            p_n[i] = (((m[(i - 1)] + p_i) + p_n_plus1[(i + 1)]) / 3)
            m[i] = p_n[i]
        return p_n

    def method6(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
        """
        Smoothing method 6: interpolate the maximum-likelihood precision
        with a prior pi0 = p_{n-1}^2 / p_{n-2} (the ratio between
        successive orders is assumed constant); from Gao and He (2013)
        "Training MRF-Based Phrase Translation Models using Gradient
        Ascent", NAACL.  Orders 1 and 2 are left untouched.  Mutates
        ``p_n`` in place.
        """
        hyp_len = (hyp_len if hyp_len else len(hypothesis))
        assert p_n[2], 'This smoothing method requires non-zero precision for bigrams.'
        for (i, p_i) in enumerate(p_n):
            if (i in [0, 1]):
                continue
            else:
                pi0 = (0 if (p_n[(i - 2)] == 0) else ((p_n[(i - 1)] ** 2) / p_n[(i - 2)]))
                m = p_i.numerator
                # l = number of (i+1)-grams in the hypothesis.
                l = sum((1 for _ in ngrams(hypothesis, (i + 1))))
                p_n[i] = ((m + (self.alpha * pi0)) / (l + self.alpha))
        return p_n

    def method7(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
        """
        Smoothing method 7: apply method 4, then method 5.
        """
        hyp_len = (hyp_len if hyp_len else len(hypothesis))
        p_n = self.method4(p_n, references, hypothesis, hyp_len)
        p_n = self.method5(p_n, references, hypothesis, hyp_len)
        return p_n
|
def evaluate_per_example(reference, hypothesis, lang, params='0.25,0.25,0.25,0.25'):
    """Compute CodeBLEU and its components for one reference/hypothesis pair.

    :param reference: reference code as a single string
    :param hypothesis: candidate code as a single string
    :param lang: language key used to load keyword weights and parsers
    :param params: comma-separated weights ``alpha,beta,gamma,theta`` for
        the ngram, weighted-ngram, syntax and dataflow components
    :return: dict with ``em``, ``bleu``, ``wbleu``, ``syntax``,
        ``dataflow`` and the combined ``codebleu`` score
    """
    alpha, beta, gamma, theta = (float(x) for x in params.split(','))
    hypothesis = [hypothesis]
    pre_references = [[reference]]
    for ref_lines in pre_references:
        assert len(hypothesis) == len(ref_lines)
    # Regroup per hypothesis: references[i] holds every reference variant
    # for hypothesis i.
    references = []
    for i in range(len(hypothesis)):
        references.append([pre_references[j][i] for j in range(len(pre_references))])
    # One reference group per hypothesis (original asserted against
    # len(pre_references) * len(hypothesis), which only holds for a
    # single reference set).
    assert len(references) == len(hypothesis)

    tokenized_hyps = [x.split() for x in hypothesis]
    tokenized_refs = [[x.split() for x in ref_group] for ref_group in references]
    ngram_match_score = bleu.corpus_bleu(tokenized_refs, tokenized_hyps)

    # Load the language's keyword list; 'with' closes the handle (the
    # original leaked the file object).  A set makes the per-token
    # membership test in make_weights O(1).
    root_dir = os.path.dirname(__file__)
    keywords_path = os.path.join(root_dir, 'keywords', lang + '.txt')
    with open(keywords_path, 'r', encoding='utf-8') as f:
        keywords = {x.strip() for x in f.readlines()}

    def make_weights(reference_tokens, key_word_list):
        # Keywords get full weight; ordinary tokens are down-weighted.
        return {token: (1 if token in key_word_list else 0.2) for token in reference_tokens}

    tokenized_refs_with_weights = [
        [[reference_tokens, make_weights(reference_tokens, keywords)]
         for reference_tokens in ref_group]
        for ref_group in tokenized_refs
    ]
    weighted_ngram_match_score = weighted_ngram_match.corpus_bleu(tokenized_refs_with_weights, tokenized_hyps)
    syntax_match_score = syntax_match.corpus_syntax_match(references, hypothesis, lang)
    dataflow_match_score = dataflow_match.corpus_dataflow_match(references, hypothesis, lang)
    print('ngram match: {0}, weighted ngram match: {1}, syntax_match: {2}, dataflow_match: {3}'.format(
        ngram_match_score, weighted_ngram_match_score, syntax_match_score, dataflow_match_score))
    codebleu = (alpha * ngram_match_score
                + beta * weighted_ngram_match_score
                + gamma * syntax_match_score
                + theta * dataflow_match_score)
    return {
        'em': 1.0 if reference.strip() == hypothesis[0].strip() else 0.0,
        'bleu': ngram_match_score,
        'wbleu': weighted_ngram_match_score,
        'syntax': syntax_match_score,
        'dataflow': dataflow_match_score,
        'codebleu': codebleu,
    }
|
def get_codebleu(refs, hyp, lang, params='0.25,0.25,0.25,0.25'):
    """Compute the corpus-level CodeBLEU score from files on disk.

    :param refs: path, or list of paths, to reference files (one sample
        per line; multiple files give multiple references per sample)
    :param hyp: path to the hypothesis file (one sample per line)
    :param lang: language key used to load keyword weights and parsers
    :param params: comma-separated weights ``alpha,beta,gamma,theta`` for
        the ngram, weighted-ngram, syntax and dataflow components
    :return: the combined CodeBLEU score
    :rtype: float
    """
    if not isinstance(refs, list):
        refs = [refs]
    alpha, beta, gamma, theta = (float(x) for x in params.split(','))

    def _read_lines(path):
        # Stripped lines of a UTF-8 file; 'with' closes the handle
        # deterministically (the original leaked every file it opened).
        with open(path, 'r', encoding='utf-8') as f:
            return [x.strip() for x in f.readlines()]

    pre_references = [_read_lines(file) for file in refs]
    hypothesis = _read_lines(hyp)
    for ref_lines in pre_references:
        assert len(hypothesis) == len(ref_lines)
    # Regroup per hypothesis: references[i] holds every reference variant
    # for hypothesis i.
    references = []
    for i in range(len(hypothesis)):
        references.append([pre_references[j][i] for j in range(len(pre_references))])
    # One reference group per hypothesis.  The original asserted
    # len(references) == len(pre_references) * len(hypothesis), which
    # spuriously fails whenever more than one reference file is given.
    assert len(references) == len(hypothesis)

    tokenized_hyps = [x.split() for x in hypothesis]
    tokenized_refs = [[x.split() for x in ref_group] for ref_group in references]
    ngram_match_score = bleu.corpus_bleu(tokenized_refs, tokenized_hyps)

    # Keyword set for weighted n-gram matching; set membership is O(1).
    root_dir = os.path.dirname(__file__)
    keywords = set(_read_lines(os.path.join(root_dir, 'keywords', lang + '.txt')))

    def make_weights(reference_tokens, key_word_list):
        # Keywords get full weight; ordinary tokens are down-weighted.
        return {token: (1 if token in key_word_list else 0.2) for token in reference_tokens}

    tokenized_refs_with_weights = [
        [[reference_tokens, make_weights(reference_tokens, keywords)]
         for reference_tokens in ref_group]
        for ref_group in tokenized_refs
    ]
    weighted_ngram_match_score = weighted_ngram_match.corpus_bleu(tokenized_refs_with_weights, tokenized_hyps)
    syntax_match_score = syntax_match.corpus_syntax_match(references, hypothesis, lang)
    dataflow_match_score = dataflow_match.corpus_dataflow_match(references, hypothesis, lang)
    print('ngram match: {0}, weighted ngram match: {1}, syntax_match: {2}, dataflow_match: {3}'.format(
        ngram_match_score, weighted_ngram_match_score, syntax_match_score, dataflow_match_score))
    codebleu = (alpha * ngram_match_score
                + beta * weighted_ngram_match_score
                + gamma * syntax_match_score
                + theta * dataflow_match_score)
    return codebleu
|
def my_dataflow_match(references, candidates, lang):
    """Per-candidate dataflow match score.

    For each candidate, compute the dataflow-edge overlap against each of
    its references and keep the best score; return the mean over all
    candidates.

    :param references: per-candidate lists of reference code strings
    :param candidates: candidate code strings
    :param lang: tree-sitter language key; also selects the DFG extractor
    :return: mean best-match score, or 0.0 when ``candidates`` is empty
    """
    LANGUAGE = Language(root_dir + '/parser/languages.so', lang)
    parser = Parser()
    parser.set_language(LANGUAGE)
    parser = [parser, dfg_function[lang]]
    match_count = 0
    total_count = 0
    candidate_scores = []
    for i in range(len(candidates)):
        scores = []
        references_sample = references[i]
        candidate = candidates[i]
        for reference in references_sample:
            # Strip comments/docstrings for the *requested* language.
            # The original hard-coded 'java' here regardless of `lang`,
            # so non-Java inputs were cleaned with the wrong grammar.
            # Best effort: fall back to the raw source on failure (but
            # no longer with a bare `except:` that would also swallow
            # KeyboardInterrupt/SystemExit).
            try:
                candidate = remove_comments_and_docstrings(candidate, lang)
            except Exception:
                pass
            try:
                reference = remove_comments_and_docstrings(reference, lang)
            except Exception:
                pass
            cand_dfg = get_data_flow(candidate, parser)
            ref_dfg = get_data_flow(reference, parser)
            normalized_cand_dfg = normalize_dataflow(cand_dfg)
            normalized_ref_dfg = normalize_dataflow(ref_dfg)
            if len(normalized_ref_dfg) > 0:
                total_count += len(normalized_ref_dfg)
                current_match_count = 0
                for dataflow in normalized_ref_dfg:
                    if dataflow in normalized_cand_dfg:
                        match_count += 1
                        # Consume the matched edge so duplicate reference
                        # edges are not double-counted.
                        normalized_cand_dfg.remove(dataflow)
                        current_match_count += 1
                scores.append(float(current_match_count) / len(normalized_ref_dfg))
            else:
                # Reference has no dataflow: this pair contributes 0.
                scores.append(0.0)
        candidate_scores.append(max(scores) if len(scores) > 0 else 0.0)
    return np.mean(candidate_scores) if len(candidates) > 0 else 0.0
|
def calc_dataflow_match(references, candidate, lang):
    """Dataflow match for a single candidate.

    Thin wrapper around ``corpus_dataflow_match`` that wraps the inputs
    as one-element corpus lists.

    :param references: list of reference code strings for this candidate
    :param candidate: candidate code string
    :param lang: language key passed through to the corpus scorer
    :return: the corpus dataflow match score for this single pair
    """
    return corpus_dataflow_match([references], [candidate], lang)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.