repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
LiteFlowNet2 | LiteFlowNet2-master/models/testing/runtime.py | #!/usr/bin/env python
import os, sys
import subprocess

# Relative path (from this script's directory) to the compiled caffe tool binary.
caffe_bin = 'bin/caffe.bin'
# =========================================================
# Run everything relative to this script's directory so the relative
# model/weights paths below resolve correctly.
my_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(my_dir)
if not os.path.isfile(caffe_bin):
    print('Caffe tool binaries not found. Did you compile caffe with tools (make all tools)?')
    sys.exit(1)
print('args:', sys.argv[1:])
# Benchmark the network for 100 iterations on GPU 0; extra CLI args are
# appended so callers can override any of these flags.
args = [caffe_bin, 'time', '-model', './model/testing/deploy_runtime_1024_448.prototxt', '-weights', './models/trained/LiteFlowNet2-ft-sintel.caffemodel', '-gpu', '0', '-iterations', '100'] + sys.argv[1:]
cmd = ' '.join(args)  # idiomatic join (was str.join(' ', args))
print('Executing %s' % cmd)
# Propagate caffe's exit status to the caller instead of always exiting 0.
sys.exit(subprocess.call(args))
| 674 | 29.681818 | 204 | py |
LogicLLaMA | LogicLLaMA-main/run_eval.py | from tqdm import tqdm
import torch
from functools import partial
from transformers import GenerationConfig, LlamaForCausalLM, LlamaTokenizer
from peft import PeftModel, prepare_model_for_int8_training
from utils import TranslationDataPreparer, ContinuousCorrectionDataPreparer, make_parent_dirs
from generate import llama_generate
from metrics import UniversalMetrics
import fire
import json
from utils import all_exists
def eval_llama_model(
        base_model='storage/llama-6B',
        peft_path='storage/translation_llama_sft',
        prompt_template_path='data/prompt_templates',
        load_in_8bit: bool = True,
        run_name='',
        data_path=None,
        save_path=None,
        data_keys=None,
        translation_task=True,
        continuous_mode=False,
        prev_correct_key=None,
        max_input_len=768,
        max_output_len=128,
        max_n_continuous=10,
        save_log_every_n_iters=10,
        do_eval=True
):
    """Evaluate a (optionally PEFT-adapted) LLaMA model on a json dataset.

    For every sample, generates a FOL prediction, scores it against the
    ground-truth 'FOL' field (BLEU and logical-equivalence) when available,
    writes the scores back into the sample under keys prefixed by
    ``run_name``, and periodically dumps the annotated dataset to
    ``save_path``.  Samples that already carry a response or are missing a
    required input field are skipped.

    Args:
        base_model: path/name of the base LLaMA checkpoint.
        peft_path: optional PEFT adapter to load on top of the base model.
        prompt_template_path: directory of prompt templates for the data preparer.
        load_in_8bit: load the base model in 8-bit mode.
        run_name: prefix for the result keys written into each sample.
        data_path: input json dataset (list of dicts).
        save_path: where the annotated dataset is written.
        data_keys: mapping of prompt-field names to dataset keys.
        translation_task: True for NL->FOL translation, False for correction.
        continuous_mode: correction-only; keep editing until the model says
            'No changes needed' or the input-length cap is hit.
        prev_correct_key: dataset key accumulating past corrections (continuous mode).
        max_input_len / max_output_len: token-length caps.
        max_n_continuous: hard cap on correction rounds per sample.
        save_log_every_n_iters: checkpoint the output json every N samples.
        do_eval: compute metrics (requires a ground-truth 'FOL' field).
    """
    if translation_task:
        assert not continuous_mode, 'continuous_mode is for correction task only'
    assert all_exists(data_path, save_path, data_keys)
    make_parent_dirs(save_path)
    tokenizer = LlamaTokenizer.from_pretrained(base_model)
    tokenizer.add_special_tokens({
        "eos_token": "</s>",
        "bos_token": "<s>",
        "unk_token": '<unk>',
        "pad_token": '<unk>',
    })
    tokenizer.padding_side = "left"  # Allow batched inference
    DataPreparer = TranslationDataPreparer if translation_task else ContinuousCorrectionDataPreparer
    data_preparer = DataPreparer(
        prompt_template_path,
        tokenizer,
        False,
        256  # just a filler number; cutoff is irrelevant in eval mode
    )
    prepare_input = partial(
        data_preparer.prepare_input,
        **data_keys,
        add_eos_token=False,
        eval_mode=True,
        return_tensors='pt'
    )
    generation_config = GenerationConfig(
        temperature=0.1,
        top_p=0.75,
        top_k=40,
        num_beams=1
    )
    model = LlamaForCausalLM.from_pretrained(
        base_model,
        load_in_8bit=load_in_8bit,
        torch_dtype=torch.float16,
        device_map='auto',
    )
    model = prepare_model_for_int8_training(model)
    if all_exists(peft_path):
        model = PeftModel.from_pretrained(
            model,
            peft_path,
            torch_dtype=torch.float16
        )
    model.to('cuda')
    simple_generate = partial(
        llama_generate,
        llama_model=model,
        data_preparer=data_preparer,
        max_new_tokens=max_output_len,
        generation_config=generation_config,
        prepare_input=prepare_input,
        return_tensors=False
    )
    metric = UniversalMetrics()
    with open(data_path, 'r') as f:
        data = json.load(f)
    for ind, data_point in enumerate(tqdm(data)):
        resp_key = run_name+'_FOL bleu'
        resp_exists = (resp_key in data_point) and all_exists(data_point[resp_key])
        # ground truth may legitimately be absent (e.g. unlabeled data)
        true_fol = data_point['FOL'] if 'FOL' in data_point else None
        all_input_field_exists = all(
            (e in data_point) and
            all_exists(data_point[e])
            for e in data_keys.values()
        )
        if not all_input_field_exists:
            tqdm.write(f'{ind} sample invalid, skipping this one')
            continue
        if resp_exists:
            continue
        # if continuous mode, then we keep edit until the model outputs "no changes needed" or we hit the input
        # length cap
        if continuous_mode:
            should_terminate = False
            full_resp_str, resp_parts = None, None
            cnt = 0
            while not should_terminate:
                tmp_full_resp_str, tmp_resp_parts = simple_generate(input_str=data_point)
                comments, pred_fol = tmp_resp_parts
                if (comments is None) or (pred_fol is None):
                    tqdm.write(f'failed to comments or pred_fol for {ind}')
                    tqdm.write(f'\n\n\n {tmp_full_resp_str} \n\n\n')
                    break
                # accumulate correction comments so the next round sees them
                if prev_correct_key not in data_point:
                    data_point[prev_correct_key] = comments
                else:
                    data_point[prev_correct_key] += comments
                inlen, _ = data_preparer.get_input_output_len(data_point, **data_keys)
                should_terminate = ('No changes needed' in comments) or (inlen > max_input_len)
                full_resp_str, resp_parts = tmp_full_resp_str, tmp_resp_parts
                cnt += 1
                if cnt >= max_n_continuous:
                    tqdm.write(f'hit the continuous cap for sample {ind}, might be a sign of a bug')
                    break
        else:
            full_resp_str, resp_parts = simple_generate(input_str=data_point)
        if not all_exists(full_resp_str, resp_parts):
            bleu, LE = 0., 0.
            tqdm.write(f'none response for {ind} assigning 0 score')
        elif resp_parts[-1] is None:
            bleu, LE = 0., 0.
            tqdm.write(f'None pred_fol for {ind} assigning 0 score')
        elif resp_parts[-1] == '':
            bleu, LE = 0., 0.
            tqdm.write(f'Empty pred_fol for {ind} assigning 0 score')
        else:
            if all_exists(true_fol) and do_eval:
                res = metric.evaluate(
                    None,
                    true_fol,
                    None,
                    resp_parts[-1]
                )
                bleu, LE = res.FOL_bleu, res.FOL_LE
            else:
                bleu, LE = 0., 0.
        data_point[run_name+'_FOL bleu'] = bleu
        data_point[run_name+'_FOL LE'] = LE
        data_point[run_name+'_pred'] = full_resp_str
        tqdm.write(
            # FIX: was data_point["FOL"], which raises KeyError for samples
            # without a ground-truth FOL (allowed above via true_fol = None)
            f'True FOL: {true_fol}\n'
            f'Pred FOL: {resp_parts[-1] if all_exists(resp_parts) else None}\n'
            f'BLEU: {bleu:.3f} LE: {LE:.3f}\n'
            '---\n'
        )
        if ind % save_log_every_n_iters == 0:
            with open(save_path, 'w') as f:
                json.dump(data, f)
    with open(save_path, 'w') as f:
        json.dump(data, f)
if __name__ == '__main__':
fire.Fire(eval_llama_model) | 6,146 | 32.227027 | 111 | py |
LogicLLaMA | LogicLLaMA-main/generate.py | from transformers import GenerationConfig, LlamaForCausalLM
import torch
from utils import DataPreparer, all_exists
from typing import Dict, Optional, Callable
from functools import partial
def llama_generate(
        llama_model: LlamaForCausalLM,
        data_preparer: DataPreparer,
        input_str: str,
        max_new_tokens: int,
        generation_config: GenerationConfig,
        prompt_keys: Optional[Dict[str, str]] = None,
        prepare_input: Optional[Callable] = None,
        return_tensors: bool = False,
        rlhf_mode: bool = False
):
    """Run one generation pass with a LLaMA model and parse the response.

    Exactly one of ``prompt_keys`` (used to compile a prepare-input function
    from ``data_preparer``) or a pre-compiled ``prepare_input`` callable must
    be given.  The model is put in eval mode for generation and back in train
    mode afterwards.

    Args:
        llama_model: the model to sample from (moved to CUDA by the caller).
        data_preparer: supplies the tokenizer and response parsing.
        input_str: raw sample passed to ``prepare_input``.
        max_new_tokens: generation length cap.
        generation_config: HF generation settings.
        prompt_keys: dataset-key mapping for ``data_preparer.prepare_input``.
        prepare_input: alternative pre-compiled input function.
        return_tensors: additionally return input ids and re-tokenized response.
        rlhf_mode: parse around the eos token manually (generation does not
            stop at eos in RLHF rollouts).

    Returns:
        ``(full_resp_str, resp_parts)``, plus ``(input_ids, tokenized_resp)``
        when ``return_tensors`` is True.
    """
    assert not all_exists(prompt_keys, prepare_input), \
        'either give me the prompt_keys or the pre-compiled prepare input func'
    if all_exists(prompt_keys):
        prepare_input = partial(data_preparer.prepare_input, **prompt_keys)
    elif all_exists(prepare_input):
        pass
    else:
        raise ValueError('either give me the prompt_keys or the pre-compiled prepare input func')
    inputs = prepare_input(input_str)
    input_ids = inputs['input_ids'].to('cuda')
    llama_model.eval()
    with torch.autocast(device_type='cuda', dtype=torch.float16):
        with torch.no_grad():
            generation_output = llama_model.generate(
                input_ids=input_ids,
                generation_config=generation_config,
                return_dict_in_generate=True,
                output_scores=True,
                max_new_tokens=max_new_tokens
            )
    llama_model.train()
    s = generation_output.sequences[0]
    # in rlhf mode, we generate nonstop and regardless of eos token, but to get the right parts for computing the
    # reward, we find the main_str by splitting the str with eos token and extract the rule parts
    if rlhf_mode:
        output = data_preparer.tokenizer.decode(s)
        parts = output.split(data_preparer.tokenizer.eos_token)
        main_str = parts[0]
        _, resp_parts = data_preparer.get_response(main_str)
        full_resp_str, _ = data_preparer.get_response(output)
    else:
        output = data_preparer.tokenizer.decode(s, skip_special_tokens=True)
        full_resp_str, resp_parts = data_preparer.get_response(output)
    if return_tensors:
        return full_resp_str, resp_parts, input_ids, data_preparer.tokenizer(full_resp_str, return_tensors='pt')
    else:
        return full_resp_str, resp_parts
LogicLLaMA | LogicLLaMA-main/sft.py | import json
import os
from typing import List, Optional
import torch
import transformers
from datasets import load_dataset
from utils import all_exists
from functools import partial
from peft import (
LoraConfig,
get_peft_model,
prepare_model_for_int8_training,
get_peft_model_state_dict
)
from transformers import LlamaForCausalLM, LlamaTokenizer
from utils import TranslationDataPreparer, ContinuousCorrectionDataPreparer
import fire
import wandb
import numpy as np
def prepare_dataset(data_path, val_data_path, prepare_input, val_size, data_keys):
    """Load the raw json data, produce train/val splits, filter out samples
    missing any required field, persist the processed splits, and return the
    tokenized HuggingFace datasets built via ``prepare_input``."""
    with open(data_path, 'r') as f:
        train_data = json.load(f)

    if all_exists(val_data_path):
        with open(val_data_path, 'r') as f:
            val_data = json.load(f)
    else:
        # no explicit validation file: carve one out of the training set
        np.random.shuffle(train_data)
        val_data, train_data = train_data[:val_size], train_data[val_size:]

    # add required entries and save the processed datasets
    processed_train_fp, processed_valid_fp = 'data/train_data.json', 'data/valid_data.json'
    for save_fp, split in ((processed_valid_fp, val_data), (processed_train_fp, train_data)):
        for sample in split:
            sample['Suggestion'] = 'N/A'
            sample['Correct FOL'] = sample['FOL']
            sample['valid'] = all(
                (key in sample) and all_exists(sample[key])
                for key in data_keys.values()
            )
        kept = [sample for sample in split if sample['valid']]
        print(f'{len(kept)} valid data saved in {save_fp}')
        with open(save_fp, 'w') as f:
            json.dump(kept, f)

    data = load_dataset("json", data_files={'train': processed_train_fp, 'test': processed_valid_fp})
    return (
        data['train'].shuffle().map(prepare_input),
        data['test'].shuffle().map(prepare_input),
    )
def train(
        # model/data params
        base_model: str = "",
        data_path: str = "",
        load_in_8bit: bool = True,
        val_data_path: Optional[str] = None,
        val_size: int = 3000,
        prompt_template_path: str = "",
        output_dir: str = "./logs",
        translation_task: bool = True,
        continuous_correction: bool = False,
        saved_full_model_path: Optional[str] = None,  # load the full saved peft model, only for ad hoc use
        # training hyperparams
        batch_size: int = 128,
        micro_batch_size: int = 4,
        num_epochs: int = 3,
        learning_rate: float = 3e-4,
        warmup_steps: int = 100,
        logging_steps: int = 10,
        eval_steps: int = 200,
        save_steps: int = 200,
        save_total_limit: int =3,
        cutoff_len: int = 256,
        # lora hyperparams
        lora_r: int = 8,
        lora_alpha: int = 16,
        lora_dropout: float = 0.05,
        lora_target_modules: List[str] = [
            "q_proj",
            "v_proj",
        ],  # NOTE(review): mutable default; safe here since it is never mutated
        device_map: str = "auto",
        # llm hyperparams
        train_on_inputs: bool = False,  # if False, masks out inputs in loss
        group_by_length: bool = False,  # faster, but produces an odd training loss curve
        # wandb params
        use_wandb: bool = True,
        wandb_project: str = "naive_translate_llama_sft",
        wandb_run_name: str = "default_run",
        resume_from_checkpoint: str = None,  # either training checkpoint or final adapter
):
    """Supervised fine-tuning of LLaMA with LoRA adapters on NL->FOL data.

    Loads the base model in (optionally) 8-bit, wraps it with a LoRA PEFT
    config, builds the prompt-formatted train/val datasets, and runs a
    HuggingFace Trainer.  The LoRA adapter weights are saved to
    ``output_dir`` at the end.  ``translation_task`` selects the prompt
    format (translation vs correction); ``continuous_correction`` adds the
    accumulated-correction field for the correction task.
    """
    assert isinstance(lora_target_modules, list)
    if use_wandb:
        wandb.init(
            project=wandb_project,
            name=wandb_run_name,
        )
    if not os.path.isdir(base_model):
        print('base_model does not seem to be a file path, will try to load it with from_pretrained anyway')
    assert os.path.isdir(prompt_template_path), 'cannot locate the prompt template'
    assert os.path.isfile(data_path), 'cannot locate data file'
    model = LlamaForCausalLM.from_pretrained(
        base_model,
        load_in_8bit=load_in_8bit,
        torch_dtype=torch.float16,
        device_map=device_map,
    )
    # cast norms/head for stable int8 training
    model = prepare_model_for_int8_training(model)
    config = LoraConfig(
        r=lora_r,
        lora_alpha=lora_alpha,
        target_modules=lora_target_modules,
        lora_dropout=lora_dropout,
        bias="none",
        task_type="CAUSAL_LM",
    )
    model = get_peft_model(model, config)
    model.to('cuda')
    if all_exists(saved_full_model_path):
        print(
            f'WARNING, loading the full model at {saved_full_model_path}\n'
            f'this is only for ad hoc use'
        )
        model.load_state_dict(torch.load(saved_full_model_path))
    model.print_trainable_parameters()  # Be more transparent about the % of trainable params.
    tokenizer = LlamaTokenizer.from_pretrained(base_model)
    tokenizer.add_special_tokens({
        "eos_token": "</s>",
        "bos_token": "<s>",
        "unk_token": '<unk>',
        "pad_token": '<unk>',
    })
    tokenizer.padding_side = "left"  # Allow batched inference
    # pick the data preparer + dataset key mapping matching the task format
    DataPreparer = TranslationDataPreparer if translation_task else ContinuousCorrectionDataPreparer
    data_keys = {
        'nl_key': 'NL',
        'fol_key': 'FOL'
    } if translation_task else {
        'nl_key': 'NL',
        'pred_fol_key': 'Pred FOL',
        'comment_key': 'Suggestion',
        'correct_fol_key': 'Correct FOL'
    }
    if continuous_correction:
        assert not translation_task, 'continuous_correction mode only works for correction task'
        data_keys['prev_correct_key'] = 'Prev Correction'
    data_preparer = DataPreparer(
        prompt_template_path,
        tokenizer,
        train_on_inputs,
        cutoff_len
    )
    prepare_input = partial(
        data_preparer.prepare_input,
        **data_keys
    )
    # load data
    train_data, val_data = prepare_dataset(data_path, val_data_path, prepare_input, val_size, data_keys)
    trainer = transformers.Trainer(
        model=model,
        train_dataset=train_data,
        eval_dataset=val_data,
        args=transformers.TrainingArguments(
            per_device_train_batch_size=micro_batch_size,
            gradient_accumulation_steps=batch_size // micro_batch_size,
            warmup_steps=warmup_steps,
            num_train_epochs=num_epochs,
            learning_rate=learning_rate,
            fp16=True,
            logging_steps=logging_steps,
            optim="adamw_torch",
            evaluation_strategy="steps" if all_exists(val_data_path) else "no",
            save_strategy="steps",
            eval_steps=eval_steps if all_exists(val_data_path) else None,
            save_steps=save_steps,
            output_dir=output_dir,
            save_total_limit=save_total_limit,
            load_best_model_at_end=True if all_exists(val_data_path) else False,
            group_by_length=group_by_length,
            report_to="wandb" if use_wandb else None,
            run_name=wandb_run_name if use_wandb else None,
        ),
        data_collator=transformers.DataCollatorForSeq2Seq(
            tokenizer, pad_to_multiple_of=8, return_tensors="pt", padding=True
        ),
    )
    # wrap the collator so batches are plain dicts (Trainer compatibility)
    old_collator = trainer.data_collator
    trainer.data_collator = lambda data: dict(old_collator(data))
    model.config.use_cache = False
    # patch state_dict so checkpoints contain only the PEFT adapter weights
    old_state_dict = model.state_dict
    model.state_dict = (
        lambda self, *_, **__: get_peft_model_state_dict(
            self, old_state_dict()
        )
    ).__get__(model, type(model))
    trainer.train(resume_from_checkpoint=resume_from_checkpoint)
    model.save_pretrained(output_dir)
    if use_wandb:
        wandb.finish()
if __name__ == "__main__":
fire.Fire(train)
| 7,533 | 32.04386 | 108 | py |
LogicLLaMA | LogicLLaMA-main/utils/gpt_requests.py | import json
import fire
import openai
from utils import Prompter, wrap_function_with_timeout, all_exists, make_parent_dirs
from typing import Optional, Dict, List, Union, Callable
from utils import all_exists
import os
from functools import partial
from tqdm import tqdm
folio_5_shot = \
"""
Here are some examples you can refer to:
### NL:
All citizens of Lawton Park use the zip code 98199.
### Comments:
N/A
### FOL:
∀x (Citizenof(x, lawtonPark) → Usezipcode(x, number98199))
### NL:
People either regularly drink coffee or joke about being addicted to caffeine.
### Comments:
N/A
### FOL:
∀x (Drinks(x) ⊕ Jokes(x))
### NL:
Museum of Modern Art (MoMA) is a museum if NYC.
### Comments:
N/A
### FOL:
Museum(museumofModernArt) ∧ InNYC(museumofModernArt)
### NL:
Ghosts do not exist.
### Comments:
N/A
### FOL:
∀x (¬Ghost(x))
### NL:
Some American radio personalities are also music supervisors.
### Comments:
N/A
### FOL:
∃x (American(x) ∧ MusicSupervisor(x) ∧ RadioPersonality(x))
### NL:
Holding companies hold several companies.
### Comments:
N/A
### FOL:
∀x ∃y (HoldingCompany(x) → Company(y) ∧ Holds(x, y))
---
"""
logicnli_5_shot = \
"""
Here are some examples you can refer to:
### NL:
If someone is entire, then he is not serious, and vice versa.
### Comments:
N/A
### FOL:
∃x entire(x) ↔ ¬serious(x)
### NL:
If there is at least one people who is both not excited and not timid, then Jonathan is elderly.
### Comments:
N/A
### FOL:
∀x (¬excited(x) ∧ ¬timid(x)) → elderly(Jonathan)
### NL:
Someone who is eithor not fresh or entire is always not serious.
### Comments:
N/A
### FOL:
∀x (¬concerned(x) ∨ fresh(x)) → entire(John)
### NL:
If Nathalie is not blue, then Collier is entire.
### Comments:
N/A
### FOL:
¬blue(Nathalie) → entire(Collier)
### NL:
Someone is courteous and not elderly if and only if he is not excited and not various.
### Comments:
N/A
### FOL:
∃x (courteous(x) ∧ ¬elderly(x)) ↔ (¬excited(x) ∧ ¬various(x))
---
"""
translate_system_prompt_5_shot = \
"""
You are a helpful translator that translates natural language (NL) statements into first-order
logic (FOL) rules. You should
1. Generate the FOL rule that ACCURATELY reflect the meaning of the NL statement
2. USE the following logical operators: ⊕ (either or), ∨ (disjunction), ∧ (conjunction), → (implication), ∀ (universal), ∃ (existential), ¬ (negation), ↔ (equivalence)
3. *NEVER USE* the following symbols for FOL: "!", "≠", "%", "="
Generation Format: you SHOULD ALWAYS generate the translated FOL in the following format
\"\"\"
### Comments:
N/A
### FOL:
{your translated FOL}
\"\"\"
---
"""
translate_system_prompt_zero_shot = \
"""
You are a helpful translator that translates natural language (NL) statements into first-order
logic (FOL) rules. You should
1. Generate the FOL rule that ACCURATELY reflect the meaning of the NL statement
2. USE the following logical operators: ⊕ (either or), ∨ (disjunction), ∧ (conjunction), → (implication), ∀ (universal), ∃ (existential), ¬ (negation), ↔ (equivalence)
3. *NEVER USE* the following symbols for FOL: "!", "≠", "%", "="
Generation Format: you SHOULD ALWAYS generate the translated FOL in the following format
\"\"\"
### Comments:
N/A
### FOL:
{your translated FOL}
\"\"\"
---
"""
fix_prompt = \
"""
---
Below is one suggestion on how to modify this rule to match the meaning of NL, you SHOULD EITHER:
1. Fully accept it and change the rule, if you think it is a good suggestion.
2. Partially accept it and change whatever you think is needed without fully following the suggestion.
3. Or reject it and do not change the rule, if you think no change is needed.
In either case, generate the new FOL in the following format:
\"\"\"
### Comments:
{your comments}
### FOL:
{your new FOL}
\"\"\"
---
Suggestion:
---
"""
class GPTTranslationRequestManager:
    """Batch NL -> FOL translation of a json dataset via the OpenAI chat API,
    with per-sample retry, timeout wrapping, and periodic checkpointing."""

    model_gpt4: str = 'gpt-4'
    model_gpt35: str = 'gpt-3.5-turbo'

    def __init__(self, api_key: str):
        """
        Args:
            api_key: either the key string or the path to the key file
        """
        if os.path.isfile(api_key):
            with open(api_key, 'r') as f:
                openai.api_key = f.read().strip()
        else:
            openai.api_key = api_key

    def default_request(
            self,
            input_prompt: str,
            system_prompt: str,
            model: str,
            resp_split_func: Optional[Callable] = None,
            tqdm: Optional[Callable] = None
    ):
        """Send a single chat-completion request.

        Returns the response content string (parsed by ``resp_split_func``
        when given), or None if the request failed for any reason.
        """
        logger = tqdm.write if all_exists(tqdm) else print
        assert model == GPTTranslationRequestManager.model_gpt35 or model == GPTTranslationRequestManager.model_gpt4
        try:
            response = openai.ChatCompletion.create(
                model=model,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": input_prompt},
                ]
            )
            resp_str = response['choices'][0]['message']['content']
        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt /
        # SystemExit and made the run impossible to abort cleanly
        except Exception:
            logger('something wrong with the request')
            return None
        if all_exists(resp_split_func):
            return resp_split_func(resp_str)
        return resp_str

    def translate_dataset(
            self,
            dataset: Union[str, Dict, List],
            resp_key: str,
            timeout: int = 10,
            resp_split_func: Optional[Callable] = None,
            n_retry: int = 3,
            tqdm: Optional[Callable] = None,
            verbose: bool = False,
            save_path: str = None,
            model: str = 'gpt-3.5-turbo',
            zero_shot: bool = False,
            few_shot_src: Optional[str] = 'folio',
            save_every_nrequests: int = 10,
            src: Optional[str] = None,
    ):
        """Translate every entry of ``dataset`` lacking ``resp_key``.

        Args:
            dataset: a json file path, a dict with a 'data' list, or a list of samples.
            resp_key: key under which the parsed FOL is stored in each entry.
            timeout: seconds before a single request is abandoned.
            resp_split_func: parser applied to the raw response string.
            n_retry: attempts per sample before giving up.
            tqdm: optional tqdm callable for progress/logging.
            verbose: print NL / ground-truth / prediction per sample.
            save_path: output json path (required); written every
                ``save_every_nrequests`` samples and at the end.
            model: 'gpt-3.5-turbo' or 'gpt-4'.
            zero_shot / few_shot_src: prompt selection ('folio' or 'logicnli').
            src: if given, only process entries whose 'src' matches.
        """
        request_with_timeout = wrap_function_with_timeout(self.default_request, timeout)
        logger = tqdm.write if all_exists(tqdm) else print
        # pick the system prompt (zero-shot, or 5-shot with examples)
        if zero_shot:
            translate_system_prompt = translate_system_prompt_zero_shot
        else:
            assert all_exists(few_shot_src)
            if few_shot_src == 'folio':
                few_shot_example = folio_5_shot
            elif few_shot_src == 'logicnli':
                few_shot_example = logicnli_5_shot
            else:
                raise ValueError(few_shot_src)
            translate_system_prompt = translate_system_prompt_5_shot + few_shot_example
        # normalize dataset input into a list of sample dicts
        if isinstance(dataset, str):
            assert os.path.isfile(dataset) and dataset.endswith('json')
            with open(dataset, 'r') as f:
                dataset = json.load(f)
        if isinstance(dataset, Dict):
            assert 'data' in dataset, 'unknown format'
            dataset = dataset['data']
        assert isinstance(dataset, List)
        assert all_exists(save_path)
        make_parent_dirs(save_path)
        pbar = tqdm(dataset, leave=False) if all_exists(tqdm) else None
        update_bar = pbar.update if all_exists(tqdm) else lambda: None
        for ind, entry in enumerate(dataset):
            # skip entries from other sources or already translated ones
            src_is_valid = (src is None) or (all_exists(src) and entry['src'] == src)
            resp_exists = (resp_key in entry) and all_exists(entry[resp_key])
            should_request = src_is_valid and (not resp_exists)
            if not should_request:
                update_bar()
                continue
            resp = None
            for _ in range(n_retry):
                prompt = f"### NL:\n{entry['NL']}\n"
                resp = request_with_timeout(prompt, translate_system_prompt, model, resp_split_func, tqdm)
                if all_exists(resp):
                    break
            if resp is None:
                logger(f'sample {ind} no response')
            entry[resp_key] = resp[1][1] if all_exists(resp) else None  # put the parsed response here
            entry[resp_key + '_full response'] = resp  # also keep the orignal response here
            if verbose:
                logger('NL: {0}\nGT FOL: {1}\nGPT FOL:{2}\n---\n'.format(
                    entry['NL'],
                    entry['FOL'] if 'FOL' in entry else None,
                    resp[1][1] if all_exists(resp) else None
                ))
            # periodic checkpoint so long runs survive crashes
            if ind % save_every_nrequests == 0:
                with open(save_path, 'w') as f:
                    json.dump(dataset, f)
            update_bar()
        with open(save_path, 'w') as f:
            json.dump(dataset, f)
def gpt_translation(
        prompt_path='data/prompt_templates',
        **kwargs
):
    """CLI entry point: translate a dataset's NL statements to FOL via GPT.

    Args:
        prompt_path: directory holding the prompt templates used to parse
            the model responses.
        **kwargs: must contain ``api_key``; all remaining keys are forwarded
            to ``GPTTranslationRequestManager.translate_dataset``.
    """
    prompter = Prompter(prompt_path)
    resp_split_func = lambda full_str: prompter.get_response('translate_prompt_template', full_str)
    # pop the key so it is not forwarded to translate_dataset
    manager = GPTTranslationRequestManager(kwargs.pop('api_key'))
    manager.translate_dataset(
        resp_split_func=resp_split_func,
        tqdm=tqdm,
        **kwargs
    )
if __name__ == '__main__':
fire.Fire(gpt_translation) | 8,957 | 26.906542 | 167 | py |
big-sleep | big-sleep-main/setup.py | import sys
from setuptools import setup, find_packages
sys.path[0:0] = ['big_sleep']
from version import __version__
# Packaging metadata for the big-sleep package (setuptools).
setup(
  name = 'big-sleep',
  packages = find_packages(),
  include_package_data = True,
  # installs a `dream` console command pointing at the CLI entry point
  entry_points={
    'console_scripts': [
      'dream = big_sleep.cli:main',
    ],
  },
  # single-sourced from big_sleep/version.py (see sys.path insertion above)
  version = __version__,
  license='MIT',
  description = 'Big Sleep',
  author = 'Ryan Murdock, Phil Wang',
  author_email = 'lucidrains@gmail.com',
  url = 'https://github.com/lucidrains/big-sleep',
  keywords = [
    'artificial intelligence',
    'deep learning',
    'transformers',
    'text to image',
    'generative adversarial networks'
  ],
  install_requires=[
    'torch>=1.7.1',
    'einops>=0.3',
    'fire',
    'ftfy',
    'pytorch-pretrained-biggan',
    'regex',
    'torchvision>=0.8.2',
    'tqdm'
  ],
  classifiers=[
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Topic :: Scientific/Engineering :: Artificial Intelligence',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3.6',
  ],
)
| 1,080 | 22 | 65 | py |
big-sleep | big-sleep-main/big_sleep/resample.py | """Good differentiable image resampling for PyTorch."""
from functools import update_wrapper
import math
import torch
from torch.nn import functional as F
def sinc(x):
    """Normalized sinc: sin(pi*x) / (pi*x), with entries at x == 0 set to 1."""
    at_zero = x == 0
    return torch.where(~at_zero, torch.sin(math.pi * x) / (math.pi * x), x.new_ones([]))
def lanczos(x, a):
    """Lanczos window of order ``a``: zero outside (-a, a), normalized to sum to 1."""
    inside = (-a < x) & (x < a)
    vals = torch.where(inside, sinc(x) * sinc(x / a), x.new_zeros([]))
    return vals / vals.sum()
def ramp(ratio, width):
    """Symmetric sample positions spaced ``ratio`` apart covering (-width, width)."""
    count = math.ceil(width / ratio + 1)
    positions = torch.empty([count])
    value = 0
    for idx in range(count):
        positions[idx] = value
        value += ratio
    # mirror to negatives, then drop the duplicated endpoints
    return torch.cat([-positions[1:].flip([0]), positions])[1:-1]
def odd(fn):
    """Extend ``fn`` (defined for non-negative inputs) to an odd function of x."""
    wrapper = lambda x: torch.sign(x) * fn(abs(x))
    return update_wrapper(wrapper, fn)
def _to_linear_srgb(input):
cond = input <= 0.04045
a = input / 12.92
b = ((input + 0.055) / 1.055)**2.4
return torch.where(cond, a, b)
def _to_nonlinear_srgb(input):
cond = input <= 0.0031308
a = 12.92 * input
b = 1.055 * input**(1/2.4) - 0.055
return torch.where(cond, a, b)
to_linear_srgb = odd(_to_linear_srgb)
to_nonlinear_srgb = odd(_to_nonlinear_srgb)
def resample(input, size, align_corners=True, is_srgb=False):
    """Resize an NCHW batch to ``size`` (dh, dw) with Lanczos pre-filtering.

    When downsampling along an axis, a separable Lanczos-3 low-pass filter is
    applied first (anti-aliasing); the final resize is bicubic.  If
    ``is_srgb`` is set, filtering happens in linear light and the result is
    converted back to nonlinear sRGB.
    """
    n, c, h, w = input.shape
    dh, dw = size

    if is_srgb:
        input = to_linear_srgb(input)

    # fold channels into the batch so each channel is filtered independently
    input = input.view([n * c, 1, h, w])

    if dh < h:
        # vertical low-pass: reflect-pad so the conv preserves height
        kernel_h = lanczos(ramp(dh / h, 3), 3).to(input.device, input.dtype)
        pad_h = (kernel_h.shape[0] - 1) // 2
        input = F.pad(input, (0, 0, pad_h, pad_h), 'reflect')
        input = F.conv2d(input, kernel_h[None, None, :, None])

    if dw < w:
        # horizontal low-pass, same construction
        kernel_w = lanczos(ramp(dw / w, 3), 3).to(input.device, input.dtype)
        pad_w = (kernel_w.shape[0] - 1) // 2
        input = F.pad(input, (pad_w, pad_w, 0, 0), 'reflect')
        input = F.conv2d(input, kernel_w[None, None, None, :])

    # spatial dims are unchanged by the padded convs, so this view is safe
    input = input.view([n, c, h, w])
    input = F.interpolate(input, size, mode='bicubic', align_corners=align_corners)

    if is_srgb:
        input = to_nonlinear_srgb(input)
    return input
| 2,062 | 24.7875 | 87 | py |
big-sleep | big-sleep-main/big_sleep/clip.py | from collections import OrderedDict
from typing import Tuple, Union
import torch
import torch.nn.functional as F
from torch import nn
from pathlib import Path
import hashlib
import os
import urllib
import warnings
from typing import Union, List
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
_MODELS = {
"RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
"RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
"RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
"ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt"
}
def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
    """Download ``url`` into ``root`` with a progress bar, verify its SHA256
    checksum, and return the local file path.

    The expected checksum is the second-to-last URL path segment (the
    convention used by the _MODELS URLs).  A previously downloaded file is
    reused if its checksum still matches.  Raises RuntimeError when the
    target path is not a regular file or the checksum fails after download.
    """
    # the file-level `import urllib` does not guarantee the request submodule
    # is loaded; import it explicitly so urlopen is always available
    import urllib.request
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
            return download_target
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
        # fixed error message (was "does not not match")
        raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not match")
    return download_target
def _transform():
    """Return CLIP's input preprocessing (channel normalization only)."""
    normalize = Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
    return Compose([normalize])
def available_models() -> List[str]:
    """Returns the names of available CLIP models"""
    return [model_name for model_name in _MODELS]
def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=True):
    """Load a CLIP model
    Parameters
    ----------
    name : str
        A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
    device : Union[str, torch.device]
        The device to put the loaded model
    jit : bool
        Whether to load the optimized JIT model (default) or more hackable non-JIT model.
    Returns
    -------
    model : torch.nn.Module
        The CLIP model
    preprocess : Callable[[PIL.Image], torch.Tensor]
        A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
    """
    if name in _MODELS:
        model_path = _download(_MODELS[name])
    elif os.path.isfile(name):
        model_path = name
    else:
        raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
    try:
        # loading JIT archive
        model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
        state_dict = None
    except RuntimeError:
        # loading saved state dict
        if jit:
            warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
            jit = False
        state_dict = torch.load(model_path, map_location="cpu")
    if not jit:
        # non-JIT path: rebuild the model from the state dict and return
        model = build_model(state_dict or model.state_dict()).to(device)
        if str(device) == "cpu":
            model.float()
        return model, _transform()
    # patch the device names
    # (JIT archives hard-code device constants; rewrite them to the target device)
    device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
    device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
    def patch_device(module):
        # rewrite every hard-coded "cuda" constant in the module's graph(s)
        graphs = [module.graph] if hasattr(module, "graph") else []
        if hasattr(module, "forward1"):
            graphs.append(module.forward1.graph)
        for graph in graphs:
            for node in graph.findAllNodes("prim::Constant"):
                if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
                    node.copyAttributes(device_node)
    model.apply(patch_device)
    patch_device(model.encode_image)
    patch_device(model.encode_text)
    # patch dtype to float32 on CPU
    if str(device) == "cpu":
        float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
        float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
        float_node = float_input.node()
        def patch_float(module):
            # rewrite fp16 (dtype enum value 5) casts to fp32 in the graph(s)
            graphs = [module.graph] if hasattr(module, "graph") else []
            if hasattr(module, "forward1"):
                graphs.append(module.forward1.graph)
            for graph in graphs:
                for node in graph.findAllNodes("aten::to"):
                    inputs = list(node.inputs())
                    for i in [1, 2]:  # dtype can be the second or third argument to aten::to()
                        if inputs[i].node()["value"] == 5:
                            inputs[i].node().copyAttributes(float_node)
        model.apply(patch_float)
        patch_float(model.encode_image)
        patch_float(model.encode_text)
        model.float()
    return model, _transform()
def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
    """
    Returns the tokenized representation of given input string(s)
    Parameters
    ----------
    texts : Union[str, List[str]]
        An input string or a list of input strings to tokenize
    context_length : int
        The context length to use; all CLIP models use 77 as the context length
    Returns
    -------
    A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
    """
    if isinstance(texts, str):
        texts = [texts]

    sot = _tokenizer.encoder["<|startoftext|>"]
    eot = _tokenizer.encoder["<|endoftext|>"]
    encoded = [[sot, *_tokenizer.encode(text), eot] for text in texts]

    result = torch.zeros(len(encoded), context_length, dtype=torch.long)
    for row, tokens in enumerate(encoded):
        if len(tokens) > context_length:
            raise RuntimeError(f"Input {texts[row]} is too long for context length {context_length}")
        # left-align the tokens; the remainder stays zero-padded
        result[row, :len(tokens)] = torch.tensor(tokens)
    return result
class Bottleneck(nn.Module):
    """ResNet bottleneck block, CLIP variant: strided convolutions are
    replaced by stride-1 convolutions preceded/followed by average pooling
    (anti-aliased downsampling)."""
    # channel expansion factor of the final 1x1 conv
    expansion = 4
    def __init__(self, inplanes, planes, stride=1):
        super().__init__()
        # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = None
        self.stride = stride
        if stride > 1 or inplanes != planes * Bottleneck.expansion:
            # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
            self.downsample = nn.Sequential(OrderedDict([
                ("-1", nn.AvgPool2d(stride)),
                ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
                ("1", nn.BatchNorm2d(planes * self.expansion))
            ]))
    def forward(self, x: torch.Tensor):
        # standard pre-activation-free bottleneck with a residual connection
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.avgpool(out)
        out = self.bn3(self.conv3(out))
        if self.downsample is not None:
            # match the identity branch's shape to the main branch
            identity = self.downsample(x)
        out += identity
        out = self.relu(out)
        return out
class AttentionPool2d(nn.Module):
    # Pools a 2-D feature map to a single vector via one round of multi-head
    # attention: the mean of all spatial positions is prepended as a query
    # token and its attention output is returned.

    def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
        super().__init__()
        # +1 position for the prepended mean token.
        self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
        self.k_proj = nn.Linear(embed_dim, embed_dim)
        self.q_proj = nn.Linear(embed_dim, embed_dim)
        self.v_proj = nn.Linear(embed_dim, embed_dim)
        self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
        self.num_heads = num_heads

    def forward(self, x):
        x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1)  # NCHW -> (HW)NC
        x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)  # (HW+1)NC
        x = x + self.positional_embedding[:, None, :].to(x.dtype)  # (HW+1)NC
        # Functional attention with separate q/k/v projections; only the
        # first (mean-token) output is kept below.
        x, _ = F.multi_head_attention_forward(
            query=x, key=x, value=x,
            embed_dim_to_check=x.shape[-1],
            num_heads=self.num_heads,
            q_proj_weight=self.q_proj.weight,
            k_proj_weight=self.k_proj.weight,
            v_proj_weight=self.v_proj.weight,
            in_proj_weight=None,
            in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
            bias_k=None,
            bias_v=None,
            add_zero_attn=False,
            dropout_p=0,
            out_proj_weight=self.c_proj.weight,
            out_proj_bias=self.c_proj.bias,
            use_separate_proj_weight=True,
            training=self.training,
            need_weights=False
        )

        # Return only the attended mean-token: shape (N, output_dim).
        return x[0]
class ModifiedResNet(nn.Module):
    """
    A ResNet class that is similar to torchvision's but contains the following changes:
    - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
    - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
    - The final pooling layer is a QKV attention instead of an average pool
    """

    def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
        super().__init__()
        self.output_dim = output_dim
        self.input_resolution = input_resolution

        # the 3-layer stem
        self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width // 2)
        self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(width // 2)
        self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width)
        self.avgpool = nn.AvgPool2d(2)
        self.relu = nn.ReLU(inplace=True)

        # residual layers
        self._inplanes = width  # this is a *mutable* variable used during construction
        self.layer1 = self._make_layer(width, layers[0])
        self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(width * 8, layers[3], stride=2)

        embed_dim = width * 32  # the ResNet feature dimension
        self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)

    def _make_layer(self, planes, blocks, stride=1):
        # First block carries the stride; the rest keep stride 1 at the
        # expanded channel count (standard ResNet layer construction).
        layers = [Bottleneck(self._inplanes, planes, stride)]

        self._inplanes = planes * Bottleneck.expansion
        for _ in range(1, blocks):
            layers.append(Bottleneck(self._inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        def stem(x):
            # Run the 3 stem convs then the stride-2 average pool.
            for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:
                x = self.relu(bn(conv(x)))
            x = self.avgpool(x)
            return x

        # Match the input dtype to the weights (supports fp16 inference).
        x = x.type(self.conv1.weight.dtype)
        x = stem(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.attnpool(x)

        return x
class LayerNorm(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16."""

    def forward(self, x: torch.Tensor):
        # Normalize in float32 for numerical stability, then cast back so
        # callers running in half precision get half precision out.
        input_dtype = x.dtype
        normalized = super().forward(x.type(torch.float32))
        return normalized.type(input_dtype)
class QuickGELU(nn.Module):
    """Sigmoid-based approximation of GELU: ``x * sigmoid(1.702 * x)``."""

    def forward(self, x: torch.Tensor):
        gate = torch.sigmoid(1.702 * x)
        return x * gate
class ResidualAttentionBlock(nn.Module):
    # One pre-norm transformer block: LN -> self-attention -> residual add,
    # then LN -> MLP (4x expansion, QuickGELU) -> residual add.

    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
        super().__init__()

        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, d_model * 4)),
            ("gelu", QuickGELU()),
            ("c_proj", nn.Linear(d_model * 4, d_model))
        ]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask

    def attention(self, x: torch.Tensor):
        # Move the (optional) additive mask to the input's dtype/device each
        # call, so the block works under .half()/.to(device) without re-init.
        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]

    def forward(self, x: torch.Tensor):
        # Input/output layout is (seq, batch, d_model) per nn.MultiheadAttention.
        x = x + self.attention(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x
class Transformer(nn.Module):
    """A stack of ``layers`` ResidualAttentionBlocks sharing one attention mask."""

    def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
        super().__init__()
        self.width = width
        self.layers = layers
        blocks = [ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]
        self.resblocks = nn.Sequential(*blocks)

    def forward(self, x: torch.Tensor):
        # nn.Sequential threads x through every block in order.
        return self.resblocks(x)
class VisualTransformer(nn.Module):
    # ViT image encoder: non-overlapping patch embedding via a strided conv,
    # a learned class token + positional embeddings, a transformer stack,
    # and a final projection of the class token to the joint embedding space.

    def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
        super().__init__()
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        # kernel_size == stride == patch_size -> one token per patch.
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)

        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
        self.ln_pre = LayerNorm(width)

        self.transformer = Transformer(width, layers, heads)

        self.ln_post = LayerNorm(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))

    def forward(self, x: torch.Tensor):
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
        # Prepend the class token (broadcast to the batch via zeros-add).
        x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)

        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD

        # Only the class token (position 0) is used as the image embedding.
        x = self.ln_post(x[:, 0, :])

        if self.proj is not None:
            x = x @ self.proj

        return x
class CLIP(nn.Module):
    # Contrastive image/text model: an image tower (ModifiedResNet or
    # VisualTransformer) and a causal text transformer, both projected into
    # a shared `embed_dim`-dimensional space and compared by scaled cosine
    # similarity.  Attribute names mirror the released checkpoints.

    def __init__(self,
                 embed_dim: int,
                 # vision
                 image_resolution: int,
                 vision_layers: Union[Tuple[int, int, int, int], int],
                 vision_width: int,
                 vision_patch_size: int,
                 # text
                 context_length: int,
                 vocab_size: int,
                 transformer_width: int,
                 transformer_heads: int,
                 transformer_layers: int
                 ):
        super().__init__()

        self.context_length = context_length

        # A tuple/list of layer counts selects the ResNet tower; a single
        # int selects the ViT tower with that many layers.
        if isinstance(vision_layers, (tuple, list)):
            vision_heads = vision_width * 32 // 64
            self.visual = ModifiedResNet(
                layers=vision_layers,
                output_dim=embed_dim,
                heads=vision_heads,
                input_resolution=image_resolution,
                width=vision_width
            )
        else:
            vision_heads = vision_width // 64
            self.visual = VisualTransformer(
                input_resolution=image_resolution,
                patch_size=vision_patch_size,
                width=vision_width,
                layers=vision_layers,
                heads=vision_heads,
                output_dim=embed_dim
            )

        self.transformer = Transformer(
            width=transformer_width,
            layers=transformer_layers,
            heads=transformer_heads,
            attn_mask=self.build_attention_mask()
        )

        self.vocab_size = vocab_size
        self.token_embedding = nn.Embedding(vocab_size, transformer_width)
        self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
        self.ln_final = LayerNorm(transformer_width)

        self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
        # Learned temperature for the contrastive logits (stored as a log).
        self.logit_scale = nn.Parameter(torch.ones([]))

        self.initialize_parameters()

    def initialize_parameters(self):
        """Initialize embeddings, attention-pool and transformer weights
        with the scaled-normal scheme used for the released models."""
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)

        if isinstance(self.visual, ModifiedResNet):
            if self.visual.attnpool is not None:
                std = self.visual.attnpool.c_proj.in_features ** -0.5
                nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std)
                nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std)

            # Zero-init the last BN scale of every bottleneck so each block
            # starts as (approximately) the identity.
            for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]:
                for name, param in resnet_block.named_parameters():
                    if name.endswith("bn3.weight"):
                        nn.init.zeros_(param)

        proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
        attn_std = self.transformer.width ** -0.5
        fc_std = (2 * self.transformer.width) ** -0.5
        for block in self.transformer.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)

        if self.text_projection is not None:
            nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)

    def build_attention_mask(self):
        # lazily create causal attention mask, with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float("-inf"))
        mask.triu_(1)  # zero out the lower diagonal
        return mask

    @property
    def dtype(self):
        # Proxy for the model's compute dtype (fp32 or fp16 after
        # convert_weights); taken from the first visual conv weight.
        return self.visual.conv1.weight.dtype

    def encode_image(self, image):
        """Encode a batch of preprocessed images into the joint space."""
        return self.visual(image.type(self.dtype))

    def encode_text(self, text):
        """Encode a batch of tokenized texts into the joint space."""
        x = self.token_embedding(text).type(self.dtype)  # [batch_size, n_ctx, d_model]

        x = x + self.positional_embedding.type(self.dtype)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x).type(self.dtype)

        # x.shape = [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection

        return x

    def forward(self, image, text):
        """Return (logits_per_image, logits_per_text) similarity matrices."""
        image_features = self.encode_image(image)
        text_features = self.encode_text(text)

        # normalized features
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        text_features = text_features / text_features.norm(dim=-1, keepdim=True)

        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logit_scale * text_features @ image_features.t()

        # shape = [global_batch_size, global_batch_size]
        return logits_per_image, logits_per_text
def convert_weights(model: nn.Module):
    """Convert applicable model parameters to fp16"""

    def _convert_weights_to_fp16(module):
        # Plain conv / linear layers: cast weight and (optional) bias.
        if isinstance(module, (nn.Conv1d, nn.Conv2d, nn.Linear)):
            module.weight.data = module.weight.data.half()
            if module.bias is not None:
                module.bias.data = module.bias.data.half()

        # MultiheadAttention keeps its projections as raw tensors.
        if isinstance(module, nn.MultiheadAttention):
            attr_names = ["in_proj_weight", "q_proj_weight", "k_proj_weight",
                          "v_proj_weight", "in_proj_bias", "bias_k", "bias_v"]
            for attr_name in attr_names:
                tensor = getattr(module, attr_name)
                if tensor is not None:
                    tensor.data = tensor.data.half()

        # CLIP-specific bare-Parameter projections.
        for proj_name in ("text_projection", "proj"):
            proj = getattr(module, proj_name, None)
            if proj is not None:
                proj.data = proj.data.half()

    model.apply(_convert_weights_to_fp16)
def build_model(state_dict: dict):
    # Reconstruct a CLIP module whose architecture matches a released
    # checkpoint by inspecting the shapes/keys of its state_dict, then load
    # the weights (in fp16) and return the model in eval mode.

    # ViT checkpoints carry a "visual.proj" parameter; ResNet ones do not.
    vit = "visual.proj" in state_dict

    if vit:
        vision_width = state_dict["visual.conv1.weight"].shape[0]
        # One attention in_proj per transformer block in the visual tower.
        vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
        vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
        # positional_embedding has grid**2 + 1 rows (the +1 is the class token).
        grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
        image_resolution = vision_patch_size * grid_size
    else:
        # Count the bottleneck blocks in each of the four ResNet stages.
        counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
        vision_layers = tuple(counts)
        vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
        output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
        vision_patch_size = None
        assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
        # The ResNet tower downsamples by a total factor of 32.
        image_resolution = output_width * 32

    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64
    transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))

    model = CLIP(
        embed_dim,
        image_resolution, vision_layers, vision_width, vision_patch_size,
        context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
    )

    # These bookkeeping keys are not module parameters; drop them before
    # load_state_dict (which is strict by default).
    for key in ["input_resolution", "context_length", "vocab_size"]:
        if key in state_dict:
            del state_dict[key]

    convert_weights(model)
    model.load_state_dict(state_dict)
    return model.eval()
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
    """Absolute path of the bundled BPE merges file, resolved relative to this module."""
    module_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(module_dir, "data/bpe_simple_vocab_16e6.txt")
@lru_cache()
def bytes_to_unicode():
    """
    Build the reversible byte -> unicode-character table used by the BPE vocab.

    The BPE code operates on unicode strings, so every possible byte value
    must map to some printable character. Bytes that are already printable
    latin-1 characters map to themselves; the remaining bytes are assigned
    code points starting at 256, avoiding whitespace/control characters the
    BPE code cannot handle. The result is a bijection over all 256 bytes.
    """
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    byte_values = list(printable)
    char_codes = list(printable)
    offset = 0
    for byte in range(2 ** 8):
        if byte not in byte_values:
            byte_values.append(byte)
            char_codes.append(2 ** 8 + offset)
            offset += 1
    return dict(zip(byte_values, (chr(code) for code in char_codes)))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word.

    Word is represented as a tuple of symbols (symbols being
    variable-length strings).
    """
    # word[0] is indexed first on purpose: callers never pass an empty word,
    # and the original contract raises IndexError if one ever appears.
    previous_symbol = word[0]
    pairs = set()
    for symbol in word[1:]:
        pairs.add((previous_symbol, symbol))
        previous_symbol = symbol
    return pairs
def basic_clean(text):
    """Fix mojibake with ftfy, undo (possibly double) HTML escaping, and strip whitespace."""
    fixed = ftfy.fix_text(text)
    # Double unescape handles text that was HTML-escaped twice upstream.
    unescaped = html.unescape(html.unescape(fixed))
    return unescaped.strip()
def whitespace_clean(text):
    """Collapse every run of whitespace to a single space and trim the ends."""
    return re.sub(r'\s+', ' ', text).strip()
class SimpleTokenizer(object):
    # Byte-pair-encoding tokenizer for CLIP: text is ftfy-cleaned,
    # lower-cased, byte-mapped to unicode via bytes_to_unicode(), then merged
    # greedily by BPE rank. '</w>' marks end-of-word symbols.

    def __init__(self, bpe_path: str = default_bpe()):
        # NOTE: the default bpe_path is evaluated once, at class-definition time.
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        merges = Path(bpe_path).read_text(encoding='utf8').split('\n')
        # Drop the header line and keep exactly the merges that fill the
        # 49152-symbol vocab (minus 256*2 byte symbols and 2 specials).
        merges = merges[1:49152-256-2+1]
        merges = [tuple(merge.split()) for merge in merges]
        # Vocab = 256 raw byte chars, the same 256 with '</w>', one entry
        # per merge, then the two special tokens.
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v+'</w>' for v in vocab]
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        # Lower rank = higher merge priority.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # Memoizes bpe() results; pre-seeded so specials pass through unchanged.
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)

    def bpe(self, token):
        """Apply greedy BPE merges to one pre-tokenized token; returns the
        merged symbols joined by spaces (memoized in self.cache)."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + ( token[-1] + '</w>',)
        pairs = get_pairs(word)

        if not pairs:
            return token+'</w>'

        while True:
            # Merge the lowest-ranked (most frequent) adjacent pair first;
            # pairs without a rank sort to infinity and stop the loop.
            bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:
                    # word.index raises ValueError when `first` does not
                    # occur after i; copy the tail and finish this pass.
                    new_word.extend(word[i:])
                    break

                if word[i] == first and i < len(word)-1 and word[i+1] == second:
                    new_word.append(first+second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """Clean `text`, split it with self.pat, and return BPE token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            # Map raw UTF-8 bytes to the printable chars BPE operates on.
            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def decode(self, tokens):
        """Invert encode(): ids -> symbols -> bytes -> text ('</w>' becomes a space)."""
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
        return text
import gzip  # NOTE(review): gzip is also imported earlier and appears unused in this chunk — confirm before removing.

# Module-level singleton: built once at import time (reads the bundled BPE
# vocabulary from disk) and shared by tokenize() above.
_tokenizer = SimpleTokenizer()
| 28,958 | 37.509309 | 178 | py |
big-sleep | big-sleep-main/big_sleep/biggan.py | # this code is a copy from huggingface
# with some minor modifications
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import json
import copy
import logging
import os
import shutil
import tempfile
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_PRETRAINED_BIGGAN_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BIGGAN_CACHE',
Path.home() / '.pytorch_pretrained_biggan'))
except (AttributeError, ImportError):
PYTORCH_PRETRAINED_BIGGAN_CACHE = os.getenv('PYTORCH_PRETRAINED_BIGGAN_CACHE',
os.path.join(os.path.expanduser("~"), '.pytorch_pretrained_biggan'))
logger = logging.getLogger(__name__) # pylint: disable=invalid-name

# Download URLs for the pretrained BigGAN generator weights released by
# Hugging Face, keyed by model name (the suffix is the output resolution).
PRETRAINED_MODEL_ARCHIVE_MAP = {
    'biggan-deep-128': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-128-pytorch_model.bin",
    'biggan-deep-256': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-256-pytorch_model.bin",
    'biggan-deep-512': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-512-pytorch_model.bin",
}

# Matching BigGANConfig JSON files for each pretrained model above.
PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'biggan-deep-128': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-128-config.json",
    'biggan-deep-256': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-256-config.json",
    'biggan-deep-512': "https://s3.amazonaws.com/models.huggingface.co/biggan/biggan-deep-512-config.json",
}

# File names used when loading from a local directory instead of a URL.
WEIGHTS_NAME = 'pytorch_model.bin'
CONFIG_NAME = 'config.json'
def url_to_filename(url, etag=None):
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the url's, delimited
    by a period.
    """
    name = sha256(url.encode('utf-8')).hexdigest()
    if etag:
        # Cache entries are invalidated by ETag changes, not just URL.
        name = name + '.' + sha256(etag.encode('utf-8')).hexdigest()
    return name
def filename_to_url(filename, cache_dir=None):
    """
    Return the url and etag (which may be ``None``) stored for `filename`.
    Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    cache_path = os.path.join(cache_dir, filename)
    if not os.path.exists(cache_path):
        raise EnvironmentError("file {} not found".format(cache_path))

    # Metadata lives in a sidecar "<file>.json" written by get_from_cache.
    meta_path = cache_path + '.json'
    if not os.path.exists(meta_path):
        raise EnvironmentError("file {} not found".format(meta_path))

    with open(meta_path, encoding="utf-8") as meta_file:
        metadata = json.load(meta_file)
    return metadata['url'], metadata['etag']
def cached_path(url_or_filename, cache_dir=None):
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE
    if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    scheme = urlparse(url_or_filename).scheme
    if scheme in ('http', 'https', 's3'):
        # URL, so get it from the cache (downloading if necessary)
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        # File, and it exists.
        return url_or_filename
    if scheme == '':
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    # Something unknown
    raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
    """Split a full s3 path into the bucket name and path."""
    parsed = urlparse(url)
    if not parsed.netloc or not parsed.path:
        raise ValueError("bad s3 path {}".format(url))
    bucket_name, s3_path = parsed.netloc, parsed.path
    if s3_path.startswith("/"):
        # Drop exactly one leading '/' left over from the URL path.
        s3_path = s3_path[1:]
    return bucket_name, s3_path
def s3_request(func):
    """
    Wrapper function for s3 requests in order to create more helpful error
    messages.
    """

    @wraps(func)
    def wrapper(url, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            # Translate S3 "no such key" into the same EnvironmentError the
            # local-file code path raises; propagate everything else.
            if int(exc.response["Error"]["Code"]) == 404:
                raise EnvironmentError("file {} not found".format(url))
            raise

    return wrapper
@s3_request
def s3_etag(url):
    """Check ETag on S3 object."""
    bucket_name, s3_path = split_s3_path(url)
    return boto3.resource("s3").Object(bucket_name, s3_path).e_tag
@s3_request
def s3_get(url, temp_file):
    """Pull a file directly from S3."""
    bucket_name, s3_path = split_s3_path(url)
    bucket = boto3.resource("s3").Bucket(bucket_name)
    bucket.download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
    """Stream `url` into the open binary file object `temp_file`.

    Downloads in 1 KiB chunks with a tqdm progress bar; the total is taken
    from the Content-Length header when the server provides one.

    Fix: the streamed response and the tqdm bar were previously never
    closed on an exception mid-transfer, leaking the pooled HTTP
    connection (stream=True keeps it open until consumed or closed) and
    leaving a dangling progress line.
    """
    req = requests.get(url, stream=True)
    try:
        content_length = req.headers.get('Content-Length')
        total = int(content_length) if content_length is not None else None
        progress = tqdm(unit="B", total=total)
        try:
            for chunk in req.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    progress.update(len(chunk))
                    temp_file.write(chunk)
        finally:
            progress.close()
    finally:
        req.close()
def get_from_cache(url, cache_dir=None):
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)

    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url)
    else:
        response = requests.head(url, allow_redirects=True)
        if response.status_code != 200:
            raise IOError("HEAD request failed for url {} with status code {}"
                          .format(url, response.status_code))
        etag = response.headers.get("ETag")

    # Cache key is sha256(url) [+ sha256(etag)], so a changed ETag yields a
    # fresh cache entry instead of serving stale bytes.
    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    if not os.path.exists(cache_path):
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)

            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)

            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)

            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)

            # Sidecar "<file>.json" lets filename_to_url recover the origin.
            logger.info("creating metadata file for %s", cache_path)
            meta = {'url': url, 'etag': etag}
            meta_path = cache_path + '.json'
            with open(meta_path, 'w', encoding="utf-8") as meta_file:
                json.dump(meta, meta_file)

            logger.info("removing temp file %s", temp_file.name)

    return cache_path
def read_set_from_file(filename):
    '''
    Extract a de-duped collection (set) of text from a file.
    Expected file format is one item per line.
    '''
    with open(filename, 'r', encoding='utf-8') as file_:
        # rstrip drops the newline (and any other trailing whitespace).
        return {line.rstrip() for line in file_}
def get_file_extension(path, dot=True, lower=True):
    """Return the extension of `path`; `dot=False` drops the leading dot,
    `lower=True` lower-cases the result."""
    extension = os.path.splitext(path)[1]
    if not dot:
        extension = extension[1:]
    return extension.lower() if lower else extension
class BigGANConfig(object):
    """ Configuration class to store the configuration of a `BigGAN`.
    Defaults are for the 128x128 model.
    layers tuple are (up-sample in the layer ?, input channels, output channels)
    """
    def __init__(self,
                 output_dim=128,
                 z_dim=128,
                 class_embed_dim=128,
                 channel_width=128,
                 num_classes=1000,
                 layers=None,
                 attention_layer_position=8,
                 eps=1e-4,
                 n_stats=51):
        """Constructs BigGANConfig.

        Fix: `layers` previously used a mutable list literal as its default
        argument, which Python evaluates once and shares across every call —
        mutating one config's `layers` would silently corrupt all later
        default-constructed configs. `None` is now the sentinel and a fresh
        list (the 128x128 architecture) is built per instance.
        """
        if layers is None:
            layers = [(False, 16, 16),
                      (True, 16, 16),
                      (False, 16, 16),
                      (True, 16, 8),
                      (False, 8, 8),
                      (True, 8, 4),
                      (False, 4, 4),
                      (True, 4, 2),
                      (False, 2, 2),
                      (True, 2, 1)]
        self.output_dim = output_dim
        self.z_dim = z_dim
        self.class_embed_dim = class_embed_dim
        self.channel_width = channel_width
        self.num_classes = num_classes
        self.layers = layers
        self.attention_layer_position = attention_layer_position
        self.eps = eps
        self.n_stats = n_stats

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `BigGANConfig` from a Python dictionary of parameters."""
        config = BigGANConfig()
        # Unknown keys become plain attributes; known keys override defaults.
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `BigGANConfig` from a json file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def snconv2d(eps=1e-12, **kwargs):
    """Spectrally-normalized Conv2d; `kwargs` are forwarded to nn.Conv2d."""
    conv = nn.Conv2d(**kwargs)
    return nn.utils.spectral_norm(conv, eps=eps)
def snlinear(eps=1e-12, **kwargs):
    """Spectrally-normalized Linear; `kwargs` are forwarded to nn.Linear."""
    linear = nn.Linear(**kwargs)
    return nn.utils.spectral_norm(linear, eps=eps)
def sn_embedding(eps=1e-12, **kwargs):
    """Spectrally-normalized Embedding; `kwargs` are forwarded to nn.Embedding."""
    embedding = nn.Embedding(**kwargs)
    return nn.utils.spectral_norm(embedding, eps=eps)
class SelfAttn(nn.Module):
    """ Self attention Layer"""
    # SAGAN-style self-attention with spectrally-normalized 1x1 convs; keys
    # and values are 2x2-maxpooled to cut the attention matrix by 4x.
    # `gamma` starts at 0, so the layer is the identity at init.

    def __init__(self, in_channels, eps=1e-12):
        super(SelfAttn, self).__init__()
        self.in_channels = in_channels
        self.snconv1x1_theta = snconv2d(in_channels=in_channels, out_channels=in_channels//8,
                                        kernel_size=1, bias=False, eps=eps)
        self.snconv1x1_phi = snconv2d(in_channels=in_channels, out_channels=in_channels//8,
                                      kernel_size=1, bias=False, eps=eps)
        self.snconv1x1_g = snconv2d(in_channels=in_channels, out_channels=in_channels//2,
                                    kernel_size=1, bias=False, eps=eps)
        self.snconv1x1_o_conv = snconv2d(in_channels=in_channels//2, out_channels=in_channels,
                                         kernel_size=1, bias=False, eps=eps)
        self.maxpool = nn.MaxPool2d(2, stride=2, padding=0)
        self.softmax  = nn.Softmax(dim=-1)
        self.gamma = nn.Parameter(torch.zeros(1))

    def forward(self, x):
        _, ch, h, w = x.size()
        # Theta path
        theta = self.snconv1x1_theta(x)
        theta = theta.view(-1, ch//8, h*w)
        # Phi path
        phi = self.snconv1x1_phi(x)
        phi = self.maxpool(phi)
        phi = phi.view(-1, ch//8, h*w//4)
        # Attn map
        attn = torch.bmm(theta.permute(0, 2, 1), phi)
        attn = self.softmax(attn)
        # g path
        g = self.snconv1x1_g(x)
        g = self.maxpool(g)
        g = g.view(-1, ch//2, h*w//4)
        # Attn_g - o_conv
        attn_g = torch.bmm(g, attn.permute(0, 2, 1))
        attn_g = attn_g.view(-1, ch//2, h, w)
        attn_g = self.snconv1x1_o_conv(attn_g)
        # Out: learned residual blend; gamma==0 at init keeps x unchanged.
        out = x + self.gamma*attn_g
        return out
class BigGANBatchNorm(nn.Module):
    """ This is a batch norm module that can handle conditional input and can be provided with pre-computed
        activation means and variances for various truncation parameters.

        We cannot just rely on torch.batch_norm since it cannot handle
        batched weights (pytorch 1.0.1). We computate batch_norm our-self without updating running means and variances.
        If you want to train this model you should add running means and variance computation logic.
    """
    def __init__(self, num_features, condition_vector_dim=None, n_stats=51, eps=1e-4, conditional=True):
        super(BigGANBatchNorm, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.conditional = conditional

        # We use pre-computed statistics for n_stats values of truncation between 0 and 1
        self.register_buffer('running_means', torch.zeros(n_stats, num_features))
        self.register_buffer('running_vars', torch.ones(n_stats, num_features))
        self.step_size = 1.0 / (n_stats - 1)

        if conditional:
            assert condition_vector_dim is not None
            # Conditional scale/offset are produced from the condition
            # vector by spectrally-normalized linears (FiLM-style).
            self.scale = snlinear(in_features=condition_vector_dim, out_features=num_features, bias=False, eps=eps)
            self.offset = snlinear(in_features=condition_vector_dim, out_features=num_features, bias=False, eps=eps)
        else:
            self.weight = torch.nn.Parameter(torch.Tensor(num_features))
            self.bias = torch.nn.Parameter(torch.Tensor(num_features))

    def forward(self, x, truncation, condition_vector=None):
        # Retreive pre-computed statistics associated to this truncation
        coef, start_idx = math.modf(truncation / self.step_size)
        start_idx = int(start_idx)
        if coef != 0.0:  # Interpolate
            # NOTE(review): the interpolation weights look swapped
            # (start_idx gets `coef`, start_idx+1 gets `1 - coef`), but this
            # matches the upstream pytorch_pretrained_biggan code the
            # released weights were validated with — confirm before "fixing".
            running_mean = self.running_means[start_idx] * coef + self.running_means[start_idx + 1] * (1 - coef)
            running_var = self.running_vars[start_idx] * coef + self.running_vars[start_idx + 1] * (1 - coef)
        else:
            running_mean = self.running_means[start_idx]
            running_var = self.running_vars[start_idx]

        if self.conditional:
            running_mean = running_mean.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
            running_var = running_var.unsqueeze(0).unsqueeze(-1).unsqueeze(-1)

            # Per-sample scale/offset broadcast over the spatial dims.
            weight = 1 + self.scale(condition_vector).unsqueeze(-1).unsqueeze(-1)
            bias = self.offset(condition_vector).unsqueeze(-1).unsqueeze(-1)

            out = (x - running_mean) / torch.sqrt(running_var + self.eps) * weight + bias
        else:
            out = F.batch_norm(x, running_mean, running_var, self.weight, self.bias,
                               training=False, momentum=0.0, eps=self.eps)

        return out
class GenBlock(nn.Module):
    # BigGAN-deep generator residual block: four conv stages, each preceded
    # by conditional batch-norm and ReLU, with optional 2x nearest-neighbor
    # upsampling and channel-dropping on the skip path.

    def __init__(self, in_size, out_size, condition_vector_dim, reduction_factor=4, up_sample=False,
                 n_stats=51, eps=1e-12):
        super(GenBlock, self).__init__()
        self.up_sample = up_sample
        # When channels shrink, the residual keeps only the first half of
        # the input channels (see forward).
        self.drop_channels = (in_size != out_size)
        middle_size = in_size // reduction_factor

        self.bn_0 = BigGANBatchNorm(in_size, condition_vector_dim, n_stats=n_stats, eps=eps, conditional=True)
        self.conv_0 = snconv2d(in_channels=in_size, out_channels=middle_size, kernel_size=1, eps=eps)

        self.bn_1 = BigGANBatchNorm(middle_size, condition_vector_dim, n_stats=n_stats, eps=eps, conditional=True)
        self.conv_1 = snconv2d(in_channels=middle_size, out_channels=middle_size, kernel_size=3, padding=1, eps=eps)

        self.bn_2 = BigGANBatchNorm(middle_size, condition_vector_dim, n_stats=n_stats, eps=eps, conditional=True)
        self.conv_2 = snconv2d(in_channels=middle_size, out_channels=middle_size, kernel_size=3, padding=1, eps=eps)

        self.bn_3 = BigGANBatchNorm(middle_size, condition_vector_dim, n_stats=n_stats, eps=eps, conditional=True)
        self.conv_3 = snconv2d(in_channels=middle_size, out_channels=out_size, kernel_size=1, eps=eps)

        self.relu = nn.ReLU()

    def forward(self, x, cond_vector, truncation):
        x0 = x

        x = self.bn_0(x, truncation, cond_vector)
        x = self.relu(x)
        x = self.conv_0(x)

        x = self.bn_1(x, truncation, cond_vector)
        x = self.relu(x)
        if self.up_sample:
            # Upsample mid-block so conv_1..conv_3 run at output resolution.
            x = F.interpolate(x, scale_factor=2, mode='nearest')
        x = self.conv_1(x)

        x = self.bn_2(x, truncation, cond_vector)
        x = self.relu(x)
        x = self.conv_2(x)

        x = self.bn_3(x, truncation, cond_vector)
        x = self.relu(x)
        x = self.conv_3(x)

        if self.drop_channels:
            # Residual keeps the first half of the input channels (BigGAN-deep).
            new_channels = x0.shape[1] // 2
            x0 = x0[:, :new_channels, ...]
        if self.up_sample:
            x0 = F.interpolate(x0, scale_factor=2, mode='nearest')

        out = x + x0
        return out
class Generator(nn.Module):
    # BigGAN-deep generator: a spectrally-normalized linear maps the
    # condition vector to a 4x4x(16*ch) seed, which is refined by GenBlocks
    # (with one SelfAttn at `attention_layer_position`) into an RGB image
    # in [-1, 1] via tanh.

    def __init__(self, config):
        super(Generator, self).__init__()
        self.config = config
        ch = config.channel_width
        # Condition = concat of noise z and class embedding (see config.z_dim).
        condition_vector_dim = config.z_dim * 2

        self.gen_z = snlinear(in_features=condition_vector_dim,
                              out_features=4 * 4 * 16 * ch, eps=config.eps)

        layers = []
        for i, layer in enumerate(config.layers):
            if i == config.attention_layer_position:
                layers.append(SelfAttn(ch*layer[1], eps=config.eps))
            layers.append(GenBlock(ch*layer[1],
                                   ch*layer[2],
                                   condition_vector_dim,
                                   up_sample=layer[0],
                                   n_stats=config.n_stats,
                                   eps=config.eps))
        self.layers = nn.ModuleList(layers)

        self.bn = BigGANBatchNorm(ch, n_stats=config.n_stats, eps=config.eps, conditional=False)
        self.relu = nn.ReLU()
        self.conv_to_rgb = snconv2d(in_channels=ch, out_channels=ch, kernel_size=3, padding=1, eps=config.eps)
        self.tanh = nn.Tanh()

    def forward(self, cond_vector, truncation):
        # NOTE: cond_vector is indexed per-layer here — row 0 seeds gen_z and
        # each subsequent row conditions one GenBlock (big-sleep modification;
        # upstream BigGAN feeds a single shared vector).
        z = self.gen_z(cond_vector[0].unsqueeze(0))

        # We use this conversion step to be able to use TF weights:
        # TF convention on shape is [batch, height, width, channels]
        # PT convention on shape is [batch, channels, height, width]
        z = z.view(-1, 4, 4, 16 * self.config.channel_width)
        z = z.permute(0, 3, 1, 2).contiguous()

        next_available_latent_index = 1
        for layer in self.layers:
            if isinstance(layer, GenBlock):
                z = layer(z, cond_vector[next_available_latent_index].unsqueeze(0), truncation)
                next_available_latent_index += 1
            else:
                # SelfAttn takes no condition vector.
                z = layer(z)

        z = self.bn(z, truncation)
        z = self.relu(z)
        # conv_to_rgb outputs `ch` channels; only the first 3 are the image.
        z = self.conv_to_rgb(z)
        z = z[:, :3, ...]
        z = self.tanh(z)
        return z
class BigGAN(nn.Module):
    """BigGAN Generator: class-embedding layer plus the Generator stack."""
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
        """Build a BigGAN either from a shortcut name listed in
        PRETRAINED_MODEL_ARCHIVE_MAP or from a local directory containing
        the weights and config files."""
        if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
            model_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
            config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
        else:
            model_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        try:
            # cached_path resolves remote URLs through the local cache
            resolved_model_file = cached_path(model_file, cache_dir=cache_dir)
            resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
        except EnvironmentError:
            logger.error("Wrong model name, should be a valid path to a folder containing "
                         "a {} file and a {} file or a model name in {}".format(
                             WEIGHTS_NAME, CONFIG_NAME, PRETRAINED_MODEL_ARCHIVE_MAP.keys()))
            raise
        logger.info("loading model {} from cache at {}".format(pretrained_model_name_or_path, resolved_model_file))
        # Load config
        config = BigGANConfig.from_json_file(resolved_config_file)
        logger.info("Model config {}".format(config))
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        state_dict = torch.load(resolved_model_file, map_location='cpu' if not torch.cuda.is_available() else None)
        # strict=False tolerates checkpoints with extra/missing keys
        model.load_state_dict(state_dict, strict=False)
        return model
    def __init__(self, config):
        super(BigGAN, self).__init__()
        self.config = config
        self.embeddings = nn.Linear(config.num_classes, config.z_dim, bias=False)
        self.generator = Generator(config)
    def forward(self, z, class_label, truncation):
        """Generate an image batch from noise z and (soft) class labels."""
        assert 0 < truncation <= 1
        embed = self.embeddings(class_label)
        # condition vector = [noise | class embedding]
        cond_vector = torch.cat((z, embed), dim=1)
        z = self.generator(cond_vector, truncation)
        return z
| 22,399 | 37.62069 | 119 | py |
big-sleep | big-sleep-main/big_sleep/cli.py | import fire
import random as rnd
from big_sleep import Imagine, version
from pathlib import Path
from .version import __version__;
def train(
    text=None,
    img=None,
    text_min="",
    lr = .07,
    image_size = 512,
    gradient_accumulate_every = 1,
    epochs = 20,
    iterations = 1050,
    save_every = 50,
    overwrite = False,
    save_progress = False,
    save_date_time = False,
    bilinear = False,
    open_folder = True,
    seed = 0,
    append_seed = False,
    random = False,
    torch_deterministic = False,
    max_classes = None,
    class_temperature = 2.,
    save_best = False,
    experimental_resample = False,
    ema_decay = 0.5,
    num_cutouts = 128,
    center_bias = False,
    larger_model = False
):
    """CLI entry point: configure an Imagine run from command-line flags.

    Arguments mirror big_sleep.Imagine; additionally `random` picks a random
    seed, `overwrite` skips the confirmation prompt when the output image
    already exists, and `larger_model` selects the larger CLIP perceptor.
    """
    print(f'Starting up... v{__version__}')
    if random:
        # Bug fix: random.randint requires integer bounds -- the previous
        # float literal `1e6` raises ValueError on modern Python (randrange
        # no longer accepts non-integer arguments).
        seed = rnd.randint(0, 1_000_000)
    imagine = Imagine(
        text=text,
        img=img,
        text_min=text_min,
        lr = lr,
        image_size = image_size,
        gradient_accumulate_every = gradient_accumulate_every,
        epochs = epochs,
        iterations = iterations,
        save_every = save_every,
        save_progress = save_progress,
        bilinear = bilinear,
        seed = seed,
        append_seed = append_seed,
        torch_deterministic = torch_deterministic,
        open_folder = open_folder,
        max_classes = max_classes,
        class_temperature = class_temperature,
        save_date_time = save_date_time,
        save_best = save_best,
        experimental_resample = experimental_resample,
        ema_decay = ema_decay,
        num_cutouts = num_cutouts,
        center_bias = center_bias,
        larger_clip = larger_model
    )
    if not overwrite and imagine.filename.exists():
        # ask before clobbering an existing output image
        answer = input('Imagined image already exists, do you want to overwrite? (y/n) ').lower()
        if answer not in ('yes', 'y'):
            exit()
    imagine()
def main():
    """Console-script entry point: expose train() as a Fire CLI."""
    fire.Fire(train)
| 1,950 | 24.337662 | 97 | py |
big-sleep | big-sleep-main/big_sleep/ema.py | # Exponential Moving Average (from https://gist.github.com/crowsonkb/76b94d5238272722290734bf4725d204)
"""Exponential moving average for PyTorch. Adapted from
https://www.zijianhu.com/post/pytorch/ema/ by crowsonkb
"""
from copy import deepcopy
import torch
from torch import nn
class EMA(nn.Module):
    """Wrap *model* and keep a bias-corrected exponential moving average of
    its parameters in ``self.average``.

    In training mode forward() runs the live model and update() must be
    called after each optimiser step; in eval mode forward() runs the
    averaged copy instead.
    """
    def __init__(self, model, decay):
        super().__init__()
        self.model = model
        self.decay = decay
        # accum tracks decay**num_updates, used for bias correction below
        self.register_buffer('accum', torch.tensor(1.))
        self._biased = deepcopy(self.model)  # raw (biased) EMA accumulator
        self.average = deepcopy(self.model)  # de-biased EMA exposed to callers
        for param in self._biased.parameters():
            param.detach_().zero_()
        for param in self.average.parameters():
            param.detach_().zero_()
        self.update()
    @torch.no_grad()
    def update(self):
        """Fold the live model's current parameters into the moving averages."""
        assert self.training, 'Update should only be called during training'
        self.accum *= self.decay
        model_params = dict(self.model.named_parameters())
        biased_params = dict(self._biased.named_parameters())
        average_params = dict(self.average.named_parameters())
        assert model_params.keys() == biased_params.keys() == average_params.keys(), f'Model parameter keys incompatible with EMA stored parameter keys'
        for name, param in model_params.items():
            # standard EMA accumulation ...
            biased_params[name].mul_(self.decay)
            biased_params[name].add_((1 - self.decay) * param)
            # ... then bias-corrected copy: divide by (1 - decay**t)
            average_params[name].copy_(biased_params[name])
            average_params[name].div_(1 - self.accum)
        model_buffers = dict(self.model.named_buffers())
        biased_buffers = dict(self._biased.named_buffers())
        average_buffers = dict(self.average.named_buffers())
        assert model_buffers.keys() == biased_buffers.keys() == average_buffers.keys()
        for name, buffer in model_buffers.items():
            # buffers are copied verbatim, not averaged
            biased_buffers[name].copy_(buffer)
            average_buffers[name].copy_(buffer)
    def forward(self, *args, **kwargs):
        """Dispatch to the live model when training, the averaged copy otherwise."""
        if self.training:
            return self.model(*args, **kwargs)
        return self.average(*args, **kwargs)
| 2,098 | 37.163636 | 152 | py |
big-sleep | big-sleep-main/big_sleep/big_sleep.py | import os
import sys
import subprocess
import signal
import string
import re
from datetime import datetime
from pathlib import Path
import random
import torch
import torch.nn.functional as F
from torch import nn
from torch.optim import Adam
from torchvision.utils import save_image
import torchvision.transforms as T
from PIL import Image
from tqdm import tqdm, trange
from big_sleep.ema import EMA
from big_sleep.resample import resample
from big_sleep.biggan import BigGAN
from big_sleep.clip import load, tokenize
assert torch.cuda.is_available(), 'CUDA must be available in order to use Big Sleep'
# graceful keyboard interrupt
terminate = False
def signal_handling(signum,frame):
    """SIGINT handler: set the module-level `terminate` flag so the training
    loops in Imagine.forward() exit between iterations."""
    print('detecting keyboard interrupt, gracefully exiting')
    global terminate
    terminate = True
# install graceful Ctrl-C handling for the whole module
signal.signal(signal.SIGINT,signal_handling)
# helpers
def exists(val):
    """Return True when *val* is anything other than None."""
    if val is None:
        return False
    return True
def open_folder(path):
    """Best-effort: open *path* (or its containing directory, if it is a file)
    in the platform's file browser.

    Silently returns on unsupported platforms, non-existent directories, and
    launcher failures -- this is a convenience, never an error path.
    """
    if os.path.isfile(path):
        path = os.path.dirname(path)

    if not os.path.isdir(path):
        return

    cmd_list = None
    if sys.platform == 'darwin':
        cmd_list = ['open', '--', path]
    elif sys.platform == 'linux2' or sys.platform == 'linux':
        cmd_list = ['xdg-open', path]
    elif sys.platform in ['win32', 'win64']:
        cmd_list = ['explorer', path.replace('/','\\')]
    # Fix: compare with `is None` (identity), not `== None`
    if cmd_list is None:
        return

    try:
        subprocess.check_call(cmd_list)
    except subprocess.CalledProcessError:
        pass  # launcher exists but returned non-zero; ignore
    except OSError:
        pass  # launcher binary missing; ignore
def create_text_path(text=None, img=None, encoding=None):
    """Build a filesystem-friendly name from the prompt text and/or image name.

    Text and an image-derived stem are joined with "_"; a non-None encoding
    overrides everything with the literal "your_encoding". The result has
    dashes/spaces normalised, commas dropped, "|" turned into "--", and is
    clamped to 255 characters.
    """
    name = ""
    if text is not None:
        name += text
    if img is not None:
        if isinstance(img, str):
            stem = "".join(img.split(".")[:-1])  # drop the file extension
            stem = stem.split("/")[-1]           # keep only the basename
        else:
            stem = "PIL_img"
        name = name + "_" + stem
    if encoding is not None:
        name = "your_encoding"
    # normalise characters in the same order as before
    for old, new in (("-", "_"), (",", ""), (" ", "_"), ("|", "--")):
        name = name.replace(old, new)
    return name.strip('-_')[:255]
# tensor helpers
def differentiable_topk(x, k, temperature=1.):
    """Differentiable soft top-k over the last dimension of a (n, dim) tensor.

    Repeats k times: take the single largest softmax probability per row,
    scatter it into a one-hot-like tensor, then mask that index out before
    the next round. Returns the sum of the k soft one-hots, shape (n, dim).
    """
    n, dim = x.shape
    working = x
    one_hots = []
    for step in range(k):
        probs = (working / temperature).softmax(dim=-1)
        values, indices = probs.topk(1, dim=-1)
        one_hots.append(torch.zeros_like(x).scatter_(-1, indices, values))
        if step != k - 1:
            # exclude the index already chosen from subsequent rounds
            working = working.scatter(-1, indices, float('-inf'))
    stacked = torch.cat(one_hots, dim=-1)
    return stacked.reshape(n, k, dim).sum(dim = 1)
def create_clip_img_transform(image_width):
    """Torchvision pipeline that resizes and center-crops a PIL image to
    image_width x image_width, then normalises it with CLIP's RGB statistics."""
    # CLIP's published per-channel mean/std for input normalisation
    clip_mean = [0.48145466, 0.4578275, 0.40821073]
    clip_std = [0.26862954, 0.26130258, 0.27577711]
    transform = T.Compose([
            #T.ToPILImage(),
            T.Resize(image_width),
            T.CenterCrop((image_width, image_width)),
            T.ToTensor(),
            T.Normalize(mean=clip_mean, std=clip_std)
    ])
    return transform
def rand_cutout(image, size, center_bias=False, center_focus=2):
    """Crop a random square patch of side *size* from a square image batch
    (last two dims are spatial).

    With center_bias, offsets are drawn from a gaussian centred on the image
    middle (tighter for larger center_focus) and re-drawn uniformly if they
    land outside the valid range.
    """
    width = image.shape[-1]
    lo, hi = 0, width - size
    if center_bias:
        # sample offsets around the image centre
        mid = hi / 2
        sigma = mid / center_focus
        off_x = int(random.gauss(mu=mid, sigma=sigma))
        off_y = int(random.gauss(mu=mid, sigma=sigma))
        # fall back to uniform sampling when a draw lands out of bounds
        if off_x > hi or off_x < lo:
            off_x = random.randint(lo, hi)
        if off_y > hi or off_y < lo:
            off_y = random.randint(lo, hi)
    else:
        off_x = random.randint(lo, hi)
        off_y = random.randint(lo, hi)
    return image[:, :, off_x:off_x + size, off_y:off_y + size]
# load biggan
class Latents(torch.nn.Module):
    """Trainable BigGAN inputs: one noise vector and one row of class logits
    per conditioning slot."""
    def __init__(
        self,
        num_latents = 15,
        num_classes = 1000,
        z_dim = 128,
        max_classes = None,
        class_temperature = 2.
    ):
        super().__init__()
        self.normu = torch.nn.Parameter(torch.zeros(num_latents, z_dim).normal_(std = 1))
        # class logits start strongly negative so sigmoid() is near zero
        self.cls = torch.nn.Parameter(torch.zeros(num_latents, num_classes).normal_(mean = -3.9, std = .3))
        # constant threshold used by BigSleep's latent power penalty
        self.register_buffer('thresh_lat', torch.tensor(1))
        assert not exists(max_classes) or max_classes > 0 and max_classes <= num_classes, f'max_classes must be between 0 and {num_classes}'
        self.max_classes = max_classes
        self.class_temperature = class_temperature
    def forward(self):
        """Return (noise, soft class weights); class weights are either a soft
        top-k (when max_classes is set) or an unconstrained sigmoid."""
        if exists(self.max_classes):
            classes = differentiable_topk(self.cls, self.max_classes, temperature = self.class_temperature)
        else:
            classes = torch.sigmoid(self.cls)
        return self.normu, classes
class Model(nn.Module):
    """Pretrained BigGAN generator driven by EMA-smoothed trainable latents.

    forward() renders the current latents and rescales the generator's
    [-1, 1] output into [0, 1].
    """
    def __init__(
        self,
        image_size,
        max_classes = None,
        class_temperature = 2.,
        ema_decay = 0.99
    ):
        super().__init__()
        assert image_size in (128, 256, 512), 'image size must be one of 128, 256, or 512'
        self.biggan = BigGAN.from_pretrained(f'biggan-deep-{image_size}')
        self.max_classes = max_classes
        self.class_temperature = class_temperature
        self.ema_decay = ema_decay
        self.init_latents()

    def init_latents(self):
        """(Re)create the latent parameters and wrap them in an EMA copy."""
        fresh_latents = Latents(
            num_latents = len(self.biggan.config.layers) + 1,
            num_classes = self.biggan.config.num_classes,
            z_dim = self.biggan.config.z_dim,
            max_classes = self.max_classes,
            class_temperature = self.class_temperature
        )
        self.latents = EMA(fresh_latents, self.ema_decay)

    def forward(self):
        self.biggan.eval()
        generated = self.biggan(*self.latents(), 1)
        return (generated + 1) / 2
class BigSleep(nn.Module):
    """Generator + CLIP scorer.

    forward() renders the current latents with BigGAN, scores random cutouts
    of the image against the prompt embeddings with CLIP, and returns the
    image together with (latent, class, similarity) losses.
    """
    def __init__(
        self,
        num_cutouts = 128,
        loss_coef = 100,
        image_size = 512,
        bilinear = False,
        max_classes = None,
        class_temperature = 2.,
        experimental_resample = False,
        ema_decay = 0.99,
        center_bias = False,
        larger_clip = False
    ):
        super().__init__()
        self.loss_coef = loss_coef
        self.image_size = image_size
        self.num_cutouts = num_cutouts
        self.experimental_resample = experimental_resample
        self.center_bias = center_bias
        self.interpolation_settings = {'mode': 'bilinear', 'align_corners': False} if bilinear else {'mode': 'nearest'}
        # larger_clip swaps the default ViT-B/32 perceptor for ViT-L/14
        model_name = 'ViT-B/32' if not larger_clip else 'ViT-L/14'
        self.perceptor, self.normalize_image = load(model_name, jit = False)
        self.model = Model(
            image_size = image_size,
            max_classes = max_classes,
            class_temperature = class_temperature,
            ema_decay = ema_decay
        )
    def reset(self):
        """Re-initialise the trainable latents."""
        self.model.init_latents()
    def sim_txt_to_img(self, text_embed, img_embed, text_type="max"):
        """Scaled cosine-similarity loss: negative for "max" prompts (the
        optimiser minimises, so similarity is rewarded), positive for "min"."""
        sign = -1
        if text_type == "min":
            sign = 1
        return sign * self.loss_coef * torch.cosine_similarity(text_embed, img_embed, dim = -1).mean()
    def forward(self, text_embeds, text_min_embeds=[], return_loss = True):
        # (the mutable [] default is never mutated here, only iterated over)
        width, num_cutouts = self.image_size, self.num_cutouts
        out = self.model()
        if not return_loss:
            return out
        pieces = []
        for ch in range(num_cutouts):
            # sample cutout size (50%-95% of the image width)
            size = int(width * torch.zeros(1,).normal_(mean=.8, std=.3).clip(.5, .95))
            # get cutout, resized to CLIP's 224x224 input
            apper = rand_cutout(out, size, center_bias=self.center_bias)
            if (self.experimental_resample):
                apper = resample(apper, (224, 224))
            else:
                apper = F.interpolate(apper, (224, 224), **self.interpolation_settings)
            pieces.append(apper)
        into = torch.cat(pieces)
        into = self.normalize_image(into)
        image_embed = self.perceptor.encode_image(into)
        latents, soft_one_hot_classes = self.model.latents()
        num_latents = latents.shape[0]
        latent_thres = self.model.latents.model.thresh_lat
        # keep each latent row roughly unit-normal: std near 1, mean near 0,
        # and bounded mean power
        lat_loss = torch.abs(1 - torch.std(latents, dim=1)).mean() + \
                   torch.abs(torch.mean(latents, dim = 1)).mean() + \
                   4 * torch.max(torch.square(latents).mean(), latent_thres)
        for array in latents:
            mean = torch.mean(array)
            diffs = array - mean
            var = torch.mean(torch.pow(diffs, 2.0))
            std = torch.pow(var, 0.5)
            zscores = diffs / std
            skews = torch.mean(torch.pow(zscores, 3.0))
            kurtoses = torch.mean(torch.pow(zscores, 4.0)) - 3.0
            # penalise departure from a gaussian shape (skew, excess kurtosis)
            lat_loss = lat_loss + torch.abs(kurtoses) / num_latents + torch.abs(skews) / num_latents
        # push all but the strongest class weight per row towards zero
        cls_loss = ((50 * torch.topk(soft_one_hot_classes, largest = False, dim = 1, k = 999)[0]) ** 2).mean()
        results = []
        for txt_embed in text_embeds:
            results.append(self.sim_txt_to_img(txt_embed, image_embed))
        for txt_min_embed in text_min_embeds:
            results.append(self.sim_txt_to_img(txt_min_embed, image_embed, "min"))
        sim_loss = sum(results).mean()
        return out, (lat_loss, cls_loss, sim_loss)
class Imagine(nn.Module):
    """Top-level Big Sleep driver.

    Encodes the text/image prompts with CLIP, then repeatedly optimises the
    BigSleep latents so the generated image's CLIP embedding matches the
    "max" prompts and avoids the "min" prompts, saving snapshots as it goes.
    Prompts may contain several phrases separated by "|".
    """
    def __init__(
        self,
        *,
        text=None,
        img=None,
        encoding=None,
        text_min = "",
        lr = .07,
        image_size = 512,
        gradient_accumulate_every = 1,
        save_every = 50,
        epochs = 20,
        iterations = 1050,
        save_progress = False,
        bilinear = False,
        open_folder = True,
        seed = None,
        append_seed = False,
        torch_deterministic = False,
        max_classes = None,
        class_temperature = 2.,
        save_date_time = False,
        save_best = False,
        experimental_resample = False,
        ema_decay = 0.99,
        num_cutouts = 128,
        center_bias = False,
        larger_clip = False
    ):
        super().__init__()

        if torch_deterministic:
            assert not bilinear, 'the deterministic (seeded) operation does not work with interpolation (PyTorch 1.7.1)'
            torch.set_deterministic(True)

        self.seed = seed
        self.append_seed = append_seed

        if exists(seed):
            print(f'setting seed of {seed}')
            if seed == 0:
                print('you can override this with --seed argument in the command line, or --random for a randomly chosen one')
            torch.manual_seed(seed)

        self.epochs = epochs
        self.iterations = iterations

        model = BigSleep(
            image_size = image_size,
            bilinear = bilinear,
            max_classes = max_classes,
            class_temperature = class_temperature,
            experimental_resample = experimental_resample,
            ema_decay = ema_decay,
            num_cutouts = num_cutouts,
            center_bias = center_bias,
            larger_clip = larger_clip
        ).cuda()

        self.model = model

        self.lr = lr
        # Optimise only the live latents; the EMA copies inside the wrapper
        # are refreshed separately via latents.update() in train_step().
        self.optimizer = Adam(model.model.latents.model.parameters(), lr)
        self.gradient_accumulate_every = gradient_accumulate_every
        self.save_every = save_every

        self.save_progress = save_progress
        self.save_date_time = save_date_time

        self.save_best = save_best
        self.current_best_score = 0

        self.open_folder = open_folder
        self.total_image_updates = (self.epochs * self.iterations) / self.save_every
        self.encoded_texts = {
            "max": [],
            "min": []
        }
        # create img transform
        self.clip_transform = create_clip_img_transform(224)
        # create starting encoding
        self.set_clip_encoding(text=text, img=img, encoding=encoding, text_min=text_min)

    @property
    def seed_suffix(self):
        """Filename suffix carrying the seed, when append_seed is enabled."""
        return f'.{self.seed}' if self.append_seed and exists(self.seed) else ''

    def set_text(self, text):
        self.set_clip_encoding(text = text)

    def create_clip_encoding(self, text=None, img=None, encoding=None):
        """Return a CLIP embedding for the given text and/or image; when both
        are supplied their encodings are averaged."""
        self.text = text
        self.img = img
        if encoding is not None:
            encoding = encoding.cuda()
        #elif self.create_story:
        #    encoding = self.update_story_encoding(epoch=0, iteration=1)
        elif text is not None and img is not None:
            encoding = (self.create_text_encoding(text) + self.create_img_encoding(img)) / 2
        elif text is not None:
            encoding = self.create_text_encoding(text)
        elif img is not None:
            encoding = self.create_img_encoding(img)
        return encoding

    def create_text_encoding(self, text):
        """Encode a text prompt with the CLIP text tower (no gradients)."""
        tokenized_text = tokenize(text).cuda()
        with torch.no_grad():
            text_encoding = self.model.perceptor.encode_text(tokenized_text).detach()
        return text_encoding

    def create_img_encoding(self, img):
        """Encode a PIL image (or path to one) with the CLIP image tower."""
        if isinstance(img, str):
            img = Image.open(img)
        normed_img = self.clip_transform(img).unsqueeze(0).cuda()
        with torch.no_grad():
            img_encoding = self.model.perceptor.encode_image(normed_img).detach()
        return img_encoding

    def encode_multiple_phrases(self, text, img=None, encoding=None, text_type="max"):
        """Encode a "|"-separated prompt into a list of CLIP embeddings."""
        if text is not None and "|" in text:
            self.encoded_texts[text_type] = [self.create_clip_encoding(text=prompt_min, img=img, encoding=encoding) for prompt_min in text.split("|")]
        else:
            self.encoded_texts[text_type] = [self.create_clip_encoding(text=text, img=img, encoding=encoding)]

    def encode_max_and_min(self, text, img=None, encoding=None, text_min=""):
        """Encode the rewarded ("max") and, if given, penalised ("min") prompts."""
        self.encode_multiple_phrases(text, img=img, encoding=encoding)
        if text_min is not None and text_min != "":
            self.encode_multiple_phrases(text_min, img=img, encoding=encoding, text_type="min")

    def set_clip_encoding(self, text=None, img=None, encoding=None, text_min=""):
        """(Re)encode the prompts and derive the output filename."""
        self.current_best_score = 0
        self.text = text
        self.text_min = text_min

        if len(text_min) > 0:
            text = text + "_wout_" + text_min[:255] if text is not None else "wout_" + text_min[:255]
        text_path = create_text_path(text=text, img=img, encoding=encoding)
        if self.save_date_time:
            text_path = datetime.now().strftime("%y%m%d-%H%M%S-") + text_path

        self.text_path = text_path
        self.filename = Path(f'./{text_path}{self.seed_suffix}.png')
        self.encode_max_and_min(text, img=img, encoding=encoding, text_min=text_min) # Tokenize and encode each prompt

    def reset(self):
        """Re-initialise the latents and rebuild the optimizer for a new run."""
        self.model.reset()
        self.model = self.model.cuda()
        # Fix: match __init__ and optimise latents.model.parameters() (the
        # live latents only). The previous latents.parameters() also handed
        # the EMA wrapper's frozen biased/average copies to Adam.
        self.optimizer = Adam(self.model.model.latents.model.parameters(), self.lr)

    def train_step(self, epoch, i, pbar=None):
        """Run one (gradient-accumulated) optimisation step; every save_every
        iterations, render the current best latent and save it to disk."""
        total_loss = 0

        for _ in range(self.gradient_accumulate_every):
            out, losses = self.model(self.encoded_texts["max"], self.encoded_texts["min"])
            loss = sum(losses) / self.gradient_accumulate_every
            total_loss += loss
            loss.backward()

        self.optimizer.step()
        # refresh the EMA copies of the latents after each optimiser step
        self.model.model.latents.update()
        self.optimizer.zero_grad()

        if (i + 1) % self.save_every == 0:
            with torch.no_grad():
                self.model.model.latents.eval()
                out, losses = self.model(self.encoded_texts["max"], self.encoded_texts["min"])
                top_score, best = torch.topk(losses[2], k=1, largest=False)
                image = self.model.model()[best].cpu()
                self.model.model.latents.train()

                save_image(image, str(self.filename))
                if pbar is not None:
                    pbar.update(1)
                else:
                    print(f'image updated at "./{str(self.filename)}"')

                if self.save_progress:
                    total_iterations = epoch * self.iterations + i
                    num = total_iterations // self.save_every
                    save_image(image, Path(f'./{self.text_path}.{num}{self.seed_suffix}.png'))

                if self.save_best and top_score.item() < self.current_best_score:
                    self.current_best_score = top_score.item()
                    save_image(image, Path(f'./{self.text_path}{self.seed_suffix}.best.png'))

        return out, total_loss

    def forward(self):
        """Run the full epochs x iterations training loop (interruptible via
        the module-level `terminate` flag)."""
        penalizing = ""
        if len(self.text_min) > 0:
            penalizing = f'penalizing "{self.text_min}"'
        print(f'Imagining "{self.text_path}" {penalizing}...')

        with torch.no_grad():
            self.model(self.encoded_texts["max"][0]) # one warmup step due to issue with CLIP and CUDA

        if self.open_folder:
            open_folder('./')
            self.open_folder = False

        image_pbar = tqdm(total=self.total_image_updates, desc='image update', position=2, leave=True)
        epoch_pbar = trange(self.epochs, desc = ' epochs', position=0, leave=True)
        for epoch in (ep for ep in epoch_pbar if not terminate):
            pbar = trange(self.iterations, desc=' iteration', position=1, leave=True)
            image_pbar.update(0)
            for i in (it for it in pbar if not terminate):
                out, loss = self.train_step(epoch, i, image_pbar)
                pbar.set_description(f'loss: {loss.item():04.2f}')
| 17,794 | 34.167984 | 150 | py |
big-sleep | big-sleep-main/test/multi_prompt_minmax.py | import time
import shutil
import torch
from big_sleep import Imagine
# Flag flipped by signal_handling(); see the review note on the handler.
terminate = False
def signal_handling(signum,frame):
    # NOTE(review): this handler is never installed (no signal.signal call in
    # this script) and `terminate` is never read below -- confirm whether the
    # graceful-interrupt machinery should be wired up or removed.
    global terminate
    terminate = True
num_attempts = 4
for attempt in range(num_attempts):
    dream = Imagine(
        text = "an armchair in the form of pikachu\\an armchair imitating pikachu\\abstract",
        text_min = "blur\\zoom",
        lr = 7e-2,
        image_size = 512,
        gradient_accumulate_every = 1,
        save_every = 50,
        epochs = 5,
        iterations = 50,
        save_progress = False,
        bilinear = False,
        open_folder = False,
        seed = None,
        torch_deterministic = False,
        max_classes = 20,
        class_temperature = 2.,
        save_date_time = False,
        save_best = True,
        experimental_resample = True,
        ema_decay = 0.99
    )
    dream()
    # Bug fix: Imagine stores its output stem as `text_path` (set in
    # set_clip_encoding); the old `dream.textpath` attribute does not exist
    # and raised AttributeError once the run finished.
    shutil.copy(dream.text_path + ".best.png", f"{attempt}.png")
    try:
        time.sleep(2)
        del dream
        time.sleep(2)
        torch.cuda.empty_cache()
    except Exception:
        torch.cuda.empty_cache()
Quantum | Quantum-master/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'paddle-quantum'
copyright = '2023, Baidu Inc'
author = 'Baidu Inc'
# The full version, including alpha/beta/rc tags
release = '2.4.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
    'sphinx.ext.mathjax',
    'sphinx.ext.todo',
    'sphinx_search.extension',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Root document of the documentation tree.
master_doc = 'index'
# Autodoc
napoleon_numpy_docstring = False  # docstrings follow the Google style, not NumPy
autodoc_member_order = 'bysource'  # list members in source-code order
autodoc_typehints = 'description'  # render type hints in the description body
autodoc_warningiserror = False  # do not fail the build on autodoc warnings
autodoc_inherit_docstrings = False
autodoc_docstring_signature = False
autodoc_typehints_description_target = 'documented'  # hints only for documented params
autodoc_typehints_format = 'short'  # short type names (no module prefixes)
| 2,355 | 31.273973 | 79 | py |
Quantum | Quantum-master/docs_zh_CN/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'paddle-quantum'
copyright = '2023, Baidu Inc'
author = 'Baidu Inc'
# The full version, including alpha/beta/rc tags
release = '2.4.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.napoleon',
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.todo',
    'sphinx_search.extension',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'  # Simplified Chinese docs build
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Root document of the documentation tree.
master_doc = 'index'
| 2,329 | 31.816901 | 79 | py |
SWTD3 | SWTD3-main/main.py | import argparse
import os
import socket
import gym
import numpy as np
import torch
import TD3
import SWTD3
import utils
# Runs policy for X episodes and returns average reward
# A fixed seed is used for the eval environment
def evaluate_policy(agent, env_name, seed, eval_episodes=10):
    """Run the agent's deterministic policy in a fresh environment and return
    the average episode return over eval_episodes episodes.

    The eval environment is seeded with seed + 100 so its episode stream is
    fixed but distinct from the training environment's.
    """
    eval_env = gym.make(env_name)
    eval_env.seed(seed + 100)

    total_return = 0.
    for _ in range(eval_episodes):
        state, done = eval_env.reset(), False
        while not done:
            chosen_action = agent.select_action(np.array(state))
            state, reward, done, _ = eval_env.step(chosen_action)
            total_return += reward

    avg_reward = total_return / eval_episodes

    print("---------------------------------------")
    print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
    print("---------------------------------------")
    return avg_reward
if __name__ == "__main__":
    # Command-line training driver for TD3 / SWTD3 on OpenAI Gym MuJoCo tasks.
    parser = argparse.ArgumentParser()
    parser.add_argument("--policy", default="SWTD3", help='Algorithm (default: SWTD3)')
    parser.add_argument("--env", default="Hopper-v2", help='OpenAI Gym environment name')
    parser.add_argument("--seed", default=0, type=int,
                        help='Seed number for PyTorch, NumPy and OpenAI Gym (default: 0)')
    parser.add_argument("--gpu", default="0", type=int, help='GPU ordinal for multi-GPU computers (default: 0)')
    parser.add_argument("--start_time_steps", default=25000, type=int, metavar='N',
                        help='Number of exploration time steps sampling random actions (default: 1000)')
    parser.add_argument("--buffer_size", default=1000000, type=int,
                        help='Size of the experience replay buffer (default: '
                             '1000000)')
    # Fix: explicit type= on the numeric options below -- without it argparse
    # passes CLI overrides through as strings, breaking the arithmetic that
    # uses them (e.g. policy_noise * max_action, (t + 1) % eval_freq).
    parser.add_argument("--eval_freq", default=1e3, type=float, metavar='N',
                        help='Evaluation period in number of time steps (default: 1000)')
    parser.add_argument("--max_time_steps", default=1000000, type=int, metavar='N',
                        help='Maximum number of steps (default: 1000000)')
    parser.add_argument("--exploration_noise", default=0.1, type=float, metavar='G',
                        help='Std of Gaussian exploration noise')
    parser.add_argument("--batch_size", default=256, type=int, metavar='N',
                        help='Batch size (default: 256)')
    parser.add_argument("--discount", default=0.99, type=float, metavar='G',
                        help='Discount factor for reward (default: 0.99)')
    parser.add_argument("--tau", default=0.005, type=float, metavar='G',
                        help='Learning rate in soft/hard updates of the target networks (default: 0.005)')
    parser.add_argument("--policy_noise", default=0.2, type=float, metavar='G',
                        help='Noise added to target policy during critic update')
    parser.add_argument("--noise_clip", default=0.5, type=float, metavar='G',
                        help='Range to clip target policy noise')
    parser.add_argument("--policy_freq", default=2, type=int, metavar='N', help='Frequency of delayed policy updates')
    parser.add_argument("--save_model", action="store_true", help='Save model and optimizer parameters')
    parser.add_argument("--load_model", default="", help='Model load file name; if empty, does not load')
    args = parser.parse_args()

    file_name = f"{args.policy}_{args.env}_{args.seed}"
    print("---------------------------------------")
    print(f"Policy: {args.policy}, Env: {args.env}, Seed: {args.seed}")
    print("---------------------------------------")

    if not os.path.exists("./results"):
        os.makedirs("./results")
    if args.save_model and not os.path.exists("./models"):
        os.makedirs("./models")

    env = gym.make(args.env)

    # Set seeds
    env.seed(args.seed)
    env.action_space.seed(args.seed)
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    max_action = float(env.action_space.high[0])

    kwargs = {"state_dim": state_dim, "action_dim": action_dim, "max_action": max_action, "discount": args.discount,
              "tau": args.tau, "policy_noise": args.policy_noise * max_action,
              "noise_clip": args.noise_clip * max_action, "policy_freq": args.policy_freq}

    # Target policy smoothing is scaled wrt the action scale
    # Initialize the algorithm
    if args.policy == "TD3":
        agent = TD3.TD3(**kwargs)
    elif args.policy == "SWTD3":
        agent = SWTD3.SWTD3(**kwargs, max_iterations=args.max_time_steps)

    if args.load_model != "":
        policy_file = file_name if args.load_model == "default" else args.load_model
        agent.load(f"./models/{policy_file}")

    replay_buffer = utils.ExperienceReplayBuffer(state_dim, action_dim, max_size=args.buffer_size)

    # Evaluate the untrained policy
    evaluations = [f"HOST: {socket.gethostname()}", f"GPU: {torch.cuda.get_device_name(args.gpu)}",
                   evaluate_policy(agent, args.env, args.seed)]

    state, done = env.reset(), False
    episode_reward = 0
    episode_time_steps = 0
    episode_num = 0

    for t in range(int(args.max_time_steps)):
        # Fix: removed leftover debug `print(agent.beta_lower)` -- it flooded
        # stdout on every step and raised AttributeError when running with
        # --policy TD3, which has no beta_lower attribute.
        episode_time_steps += 1

        # Sample action from the action space or policy
        if t < args.start_time_steps:
            action = env.action_space.sample()
        else:
            action = (agent.select_action(np.array(state)) +
                      np.random.normal(0, max_action * args.exploration_noise, size=action_dim)) \
                .clip(-max_action, max_action)

        # Take the selected action
        next_state, reward, done, _ = env.step(action)
        # 0 when the episode was cut off by the time limit (not a true terminal)
        done_bool = float(done) if episode_time_steps < env._max_episode_steps else 0

        # Store data in the experience replay buffer
        replay_buffer.add(state, action, next_state, reward, done_bool)

        state = next_state
        episode_reward += reward

        # Train the agent after collecting sufficient samples
        if t >= args.start_time_steps:
            agent.update_parameters(replay_buffer, args.batch_size)

        if done:
            print(f"Total T: {t + 1} Episode Num: {episode_num + 1} Episode T: {episode_time_steps} Reward: "
                  f"{episode_reward:.3f}")
            # Reset the environment
            state, done = env.reset(), False
            episode_reward = 0
            episode_time_steps = 0
            episode_num += 1

        # Evaluate the agent over a number of episodes
        if (t + 1) % args.eval_freq == 0:
            evaluations.append(evaluate_policy(agent, args.env, args.seed))
            np.save(f"./results/{file_name}", evaluations)
            if args.save_model:
                agent.save(f"./models/{file_name}")
| 6,850 | 41.552795 | 118 | py |
SWTD3 | SWTD3-main/utils.py | import numpy as np
import torch
class ExperienceReplayBuffer(object):
    """Fixed-capacity ring buffer of (s, a, s', r, done) transitions for off-policy RL."""

    def __init__(self, state_dim, action_dim, max_size=int(1e6)):
        self.max_size = max_size
        self.ptr = 0   # next write slot
        self.size = 0  # number of valid entries (<= max_size)

        # Pre-allocated storage, one row per transition.
        self.state = np.zeros((max_size, state_dim))
        self.action = np.zeros((max_size, action_dim))
        self.next_state = np.zeros((max_size, state_dim))
        self.reward = np.zeros((max_size, 1))
        self.not_done = np.zeros((max_size, 1))

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def add(self, state, action, next_state, reward, done):
        """Insert one transition, overwriting the oldest entry once the buffer is full."""
        slot = self.ptr
        self.state[slot] = state
        self.action[slot] = action
        self.next_state[slot] = next_state
        self.reward[slot] = reward
        self.not_done[slot] = 1. - done

        self.ptr = (slot + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)

    def sample(self, batch_size):
        """Draw a uniform random mini-batch and return it as float tensors on self.device."""
        index = np.random.randint(0, self.size, size=batch_size)

        def pick(storage):
            return torch.FloatTensor(storage[index]).to(self.device)

        return (
            pick(self.state),
            pick(self.action),
            pick(self.next_state),
            pick(self.reward),
            pick(self.not_done),
        )
| 1,398 | 34.871795 | 82 | py |
SWTD3 | SWTD3-main/TD3.py | import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Implementation of the Twin Delayed Deep Deterministic Policy Gradient algorithm (TD3)
# Paper: https://arxiv.org/abs/1802.09477
# Note: This implementation heavily relies on the author's PyTorch implementation of TD3.
# Repository: https://github.com/sfujim/TD3
class Actor(nn.Module):
    """Deterministic policy network mapping a state to an action in [-max_action, max_action]."""

    def __init__(self, state_dim, action_dim, max_action):
        super(Actor, self).__init__()
        # Two hidden layers of 256 units, as in the TD3 reference implementation.
        self.l1 = nn.Linear(state_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, action_dim)
        self.max_action = max_action

    def forward(self, state):
        hidden = F.relu(self.l1(state))
        hidden = F.relu(self.l2(hidden))
        # tanh squashes to (-1, 1); scale to the environment's action range.
        return self.max_action * torch.tanh(self.l3(hidden))
class Critic(nn.Module):
    """Twin Q-networks; both heads score the same concatenated (state, action) input."""

    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()
        # Q1 architecture
        self.l1 = nn.Linear(state_dim + action_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, 1)
        # Q2 architecture
        self.l4 = nn.Linear(state_dim + action_dim, 256)
        self.l5 = nn.Linear(256, 256)
        self.l6 = nn.Linear(256, 1)

    def forward(self, state, action):
        """Return the pair (Q1, Q2) of value estimates."""
        sa = torch.cat([state, action], 1)
        q1 = self.l3(F.relu(self.l2(F.relu(self.l1(sa)))))
        q2 = self.l6(F.relu(self.l5(F.relu(self.l4(sa)))))
        return q1, q2

    def Q1(self, state, action):
        """Return only the first head's estimate (used for the actor loss)."""
        sa = torch.cat([state, action], 1)
        return self.l3(F.relu(self.l2(F.relu(self.l1(sa)))))
class TD3(object):
    """Twin Delayed Deep Deterministic Policy Gradient agent (Fujimoto et al., 2018).

    Combines clipped double-Q learning, delayed actor updates and target policy
    smoothing on top of DDPG-style actor-critic training.
    """

    def __init__(
        self,
        state_dim,
        action_dim,
        max_action,
        discount=0.99,
        tau=0.005,
        policy_noise=0.2,
        noise_clip=0.5,
        policy_freq=2
    ):
        # Initialize actor networks and optimizer
        self.actor = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target = copy.deepcopy(self.actor)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
        # Initialize critic networks and optimizer
        self.critic = Critic(state_dim, action_dim).to(device)
        self.critic_target = copy.deepcopy(self.critic)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
        # Initialize the training parameters
        self.max_action = max_action      # action magnitude bound used for clamping
        self.discount = discount          # reward discount factor (gamma)
        self.tau = tau                    # Polyak averaging coefficient for target nets
        self.policy_noise = policy_noise  # std of smoothing noise added to target actions
        self.noise_clip = noise_clip      # clamp range for the smoothing noise
        self.policy_freq = policy_freq    # actor/target update period, in critic updates
        self.total_it = 0                 # number of update_parameters() calls so far

    def select_action(self, state):
        """Return the deterministic policy action for a single (un-batched) state."""
        state = torch.FloatTensor(state.reshape(1, -1)).to(device)
        return self.actor(state).cpu().data.numpy().flatten()

    def update_parameters(self, replay_buffer, batch_size=256):
        """Run one TD3 optimization step on a mini-batch sampled from `replay_buffer`."""
        self.total_it += 1
        # Sample from the experience replay buffer
        state, action, next_state, reward, not_done = replay_buffer.sample(batch_size)
        with torch.no_grad():
            # Select action according to the target policy and add target smoothing regularization
            noise = (torch.randn_like(action) * self.policy_noise).clamp(-self.noise_clip, self.noise_clip)
            next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)
            # Compute the target Q-value; the element-wise min of the twin targets
            # (clipped double-Q) curbs overestimation bias.
            target_Q1, target_Q2 = self.critic_target(next_state, next_action)
            target_Q = torch.min(target_Q1, target_Q2)
            target_Q = reward + not_done * self.discount * target_Q
        # Get the current Q-value estimates
        current_Q1, current_Q2 = self.critic(state, action)
        # Compute the critic loss
        critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
        # Optimize the critic
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()
        # Delayed policy updates, update actor networks every update period
        if self.total_it % self.policy_freq == 0:
            # Compute the actor loss (deterministic policy gradient through Q1 only)
            actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
            # Optimize the actor
            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            self.actor_optimizer.step()
            # Soft update the target networks
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

    # Save the model parameters
    def save(self, file_name):
        torch.save(self.actor.state_dict(), file_name + "_actor")
        torch.save(self.actor_optimizer.state_dict(), file_name + "_actor_optimizer")
        torch.save(self.critic.state_dict(), file_name + "_critic")
        torch.save(self.critic_optimizer.state_dict(), file_name + "_critic_optimizer")

    # Load the model parameters (targets are re-synced to the loaded online networks)
    def load(self, filename):
        self.actor.load_state_dict(torch.load(filename + "_actor"))
        self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer"))
        self.actor_target = copy.deepcopy(self.actor)
        self.critic.load_state_dict(torch.load(filename + "_critic"))
        self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer"))
        self.critic_target = copy.deepcopy(self.critic)
| 5,894 | 34.512048 | 107 | py |
SWTD3 | SWTD3-main/SWTD3.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Implementation of the Stochastic Weighted Twin Delayed Deep Deterministic Policy Gradient algorithm (SWTD3)
# Note: This implementation heavily relies on the author's PyTorch implementation of TD3.
# Repository: https://github.com/sfujim/TD3
class Actor(nn.Module):
    """Policy network producing bounded continuous actions from states."""

    def __init__(self, state_dim, action_dim, max_action):
        super(Actor, self).__init__()
        self.l1 = nn.Linear(state_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, action_dim)
        self.max_action = max_action

    def forward(self, state):
        x = self.l1(state)
        x = F.relu(x)
        x = self.l2(x)
        x = F.relu(x)
        x = self.l3(x)
        # Squash with tanh, then rescale to the valid action interval.
        return self.max_action * torch.tanh(x)
class Critic(nn.Module):
    """Pair of independent Q-value estimators over (state, action) pairs."""

    def __init__(self, state_dim, action_dim):
        super(Critic, self).__init__()
        in_dim = state_dim + action_dim
        # First Q-network
        self.l1 = nn.Linear(in_dim, 256)
        self.l2 = nn.Linear(256, 256)
        self.l3 = nn.Linear(256, 1)
        # Second Q-network
        self.l4 = nn.Linear(in_dim, 256)
        self.l5 = nn.Linear(256, 256)
        self.l6 = nn.Linear(256, 1)

    def _head_one(self, sa):
        # Shared implementation of the first Q head.
        h = F.relu(self.l1(sa))
        h = F.relu(self.l2(h))
        return self.l3(h)

    def forward(self, state, action):
        """Return both heads' estimates for the concatenated input."""
        sa = torch.cat([state, action], 1)
        h = F.relu(self.l4(sa))
        h = F.relu(self.l5(h))
        q2 = self.l6(h)
        return self._head_one(sa), q2

    def Q1(self, state, action):
        """First head only; cheaper evaluation used by the policy update."""
        return self._head_one(torch.cat([state, action], 1))
class SWTD3(object):
    """Stochastic Weighted Twin Delayed DDPG (SWTD3) agent.

    TD3 variant in which the critic target is a stochastic convex combination
    of min(Q1, Q2) and Q1, with the mixing weight beta sampled uniformly from
    an interval whose lower bound anneals over training.
    """

    def __init__(
        self,
        state_dim,
        action_dim,
        max_action,
        discount=0.99,
        tau=0.005,
        policy_noise=0.2,
        noise_clip=0.5,
        policy_freq=2,
        alpha=0.05,
        max_iterations=1e6
    ):
        # Initialize actor networks and optimizer
        self.actor = Actor(state_dim, action_dim, max_action).to(device)
        self.actor_target = copy.deepcopy(self.actor)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
        # Initialize critic networks and optimizer
        self.critic = Critic(state_dim, action_dim).to(device)
        self.critic_target = copy.deepcopy(self.critic)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
        # Initialize the training parameters
        self.max_action = max_action
        self.discount = discount
        self.tau = tau
        self.policy_noise = policy_noise
        self.noise_clip = noise_clip
        self.policy_freq = policy_freq
        self.total_it = 0
        # Mixing weight beta ~ U(beta_lower, beta_upper); both bounds start at 0.5,
        # so the interval is initially degenerate (beta fixed at 0.5).
        self.beta = None
        self.beta_lower = 0.5
        self.beta_upper = 0.5
        self.alpha = alpha                    # value beta_lower anneals toward
        self.max_iterations = max_iterations  # horizon of the annealing schedule

    def select_action(self, state):
        """Return the deterministic policy action for a single (un-batched) state."""
        state = torch.FloatTensor(state.reshape(1, -1)).to(device)
        return self.actor(state).cpu().data.numpy().flatten()

    def StochasticWeightedTwinCriticUpdate(self, target_Q1, target_Q2):
        """Blend the twin target values with a randomly sampled weight.

        Draws beta ~ U(beta_lower, beta_upper) and returns
        beta * min(Q1, Q2) + (1 - beta) * Q1, then decays beta_lower linearly
        from beta_upper toward alpha across max_iterations steps, widening the
        sampling interval as training progresses.
        """
        self.beta = np.random.uniform(self.beta_lower, self.beta_upper)
        target_Q = self.beta * torch.min(target_Q1, target_Q2) + (1 - self.beta) * target_Q1
        self.beta_lower = self.beta_upper - (self.beta_upper - self.alpha) / self.max_iterations * (self.total_it + 1)
        return target_Q

    def update_parameters(self, replay_buffer, batch_size=256):
        """Run one SWTD3 optimization step on a mini-batch sampled from `replay_buffer`."""
        self.total_it += 1
        # Sample from the experience replay buffer
        state, action, next_state, reward, not_done = replay_buffer.sample(batch_size)
        with torch.no_grad():
            # Select action according to the target policy and add target smoothing regularization
            noise = (torch.randn_like(action) * self.policy_noise).clamp(-self.noise_clip, self.noise_clip)
            next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)
            # Compute the target Q-value via the stochastic weighted combination
            target_Q1, target_Q2 = self.critic_target(next_state, next_action)
            target_Q = self.StochasticWeightedTwinCriticUpdate(target_Q1, target_Q2)
            target_Q = reward + not_done * self.discount * target_Q
        # Get the current Q-value estimates
        current_Q1, current_Q2 = self.critic(state, action)
        # Compute the critic loss
        critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
        # Optimize the critic
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()
        # Delayed policy updates, update actor networks every update period
        if self.total_it % self.policy_freq == 0:
            # Compute the actor loss
            actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
            # Optimize the actor
            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            self.actor_optimizer.step()
            # Soft update the target networks
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
            for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
                target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)

    # Save the model parameters
    def save(self, file_name):
        torch.save(self.actor.state_dict(), file_name + "_actor")
        torch.save(self.actor_optimizer.state_dict(), file_name + "_actor_optimizer")
        torch.save(self.critic.state_dict(), file_name + "_critic")
        torch.save(self.critic_optimizer.state_dict(), file_name + "_critic_optimizer")

    # Load the model parameters (targets are re-synced to the loaded online networks)
    def load(self, filename):
        self.actor.load_state_dict(torch.load(filename + "_actor"))
        self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer"))
        self.actor_target = copy.deepcopy(self.actor)
        self.critic.load_state_dict(torch.load(filename + "_critic"))
        self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer"))
        self.critic_target = copy.deepcopy(self.critic)
| 6,521 | 35.033149 | 118 | py |
multigen | multigen-master/scripts/main.py | from __future__ import absolute_import, division, print_function
import json
import argparse
import glob
import logging
import os
import pickle
import random
import re
import shutil
import subprocess
from typing import List, Dict
import csv
import logging
import sys
import collections
import math
import spacy
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler
from torch.utils.data.distributed import DistributedSampler
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.distributions import Bernoulli
from tqdm import tqdm, trange
from dictionary import Dictionary
from seq_generator import SequenceGenerator
from data import MHDataset
from collections import Counter
from optimization import AdamW, WarmupLinearSchedule, WarmupCosineSchedule, WarmupConstantSchedule
from tokenization_gpt2 import GPT2Tokenizer
from modeling_gpt2 import MultiHopGen, GPT2Config
# Root logger shared by all helpers in this script (configured by set_log()).
logger = logging.getLogger()

# Maps the --model_type CLI choice to its (config class, model class, tokenizer class).
MODEL_CLASSES = {
    'gpt2': (GPT2Config, MultiHopGen, GPT2Tokenizer)
}
def list2str(list):
    """Join the elements of *list* into one space-separated string."""
    return " ".join(map(str, list))
def str2list(str):
    """Parse a space-separated string of integers back into a list of ints."""
    return list(map(int, str.split(" ")))
def set_seed(args):
    """Seed every RNG in use (python, numpy, torch, CUDA) for reproducibility."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def set_log(log_file=None):
    """Configure the module logger: INFO level, console handler, optional file handler."""
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '[%(asctime)s - %(levelname)s - %(name)s] %(message)s',
        '%m/%d/%Y %H:%M:%S')

    console = logging.StreamHandler()
    console.setFormatter(formatter)
    logger.addHandler(console)

    if log_file != None:
        file_handler = logging.FileHandler(log_file, 'w')
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
def _get_ngrams(segment, max_order):
    """Count every n-gram of *segment* for each order from 1 to max_order.

    Args:
      segment: tokenized text segment from which n-grams are extracted.
      max_order: maximum n-gram length to consider.

    Returns:
      A Counter mapping each n-gram tuple to its occurrence count.
    """
    counts = collections.Counter()
    for order in range(1, max_order + 1):
        counts.update(
            tuple(segment[start:start + order])
            for start in range(len(segment) - order + 1)
        )
    return counts


def _compute_bleu(reference_corpus, translation_corpus, max_order=4, smooth=False):
    """Compute corpus-level BLEU of *translation_corpus* against *reference_corpus*.

    Args:
      reference_corpus: list of lists of tokenized references, one list per translation.
      translation_corpus: list of tokenized translations to score.
      max_order: maximum n-gram order used for the score.
      smooth: apply +1 smoothing (Lin et al. 2004) to the precisions.

    Returns:
      Tuple (bleu, precisions, brevity_penalty, length_ratio,
      translation_length, reference_length).
    """
    matches = [0] * max_order
    possible = [0] * max_order
    reference_length = 0
    translation_length = 0

    for references, translation in zip(reference_corpus, translation_corpus):
        # Shortest-reference convention for the brevity-penalty length.
        reference_length += min(len(ref) for ref in references)
        translation_length += len(translation)

        merged_ref_counts = collections.Counter()
        for reference in references:
            merged_ref_counts |= _get_ngrams(reference, max_order)
        overlap = _get_ngrams(translation, max_order) & merged_ref_counts

        for ngram, count in overlap.items():
            matches[len(ngram) - 1] += count
        for order in range(1, max_order + 1):
            candidates = len(translation) - order + 1
            if candidates > 0:
                possible[order - 1] += candidates

    precisions = [0] * max_order
    for i in range(max_order):
        if smooth:
            precisions[i] = (matches[i] + 1.) / (possible[i] + 1.)
        elif possible[i] > 0:
            precisions[i] = float(matches[i]) / possible[i]
        else:
            precisions[i] = 0.0

    # Geometric mean of the precisions; zero if any precision is zero (unsmoothed).
    if min(precisions) > 0:
        log_avg = sum((1. / max_order) * math.log(p) for p in precisions)
        geo_mean = math.exp(log_avg)
    else:
        geo_mean = 0

    ratio = float(translation_length) / reference_length
    # Brevity penalty: penalize translations shorter than the references.
    bp = 1. if ratio > 1.0 else math.exp(1 - 1. / ratio)

    bleu = geo_mean * bp
    return (bleu, precisions, bp, ratio, translation_length, reference_length)
def train(args, train_dataset, model, tokenizer):
    """Train the model.

    Fine-tunes `model` on `train_dataset`, periodically evaluating on the dev
    set and saving a checkpoint whenever the chosen metric improves (or on
    every validation when --save_last is set).

    Returns:
        (global_step, average training loss per step).
    """
    if getattr(train_dataset, "print_features", False):
        train_dataset.print_features()
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, drop_last=False, num_workers=args.workers, pin_memory=True)
    # Total optimizer steps: fixed by --max_steps if given, otherwise derived from epochs.
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Single parameter group: gpt2_params is empty, so no parameter is excluded.
    gpt2_params = []
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(gn in n for gn in gpt2_params)]}]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = WarmupLinearSchedule(
        optimizer, warmup_steps=int(args.warmup_ratio * t_total), t_total=t_total
    )
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    best_valid = {'bleu':0.0, 'ppl':1e6, 'acc':0.0}
    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    # validate_steps == -1 means "validate once per epoch".
    if args.validate_steps == -1:
        args.validate_steps = len(train_dataloader)
    for epoch in train_iterator:
        local_step = 0
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            batch = tuple(t.to(args.device) for t in batch)
            batch_size = batch[0].size(0)
            mem_size = batch[6].size(1)
            # Re-pack the positional tensors into the keyword arguments the model expects.
            batch = {"src_input_ids": batch[0],
                     "attention_mask": batch[1],
                     "src_position_ids": batch[2],
                     "target_input_ids": batch[3],
                     "target_position_ids": batch[4],
                     "labels": batch[5],
                     "concept_ids": batch[6],
                     "concept_label": batch[7],
                     "distance": batch[8], #v3
                     "head": batch[9],
                     "tail": batch[10],
                     "relation": batch[11],
                     "triple_label": batch[12],
                     "vocab_map": batch[13],
                     "map_mask": batch[14],
                     "gate_label": batch[-1]}
            batch_size = batch["src_input_ids"].size(0)
            model.train()
            outputs = model(**batch)
            loss, gen_loss, gate_loss, triple_loss = outputs
            # .mean() reduces per-replica losses under DataParallel.
            loss = loss.mean()
            gen_loss = gen_loss.mean()
            gate_loss = gate_loss.mean()
            triple_loss = triple_loss.mean()
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Step the optimizer only once per accumulation window.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                global_step += 1
                local_step += 1
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    logger.info("Step: {} | Loss: {:.4f} | Gen loss: {:.4f} | Gate Loss: {:.4f} | Triple Loss: {:.4f}".format(global_step, (tr_loss - logging_loss)/args.logging_steps, gen_loss.item(), gate_loss.item(), triple_loss.item()))
                    logging_loss = tr_loss
            if (step +1) % args.validate_steps == 0:
                # Sign convention: ppl improves downward, bleu/acc improve upward.
                sign_list = {'ppl':1.0, 'bleu':-1.0, 'acc':-1.0}
                result = evaluate(args, model, tokenizer, args.evaluate_metrics, prefix=epoch)
                #logger.info(result)
                logger.info("Epoch {} evaluate {}: {:.4f}".format(epoch, args.evaluate_metrics, result[args.evaluate_metrics]))
                if args.save_last or (result[args.evaluate_metrics] - best_valid[args.evaluate_metrics]) * sign_list[args.evaluate_metrics] < 0:
                    model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(args.output_dir)
                    torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
                    torch.save(scheduler.state_dict(), os.path.join(args.output_dir, "scheduler.bin"))
                    torch.save(optimizer.state_dict(), os.path.join(args.output_dir, "optimizer.bin"))
                    subprocess.call(["cp", os.path.join(args.model_name_or_path, "vocab.json"), args.output_dir])
                    subprocess.call(["cp", os.path.join(args.model_name_or_path, "merges.txt"), args.output_dir])
                    logger.info("Saving model checkpoint to %s", args.output_dir)
                    best_valid[args.evaluate_metrics] = result[args.evaluate_metrics]
    return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, evaluate_metrics="ppl", prefix='0'):
    """Evaluate `model` on the split selected by `prefix` ('test', 'train', else dev).

    Supported metrics: 'bleu' (beam-search generation scored with corpus BLEU),
    'ppl' and 'acc'. Returns a dict {metric_name: value}.
    """
    eval_output_dir = args.output_dir
    # Pick the data file matching the requested split.
    if prefix == 'test':
        eval_data_file = args.test_data_file
    elif prefix == 'train':
        eval_data_file = args.train_data_file
    else:
        eval_data_file = args.dev_data_file
    eval_dataset = MHDataset(args,
                             tokenizer,
                             eval_data_file,
                             src_max_length=args.source_length,
                             tgt_max_length=args.target_length,
                             do_generate = evaluate_metrics == 'bleu'
                             )
    eval_dataset.load()
    if getattr(eval_dataset, "print_features", False):
        eval_dataset.print_features()
    if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(eval_output_dir)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, drop_last = False, num_workers=args.workers, pin_memory=True)
    # Eval!
    logger.info("***** Running evaluation *****")
    logger.info(" Num examples = %d", len(eval_dataset))
    logger.info(" Batch size = %d", args.eval_batch_size)
    # NOTE(review): eval_loss, Hit_num, precision, recall and the other counters
    # below are initialized but never updated inside the loop — only the 'bleu'
    # branch does any per-batch work, so the 'ppl' and 'acc' results computed
    # from them are degenerate as written.
    eval_loss = 0.0
    gen_seqs = []
    nb_eval_steps = 0
    model.eval()
    if evaluate_metrics == 'bleu':
        generator = build_generator(args, eval_dataset)
    top_ids = []
    Hit_num = 0
    precision = 0
    recall = 0
    triple_Hit_num = 0
    concept_gt_Hit_num = 0
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            if evaluate_metrics == 'bleu':
                batch_size = batch[0].size(0)
                mem_size = batch[6].size(1)
                # Re-pack the positional tensors into the keyword form expected by generate().
                batch = {"src_input_ids": batch[0],
                         "attention_mask": batch[1],
                         "src_position_ids": batch[2],
                         "concept_ids": batch[6],
                         "concept_label": batch[7],
                         "distance": batch[8], #v3
                         "head": batch[9],
                         "tail": batch[10],
                         "relation": batch[11],
                         "triple_label": batch[12],
                         "vocab_map": batch[13],
                         "map_mask": batch[14],
                         "seq_generator": generator}
                hypos = model.generate(**batch)
                gen_seqs.extend(hypos)
        nb_eval_steps += 1
    if evaluate_metrics == 'ppl':
        eval_loss = eval_loss / nb_eval_steps
        perplexity = torch.exp(torch.tensor(eval_loss))
        result = {
            "ppl": perplexity
        }
    elif evaluate_metrics == 'bleu':
        # NOTE(review): `nlp` is loaded but never used in this branch.
        nlp = spacy.load("en_core_web_sm")
        references = [[x.split() for x in y] for y in eval_dataset.target]
        predictions = [x.split() for x in gen_seqs]
        bleu2 = _compute_bleu(references, predictions, max_order=2)
        bleu4 = _compute_bleu(references, predictions, max_order=4)
        result = {
            "bleu": bleu4[0]
        }
        save_generation(args, gen_seqs, prefix=prefix)
    elif evaluate_metrics == 'acc':
        save_generation(args, top_ids, prefix=prefix)
        result = {
            "acc": Hit_num / len(eval_dataset)
        }
    return result
def generate(args, generator, model, dataset):
    """Run sequence generation over *dataset* in order and return all hypotheses."""
    model.eval()
    loader = DataLoader(dataset, sampler=SequentialSampler(dataset),
                        batch_size=args.per_gpu_eval_batch_size)

    total_hypos = []
    for batch in tqdm(loader):
        batch = tuple(t.to(args.device) for t in batch)
        sample = {
            "input_ids": batch[0],
            "attention_mask": batch[2],
            "position_ids": batch[3]
        }
        with torch.no_grad():
            total_hypos.extend(generator.generate(model, sample, dataset))
    return total_hypos
def build_generator(args, dataset):
    """Instantiate a SequenceGenerator configured from *args*, with sane defaults."""
    def opt(name, default):
        # Fall back to the default when the CLI namespace lacks the attribute.
        return getattr(args, name, default)

    return SequenceGenerator(
        args,
        Dictionary(dataset.tokenizer.encoder),
        dataset.tokenizer,
        beam_size=opt('beam', 3),
        max_len_a=opt('max_len_a', 0),
        max_len_b=opt('max_len_b', dataset.tgt_max_length),
        min_len=opt('min_len', 1),
        normalize_scores=(not opt('unnormalized', False)),
        len_penalty=opt('lenpen', 1),
        unk_penalty=opt('unkpen', 0),
        sampling=opt('sampling', False),
        sampling_topk=opt('sampling_topk', -1),
        sampling_topp=opt('sampling_topp', -1.0),
        temperature=opt('temperature', 1.),
        diverse_beam_groups=opt('diverse_beam_groups', -1),
        diverse_beam_strength=opt('diverse_beam_strength', 0.5),
        match_source_len=opt('match_source_len', False),
        no_repeat_ngram_size=opt('no_repeat_ngram_size', 0),
    )
def save_generation(args, results, prefix='0'):
    """Write one generated result per line to <output_dir>/result_ep:<prefix>.txt."""
    out_path = os.path.join(args.output_dir, "result_ep:{}.txt".format(prefix))
    with open(out_path, 'w') as fout:
        fout.writelines(str(item) + '\n' for item in results)
    logger.info("Save generation result in {}".format(out_path))
class JsonDumpHelper(json.JSONEncoder):
    """JSON encoder that falls back to str() for any non-string unserializable object."""

    def default(self, obj):
        # Exact-type check preserves the base-class error path for plain strings.
        if type(obj) is str:
            return json.JSONEncoder.default(self, obj)
        return str(obj)
def main():
    """Parse CLI arguments, build the model/tokenizer, then run training and/or evaluation."""
    parser = argparse.ArgumentParser()

    ## File parameters
    parser.add_argument("--train_data_file", default=None, type=str, required=True,
                        help="The input training data file (a text file).")
    parser.add_argument("--dev_data_file", default=None, type=str, required=True,)
    parser.add_argument("--test_data_file", default=None, type=str, required=True,)
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument("--graph_path", default=None, type=str, required=True)

    ## My parameters
    parser.add_argument("--source_length", default=16, type=int)
    parser.add_argument("--target_length", default=16, type=int)
    parser.add_argument("--tb_log_dir", default=None, type=str)
    parser.add_argument("--evaluate_metrics", default='ppl', type=str, help='choose between ppl and bleu')
    parser.add_argument("--model_type", default="bert", type=str,
                        help="The model architecture to be fine-tuned.")
    parser.add_argument("--model_name_or_path", default="bert-base-cased", type=str,
                        help="The model checkpoint for weights initialization.")
    parser.add_argument("--gamma", type=float, default=0.5, help="Discount factor in multi-hop reasoning")
    parser.add_argument("--alpha", type=float, default=3.0, help="loss = gen_loss + alpha * gate_loss")
    parser.add_argument("--beta", type=float, default=5.0, help='coefficient of weak supervision')
    parser.add_argument("--aggregate_method", type=str, default="max",
                        help="Aggregation method in multi-hop reasoning, choose between: 'avg' and 'max'")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_generate", action='store_true')
    parser.add_argument("--workers", default=0, type=int)
    parser.add_argument("--save_last", action='store_true')
    parser.add_argument("--per_gpu_train_batch_size", default=4, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--per_gpu_eval_batch_size", default=4, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--learning_rate", default=1e-6, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight deay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=1, type=int,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_steps", default=-1, type=int,
                        help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
    parser.add_argument("--warmup_ratio", default=0, type=float,
                        help="Linear warmup over warmup_ratio.")
    parser.add_argument('--logging_steps', type=int, default=50,
                        help="Log every X updates steps.")
    parser.add_argument('--validate_steps', type=int, default=3200)
    parser.add_argument("--no_cuda", action='store_true',
                        help="Avoid using CUDA when available")
    parser.add_argument('--overwrite_output_dir', action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument('--overwrite_cache', action='store_true',
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
    args = parser.parse_args()

    # NOTE(review): --mlm is never declared by this parser, so evaluating args.mlm
    # would raise AttributeError; this only works because the `in` check is False
    # for the supported 'gpt2' model_type and the `and` short-circuits.
    if args.model_type in ["bert", "roberta", "distilbert"] and not args.mlm:
        raise ValueError("BERT and RoBERTa do not have LM heads but masked LM heads. They must be run using the --mlm "
                         "flag (masked language modeling).")

    # In eval mode, load weights from output_dir and write results to a sibling '_eval' dir.
    if args.do_eval:
        load_from_path = args.output_dir
        args.output_dir = args.output_dir + '_eval'
    else:
        load_from_path = args.model_name_or_path

    # Create output directory if needed
    if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(args.output_dir)
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
        raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device

    # Setup logging
    set_log(os.path.join(args.output_dir, "log.txt"))
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)

    # Set seed
    set_seed(args)

    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Barrier to make sure only the first process in distributed training download model & vocab

    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path, do_lower_case=False)
    model = model_class.from_pretrained(load_from_path,
                                        source_length=args.source_length,
                                        gamma=args.gamma,
                                        alpha=args.alpha,
                                        aggregate_method=args.aggregate_method,
                                        tokenizer=tokenizer,)
    model.resize_token_embeddings(len(tokenizer))
    model.to(args.device)

    logger.info('-' * 100)
    logger.info('CONFIG:\n%s' %
                json.dumps(vars(args), cls=JsonDumpHelper, indent=4, sort_keys=True))
    logger.info('-' * 100)

    if args.do_train:
        if args.local_rank not in [-1, 0]:
            torch.distributed.barrier()  # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache
        train_dataset = MHDataset(args,
                                  tokenizer,
                                  args.train_data_file,
                                  src_max_length=args.source_length,
                                  tgt_max_length=args.target_length,
                                  )
        train_dataset.load()
        if args.local_rank == 0:
            torch.distributed.barrier()
        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)

    if args.do_eval:
        result = evaluate(args, model, tokenizer, args.evaluate_metrics, 'test')
        logger.info("Test evaluate {}: {:.4f}".format(args.evaluate_metrics, result[args.evaluate_metrics]))


if __name__ == '__main__':
    main()
| 26,926 | 42.360709 | 239 | py |
multigen | multigen-master/scripts/optimization.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import logging
import math
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
logger = logging.getLogger(__name__)
class ConstantLRSchedule(LambdaLR):
    """Schedule that leaves the optimizer's base learning rate unchanged.

    The LR multiplier is 1.0 at every step, so ``step()`` is a no-op with
    respect to the learning rate.
    """

    def __init__(self, optimizer, last_epoch=-1):
        super(ConstantLRSchedule, self).__init__(
            optimizer, self._unit_lambda, last_epoch=last_epoch
        )

    @staticmethod
    def _unit_lambda(step):
        # Constant multiplier: base LR is used as-is at every step.
        return 1.0
class WarmupConstantSchedule(LambdaLR):
    """Linear warmup to the base LR, then constant.

    The LR multiplier grows linearly from 0 to 1 over ``warmup_steps``
    optimizer steps and stays at 1 afterwards.
    """

    def __init__(self, optimizer, warmup_steps, last_epoch=-1):
        self.warmup_steps = warmup_steps
        super(WarmupConstantSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        # After warmup the base LR is used unchanged.
        if step >= self.warmup_steps:
            return 1.
        # During warmup: fraction of warmup completed (guard against 0 steps).
        return float(step) / float(max(1.0, self.warmup_steps))
class WarmupLinearSchedule(LambdaLR):
    """Linear warmup followed by linear decay.

    The LR multiplier rises linearly from 0 to 1 over ``warmup_steps`` steps,
    then falls linearly back to 0 over the remaining
    ``t_total - warmup_steps`` steps (clamped at 0 beyond ``t_total``).
    """

    def __init__(self, optimizer, warmup_steps, t_total, last_epoch=-1):
        self.warmup_steps = warmup_steps
        self.t_total = t_total
        super(WarmupLinearSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        if step < self.warmup_steps:
            return float(step) / float(max(1, self.warmup_steps))
        # Linear decay over the post-warmup span; never negative.
        remaining = float(self.t_total - step)
        decay_span = float(max(1.0, self.t_total - self.warmup_steps))
        return max(0.0, remaining / decay_span)
class WarmupCosineSchedule(LambdaLR):
    """Linear warmup followed by cosine decay.

    The LR multiplier rises linearly from 0 to 1 over ``warmup_steps`` steps,
    then follows a cosine curve from 1 down to 0 over the remaining
    ``t_total - warmup_steps`` steps. With the default ``cycles=0.5`` exactly
    half a cosine period is traversed; other values traverse proportionally
    more or less of the curve.
    """

    def __init__(self, optimizer, warmup_steps, t_total, cycles=.5, last_epoch=-1):
        self.warmup_steps = warmup_steps
        self.t_total = t_total
        self.cycles = cycles
        super(WarmupCosineSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        if step < self.warmup_steps:
            return float(step) / float(max(1.0, self.warmup_steps))
        # Fraction of the post-warmup span that has elapsed.
        progress = float(step - self.warmup_steps) / float(max(1, self.t_total - self.warmup_steps))
        cosine = math.cos(math.pi * float(self.cycles) * 2.0 * progress)
        return max(0.0, 0.5 * (1. + cosine))
class WarmupCosineWithHardRestartsSchedule(LambdaLR):
    """Linear warmup, then repeated cosine decays with hard restarts.

    After the linear warmup, the LR multiplier follows ``cycles`` cosine
    decays from 1 to 0, each restarting at 1 (the phase is taken modulo 1).
    Past ``t_total`` the multiplier is pinned to 0.
    """

    def __init__(self, optimizer, warmup_steps, t_total, cycles=1., last_epoch=-1):
        self.warmup_steps = warmup_steps
        self.t_total = t_total
        self.cycles = cycles
        super(WarmupCosineWithHardRestartsSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)

    def lr_lambda(self, step):
        if step < self.warmup_steps:
            return float(step) / float(max(1, self.warmup_steps))
        # Fraction of the post-warmup span that has elapsed.
        progress = float(step - self.warmup_steps) / float(max(1, self.t_total - self.warmup_steps))
        if progress >= 1.0:
            return 0.0
        # Position within the current cosine cycle ("hard restart" = modulo).
        phase = (float(self.cycles) * progress) % 1.0
        return max(0.0, 0.5 * (1. + math.cos(math.pi * phase)))
class AdamW(Optimizer):
    """Implements the Adam algorithm with the decoupled weight-decay fix (AdamW).

    Parameters:
        lr (float): learning rate. Default 1e-3.
        betas (tuple of 2 floats): Adams beta parameters (b1, b2). Default: (0.9, 0.999)
        eps (float): Adams epsilon. Default: 1e-6
        weight_decay (float): Weight decay. Default: 0.0
        correct_bias (bool): can be set to False to avoid correcting bias in Adam
            (e.g. like in the Bert TF repository). Default True.

    Raises:
        ValueError: if ``lr``/``eps`` is negative or either beta is outside [0, 1).
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.0, correct_bias=True):
        # Validate hyper-parameters up front so misconfiguration fails fast.
        if lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1]))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
                        correct_bias=correct_bias)
        super(AdamW, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')

                state = self.state[p]

                # State initialization (lazy, per parameter).
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                # Decay the first and second moment running average coefficients.
                # FIX: use the keyword `alpha=`/`value=` overloads -- the
                # positional `add_(scalar, tensor)` / `addcmul_(scalar, t, t)`
                # forms were deprecated and removed in modern PyTorch.
                exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
                denom = exp_avg_sq.sqrt().add_(group['eps'])

                step_size = group['lr']
                if group['correct_bias']:  # No bias correction for Bert
                    bias_correction1 = 1.0 - beta1 ** state['step']
                    bias_correction2 = 1.0 - beta2 ** state['step']
                    step_size = step_size * math.sqrt(bias_correction2) / bias_correction1

                p.data.addcdiv_(exp_avg, denom, value=-step_size)

                # Just adding the square of the weights to the loss function is *not*
                # the correct way of using L2 regularization/weight decay with Adam,
                # since that will interact with the m and v parameters in strange ways.
                #
                # Instead we decay the weights directly, decoupled from the
                # m/v-based update. This is equivalent to adding the square of
                # the weights to the loss with plain (non-momentum) SGD.
                if group['weight_decay'] > 0.0:
                    p.data.add_(p.data, alpha=-group['lr'] * group['weight_decay'])

        return loss
multigen | multigen-master/scripts/dictionary.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import Counter
from multiprocessing import Pool
import os
import torch
from fairseq.tokenizer import tokenize_line
from fairseq.binarizer import safe_readline
from fairseq.data import data_utils
class Dictionary(object):
    """A mapping from symbols to consecutive integers.

    Unlike the stock fairseq ``Dictionary``, this variant is seeded from an
    existing ``tok2id`` mapping (a GPT-2 style vocabulary) and resolves its
    special-symbol indices (``<|bos|>``, ``<|pad|>``, ``<|endoftext|>``)
    from that mapping.
    """

    def __init__(
        self, tok2id
    ):
        # One occurrence count per known symbol, initialised to 1.
        self.count = [1] * len(tok2id)
        # `indices`: symbol -> id; `symbols`: id -> symbol (parallel inverse).
        self.indices = tok2id
        self.symbols = list(tok2id.keys())
        # NOTE(review): unk_word/pad_word are both '<|notentity|>' here while
        # unk_index/pad_index below resolve to '<|pad|>' -- confirm intended.
        self.unk_word, self.pad_word, self.eos_word = '<|notentity|>', '<|notentity|>', '<|endoftext|>'
        self.bos_index = self.indices['<|bos|>']
        self.pad_index = self.indices['<|pad|>']
        self.unk_index = self.indices['<|pad|>'] # gpt2 vocabulary not define unk
        self.eos_index = self.indices['<|endoftext|>']
        self.nspecial = 3

    def __eq__(self, other):
        # Two dictionaries are equal when their symbol->id mappings agree.
        return self.indices == other.indices

    def __getitem__(self, idx):
        # Out-of-range ids map to the unknown word.
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    def index(self, sym):
        """Returns the index of the specified symbol"""
        assert isinstance(sym, str)
        if sym in self.indices:
            return self.indices[sym]
        return self.unk_index

    def string(self, tensor, bpe_symbol=None, escape_unk=False):
        """Helper for converting a tensor of token indices to a string.
        Can optionally remove BPE symbols or escape <unk> words.
        """
        # A 2-D tensor is a batch of sentences: render row by row.
        if torch.is_tensor(tensor) and tensor.dim() == 2:
            return '\n'.join(self.string(t, bpe_symbol, escape_unk) for t in tensor)

        def token_string(i):
            if i == self.unk():
                return self.unk_string(escape_unk)
            else:
                return self[i]

        # EOS tokens are dropped from the rendered string.
        sent = ' '.join(token_string(i) for i in tensor if i != self.eos())
        return data_utils.process_bpe_symbol(sent, bpe_symbol)

    def unk_string(self, escape=False):
        """Return unknown string, optionally escaped as: <<unk>>"""
        if escape:
            return '<{}>'.format(self.unk_word)
        else:
            return self.unk_word

    def add_symbol(self, word, n=1):
        """Adds a word to the dictionary"""
        if word in self.indices:
            # Existing word: just bump its count.
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            # New word: append at the end of the id space.
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def update(self, new_dict):
        """Updates counts from new dictionary."""
        for word in new_dict.symbols:
            idx2 = new_dict.indices[word]
            if word in self.indices:
                idx = self.indices[word]
                self.count[idx] = self.count[idx] + new_dict.count[idx2]
            else:
                idx = len(self.symbols)
                self.indices[word] = idx
                self.symbols.append(word)
                self.count.append(new_dict.count[idx2])

    def finalize(self, threshold=-1, nwords=-1, padding_factor=8):
        """Sort symbols by frequency in descending order, ignoring special ones.
        Args:
            - threshold defines the minimum word count
            - nwords defines the total number of words in the final dictionary,
                including special symbols
            - padding_factor can be used to pad the dictionary size to be a
                multiple of 8, which is important on some hardware (e.g., Nvidia
                Tensor Cores).
        """
        if nwords <= 0:
            nwords = len(self)

        # Special symbols always keep their positions at the front.
        new_indices = dict(zip(self.symbols[:self.nspecial], range(self.nspecial)))
        new_symbols = self.symbols[:self.nspecial]
        new_count = self.count[:self.nspecial]

        # Re-rank the remaining symbols by descending count; pre-sorting the
        # (symbol, count) pairs makes tie-breaking deterministic.
        c = Counter(dict(sorted(zip(self.symbols[self.nspecial:], self.count[self.nspecial:]))))
        for symbol, count in c.most_common(nwords - self.nspecial):
            if count >= threshold:
                new_indices[symbol] = len(new_symbols)
                new_symbols.append(symbol)
                new_count.append(count)
            else:
                break

        threshold_nwords = len(new_symbols)
        if padding_factor > 1:
            # Pad with dummy symbols until the size is a multiple of
            # padding_factor (tensor-core friendly embedding shapes).
            i = 0
            while threshold_nwords % padding_factor != 0:
                symbol = 'madeupword{:04d}'.format(i)
                new_indices[symbol] = len(new_symbols)
                new_symbols.append(symbol)
                new_count.append(0)
                i += 1
                threshold_nwords += 1

        assert len(new_symbols) % padding_factor == 0
        assert len(new_symbols) == len(new_indices)

        self.count = list(new_count)
        self.symbols = list(new_symbols)
        self.indices = new_indices

    def bos(self):
        """Helper to get index of beginning-of-sentence symbol"""
        return self.bos_index

    def pad(self):
        """Helper to get index of pad symbol"""
        return self.pad_index

    def eos(self):
        """Helper to get index of end-of-sentence symbol"""
        return self.eos_index

    def unk(self):
        """Helper to get index of unk symbol"""
        return self.unk_index

    @classmethod
    def load(cls, f, ignore_utf_errors=False):
        """Loads the dictionary from a text file with the format:
        ```
        <symbol0> <count0>
        <symbol1> <count1>
        ...
        ```
        """
        # NOTE(review): `cls()` is called without the `tok2id` argument that
        # this class's __init__ requires, so this classmethod would raise
        # TypeError as written -- presumably dead code after the tok2id
        # refactor; confirm before relying on it.
        d = cls()
        d.add_from_file(f, ignore_utf_errors)
        return d

    def add_from_file(self, f, ignore_utf_errors=False):
        """
        Loads a pre-existing dictionary from a text file and adds its symbols
        to this instance.
        """
        if isinstance(f, str):
            # Given a path: open it and recurse with the file object.
            try:
                if not ignore_utf_errors:
                    with open(f, 'r', encoding='utf-8') as fd:
                        self.add_from_file(fd)
                else:
                    with open(f, 'r', encoding='utf-8', errors='ignore') as fd:
                        self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please "
                                "rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            # Each line is "<symbol> <count>"; split on the LAST space so the
            # symbol itself may contain spaces.
            idx = line.rfind(' ')
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            count = int(line[idx + 1:])
            self.indices[word] = len(self.symbols)
            self.symbols.append(word)
            self.count.append(count)

    def _save(self, f, kv_iterator):
        # Given a path: create parent dirs, open, and recurse via save().
        if isinstance(f, str):
            os.makedirs(os.path.dirname(f), exist_ok=True)
            with open(f, 'w', encoding='utf-8') as fd:
                return self.save(fd)
        for k, v in kv_iterator:
            print('{} {}'.format(k, v), file=f)

    def _get_meta(self):
        # Hook for subclasses to prepend extra (symbol, count) metadata.
        return [], []

    def _load_meta(self, lines):
        # Hook for subclasses; returns the line offset where entries start.
        return 0

    def save(self, f):
        """Stores dictionary into a text file"""
        ex_keys, ex_vals = self._get_meta()
        self._save(f, zip(ex_keys + self.symbols[self.nspecial:], ex_vals + self.count[self.nspecial:]))

    def dummy_sentence(self, length):
        # Random non-special token ids, terminated by EOS.
        t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long()
        t[-1] = self.eos()
        return t

    def encode_line(self, line, line_tokenizer=tokenize_line, add_if_not_exist=True,
                    consumer=None, append_eos=True, reverse_order=False):
        """Tokenize ``line`` and return a 1-D IntTensor of token ids."""
        words = line_tokenizer(line)
        if reverse_order:
            words = list(reversed(words))
        nwords = len(words)
        ids = torch.IntTensor(nwords + 1 if append_eos else nwords)

        for i, word in enumerate(words):
            if add_if_not_exist:
                idx = self.add_symbol(word)
            else:
                idx = self.index(word)
            if consumer is not None:
                # Optional callback, e.g. for collecting statistics.
                consumer(word, idx)
            ids[i] = idx
        if append_eos:
            ids[nwords] = self.eos_index
        return ids

    @staticmethod
    def _add_file_to_dictionary_single_worker(filename, tokenize, eos_word, worker_id=0, num_workers=1):
        # Count token occurrences in this worker's byte-range of the file;
        # one eos_word is counted per line.
        counter = Counter()
        with open(filename, 'r', encoding='utf-8') as f:
            size = os.fstat(f.fileno()).st_size
            chunk_size = size // num_workers
            offset = worker_id * chunk_size
            end = offset + chunk_size
            f.seek(offset)
            if offset > 0:
                safe_readline(f)  # drop first incomplete line
            line = f.readline()
            while line:
                for word in tokenize(line):
                    counter.update([word])
                counter.update([eos_word])
                if f.tell() > end:
                    break
                line = f.readline()
        return counter

    @staticmethod
    def add_file_to_dictionary(filename, dict, tokenize, num_workers):
        """Count tokens in ``filename`` (optionally with a process pool) and
        merge the counts into ``dict`` via ``add_symbol``."""
        def merge_result(counter):
            for w, c in sorted(counter.items()):
                dict.add_symbol(w, c)

        if num_workers > 1:
            # Fan out disjoint byte-ranges of the file to worker processes.
            pool = Pool(processes=num_workers)
            results = []
            for worker_id in range(num_workers):
                results.append(pool.apply_async(
                    Dictionary._add_file_to_dictionary_single_worker,
                    (filename, tokenize, dict.eos_word, worker_id, num_workers)
                ))
            pool.close()
            pool.join()
            for r in results:
                merge_result(r.get())
        else:
            merge_result(Dictionary._add_file_to_dictionary_single_worker(filename, tokenize, dict.eos_word))
class TruncatedDictionary(object):
    """View over a dictionary exposing only its first ``length`` entries.

    Out-of-range lookups fall back to the wrapped dictionary's ``unk()``.
    The instance dynamically re-parents itself so ``isinstance`` checks
    against the wrapped dictionary's class still succeed.

    FIX: stripped dataset-metadata residue that was fused onto the final
    ``return`` statement (it made the line syntactically invalid).
    """

    def __init__(self, wrapped_dict, length):
        # Build a subclass of (TruncatedDictionary, type(wrapped_dict)) on the
        # fly so this object passes isinstance checks for the wrapped class.
        self.__class__ = type(
            wrapped_dict.__class__.__name__,
            (self.__class__, wrapped_dict.__class__),
            {}
        )
        # Share the wrapped dictionary's attribute dict (no copying).
        self.__dict__ = wrapped_dict.__dict__
        self.wrapped_dict = wrapped_dict
        self.length = min(len(self.wrapped_dict), length)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        # Within the truncated range: delegate; otherwise: unknown symbol.
        if i < self.length:
            return self.wrapped_dict[i]
        return self.wrapped_dict.unk()
multigen | multigen-master/scripts/modeling_gpt2.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT-2 model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import json
import logging
import math
import os
import sys
from io import open
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from torch_scatter import scatter_max, scatter_mean, scatter_add
from transformers.modeling_utils import PreTrainedModel, Conv1D, prune_conv1d_layer, SequenceSummary
from transformers.configuration_gpt2 import GPT2Config
from transformers.file_utils import add_start_docstrings
from transformers import BertModel, BertConfig
import numpy as np
logger = logging.getLogger(__name__)
# Download URLs for the pretrained GPT-2 checkpoints, keyed by shortcut name.
GPT2_PRETRAINED_MODEL_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin",
                                     "gpt2-medium": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-pytorch_model.bin",
                                     "gpt2-large": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-pytorch_model.bin",
                                     "distilgpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-pytorch_model.bin",}
def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path):
    """Load TF checkpoint weights into a PyTorch GPT-2 model.

    Requires TensorFlow (and numpy) to be importable at call time; returns
    the same ``model`` with its parameters overwritten in place.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error("Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
                     "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(gpt2_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array.squeeze())

    for name, array in zip(names, arrays):
        name = name[6:]  # skip "model/"
        name = name.split('/')
        # Walk the PyTorch module tree following the TF scope path segments.
        pointer = model
        for m_name in name:
            if re.fullmatch(r'[A-Za-z]+\d+', m_name):
                # e.g. "h0" -> attribute "h", then index 0 below.
                l = re.split(r'(\d+)', m_name)
            else:
                l = [m_name]
            # TF naming: 'w'/'g' -> weight, 'b' -> bias; embeddings keep
            # their own attribute name before descending to .weight.
            if l[0] == 'w' or l[0] == 'g':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'b':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'wpe' or l[0] == 'wte':
                pointer = getattr(pointer, l[0])
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        try:
            # Shape mismatch means the checkpoint does not match the config.
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation, as in GPT-2)."""
    inner = math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))
    return 0.5 * x * (1 + torch.tanh(inner))
class Attention(nn.Module):
    """Multi-head causal self-attention with a precomputed triangular mask.

    NOTE(review): unlike stock GPT-2, `head_mask` here is NOT a per-head
    pruning mask -- `_attn` folds it into the causal bias so that key
    positions where head_mask == 0 become attendable from every query
    (see the source mask constructed in GPT2Model.forward).
    """

    def __init__(self, nx, n_ctx, config, scale=False):
        super(Attention, self).__init__()
        self.output_attentions = config.output_attentions

        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % config.n_head == 0
        # Lower-triangular causal mask, broadcastable over (batch, head).
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale

        # Single projection producing query, key and value concatenated.
        self.c_attn = Conv1D(n_state * 3, nx)
        self.c_proj = Conv1D(n_state, nx)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        # Physically remove the given attention heads from the projections.
        if len(heads) == 0:
            return
        mask = torch.ones(self.n_head, self.split_size // self.n_head)
        heads = set(heads) - self.pruned_heads  # Convert to set and remove already pruned heads
        for head in heads:
            # Compute how many pruned heads are before the head and move the index accordingly
            head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()
        index_attn = torch.cat([index, index + self.split_size, index + (2*self.split_size)])

        # Prune conv1d layers
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)

        # Update hyper params
        self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
        self.n_head = self.n_head - len(heads)
        self.pruned_heads = self.pruned_heads.union(heads)

    def _attn(self, q, k, v, attention_mask=None, head_mask=None):
        # Raw attention scores; optionally scaled by 1/sqrt(head_dim).
        w = torch.matmul(q, k)
        if self.scale:
            w = w / math.sqrt(v.size(-1))
        nd, ns = w.size(-2), w.size(-1)
        # Slice the causal bias to the current (query, key) window.
        b = self.bias[:, :, ns-nd:ns, :ns].clone()
        if head_mask is not None:
            # Key positions where head_mask == 0 get their bias forced to 1,
            # i.e. become visible to every query (bidirectional attention).
            b *= head_mask
            b += (head_mask == 0).float()
        # Additive -1e4 penalty on positions the bias disallows.
        w = w * b - 1e4 * (1 - b)

        if attention_mask is not None:
            # Apply the attention mask
            w = w + attention_mask

        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)

        outputs = [torch.matmul(w, v)]
        if self.output_attentions:
            outputs.append(w)
        return outputs

    def merge_heads(self, x):
        # (batch, head, seq, head_dim) -> (batch, seq, n_state)
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states

    def split_heads(self, x, k=False):
        # (batch, seq, n_state) -> per-head layout; keys get a transposed
        # layout so that q @ k works without a separate transpose.
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)  # (batch, head, head_features, seq_length)
        else:
            return x.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)

    def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
        x = self.c_attn(x)
        query, key, value = x.split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        if layer_past is not None:
            # Prepend cached keys/values from previous decoding steps.
            past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1]  # transpose back cf below
            key = torch.cat((past_key, key), dim=-1)
            value = torch.cat((past_value, value), dim=-2)
        present = torch.stack((key.transpose(-2, -1), value))  # transpose to have same shapes for stacking

        attn_outputs = self._attn(query, key, value, attention_mask, head_mask)
        a = attn_outputs[0]

        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)

        outputs = [a, present] + attn_outputs[1:]
        return outputs  # a, present, (attentions)
class MLP(nn.Module):
    """Position-wise feed-forward block: expand to ``n_state``, apply GELU,
    project back to the embedding size, then dropout."""

    def __init__(self, n_state, config):  # in MLP: n_state=3072 (4 * n_embd)
        super(MLP, self).__init__()
        embed_dim = config.n_embd
        self.c_fc = Conv1D(n_state, embed_dim)
        self.c_proj = Conv1D(embed_dim, n_state)
        self.act = gelu
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, x):
        hidden = self.act(self.c_fc(x))
        projected = self.c_proj(hidden)
        return self.dropout(projected)
class Block(nn.Module):
    """One GPT-2 transformer layer: pre-LayerNorm self-attention followed by a
    pre-LayerNorm MLP, each wrapped in a residual connection."""

    def __init__(self, n_ctx, config, scale=False):
        super(Block, self).__init__()
        embed_dim = config.n_embd
        self.ln_1 = nn.LayerNorm(embed_dim, eps=config.layer_norm_epsilon)
        self.attn = Attention(embed_dim, n_ctx, config, scale)
        self.ln_2 = nn.LayerNorm(embed_dim, eps=config.layer_norm_epsilon)
        self.mlp = MLP(4 * embed_dim, config)

    def forward(self, x, layer_past=None, attention_mask=None, head_mask=None):
        # Self-attention sub-layer (returns [attn_out, present, (attentions)]).
        attn_outputs = self.attn(
            self.ln_1(x),
            layer_past=layer_past,
            attention_mask=attention_mask,
            head_mask=head_mask,
        )
        x = x + attn_outputs[0]  # residual around attention
        # Feed-forward sub-layer with its own residual connection.
        x = x + self.mlp(self.ln_2(x))
        return [x] + attn_outputs[1:]  # x, present, (attentions)
class GPT2PreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
    """
    config_class = GPT2Config
    pretrained_model_archive_map = GPT2_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_gpt2
    base_model_prefix = "transformer"

    def __init__(self, *inputs, **kwargs):
        super(GPT2PreTrainedModel, self).__init__(*inputs, **kwargs)

    def _init_weights(self, module):
        """ Initialize the weights.
        """
        if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def reorder_encoder_out(self, encoder_outs, new_order):
        """
        Reorder encoder output according to *new_order*.
        Args:
            encoder_out: output from the ``forward()`` method
            new_order (LongTensor): desired order
        Returns:
            *encoder_out* rearranged according to *new_order*
        """
        # Gather along the batch dimension (used for beam-search reordering).
        new_encoder_outs = []
        for encoder_out in encoder_outs:
            new_encoder_outs.append(encoder_out.index_select(0, new_order))
        return new_encoder_outs
GPT2_START_DOCSTRING = r""" OpenAI GPT-2 model was proposed in
`Language Models are Unsupervised Multitask Learners`_
by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
It's a causal (unidirectional) transformer pre-trained using language modeling on a very large
corpus of ~40 GB of text data.
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
refer to the PyTorch documentation for all matter related to general usage and behavior.
.. _`Language Models are Unsupervised Multitask Learners`:
https://openai.com/blog/better-language-models/
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
Parameters:
config (:class:`~transformers.GPT2Config`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
GPT2_INPUTS_DOCSTRING = r""" Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
GPT-2 is a model with absolute position embeddings so it's usually advised to pad the inputs on
the right rather than the left.
Indices can be obtained using :class:`transformers.GPT2Tokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**past**:
list of ``torch.FloatTensor`` (one for each layer):
that contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `past` output below). Can be used to speed up sequential decoding.
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
A parallel sequence of tokens (can be used to indicate various portions of the inputs).
The embeddings from these tokens will be summed with the respective token embeddings.
Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices).
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
@add_start_docstrings("The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.",
                      GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING)
class GPT2Model(GPT2PreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the last layer of the model.
        **past**:
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            that contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2Model.from_pretrained('gpt2')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple

    """

    def __init__(self, config, source_length=0):
        super(GPT2Model, self).__init__(config)
        print("source length: {}".format(source_length))
        self.output_hidden_states = config.output_hidden_states
        self.output_attentions = config.output_attentions
        #self.output_past = config.output_past
        # Token and learned absolute-position embeddings.
        self.wte = nn.Embedding(config.vocab_size, config.n_embd)
        self.wpe = nn.Embedding(config.n_positions, config.n_embd)
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
        self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
        # Length of the source prefix that receives full (non-causal)
        # attention in forward(); 0 disables the source mask.
        self.source_length = source_length

        self.init_weights()

    def _resize_token_embeddings(self, new_num_tokens):
        self.wte = self._get_resized_embeddings(self.wte, new_num_tokens)
        return self.wte

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.h[layer].attn.prune_heads(heads)

    def forward(self, input_ids, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None):
        # Flatten any leading batch dims to (batch, seq_len).
        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1])

        if past is None:
            past_length = 0
            past = [None] * len(self.h)
        else:
            # Position ids continue from the length of the cached keys.
            past_length = past[0][0].size(-2)
        if position_ids is None:
            position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)

        # Attention mask.
        if attention_mask is not None:
            attention_mask = attention_mask.view(-1, input_shape[-1])
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)

            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and -10000.0 for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.n_layer, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to fload if need + fp16 compatibility
        else:
            head_mask = [None] * self.config.n_layer

        # source mask:
        # When source_length > 0, head_mask is REPLACED by a mask whose first
        # source_length key positions are 0; Attention._attn turns those
        # positions into bias 1 for every query, i.e. the source prefix is
        # attended bidirectionally while the rest stays causal.
        # NOTE(review): this clobbers any user-provided head_mask.
        if self.source_length > 0:
            head_mask = torch.ones(1, 1, 1, input_ids.size(1), dtype=torch.float, device=input_ids.device)
            head_mask[..., :self.source_length] = 0
            head_mask = [head_mask] * self.config.n_layer

        inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
        else:
            token_type_embeds = 0
        hidden_states = inputs_embeds + position_embeds + token_type_embeds
        hidden_states = self.drop(hidden_states)

        output_shape = input_shape + (hidden_states.size(-1),)

        # NOTE(review): `presents` is never populated in this modified copy
        # (the per-layer `present` is discarded and the output_past branch
        # below is commented out), so no KV cache is returned.
        presents = ()
        all_attentions = []
        all_hidden_states = ()
        for i, (block, layer_past) in enumerate(zip(self.h, past)):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
            outputs = block(hidden_states,
                            layer_past=layer_past,
                            attention_mask=attention_mask,
                            head_mask=head_mask[i])
            hidden_states, present = outputs[:2]
            if self.output_attentions:
                all_attentions.append(outputs[2])

        hidden_states = self.ln_f(hidden_states)
        hidden_states = hidden_states.view(*output_shape)
        # Add last hidden state
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        #if self.output_past:
        #    outputs = outputs + (presents,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            # let the number of heads free (-1) so we can extract attention even after head pruning
            attention_output_shape = input_shape[:-1] + (-1,) + all_attentions[0].shape[-2:]
            all_attentions = tuple(t.view(*attention_output_shape) for t in all_attentions)
            outputs = outputs + (all_attentions,)
        return outputs  # last hidden state, (presents), (all hidden_states), (attentions)
@add_start_docstrings("""The GPT2 Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """, GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING)
class GPT2LMHeadModel(GPT2PreTrainedModel):
    r"""
        **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for language modeling.
            NOTE: unlike upstream transformers, this fork does **not** shift the labels
            inside the model (the shifting slices in ``forward`` are commented out), so
            callers must pass labels that are already aligned with the logits.
            Indices are selected in ``[-1, 0, ..., config.vocab_size]``
            All labels set to ``-1`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Language modeling loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **past**:
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            that contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        import torch
        from transformers import GPT2Tokenizer, GPT2LMHeadModel
        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2LMHeadModel.from_pretrained('gpt2')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=input_ids)
        loss, logits = outputs[:2]
    """
    def __init__(self, config, source_length=0):
        # source_length > 0 makes the underlying GPT2Model install a per-layer
        # head_mask that zeroes weights over the first `source_length` key
        # positions (see GPT2Model.forward) -- presumably to stop target tokens
        # attending back into the source segment; confirm against Attention.forward.
        super(GPT2LMHeadModel, self).__init__(config)
        self.transformer = GPT2Model(config, source_length)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.init_weights()
        self.tie_weights()
    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.lm_head,
                                   self.transformer.wte)
    def get_representation(self, input_ids, position_ids=None, attention_mask=None):
        '''get sentence representation via max-pooling over the time dimension'''
        transformer_outputs = self.transformer(input_ids,
                                               past=None,
                                               attention_mask=attention_mask,
                                               token_type_ids=None,
                                               position_ids=position_ids,
                                               head_mask=None)
        hidden_states = transformer_outputs[0]
        # Zero-out padded positions, then max-pool. NOTE(review): masked positions
        # contribute 0, so a feature that is negative at every real position is
        # reported as 0 by the max -- confirm this is intended.
        pool_repr = torch.max(hidden_states * attention_mask.unsqueeze(2).expand_as(hidden_states.data), 1)[0]
        return pool_repr
    def forward(self, input_ids, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
                labels=None):
        transformer_outputs = self.transformer(input_ids,
                                               past=past,
                                               attention_mask=attention_mask,
                                               token_type_ids=token_type_ids,
                                               position_ids=position_ids,
                                               head_mask=head_mask)
        hidden_states = transformer_outputs[0]
        lm_logits = self.lm_head(hidden_states)
        outputs = (lm_logits,) + transformer_outputs[1:]
        if labels is not None:
            # Shifting is deliberately disabled in this fork: `labels` must
            # already be aligned with `lm_logits` (ignored positions carry -1).
            shift_logits = lm_logits#[..., :-1, :].contiguous()
            shift_labels = labels#[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                            shift_labels.view(-1))
            outputs = (loss,) + outputs
        return outputs  # (loss), lm_logits, presents, (all hidden_states), (attentions)
@add_start_docstrings("""The GPT2 Model transformer with a language modeling and a multiple-choice classification
head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings,
the classification head takes as input the input of a specified classification token index in the input sequence).
""", GPT2_START_DOCSTRING, GPT2_INPUTS_DOCSTRING)
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
    r"""
        **mc_token_ids**: (`optional`, default to index of the last token of the input) ``torch.LongTensor`` of shape ``(batch_size, num_choices)``:
            Index of the classification token in each input sequence.
            Selected in the range ``[0, input_ids.size(-1) - 1[``.
        **lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for language modeling.
            Note that the labels **are shifted** inside the model, i.e. you can set ``lm_labels = input_ids``
            Indices are selected in ``[-1, 0, ..., config.vocab_size]``
            All labels set to ``-1`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size]``
        **mc_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size)``:
            Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
            of the input tensors. (see `input_ids` above)
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **lm_loss**: (`optional`, returned when ``lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Language modeling loss.
        **mc_loss**: (`optional`, returned when ``multiple_choice_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Multiple choice classification loss.
        **lm_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **mc_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)``
            Prediction scores of the multiplechoice classification head (scores for each choice before SoftMax).
        **past**:
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            that contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        import torch
        from transformers import GPT2Tokenizer, GPT2DoubleHeadsModel
        tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        model = GPT2DoubleHeadsModel.from_pretrained('gpt2')
        # Add a [CLS] to the vocabulary (we should train it also!)
        tokenizer.add_special_tokens({'cls_token': '[CLS]'})
        model.resize_token_embeddings(len(tokenizer))  # Update the model embeddings with the new vocabulary size
        print(tokenizer.cls_token_id, len(tokenizer))  # The newly token the last token of the vocabulary
        choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
        encoded_choices = [tokenizer.encode(s) for s in choices]
        cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices]
        input_ids = torch.tensor(encoded_choices).unsqueeze(0)  # Batch size: 1, number of choices: 2
        mc_token_ids = torch.tensor([cls_token_location])  # Batch size: 1
        outputs = model(input_ids, mc_token_ids=mc_token_ids)
        lm_prediction_scores, mc_prediction_scores = outputs[:2]
    """
    def __init__(self, config):
        # Note: GPT2Model is constructed without source_length here, i.e. the
        # source-segment head mask used elsewhere in this file is disabled.
        super(GPT2DoubleHeadsModel, self).__init__(config)
        self.transformer = GPT2Model(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.multiple_choice_head = SequenceSummary(config)
        self.init_weights()
        self.tie_weights()
    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        self._tie_or_clone_weights(self.lm_head,
                                   self.transformer.wte)
    def forward(self, input_ids, past=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
                mc_token_ids=None, lm_labels=None, mc_labels=None):
        transformer_outputs = self.transformer(input_ids,
                                               past=past,
                                               attention_mask=attention_mask,
                                               token_type_ids=token_type_ids,
                                               position_ids=position_ids,
                                               head_mask=head_mask)
        hidden_states = transformer_outputs[0]
        lm_logits = self.lm_head(hidden_states)
        # Summarize the hidden state at each choice's classification token into one logit.
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
        outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
        if mc_labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)),
                            mc_labels.view(-1))
            outputs = (loss,) + outputs
        if lm_labels is not None:
            # Standard causal-LM shift (unlike GPT2LMHeadModel above): tokens < n predict token n.
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = lm_labels[..., 1:].contiguous()
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                            shift_labels.view(-1))
            outputs = (loss,) + outputs
        return outputs  # (lm loss), (mc loss), lm logits, mc logits, presents, (all hidden_states), (attentions)
class MultiHopGen(GPT2PreTrainedModel):
    """GPT-2 decoder augmented with a relational GCN over an external concept
    graph and a copy gate that mixes LM token probabilities with probabilities
    of graph concepts reachable via multi-hop reasoning.

    Throughout, the value -1 in ``concept_label`` / ``triple_label`` /
    ``gate_label`` marks padding entries that must be ignored.
    """
    def __init__(self, config, source_length=0, gamma=0.8, alpha=1, beta=1, aggregate_method="max", tokenizer=None, hop_number=2):
        # gamma: per-hop decay on propagated node scores (see multi_hop)
        # alpha / beta: loss weights for the gate / triple-score terms (see forward)
        # aggregate_method: "max" or "avg" aggregation of incoming scores in multi_hop
        # hop_number: number of GCN layers and of multi_hop propagation steps
        super(MultiHopGen, self).__init__(config)
        self.transformer = GPT2Model(config, source_length)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        self.hop_number = hop_number
        self.gamma = gamma
        self.alpha = alpha
        self.beta = beta
        self.aggregate_method = aggregate_method
        self.tokenizer = tokenizer
        # Projects a concatenated (head, relation, tail) triple (3 * n_embd) back to n_embd.
        self.triple_linear = nn.Linear(config.n_embd * 3, config.n_embd, bias=False)
        # Per-GCN-layer transforms: W_s self-loop, W_n neighbour messages, W_r relations.
        self.W_s = nn.ModuleList([nn.Linear(config.n_embd, config.n_embd, bias=False) for _ in range(self.hop_number)])
        self.W_n = nn.ModuleList([nn.Linear(config.n_embd, config.n_embd, bias=False) for _ in range(self.hop_number)])
        self.W_r = nn.ModuleList([nn.Linear(config.n_embd, config.n_embd, bias=False) for _ in range(self.hop_number)])
        # Scalar copy-gate computed from the decoder hidden state.
        self.gate_linear = nn.Linear(config.n_embd, 1)
        # 40 relation types -- hard-coded; presumably matches the preprocessed
        # graph's relation vocabulary (TODO confirm against the data pipeline).
        self.relation_embd = nn.Embedding(40, config.n_embd)
        self.init_weights()
        self.tie_weights()
    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        logger.info("Tie weights in head!!!!!")
        self._tie_or_clone_weights(self.lm_head,
                                   self.transformer.wte)
    def multi_layer_comp_gcn(self, concept_hidden, relation_hidden, head, tail, concept_label, triple_label, layer_number=2):
        """Stack `layer_number` comp_gcn layers, updating both node and relation states."""
        for i in range(layer_number):
            concept_hidden, relation_hidden = self.comp_gcn(concept_hidden, relation_hidden, head, tail, concept_label, triple_label, i)
        return concept_hidden, relation_hidden
    def comp_gcn(self, concept_hidden, relation_hidden, head, tail, concept_label, triple_label, layer_idx):
        '''
        One relation-aware GCN layer. Messages flow both head->tail and
        tail->head; the relation embedding is subtracted from each message
        (CompGCN-style "sub" composition -- presumably; confirm against the paper).
        concept_hidden: bsz x mem x hidden
        relation_hidden: bsz x mem_t x hidden
        '''
        bsz = head.size(0)
        mem_t = head.size(1)
        mem = concept_hidden.size(1)
        hidden_size = concept_hidden.size(2)
        update_node = torch.zeros_like(concept_hidden).to(concept_hidden.device).float()
        # count / count_out: per-node number of valid incident triples, for mean-normalization below.
        count = torch.ones_like(head).to(head.device).masked_fill_(triple_label == -1, 0).float()
        count_out = torch.zeros(bsz, mem).to(head.device).float()
        # head -> tail messages (padded triples are zeroed before scattering)
        o = concept_hidden.gather(1, head.unsqueeze(2).expand(bsz, mem_t, hidden_size))
        o = o.masked_fill(triple_label.unsqueeze(2) == -1, 0)
        scatter_add(o, tail, dim=1, out=update_node)
        scatter_add( - relation_hidden.masked_fill(triple_label.unsqueeze(2) == -1, 0), tail, dim=1, out=update_node)
        scatter_add(count, tail, dim=1, out=count_out)
        # tail -> head messages (graph treated as undirected for aggregation)
        o = concept_hidden.gather(1, tail.unsqueeze(2).expand(bsz, mem_t, hidden_size))
        o = o.masked_fill(triple_label.unsqueeze(2) == -1, 0)
        scatter_add(o, head, dim=1, out=update_node)
        scatter_add( - relation_hidden.masked_fill(triple_label.unsqueeze(2) == -1, 0), head, dim=1, out=update_node)
        scatter_add(count, head, dim=1, out=count_out)
        act = nn.ReLU()
        # self-loop term + mean of neighbour messages (clamp avoids division by zero for isolated nodes)
        update_node = self.W_s[layer_idx](concept_hidden) + self.W_n[layer_idx](update_node) / count_out.clamp(min=1).unsqueeze(2)
        update_node = act(update_node)
        return update_node, self.W_r[layer_idx](relation_hidden)
    def multi_layer_gcn(self, concept_hidden, head, tail, concept_label, triple_label, layer_number=2):
        """Stack `layer_number` plain (relation-free) gcn layers."""
        for i in range(layer_number):
            concept_hidden = self.gcn(concept_hidden, head, tail, concept_label, triple_label, i)
        return concept_hidden
    def gcn(self, concept_hidden, head, tail, concept_label, triple_label, layer_idx):
        '''
        One plain GCN layer: same aggregation as comp_gcn but without the
        relation-embedding term.
        concept_hidden: bsz x mem x hidden
        '''
        bsz = head.size(0)
        mem_t = head.size(1)
        mem = concept_hidden.size(1)
        hidden_size = concept_hidden.size(2)
        update_hidden = torch.zeros_like(concept_hidden).to(concept_hidden.device).float()
        count = torch.ones_like(head).to(head.device).masked_fill_(triple_label == -1, 0).float()
        count_out = torch.zeros(bsz, mem).to(head.device).float()
        o = concept_hidden.gather(1, head.unsqueeze(2).expand(bsz, mem_t, hidden_size))
        o = o.masked_fill(triple_label.unsqueeze(2) == -1, 0)
        scatter_add(o, tail, dim=1, out=update_hidden)
        scatter_add(count, tail, dim=1, out=count_out)
        o = concept_hidden.gather(1, tail.unsqueeze(2).expand(bsz, mem_t, hidden_size))
        o = o.masked_fill(triple_label.unsqueeze(2) == -1, 0)
        scatter_add(o, head, dim=1, out=update_hidden)
        scatter_add(count, head, dim=1, out=count_out)
        act = nn.ReLU()
        update_hidden = self.W_s[layer_idx](concept_hidden) + self.W_n[layer_idx](update_hidden) / count_out.clamp(min=1).unsqueeze(2)
        update_hidden = act(update_hidden)
        return update_hidden
    def multi_hop(self, triple_prob, distance, head, tail, concept_label, triple_label, gamma=0.8, iteration = 3, method="avg"):
        '''
        Propagate per-triple scores outwards from the source concepts for
        `iteration` hops and accumulate a per-concept score.
        triple_prob: bsz x L x mem_t
        distance: bsz x mem
        head, tail: bsz x mem_t
        concept_label: bsz x mem
        triple_label: bsz x mem_t
        Init binary vector with source concept == 1 and others 0
        expand to size: bsz x L x mem
        '''
        concept_probs = []
        # Hop-0 scores: 1 at source concepts (distance == 0), 0 elsewhere.
        cpt_size = (triple_prob.size(0), triple_prob.size(1), distance.size(1))
        init_mask = torch.zeros_like(distance).unsqueeze(1).expand(*cpt_size).to(distance.device).float()
        init_mask.masked_fill_((distance == 0).unsqueeze(1), 1)
        final_mask = init_mask.clone()
        init_mask.masked_fill_((concept_label == -1).unsqueeze(1), 0)
        concept_probs.append(init_mask)
        head = head.unsqueeze(1).expand(triple_prob.size(0), triple_prob.size(1), -1)
        tail = tail.unsqueeze(1).expand(triple_prob.size(0), triple_prob.size(1), -1)
        for step in range(iteration):
            '''
            Calculate triple head score
            '''
            node_score = concept_probs[-1]
            triple_head_score = node_score.gather(2, head)
            triple_head_score.masked_fill_((triple_label == -1).unsqueeze(1), 0)
            '''
            Method:
                - avg:
                    s(v) = Avg_{u \in N(v)} gamma * s(u) + R(u->v)
                - max:
                    s(v) = max_{u \in N(v)} gamma * s(u) + R(u->v)
            '''
            update_value = triple_head_score * gamma + triple_prob
            out = torch.zeros_like(node_score).to(node_score.device).float()
            if method == "max":
                scatter_max(update_value, tail, dim=-1, out=out)
            elif method == "avg":
                scatter_mean(update_value, tail, dim=-1, out=out)
            out.masked_fill_((concept_label == -1).unsqueeze(1), 0)
            concept_probs.append(out)
        '''
        Natural decay of concept that is multi-hop away from source
        '''
        # Source concepts (and pad slots, whose distance is also 0) start at -1e5
        # so they are effectively excluded after the downstream softmax; only
        # hop >= 1 scores are summed in.
        total_concept_prob = final_mask * -1e5
        for prob in concept_probs[1:]:
            total_concept_prob += prob
        # bsz x L x mem
        return total_concept_prob
    def forward(self, src_input_ids, attention_mask, src_position_ids,
                target_input_ids, target_position_ids, labels,
                concept_ids, concept_label, distance,
                head, tail, relation, triple_label,
                vocab_map, map_mask, gate_label):
        """Training pass (teacher forcing).

        Encodes the concept graph with the CompGCN stack, concatenates source and
        target tokens, and returns (loss, gen_loss, gate_loss, triple_loss) with
        loss = gen_loss + alpha * gate_loss + beta * triple_loss.
        """
        bsz = src_input_ids.size(0)
        mem_size = concept_ids.size(1)
        # Concept embeddings come from the (tied) GPT-2 token embedding table.
        memory = self.transformer.wte(concept_ids)
        rel_repr = self.relation_embd(relation)
        node_repr, rel_repr = self.multi_layer_comp_gcn(memory, rel_repr, head, tail, concept_label, triple_label, layer_number=self.hop_number)
        head_repr = torch.gather(node_repr, 1, head.unsqueeze(-1).expand(node_repr.size(0), head.size(1), node_repr.size(-1)))
        tail_repr = torch.gather(node_repr, 1, tail.unsqueeze(-1).expand(node_repr.size(0), tail.size(1), node_repr.size(-1)))
        # bsz x mem_triple x hidden
        triple_repr = torch.cat((head_repr, rel_repr, tail_repr), dim=-1)
        '''
        Training phase, merge source and target input
        '''
        assert(not torch.isnan(triple_repr).any().item())
        input_ids = torch.cat([src_input_ids, target_input_ids], dim=1)
        attention_mask = torch.cat([attention_mask, torch.ones_like(target_input_ids).to(target_input_ids.device)], dim=1)
        position_ids = torch.cat([src_position_ids, target_position_ids], dim=1)
        # gate_label == -1 marks positions excluded from the gate loss.
        gate_mask = (gate_label != -1).float()
        gate_label.masked_fill_(gate_label == -1, 0)
        # only optimize if has example
        lm_mask = (gate_label.sum(1) != 0).float().unsqueeze(1)
        gate_mask = lm_mask.expand_as(gate_label) * gate_mask
        # bsz x L
        hybrid_probs, gate, triple_score = self.autoreg_forward(input_ids,
                                                                attention_mask,
                                                                position_ids,
                                                                memory_dict={"triple_repr": triple_repr,
                                                                             "distance": distance,
                                                                             "head": head,
                                                                             "tail": tail,
                                                                             "concept_label": concept_label,
                                                                             "triple_label": triple_label,
                                                                             "vocab_map": vocab_map,
                                                                             "map_mask": map_mask},
                                                                lm_mask=lm_mask)
        '''
        Compute loss: gate loss and generation loss
        '''
        gate_loss_fn = nn.BCELoss(weight=gate_mask.view(-1), reduction='mean')
        gate_loss = gate_loss_fn(gate.view(-1), gate_label.view(-1).float())
        gen_loss_fn = nn.NLLLoss(ignore_index=-1, reduction='mean')
        # Clamp before log to avoid -inf from exactly-zero hybrid probabilities.
        hybrid_probs_clamp = hybrid_probs.clamp(min=1e-5)
        triple_mask = (triple_label != -1).unsqueeze(1).expand_as(triple_score).float()
        triple_label = triple_label.unsqueeze(1).expand_as(triple_score) * triple_mask
        triple_loss_fn = nn.BCELoss(weight=triple_mask.view(-1), reduction='mean')
        triple_loss = triple_loss_fn(triple_score.view(-1), triple_label.view(-1).float())
        gen_loss = gen_loss_fn(hybrid_probs_clamp.log().view(-1, hybrid_probs.size(-1)), labels.view(-1))
        assert(not torch.isinf(gen_loss).any().item())
        loss = gen_loss + self.alpha * gate_loss + self.beta * triple_loss
        return loss, gen_loss, gate_loss, triple_loss
    def generate(self, src_input_ids, attention_mask, src_position_ids,
                 concept_ids, concept_label, distance,
                 head, tail, relation, triple_label,
                 vocab_map, map_mask,
                 seq_generator):
        """Inference pass: encode the graph once, then delegate autoregressive
        decoding to `seq_generator` with `autoreg_forward` as the step function."""
        bsz = src_input_ids.size(0)
        mem_size = concept_ids.size(1)
        memory = self.transformer.wte(concept_ids)
        rel_repr = self.relation_embd(relation)
        node_repr, rel_repr = self.multi_layer_comp_gcn(memory, rel_repr, head, tail, concept_label, triple_label, layer_number=self.hop_number)
        head_repr = torch.gather(node_repr, 1, head.unsqueeze(-1).expand(node_repr.size(0), head.size(1), node_repr.size(-1)))
        tail_repr = torch.gather(node_repr, 1, tail.unsqueeze(-1).expand(node_repr.size(0), tail.size(1), node_repr.size(-1)))
        # bsz x mem_triple x hidden
        triple_repr = torch.cat((head_repr, rel_repr, tail_repr), dim=-1)
        sample = {"input_ids": src_input_ids, "attention_mask": attention_mask, "position_ids": src_position_ids}
        memory = {"triple_repr": triple_repr,
                  "distance": distance,
                  "head": head,
                  "tail": tail,
                  "concept_label": concept_label,
                  "triple_label": triple_label,
                  "vocab_map": vocab_map,
                  "map_mask": map_mask}
        return seq_generator.generate(self.autoreg_forward, sample, memory)
    def autoreg_forward(self, input_ids, attention_mask, position_ids, memory_dict, do_generate=False, lm_mask=None):
        '''
        One decoding step over the full (source + prefix) sequence.
        memory_dict:
            - triple_repr:
            - distance:
            - head:
            - tail:
            - triple_label
            - vocab_map:
            - map_mask:
        return:
            - probs: bsz x L x vocab (hybrid LM/copy distribution)
            - gate: bsz x L x 1
            - triple_score: bsz x L x mem_t
        '''
        hidden_states = self.transformer(input_ids, attention_mask = attention_mask,
                                         position_ids = position_ids)[0]
        if do_generate:
            # At generation time only the newest position is scored.
            hidden_states = hidden_states[:, -1, :].unsqueeze(1)
        sigmoid = nn.Sigmoid()
        tanh = nn.Tanh()
        relu = nn.ReLU()
        softmax = nn.Softmax(dim=-1)
        # Per-triple relevance of each decoder position.
        triple_logits = torch.matmul(hidden_states, self.triple_linear(memory_dict["triple_repr"]).transpose(1, 2))
        triple_score = sigmoid(triple_logits)
        # bsz x L x mem_t
        triple_score = triple_score.masked_fill((memory_dict["triple_label"] == -1).unsqueeze(1), 0)
        # aggregate probability to nodes
        unorm_cpt_probs = self.multi_hop(triple_score,
                                         memory_dict["distance"],
                                         memory_dict["head"],
                                         memory_dict["tail"],
                                         memory_dict["concept_label"],
                                         memory_dict["triple_label"],
                                         gamma = self.gamma,
                                         iteration = self.hop_number,
                                         method = self.aggregate_method)
        # bsz x L x mem
        cpt_probs = softmax(unorm_cpt_probs)
        # bsz x L x mem
        # Map per-concept probabilities onto GPT-2 vocabulary slots via vocab_map.
        cpt_probs_vocab = cpt_probs.gather(2, memory_dict["vocab_map"].unsqueeze(1).expand(cpt_probs.size(0), cpt_probs.size(1), -1))
        cpt_probs_vocab.masked_fill_((memory_dict["map_mask"] == 0).unsqueeze(1), 0)
        # bsz x L x vocab
        gate = sigmoid(self.gate_linear(hidden_states))
        # bsz x L x 1
        lm_logits = self.lm_head(hidden_states)
        lm_probs = softmax(lm_logits)
        # Copy mechanism: mix LM and concept distributions with the learned gate.
        # During training, lm_mask disables the gate for examples with no target concepts.
        if do_generate:
            hybrid_probs = lm_probs * (1 - gate) + gate * cpt_probs_vocab
        else:
            hybrid_probs = lm_probs * (1 - gate * lm_mask.unsqueeze(1)) + gate * lm_mask.unsqueeze(1) * cpt_probs_vocab
        return hybrid_probs, gate, triple_score
| 50,225 | 46.788773 | 148 | py |
multigen | multigen-master/scripts/data.py | import torch
import os
import json
import logging
import csv
import itertools
from torch.utils.data import Dataset
import random
from transformers import BertTokenizer
logger = logging.getLogger()
def normalize_case(text):
    """Capitalize the first character, lower-case the rest, and ensure a
    trailing period.

    Strings of length <= 1 are returned unchanged.

    Args:
        text (str): sentence to normalize.

    Returns:
        str: the normalized sentence.

    Raises:
        RuntimeError: if ``text`` does not support the string operations used
            (chained to the original exception instead of discarding it).
    """
    if len(text) <= 1:
        return text
    try:
        normalized = text[0].upper() + text[1:].lower()
        if not normalized.endswith('.'):
            normalized += '.'
    except Exception as exc:
        # `except Exception` (not bare `except:`) so KeyboardInterrupt and
        # SystemExit still propagate; chain the cause for debuggability.
        raise RuntimeError("Cannot normalize text {}".format(text)) from exc
    return normalized
class MHDataset(Dataset):
    """Multi-hop generation dataset.

    Pairs each source/target text example with a grounded concept sub-graph
    (concepts, triples and hop distances) loaded from ``args.graph_path``.
    ``__getitem__`` returns fully padded tensors ready for ``MultiHopGen``.

    Fixes over the original implementation:
      * ``vocab_map`` positions are now computed only over the concepts that
        survive truncation to ``max_memory_size``, so they can never index past
        the memory in the downstream ``gather``.
      * vocab-map construction is O(V + mem) via a dict instead of O(V * mem)
        via ``list.index`` inside try/except.
    """
    def __init__(self, args, tokenizer, data_path, src_max_length=256, tgt_max_length=64, do_generate=False, max_memory_size=400, max_triple_size=800):
        """
        Args:
            args: namespace providing ``graph_path`` (file name inside ``data_path``).
            tokenizer: GPT-2 style tokenizer with ``encoder``/``decoder`` dicts
                and an ``encode`` method; must define the <|bos|>, <|pad|> and
                <|endoftext|> special tokens.
            data_path (str): directory containing source.csv / target.csv / graph file.
            src_max_length (int): padded length of the source segment.
            tgt_max_length (int): padded length of the target segment.
            do_generate (bool): if True, keep all target references and skip
                building teacher-forcing labels.
            max_memory_size (int): number of concept slots per example.
            max_triple_size (int): number of triple slots per example.
        """
        self.do_generate = do_generate
        self.args = args
        self.src_max_length = src_max_length
        self.tgt_max_length = tgt_max_length
        self.tokenizer = tokenizer
        self.max_memory_size = max_memory_size
        self.max_triple_size = max_triple_size
        self.bos = self.tokenizer.encoder["<|bos|>"]
        self.pad = self.tokenizer.encoder["<|pad|>"]
        self.eos = self.tokenizer.encoder["<|endoftext|>"]
        self.data_path = data_path
    def load(self):
        """Read source/target CSVs and the JSON-lines concept graph from ``self.data_path``."""
        data_path = self.data_path
        self.source = []
        self.source_kg = []  # kept for backward compatibility; unused here
        self.target = []
        self.source_path = os.path.join(data_path, "source.csv")
        with open(self.source_path, 'r') as csv_file:
            for row in csv.reader(csv_file, delimiter=','):
                # column 0 is the example id; the rest are source strings
                self.source.append(row[1:])
        self.target_path = os.path.join(data_path, "target.csv")
        with open(self.target_path, 'r') as csv_file:
            for row in csv.reader(csv_file, delimiter=','):
                if self.do_generate:
                    # keep every reference for evaluation
                    self.target.append(row[1:])
                else:
                    self.target.append(row[1])
        self.concepts = []
        self.concepts_labels = []
        self.distances = []
        self.head_ids = []
        self.tail_ids = []
        self.relations = []
        self.triple_labels = []
        with open(os.path.join(data_path, self.args.graph_path), 'r') as f:
            for line in f.readlines():
                line = json.loads(line)
                assert len(line['concepts']) == len(line['labels']), (len(line['concepts']), len(line['labels']))
                self.concepts.append(line['concepts'])
                self.concepts_labels.append(line['labels'])
                self.distances.append(line['distances'])
                self.head_ids.append(line['head_ids'])
                self.tail_ids.append(line['tail_ids'])
                self.relations.append(line['relations'])
                self.triple_labels.append(line['triple_labels'])
    def __len__(self):
        return len(self.source)
    def print_features(self):
        """Log the first few decoded examples, for eyeballing the preprocessing."""
        logger.info("-"*50 + "Features" + "-"*50)
        exs = [self.__getitem__(i) for i in range(0,min(3, len(self.concepts)))]
        for ex in exs:
            logger.info("Input: {}".format([self.tokenizer.decoder[x] for x in ex[0].tolist()]))
            logger.info("Attention mask: {}".format(ex[1].tolist()))
            logger.info("Position: {}".format(ex[2].tolist()))
            logger.info("Target: {}".format([self.tokenizer.decoder[x] for x in ex[3].tolist()]))
            logger.info("Position: {}".format(ex[4].tolist()))
            logger.info("Labels: {}".format([self.tokenizer.decoder[x] for x in (ex[5].masked_select(ex[5]>=0).tolist())]))
            logger.info("Gate labels: {}".format(ex[-1].tolist()))
    def __getitem__(self, idx):
        """Return the 16-tuple of padded tensors for example ``idx``.

        Order: (src_input_ids, attention_mask, src_position_ids,
        target_input_ids, target_position_ids, labels, concept_ids,
        concept_label, distance, head_ids, tail_ids, relations,
        triple_labels, vocab_map, map_mask, gate_labels).
        """
        src = self.source[idx]
        tgt = self.target[idx]
        concept = self.concepts[idx]
        cpt_label = self.concepts_labels[idx]
        dist = self.distances[idx]
        head_ids_trunc = self.head_ids[idx].copy()
        tail_ids_trunc = self.tail_ids[idx].copy()
        relations_trunc = [x[0] for x in self.relations[idx]]
        triple_labels_trunc = self.triple_labels[idx].copy()
        assert len(dist) == len(concept)
        # Keep only concepts whose (space-prefixed) encoding is a single BPE token.
        concept_ids = []
        concept_label = []
        distance = []
        target_concept_ids = []  # token ids of concepts labelled as appearing in the target
        for e, l, d in zip(concept, cpt_label, dist):
            tok = self.tokenizer.encode(' ' + e)
            if len(tok) == 1:
                concept_ids.append(tok[0])
                concept_label.append(l)
                distance.append(d)
                if l == 1:
                    target_concept_ids.append(tok[0])
        # First-occurrence position of each concept token id within the memory.
        # Restricted to the slots that survive the truncation below so that
        # vocab_map never indexes past max_memory_size (bug fix).
        first_pos = {}
        for pos, tok_id in enumerate(concept_ids[:self.max_memory_size]):
            if tok_id not in first_pos:
                first_pos[tok_id] = pos
        # Truncate / pad the concept memory.
        if len(concept_ids) > self.max_memory_size:
            concept_ids = concept_ids[:self.max_memory_size]
            concept_label = concept_label[:self.max_memory_size]
            distance = distance[:self.max_memory_size]
        while len(concept_ids) < self.max_memory_size:
            concept_ids.append(self.pad)
            concept_label.append(-1)
            distance.append(0)
        # vocab_map[v] = memory slot of GPT-2 token v (0 when absent);
        # map_mask[v] = 1 iff token v is present in the memory.
        # Usage: cpt_prob.gather(-1, vocab_map); cpt_prob_vocab.masked_fill_(map_mask == 0, 0)
        vocab_map = []
        map_mask = []
        for tok_id in self.tokenizer.decoder.keys():
            pos = first_pos.get(tok_id)
            if pos is None:
                vocab_map.append(0)
                map_mask.append(0)
            else:
                vocab_map.append(pos)
                map_mask.append(1)
        assert len(vocab_map) == len(self.tokenizer.decoder), len(vocab_map)
        assert len(map_mask) == len(self.tokenizer.decoder), len(map_mask)
        # Truncate / pad the triple lists in lockstep.
        if len(head_ids_trunc) > self.max_triple_size:
            head_ids_trunc = head_ids_trunc[:self.max_triple_size]
            tail_ids_trunc = tail_ids_trunc[:self.max_triple_size]
            relations_trunc = relations_trunc[:self.max_triple_size]
            triple_labels_trunc = triple_labels_trunc[:self.max_triple_size]
        while len(head_ids_trunc) < self.max_triple_size:
            head_ids_trunc.append(0)
            tail_ids_trunc.append(0)
            relations_trunc.append(0)
            triple_labels_trunc.append(-1)
        # Encode the source segment, terminated by EOS, then pad.
        src_input_ids = []
        for s in src:
            src_input_ids.extend(self.tokenizer.encode(' ' + s))
        src_input_ids.append(self.eos)
        src_position_ids = list(range(len(src_input_ids)))
        assert len(src_input_ids) == len(src_position_ids)
        if len(src_input_ids) > self.src_max_length:
            src_input_ids = src_input_ids[:self.src_max_length]
            src_position_ids = src_position_ids[:self.src_max_length]
        attention_mask = [1] * len(src_input_ids)
        while len(src_input_ids) < self.src_max_length:
            src_input_ids.append(self.pad)
            src_position_ids.append(0)
            attention_mask.append(0)
        # Teacher-forcing target: BOS-prefixed tokens; labels shifted left by one
        # and closed with EOS. Skipped entirely at generation time.
        target_input_ids = []
        target_position_ids = []
        labels = []
        gate_labels = []
        if not self.do_generate:
            target_input_ids = [self.bos] + self.tokenizer.encode(' ' + tgt)
            target_position_ids = list(range(len(target_input_ids)))
            if len(target_input_ids) > self.tgt_max_length:
                target_input_ids = target_input_ids[:self.tgt_max_length]
                target_position_ids = target_position_ids[:self.tgt_max_length]
            labels = target_input_ids[1:] + [self.eos]
            # gate label 1 marks positions whose gold token is a target concept
            gate_labels = [1 if x in target_concept_ids else 0 for x in labels]
            while len(target_input_ids) < self.tgt_max_length:
                target_input_ids.append(self.pad)
                target_position_ids.append(0)
                labels.append(-1)
                gate_labels.append(-1)
        # The model consumes source + target concatenated, so the label rows
        # are left-padded with -1 over the whole source segment.
        gate_labels = [-1] * self.src_max_length + gate_labels
        labels = [-1] * self.src_max_length + labels
        assert len(concept_ids) == self.max_memory_size, len(concept_ids)
        assert len(distance) == self.max_memory_size, len(distance)
        return (torch.tensor(src_input_ids),
                torch.tensor(attention_mask),
                torch.tensor(src_position_ids),
                torch.tensor(target_input_ids),
                torch.tensor(target_position_ids),
                torch.tensor(labels),
                torch.tensor(concept_ids),
                torch.tensor(concept_label),
                torch.tensor(distance),
                torch.tensor(head_ids_trunc),
                torch.tensor(tail_ids_trunc),
                torch.tensor(relations_trunc),
                torch.tensor(triple_labels_trunc),
                torch.tensor(vocab_map),
                torch.tensor(map_mask),
                torch.tensor(gate_labels))
| 9,321 | 38.004184 | 151 | py |
multigen | multigen-master/scripts/seq_generator.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from fairseq import search, utils
from fairseq.data import data_utils
from fairseq.models import FairseqIncrementalDecoder
import logging
logger = logging.getLogger(__name__)
class SequenceGenerator(object):
def __init__(
self,
args,
tgt_dict,
tokenizer,
beam_size=1,
max_len_a=0,
max_len_b=200,
min_len=1,
normalize_scores=True,
len_penalty=1.,
unk_penalty=0.,
retain_dropout=False,
sampling=False,
sampling_topk=-1,
sampling_topp=-1.0,
temperature=1.,
diverse_beam_groups=-1,
diverse_beam_strength=0.5,
match_source_len=False,
no_repeat_ngram_size=0,
):
"""Generates translations of a given source sentence.
Args:
tgt_dict (~fairseq.data.Dictionary): target dictionary
id2tok_fn (HuggingFace tokenizer.decode()): Used to transfer bpe indices to words
beam_size (int, optional): beam width (default: 1)
max_len_a/b (int, optional): generate sequences of maximum length
ax + b, where x is the source length
min_len (int, optional): the minimum length of the generated output
(not including end-of-sentence)
normalize_scores (bool, optional): normalize scores by the length
of the output (default: True)
len_penalty (float, optional): length penalty, where <1.0 favors
shorter, >1.0 favors longer sentences (default: 1.0)
unk_penalty (float, optional): unknown word penalty, where <0
produces more unks, >0 produces fewer (default: 0.0)
retain_dropout (bool, optional): use dropout when generating
(default: False)
sampling (bool, optional): sample outputs instead of beam search
(default: False)
sampling_topk (int, optional): only sample among the top-k choices
at each step (default: -1)
sampling_topp (float, optional): only sample among the smallest set
of words whose cumulative probability mass exceeds p
at each step (default: -1.0)
temperature (float, optional): temperature, where values
>1.0 produce more uniform samples and values <1.0 produce
sharper samples (default: 1.0)
diverse_beam_groups/strength (float, optional): parameters for
Diverse Beam Search sampling
match_source_len (bool, optional): outputs should match the source
length (default: False)
"""
self.args = args
self.tgt_dict = tgt_dict
self.tokenizer = tokenizer
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.bos = tgt_dict.bos()
self.vocab_size = len(tgt_dict)
self.beam_size = beam_size
# the max beam size is the dictionary size - 1, since we never select pad
self.beam_size = min(beam_size, self.vocab_size - 1)
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.min_len = min_len
self.normalize_scores = normalize_scores
self.len_penalty = len_penalty
self.unk_penalty = unk_penalty
self.retain_dropout = retain_dropout
self.temperature = temperature
self.match_source_len = match_source_len
self.no_repeat_ngram_size = no_repeat_ngram_size
self.gpt2_max_length = 1024
assert sampling_topk < 0 or sampling, '--sampling-topk requires --sampling'
assert sampling_topp < 0 or sampling, '--sampling-topp requires --sampling'
assert temperature > 0, '--temperature must be greater than 0'
if sampling:
self.search = search.Sampling(tgt_dict, sampling_topk, sampling_topp)
elif diverse_beam_groups > 0:
self.search = search.DiverseBeamSearch(tgt_dict, diverse_beam_groups, diverse_beam_strength)
elif match_source_len:
self.search = search.LengthConstrainedBeamSearch(
tgt_dict, min_len_a=1, min_len_b=0, max_len_a=1, max_len_b=0,
)
else:
self.search = search.BeamSearch(tgt_dict)
@torch.no_grad()
def ensemble_generate(self, models, sample, **kwargs):
    """Run generation with an ensemble: wrap *models* and delegate to :meth:`generate`."""
    return self.generate(EnsembleModel(models), sample, **kwargs)
@torch.no_grad()
def generate(
    self,
    model_forward,
    sample,
    memory,
    prefix_tokens=None,
    output_text=True,
    **kwargs
):
    #if not self.retain_dropout:
    #    model.eval()
    """Generate a batch of sequences with beam search.

    Args:
        model_forward: callable running the underlying GPT-2 style model; must
            return ``(probs, out_gate, _)`` when called with
            input_ids/attention_mask/position_ids/memory_dict/do_generate.
        sample (dict): batch with keys, in this exact order:
            - input_ids
            - attention_mask
            - position_ids
        memory (dict): extra batch-major tensors carried and reordered
            alongside the encoder inputs.
        prefix_tokens (torch.LongTensor, optional): force decoder to begin
            with these tokens
        output_text (bool): if True, decode the best hypothesis of each
            sentence to a string via ``self.tokenizer``; otherwise return the
            raw token-id lists.

    Returns:
        list with one entry per input sentence (best hypothesis only).
    """
    # model.forward normally channels prev_output_tokens into the decoder
    # separately, but SequenceGenerator directly calls model.encoder
    #encoder_input = {
    #    k: v for k, v in sample['net_input'].items()
    #    if k != 'prev_output_tokens'
    #}
    bos_token = self.tgt_dict.bos()
    # Validate the key order and keep only the source segment of every input.
    encoder_outs = {}
    k_order = ["input_ids", "attention_mask", "position_ids"]
    for i, (k,v) in enumerate(sample.items()):
        assert(k_order[i] == k), "generator input sample should be Dict with keys ordered in {}!".format(k_order)
        encoder_outs[k] = v[:,:self.args.source_length] # pop out bos
    encoder_outs.update(memory)
    src_tokens = encoder_outs["input_ids"]
    # NOTE(review): this sums the token ids themselves, not a 0/1 mask — it only
    # equals a length if input_ids here is binary; confirm against the caller
    # (the conventional computation is the commented-out line below).
    src_lengths = encoder_outs["input_ids"].long().sum(dim=1)
    #src_lengths = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
    input_size = src_tokens.size()
    # batch dimension goes first followed by source lengths
    bsz = input_size[0]
    src_len = input_size[1]
    beam_size = self.beam_size
    if self.match_source_len:
        max_len = src_lengths.max().item()
    else:
        max_len = min(
            int(self.max_len_a * src_len + self.max_len_b),
            # exclude the EOS marker
            self.gpt2_max_length - 1,
        )
    # compute the encoder output for each beam
    #encoder_outs = model.forward_encoder(encoder_input)
    # Replicate every sentence beam_size times (row-major: beams of a sentence
    # are contiguous), so each beam hypothesis has its own input copy.
    new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
    new_order = new_order.to(src_tokens.device).long()
    encoder_outs = reorder_encoder_out(encoder_outs, new_order)
    # initialize buffers; *_buf tensors are scratch space swapped with the
    # primary tensors at the end of every step.
    scores = src_tokens.new(bsz * beam_size, max_len + 1).to(src_tokens.device).float().fill_(0)
    scores_buf = scores.clone()
    tokens = src_tokens.new(bsz * beam_size, max_len + 2).to(src_tokens.device).long().fill_(self.pad)
    tokens_buf = tokens.clone()
    tokens[:, 0] = bos_token
    # Per-step gate values emitted by the model (uninitialized storage; filled
    # as decoding progresses).
    gates = scores.new(bsz * beam_size, max_len + 2)
    gates_buf = gates.clone()
    attn, attn_buf = None, None
    # The blacklist indicates candidates that should be ignored.
    # For example, suppose we're sampling and have already finalized 2/5
    # samples. Then the blacklist would mark 2 positions as being ignored,
    # so that we only finalize the remaining 3 samples.
    blacklist = src_tokens.new_zeros(bsz, beam_size).eq(-1)  # forward and backward-compatible False mask
    # list of completed sentences
    finalized = [[] for i in range(bsz)]
    finished = [False for i in range(bsz)]
    num_remaining_sent = bsz
    # number of candidate hypos per step
    cand_size = 2 * beam_size  # 2 x beam size in case half are EOS
    # offset arrays for converting between different indexing schemes
    bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
    cand_offsets = torch.arange(0, cand_size).type_as(tokens)
    # helper function for allocating buffers on the fly
    # (defaults are bound at definition time, which is intentional here)
    buffers = {}

    def buffer(name, type_of=tokens, device=src_tokens.device):  # noqa
        if name not in buffers:
            buffers[name] = type_of.new().to(device)
        return buffers[name]

    def is_finished(sent, step, unfin_idx):
        """
        Check whether we've finished generation for a given sentence, by
        comparing the worst score among finalized hypotheses to the best
        possible score among unfinalized hypotheses.
        """
        assert len(finalized[sent]) <= beam_size
        if len(finalized[sent]) == beam_size:
            return True
        return False

    def finalize_hypos(step, bbsz_idx, eos_scores):
        """
        Finalize the given hypotheses at this step, while keeping the total
        number of finalized hypotheses per sentence <= beam_size.
        Note: the input must be in the desired finalization order, so that
        hypotheses that appear earlier in the input are preferred to those
        that appear later.
        Args:
            step: current time step
            bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
                indicating which hypotheses to finalize
            eos_scores: A vector of the same size as bbsz_idx containing
                scores for each hypothesis
        """
        assert bbsz_idx.numel() == eos_scores.numel()
        # clone relevant token and attention tensors
        tokens_clone = tokens.index_select(0, bbsz_idx)
        tokens_clone = tokens_clone[:, 1:step + 2]  # skip the first index, which is EOS
        assert not tokens_clone.eq(self.eos).any()
        tokens_clone[:, step] = self.eos
        attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step+2] if attn is not None else None
        gates_clone = gates.index_select(0, bbsz_idx)[:, 1:step+2]
        # compute scores per token position
        pos_scores = scores.index_select(0, bbsz_idx)[:, :step+1]
        pos_scores[:, step] = eos_scores
        # convert from cumulative to per-position scores
        pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
        # normalize sentence-level scores
        if self.normalize_scores:
            eos_scores /= (step + 1) ** self.len_penalty
        # cum_unfin[i]: how many sentences before unfinished-index i are
        # already finished; used to map unfinished indices back to the
        # original sentence positions.
        cum_unfin = []
        prev = 0
        for f in finished:
            if f:
                prev += 1
            else:
                cum_unfin.append(prev)
        sents_seen = set()
        for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):
            unfin_idx = idx // beam_size
            sent = unfin_idx + cum_unfin[unfin_idx]
            sents_seen.add((sent, unfin_idx))
            if self.match_source_len and step > src_lengths[unfin_idx]:
                score = -math.inf

            def get_hypo():
                if attn_clone is not None:
                    # remove padding tokens from attn scores
                    hypo_attn = attn_clone[i]
                else:
                    hypo_attn = None
                return {
                    'tokens': tokens_clone[i],
                    'score': score,
                    'attention': hypo_attn,  # src_len x tgt_len
                    "gates": gates_clone[i],
                    'alignment': None,
                    'positional_scores': pos_scores[i],
                }
            if len(finalized[sent]) < beam_size:
                finalized[sent].append(get_hypo())
        newly_finished = []
        for sent, unfin_idx in sents_seen:
            # check termination conditions for this sentence
            if not finished[sent] and is_finished(sent, step, unfin_idx):
                finished[sent] = True
                newly_finished.append(unfin_idx)
        return newly_finished

    reorder_state = None
    batch_idxs = None
    for step in range(max_len + 1):  # one extra step for EOS marker
        # reorder decoder internal states based on the prev choice of beams
        if reorder_state is not None:
            if batch_idxs is not None:
                # update beam indices to take into account removed sentences
                corr = batch_idxs.to(src_tokens.device) - torch.arange(batch_idxs.numel()).type_as(batch_idxs).to(src_tokens.device)
                reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
            encoder_outs = reorder_encoder_out(encoder_outs, reorder_state)
        # Rebuild the model input: source segment + all tokens generated so
        # far; attention_mask and position_ids each grow by one column per step.
        encoder_outs["input_ids"] = torch.cat((encoder_outs["input_ids"][:, :src_len], tokens[:, :step+1]), dim=1)
        encoder_outs["attention_mask"] = torch.cat((encoder_outs["attention_mask"],
            encoder_outs["attention_mask"].new_ones(encoder_outs["attention_mask"].size(0), 1)), dim=1)
        encoder_outs["position_ids"] = torch.cat((encoder_outs["position_ids"],
            encoder_outs["position_ids"].new_ones(encoder_outs["position_ids"].size(0), 1) * step), dim=1)
        probs, out_gate, _ = model_forward(input_ids = encoder_outs["input_ids"],
                                           attention_mask = encoder_outs["attention_mask"],
                                           position_ids = encoder_outs["position_ids"],
                                           memory_dict = encoder_outs,
                                           do_generate = True)
        # keep only the distribution of the newest position; work in log space
        probs = probs[:, -1, :]
        lprobs = probs.log()
        lprobs[:, self.pad] = -math.inf  # never select pad
        lprobs[:, self.unk] -= self.unk_penalty  # apply unk penalty
        # handle min and max length constraints
        if step >= max_len:
            lprobs[:, :self.eos] = -math.inf
            lprobs[:, self.eos + 1:] = -math.inf
        elif step < self.min_len:
            lprobs[:, self.eos] = -math.inf
        # handle prefix tokens (possibly with different lengths)
        if prefix_tokens is not None and step < prefix_tokens.size(1):
            prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
            prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
            prefix_mask = prefix_toks.ne(self.pad)
            # force the prefix token: everything else gets -inf probability
            lprobs[prefix_mask] = -math.inf
            lprobs[prefix_mask] = lprobs[prefix_mask].scatter_(
                -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs
            )
            # if prefix includes eos, then we should make sure tokens and
            # scores are the same across all beams
            eos_mask = prefix_toks.eq(self.eos)
            if eos_mask.any():
                # validate that the first beam matches the prefix
                first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1]
                eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
                target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
                assert (first_beam == target_prefix).all()

                def replicate_first_beam(tensor, mask):
                    tensor = tensor.view(-1, beam_size, tensor.size(-1))
                    tensor[mask] = tensor[mask][:, :1, :]
                    return tensor.view(-1, tensor.size(-1))
                # copy tokens, scores and lprobs from the first beam to all beams
                tokens = replicate_first_beam(tokens, eos_mask_batch_dim)
                scores = replicate_first_beam(scores, eos_mask_batch_dim)
                lprobs = replicate_first_beam(lprobs, eos_mask_batch_dim)
        if self.no_repeat_ngram_size > 0:
            # for each beam and batch sentence, generate a list of previous ngrams
            gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)]
            for bbsz_idx in range(bsz * beam_size):
                gen_tokens = tokens[bbsz_idx].tolist()
                for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]):
                    gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \
                        gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]]
        # NOTE(review): avg_attn_scores is always None here, so the attention
        # recording branch below is currently dead code.
        avg_attn_scores = None
        # Record attention scores
        if avg_attn_scores is not None:
            if attn is None:
                attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2)
                attn_buf = attn.clone()
            attn[:, :, step + 1].copy_(avg_attn_scores)
        # assumes out_gate is (bbsz, 1, 1): one gate scalar per hypothesis —
        # TODO confirm against the model's output shape.
        gates[:, step + 1].copy_(out_gate[:, 0, 0])
        scores = scores.type_as(lprobs)
        scores_buf = scores_buf.type_as(lprobs)
        eos_bbsz_idx = buffer('eos_bbsz_idx')
        eos_scores = buffer('eos_scores', type_of=scores)
        self.search.set_src_lengths(src_lengths)
        if self.no_repeat_ngram_size > 0:
            def calculate_banned_tokens(bbsz_idx):
                # before decoding the next token, prevent decoding of ngrams that have already appeared
                ngram_index = tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist())
                return gen_ngrams[bbsz_idx].get(ngram_index, [])
            if step + 2 - self.no_repeat_ngram_size >= 0:
                # no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
                banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)]
            else:
                banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)]
            for bbsz_idx in range(bsz * beam_size):
                lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf
        # Search strategy picks 2*beam_size candidates per sentence.
        cand_scores, cand_indices, cand_beams = self.search.step(
            step,
            lprobs.view(bsz, -1, self.vocab_size),
            scores.view(bsz, beam_size, -1)[:, :, :step],
        )
        # cand_bbsz_idx contains beam indices for the top candidate
        # hypotheses, with a range of values: [0, bsz*beam_size),
        # and dimensions: [bsz, cand_size]
        bbsz_offsets = bbsz_offsets.to(cand_beams.device)
        cand_bbsz_idx = cand_beams.add(bbsz_offsets)
        # finalize hypotheses that end in eos (except for blacklisted ones)
        eos_mask = cand_indices.eq(self.eos)
        eos_mask[:, :beam_size][blacklist] = 0
        # only consider eos when it's among the top beam_size indices
        torch.masked_select(
            cand_bbsz_idx[:, :beam_size],
            mask=eos_mask[:, :beam_size],
            out=eos_bbsz_idx,
        )
        finalized_sents = set()
        if eos_bbsz_idx.numel() > 0:
            torch.masked_select(
                cand_scores[:, :beam_size],
                mask=eos_mask[:, :beam_size],
                out=eos_scores,
            )
            finalized_sents = finalize_hypos(step, eos_bbsz_idx, eos_scores)
            num_remaining_sent -= len(finalized_sents)
        assert num_remaining_sent >= 0
        if num_remaining_sent == 0:
            break
        assert step < max_len
        if len(finalized_sents) > 0:
            # Some sentences finished this step: shrink every per-batch tensor
            # to drop their rows before continuing.
            new_bsz = bsz - len(finalized_sents)
            # construct batch_idxs which holds indices of batches to keep for the next pass
            batch_mask = cand_indices.new_ones(bsz)
            batch_mask[cand_indices.new(finalized_sents)] = 0
            batch_idxs = batch_mask.nonzero().squeeze(-1)
            eos_mask = eos_mask[batch_idxs]
            cand_beams = cand_beams[batch_idxs]
            bbsz_offsets.resize_(new_bsz, 1)
            cand_bbsz_idx = cand_beams.add(bbsz_offsets)
            cand_scores = cand_scores[batch_idxs]
            cand_indices = cand_indices[batch_idxs]
            if prefix_tokens is not None:
                prefix_tokens = prefix_tokens[batch_idxs]
            src_lengths = src_lengths[batch_idxs]
            blacklist = blacklist[batch_idxs]
            scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
            scores_buf.resize_as_(scores)
            tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
            tokens_buf.resize_as_(tokens)
            if attn is not None:
                attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
                attn_buf.resize_as_(attn)
            gates = gates.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
            gates_buf.resize_as_(gates)
            bsz = new_bsz
        else:
            batch_idxs = None
        # Set active_mask so that values > cand_size indicate eos or
        # blacklisted hypos and values < cand_size indicate candidate
        # active hypos. After this, the min values per row are the top
        # candidate active hypos.
        active_mask = buffer('active_mask')
        eos_mask[:, :beam_size] |= blacklist
        torch.add(
            (eos_mask.type_as(cand_offsets) * cand_size).to(src_tokens.device),
            cand_offsets[:eos_mask.size(1)].to(src_tokens.device),
            out=active_mask,
        )
        # get the top beam_size active hypotheses, which are just the hypos
        # with the smallest values in active_mask
        active_hypos, new_blacklist = buffer('active_hypos'), buffer('new_blacklist')
        torch.topk(
            active_mask, k=beam_size, dim=1, largest=False,
            out=(new_blacklist, active_hypos)
        )
        # update blacklist to ignore any finalized hypos
        blacklist = new_blacklist.ge(cand_size)[:, :beam_size]
        assert (~blacklist).any(dim=1).all()
        active_bbsz_idx = buffer('active_bbsz_idx')
        torch.gather(
            cand_bbsz_idx, dim=1, index=active_hypos,
            out=active_bbsz_idx,
        )
        active_scores = torch.gather(
            cand_scores, dim=1, index=active_hypos,
            out=scores[:, step].view(bsz, beam_size),
        )
        active_bbsz_idx = active_bbsz_idx.view(-1)
        active_scores = active_scores.view(-1)
        # copy tokens and scores for active hypotheses
        torch.index_select(
            tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
            out=tokens_buf[:, :step + 1],
        )
        torch.gather(
            cand_indices, dim=1, index=active_hypos,
            out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
        )
        if step > 0:
            torch.index_select(
                scores[:, :step], dim=0, index=active_bbsz_idx,
                out=scores_buf[:, :step],
            )
        torch.gather(
            cand_scores, dim=1, index=active_hypos,
            out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
        )
        # copy attention for active hypotheses
        if attn is not None:
            torch.index_select(
                attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
                out=attn_buf[:, :, :step + 2],
            )
        torch.index_select(
            gates[:, :step + 2], dim=0, index=active_bbsz_idx,
            out=gates_buf[:, :step + 2],
        )
        # swap buffers
        tokens, tokens_buf = tokens_buf, tokens
        scores, scores_buf = scores_buf, scores
        gates, gates_buf = gates_buf, gates
        if attn is not None:
            attn, attn_buf = attn_buf, attn
        # reorder incremental state in decoder
        reorder_state = active_bbsz_idx
    # sort by score descending
    for sent in range(len(finalized)):
        finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)
    # Return only the best hypothesis of each sentence, decoded to text when
    # requested (the trailing EOS token is stripped in both cases).
    result_strs = []
    result_scores = []
    for j, hypo in enumerate(finalized):
        hypo = hypo[0]
        hypo_tokens = hypo['tokens']
        hypo_gates = hypo['gates']
        result_scores.append(hypo['score'])
        if output_text:
            result_strs.append(self.tokenizer.decode(hypo_tokens.tolist()[:-1]))
        else:
            result_strs.append(hypo_tokens.tolist()[:-1])
    return result_strs#, result_scores
def reorder_encoder_out(encoder_outs, new_order):
    """
    Reorder encoder output according to *new_order*.

    Args:
        encoder_outs: dict mapping names to batch-major tensors
        new_order (LongTensor): desired order of the batch dimension

    Returns:
        a new dict with every tensor rearranged according to *new_order*
    """
    return {
        name: tensor.index_select(0, new_order)
        for name, tensor in encoder_outs.items()
    }
class EnsembleModel(torch.nn.Module):
    """A wrapper around an ensemble of models.

    Averages the decoder distributions of several models in log space and,
    when every member has an incremental decoder, keeps one incremental
    state per model.
    """

    def __init__(self, models):
        super().__init__()
        self.models = torch.nn.ModuleList(models)
        self.incremental_states = None
        # Incremental decoding only works if every member supports it.
        if all(isinstance(m.decoder, FairseqIncrementalDecoder) for m in models):
            self.incremental_states = {m: {} for m in models}

    def has_encoder(self):
        # Decoder-only members have no .encoder attribute.
        return hasattr(self.models[0], 'encoder')

    def max_decoder_positions(self):
        # The ensemble can only decode as far as its most limited member.
        return min(m.max_decoder_positions() for m in self.models)

    @torch.no_grad()
    def forward_encoder(self, encoder_input):
        if not self.has_encoder():
            return None
        return [model.encoder(**encoder_input) for model in self.models]

    @torch.no_grad()
    def forward_decoder(self, tokens, encoder_outs, temperature=1.):
        # Fast path: a single model needs no averaging.
        if len(self.models) == 1:
            return self._decode_one(
                tokens,
                self.models[0],
                encoder_outs[0] if self.has_encoder() else None,
                self.incremental_states,
                log_probs=True,
                temperature=temperature,
            )
        log_probs = []
        avg_attn = None
        for model, encoder_out in zip(self.models, encoder_outs):
            probs, attn = self._decode_one(
                tokens,
                model,
                encoder_out,
                self.incremental_states,
                log_probs=True,
                temperature=temperature,
            )
            log_probs.append(probs)
            # Attention matrices are averaged in place across members.
            if attn is not None:
                if avg_attn is None:
                    avg_attn = attn
                else:
                    avg_attn.add_(attn)
        # log of the mean probability over models:
        # log( (1/N) * sum_i exp(log_probs_i) )
        avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(len(self.models))
        if avg_attn is not None:
            avg_attn.div_(len(self.models))
        return avg_probs, avg_attn

    def _decode_one(
        self, tokens, model, encoder_out, incremental_states, log_probs,
        temperature=1.,
    ):
        # NOTE(review): this reads self.incremental_states and ignores the
        # incremental_states parameter it was given.
        if self.incremental_states is not None:
            decoder_out = list(model.forward_decoder(
                tokens, encoder_out=encoder_out, incremental_state=self.incremental_states[model],
            ))
        else:
            decoder_out = list(model.forward_decoder(tokens, encoder_out=encoder_out))
        # keep only the newest time step of the decoder output
        decoder_out[0] = decoder_out[0][:, -1:, :]
        if temperature != 1.:
            decoder_out[0].div_(temperature)
        attn = decoder_out[1]
        if type(attn) is dict:
            attn = attn.get('attn', None)
        if attn is not None:
            attn = attn[:, -1, :]
        probs = model.get_normalized_probs(decoder_out, log_probs=log_probs)
        probs = probs[:, -1, :]
        return probs, attn

    def reorder_encoder_out(self, encoder_outs, new_order):
        if not self.has_encoder():
            return
        return [
            model.encoder.reorder_encoder_out(encoder_out, new_order)
            for model, encoder_out in zip(self.models, encoder_outs)
        ]

    def reorder_incremental_state(self, new_order):
        if self.incremental_states is None:
            return
        for model in self.models:
            model.decoder.reorder_incremental_state(self.incremental_states[model], new_order)
class SequenceGeneratorWithAlignment(SequenceGenerator):

    def __init__(self, tgt_dict, left_pad_target=False, **kwargs):
        """Generates translations of a given source sentence.
        Produces alignments following "Jointly Learning to Align and
        Translate with Transformer Models" (Garg et al., EMNLP 2019).
        Args:
            left_pad_target (bool, optional): Whether or not the
                hypothesis should be left padded or not when they are
                teacher forced for generating alignments.
        """
        # NOTE(review): the parent __init__ signature above starts with
        # (args, tgt_dict, tokenizer, ...), so tgt_dict is bound to `args`
        # here — confirm whether this subclass is actually used/working.
        super().__init__(tgt_dict, **kwargs)
        self.left_pad_target = left_pad_target

    @torch.no_grad()
    def generate(self, models, sample, **kwargs):
        model = EnsembleModelWithAlignment(models)
        # NOTE(review): the parent class defines generate(), not _generate();
        # this call raises AttributeError unless _generate exists elsewhere.
        finalized = super()._generate(model, sample, **kwargs)
        src_tokens = sample['net_input']['src_tokens']
        bsz = src_tokens.shape[0]
        beam_size = self.beam_size
        src_tokens, src_lengths, prev_output_tokens, tgt_tokens = \
            self._prepare_batch_for_alignment(sample, finalized)
        if any(getattr(m, 'full_context_alignment', False) for m in model.models):
            # Re-run the models with teacher forcing to get full-context attention.
            attn = model.forward_align(src_tokens, src_lengths, prev_output_tokens)
        else:
            # Otherwise reuse the attention recorded during beam search.
            attn = [
                finalized[i // beam_size][i % beam_size]['attention'].transpose(1, 0)
                for i in range(bsz * beam_size)
            ]
        # Process the attn matrix to extract hard alignments.
        for i in range(bsz * beam_size):
            alignment = utils.extract_hard_alignment(attn[i], src_tokens[i], tgt_tokens[i], self.pad, self.eos)
            finalized[i // beam_size][i % beam_size]['alignment'] = alignment
        return finalized

    def _prepare_batch_for_alignment(self, sample, hypothesis):
        # Tile every source row beam_size times so it lines up with the
        # flattened (sentence, beam) hypothesis order.
        src_tokens = sample['net_input']['src_tokens']
        bsz = src_tokens.shape[0]
        src_tokens = src_tokens[:, None, :].expand(-1, self.beam_size, -1).contiguous().view(bsz * self.beam_size, -1)
        src_lengths = sample['net_input']['src_lengths']
        src_lengths = src_lengths[:, None].expand(-1, self.beam_size).contiguous().view(bsz * self.beam_size)
        # prev_output_tokens is the EOS-shifted version used for teacher forcing.
        prev_output_tokens = data_utils.collate_tokens(
            [beam['tokens'] for example in hypothesis for beam in example],
            self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=True,
        )
        tgt_tokens = data_utils.collate_tokens(
            [beam['tokens'] for example in hypothesis for beam in example],
            self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=False,
        )
        return src_tokens, src_lengths, prev_output_tokens, tgt_tokens
class EnsembleModelWithAlignment(EnsembleModel):
    """A wrapper around an ensemble of models."""

    def __init__(self, models):
        super().__init__(models)

    def forward_align(self, src_tokens, src_lengths, prev_output_tokens):
        # Teacher-forced pass over every member; averages their full
        # attention matrices (used downstream to extract word alignments).
        avg_attn = None
        for model in self.models:
            decoder_out = model(src_tokens, src_lengths, prev_output_tokens)
            attn = decoder_out[1]['attn']
            if avg_attn is None:
                avg_attn = attn
            else:
                avg_attn.add_(attn)
        if len(self.models) > 1:
            avg_attn.div_(len(self.models))
        return avg_attn

    def _decode_one(
        self, tokens, model, encoder_out, incremental_states, log_probs,
        temperature=1.,
    ):
        # NOTE(review): identical to EnsembleModel._decode_one; the override
        # could be dropped.
        if self.incremental_states is not None:
            decoder_out = list(model.forward_decoder(
                tokens,
                encoder_out=encoder_out,
                incremental_state=self.incremental_states[model],
            ))
        else:
            decoder_out = list(model.forward_decoder(tokens, encoder_out=encoder_out))
        decoder_out[0] = decoder_out[0][:, -1:, :]
        if temperature != 1.:
            decoder_out[0].div_(temperature)
        attn = decoder_out[1]
        if type(attn) is dict:
            attn = attn.get('attn', None)
        if attn is not None:
            attn = attn[:, -1, :]
        probs = model.get_normalized_probs(decoder_out, log_probs=log_probs)
        probs = probs[:, -1, :]
        # NOTE: the trailing "| 33,724 | ..." tokens below are dataset-extraction
        # residue present in the original text, not code.
        return probs, attn | 33,724 | 41.36809 | 136 | py |
multigen | multigen-master/preprocess/find_neighbours.py | import configparser
import networkx as nx
import itertools
import math
import random
import json
from tqdm import tqdm
import sys
import time
import timeit
import numpy as np
import torch
from collections import Counter
import spacy
from scipy import spatial
import sys
# Module-level shared state, filled in lazily by the loader functions below.
config = configparser.ConfigParser()
config.read("paths.cfg")  # resource locations: vocab files, ConceptNet pickle, data dirs
cpnet = None         # full ConceptNet multigraph (networkx), set by load_cpnet()
cpnet_simple = None  # weight-collapsed simple graph, set by load_cpnet()
concept2id = None    # concept string -> id, set by load_resources()
relation2id = None   # relation string -> id, set by load_resources()
id2relation = None   # inverse of relation2id
id2concept = None    # inverse of concept2id
# spaCy pipeline with heavy components disabled (tokenization/tagging only).
nlp = spacy.load('en_core_web_sm', disable=['ner', 'parser', 'textcat'])
def load_resources():
    """Populate the global concept and relation vocabularies from the files
    named in ``paths.cfg`` (one token per line; line number becomes the id)."""
    global concept2id, relation2id, id2relation, id2concept, concept_embs, relation_embs
    concept2id, id2concept = {}, {}
    with open(config["paths"]["concept_vocab"], "r", encoding="utf8") as vocab_file:
        for line in vocab_file.readlines():
            token = line.strip()
            concept2id[token] = len(concept2id)
            id2concept[len(id2concept)] = token
    print("concept2id done")
    relation2id, id2relation = {}, {}
    with open(config["paths"]["relation_vocab"], "r", encoding="utf8") as vocab_file:
        for line in vocab_file.readlines():
            token = line.strip()
            id2relation[len(id2relation)] = token
            relation2id[token] = len(relation2id)
    print("relation2id done")
def load_cpnet():
    """Load the pickled ConceptNet multigraph and collapse it into a simple
    undirected graph whose edge weights are the sums over parallel edges."""
    global cpnet, concept2id, relation2id, id2relation, id2concept, cpnet_simple
    print("loading cpnet....")
    cpnet = nx.read_gpickle(config["paths"]["conceptnet_en_graph"])
    print("Done")
    cpnet_simple = nx.Graph()
    for u, v, data in cpnet.edges(data=True):
        # Edges without an explicit weight count as 1.0.
        w = data.get('weight', 1.0)
        if cpnet_simple.has_edge(u, v):
            cpnet_simple[u][v]['weight'] += w
        else:
            cpnet_simple.add_edge(u, v, weight=w)
def get_edge(src_concept, tgt_concept):
    """Return the deduplicated relation ids on the edges from *src_concept*
    to *tgt_concept* in the global ``cpnet`` multigraph, or [] if no edge
    exists between the two concepts."""
    global cpnet, concept2id, relation2id, id2relation, id2concept
    try:
        rel_list = cpnet[src_concept][tgt_concept]
    except KeyError:
        # BUG FIX: was a bare `except:`, which silently swallowed every error
        # (including cpnet not being loaded, or Ctrl-C). Only a missing
        # node/edge — a KeyError — means "no relations".
        return []
    return list(set(rel_list[item]["rel"] for item in rel_list))
def cosine_score_triple(h, t, r):
    """Score a triple (h, r, t) by the cosine similarity between the relation
    embedding and the head/tail embedding difference, mapped into [0, 1]."""
    global concept_embs, relation_embs
    #return np.linalg.norm(t-h-r)
    # Relation ids >= 17 denote inverse relations: use the mirrored relation
    # embedding and flip the direction of the concept difference.
    if r < 17:
        rel_vec = relation_embs[r]
        diff = concept_embs[t] - concept_embs[h]
    else:
        rel_vec = relation_embs[r - 17]
        diff = concept_embs[h] - concept_embs[t]
    return (1 + 1 - spatial.distance.cosine(rel_vec, diff)) / 2
def find_neighbours_frequency(source_sentence, source_concepts, target_concepts, T, max_B=100):
    """Expand *source_concepts* over the simplified ConceptNet graph for T hops.

    At every hop, only the max_B neighbours most frequently reached from the
    current frontier are kept, and expansion is restricted to concepts that
    occur somewhere in the dataset (global ``total_concepts_id``).

    NOTE(review): ``source_sentence`` is accepted but never used.

    Returns:
        (dict with keys "concepts"/"labels"/"distances"/"triples",
         number of target concepts reached, number of concepts kept)
    """
    global cpnet, concept2id, relation2id, id2relation, id2concept, cpnet_simple, total_concepts_id
    source = [concept2id[s_cpt] for s_cpt in source_concepts]
    start = source
    Vts = dict([(x,0) for x in start])  # visited node id -> hop distance
    Ets = {}                            # reached node id -> {predecessor id: [relation ids]}
    total_concepts_id_set = set(total_concepts_id)
    for t in range(T):
        V = {}           # frontier candidate -> number of in-links from current start set
        templates = []   # NOTE(review): never used
        for s in start:
            if s in cpnet_simple:
                for n in cpnet_simple[s]:
                    if n not in Vts and n in total_concepts_id_set:
                        if n not in Vts:  # redundant: already guaranteed by the outer condition
                            if n not in V:
                                V[n] = 1
                            else:
                                V[n] += 1
                            # Record the relations connecting s -> n (only when
                            # the multigraph actually has labelled edges).
                            if n not in Ets:
                                rels = get_edge(s, n)
                                if len(rels) > 0:
                                    Ets[n] = {s: rels}
                            else:
                                rels = get_edge(s, n)
                                if len(rels) > 0:
                                    Ets[n].update({s: rels})
        # Keep only the max_B most frequently reached neighbours as next frontier.
        V = list(V.items())
        count_V = sorted(V, key=lambda x: x[1], reverse=True)[:max_B]
        start = [x[0] for x in count_V if x[0] in total_concepts_id_set]
        Vts.update(dict([(x, t+1) for x in start]))
    _concepts = list(Vts.keys())
    _distances = list(Vts.values())
    # NOTE(review): this copy loop is a no-op filter (keeps every element).
    concepts = []
    distances = []
    for c, d in zip(_concepts, _distances):
        concepts.append(c)
        distances.append(d)
    assert(len(concepts) == len(distances))
    # Keep only triples whose both endpoints survived the pruning.
    triples = []
    for v, N in Ets.items():
        if v in concepts:
            for u, rels in N.items():
                if u in concepts:
                    triples.append((u, rels, v))
    # labels[i] == 1 iff concepts[i] is one of the target (answer) concepts.
    ts = [concept2id[t_cpt] for t_cpt in target_concepts]
    labels = []
    found_num = 0
    for c in concepts:
        if c in ts:
            found_num += 1
            labels.append(1)
        else:
            labels.append(0)
    # Map ids back to human-readable strings.
    res = [id2concept[x].replace("_", " ") for x in concepts]
    triples = [(id2concept[x].replace("_", " "), y, id2concept[z].replace("_", " ")) for (x,y,z) in triples]
    return {"concepts":res, "labels":labels, "distances":distances, "triples":triples}, found_num, len(res)
def process(input_path, output_path, T, max_B):
    """Run T-hop neighbour expansion for every example in *input_path*
    (JSON-lines with 'sent'/'qc'/'ac' fields) and dump one JSON result
    per line to *output_path*."""
    with open(input_path, 'r') as f:
        data = [json.loads(line) for line in f.readlines()]
    examples = []
    avg_len = 0
    for ex in tqdm(data):
        # 'qc' are the question (source) concepts, 'ac' the answer (target) ones.
        result, found, avg_nodes = find_neighbours_frequency(ex['sent'], ex['qc'], ex['ac'], T, max_B)
        avg_len += avg_nodes
        examples.append(result)
    print('{} hops avg nodes: {}'.format(T, avg_len / len(examples)))
    with open(output_path, 'w') as f:
        for line in examples:
            json.dump(line ,f)
            f.write('\n')
def load_total_concepts(data_path):
    """Collect every question/answer concept from the train and dev splits.

    Side effects:
      - fills the global ``total_concepts_id`` with the vocabulary ids of the
        concepts that exist in ``concept2id``,
      - writes the filtered concept strings to ``data_path``/total_concepts.txt.
    """
    global concept2id, total_concepts_id, config
    total_concepts = []
    total_concepts_id = []
    for path in [data_path + "/train/concepts_nv.json", data_path + "/dev/concepts_nv.json"]:
        with open(path, 'r') as f:
            for line in f.readlines():
                line = json.loads(line)
                total_concepts.extend(line['qc'] + line['ac'])
    total_concepts = list(set(total_concepts))
    filtered_total_conncepts = []
    for x in total_concepts:
        # BUG FIX: was `if concept2id.get(x, False):`, which evaluated the id
        # itself for truthiness and therefore silently dropped the concept
        # whose id is 0. Test membership instead.
        if x in concept2id:
            total_concepts_id.append(concept2id[x])
            filtered_total_conncepts.append(x)
    with open(data_path + "/total_concepts.txt", 'w') as f:
        for line in filtered_total_conncepts:
            f.write(str(line) + '\n')
# ---------------------------------------------------------------------------
# Script entry point: build the T-hop / top-max_B neighbour files for the
# dataset named on the command line (its directory is read from paths.cfg).
# ---------------------------------------------------------------------------
dataset = sys.argv[1]
T = 2        # number of expansion hops
max_B = 100  # frontier width kept per hop
DATA_PATH = config["paths"][dataset + "_dir"]
load_resources()
load_cpnet()
load_total_concepts(DATA_PATH)
process(DATA_PATH + "/train/concepts_nv.json", DATA_PATH + "/train/{}hops_{}_triple.json".format(T, max_B), T, max_B)
process(DATA_PATH + "/dev/concepts_nv.json", DATA_PATH + "/dev/{}hops_{}_triple.json".format(T, max_B), T, max_B)
# NOTE: the trailing "| 6,759 | ..." tokens below are dataset-extraction residue.
process(DATA_PATH + "/test/concepts_nv.json", DATA_PATH + "/test/{}hops_{}_triple.json".format(T, max_B), T, max_B) | 6,759 | 29.86758 | 117 | py |
DeepModel | DeepModel-master/testing/demo.py | import sys
# This script targets Python 2 (note the print statements below).
# path.config maps resource names to filesystem paths, one "name: path" per line.
paths = {}
with open('../path.config', 'r') as f:
    for line in f:
        # NOTE(review): assumes every line contains exactly one ': ' separator
        # and no blank lines — confirm the config format.
        name, path = line.split(': ')
        print name, path
        paths[name] = path
# Make the repo's pycaffe build importable before importing caffe.
sys.path.insert(0, paths['pycaffe_root'])
import caffe
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
from mpl_toolkits.mplot3d import Axes3D
import cv2
# 31 hand-model joints; Edges lists the joint pairs connected by bones when
# drawing the 3-D skeleton.
joints = np.arange(31)
Edges = [[0, 1], [1, 2], [2, 3], [3, 4],
[5, 6], [6, 7], [7, 8], [8, 9],
[10, 11], [11, 12], [12, 13], [13, 14],
[15, 16], [16, 17], [17, 18], [18, 19],
[4, 20], [9, 21], [14, 22], [19, 23],
[20, 24], [21, 24], [22, 24], [23, 24],
[24, 25], [24, 26], [24, 27],
[27, 28], [28, 29], [29, 30]]
J = len(joints)  # number of joints (31)
if __name__ == '__main__':
    #caffe.set_mode_gpu()
    # Build the deployment network with the pretrained NYU weights (test phase).
    net = caffe.Net( 'DeepModel_deploy.prototxt',
                     'weights/NYU.caffemodel',
                     caffe.TEST)
    list_images = ['0.png', '772.png', '1150.png', '1350.png', '1739.png']
    for image_name in list_images:
        img = cv2.imread('test_images\\' + image_name)
        cv2.imshow('img_input', img)
        # Grayscale, then rescale pixel values from [0, 255] to [-1, 1].
        # (Shadows the builtin `input`, which this script never calls.)
        input = (cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) / 255. * 2 - 1)
        blobs_in = {'data': input.reshape(1, 1, img.shape[0], img.shape[1])}
        out = net.forward(**blobs_in)
        # pred holds 3*J values: (x, y, z) per joint, presumably normalized to
        # [-1, 1] — the 2-D overlay below maps them onto a 128x128 image.
        joint = out['pred'][0]
        x = np.zeros(J)
        y = np.zeros(J)
        z = np.zeros(J)
        for j in range(J):
            x[j] = joint[joints[j] * 3]
            y[j] = joint[joints[j] * 3 + 1]
            z[j] = joint[joints[j] * 3 + 2]
            # Draw the projected joint on the input image (y axis flipped).
            cv2.circle(img, (int((x[j] + 1) / 2 * 128), int((- y[j] + 1) / 2 * 128)), 2, (255, 0, 0), 2)
        # 3-D skeleton plot: axes permuted so depth (z) runs along the x axis.
        fig=plt.figure()
        ax=fig.add_subplot((111),projection='3d')
        ax.set_xlabel('z')
        ax.set_ylabel('x')
        ax.set_zlabel('y')
        ax.scatter(z, -x, y)
        for e in Edges:
            ax.plot(z[e], -x[e], y[e], c = 'b')
        #For axes equal
        # Plot the corners of the bounding cube as invisible points so
        # matplotlib scales all three axes equally.
        max_range = np.array([x.max()-x.min(), y.max()-y.min(), z.max()-z.min()]).max()
        Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(x.max()+x.min())
        Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten() + 0.5*(y.max()+y.min())
        Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten() + 0.5*(z.max()+z.min())
        for xb, yb, zb in zip(Xb, Yb, Zb):
            ax.plot([zb], [xb], [yb], 'w')
        cv2.imshow('img_pred', img)
        plt.show()
| 2,400 | 33.797101 | 100 | py |
machine-learning-applied-to-cfd | machine-learning-applied-to-cfd-master/notebooks/helper_module.py | '''Module containing function that are too large to be included in the notebooks.'''
import torch
import numpy as np
class SimpleMLP(torch.nn.Module):
    """Fully connected feed-forward network with a configurable depth/width.

    Without batch normalization the network is a plain stack of Linear layers
    with the chosen activation between them. With ``batch_norm=True`` the
    hidden Linear layers are bias-free and doubled in width, each followed by
    a BatchNorm1d; the activation is applied after each BatchNorm. The final
    output layer is always an affine ``Linear(n_neurons, n_outputs)``.
    """

    def __init__(self, n_inputs=1, n_outputs=1, n_layers=1, n_neurons=10, activation=torch.sigmoid, batch_norm=False):
        super().__init__()
        self.n_inputs = n_inputs
        self.n_outputs = n_outputs
        self.n_layers = n_layers
        self.n_neurons = n_neurons
        self.activation = activation
        self.batch_norm = batch_norm
        self.layers = torch.nn.ModuleList()
        if self.batch_norm:
            wide = self.n_neurons * 2
            # input -> first (widened) hidden layer, normalized
            self.layers.append(torch.nn.Linear(self.n_inputs, wide, bias=False))
            self.layers.append(torch.nn.BatchNorm1d(wide))
            if self.n_layers > 2:
                # additional widened hidden layers
                for _ in range(self.n_layers - 2):
                    self.layers.append(torch.nn.Linear(wide, wide, bias=False))
                    self.layers.append(torch.nn.BatchNorm1d(wide))
                # narrow back down so the output layer's input width matches
                self.layers.append(torch.nn.Linear(wide, self.n_neurons, bias=False))
                self.layers.append(torch.nn.BatchNorm1d(self.n_neurons))
        else:
            # input -> first hidden layer
            self.layers.append(torch.nn.Linear(self.n_inputs, self.n_neurons))
            if self.n_layers > 1:
                # additional hidden layers
                for _ in range(self.n_layers - 1):
                    self.layers.append(torch.nn.Linear(self.n_neurons, self.n_neurons))
        # last hidden layer -> output layer (always affine, never normalized)
        self.layers.append(torch.nn.Linear(self.n_neurons, self.n_outputs))
        print("Created model with {} weights.".format(self.model_parameters()))

    def forward(self, x):
        """Apply all hidden layers and return the affine output of the last one."""
        hidden_layers = self.layers[:-1]
        if self.batch_norm:
            for layer in hidden_layers:
                out = layer(x)
                # Linear layers are applied raw; the activation follows BatchNorm.
                x = out if isinstance(layer, torch.nn.Linear) else self.activation(out)
        else:
            for layer in hidden_layers:
                x = self.activation(layer(x))
        return self.layers[-1](x)

    def model_parameters(self):
        """Return the number of trainable parameters of the model."""
        return sum(param.numel() for param in self.parameters() if param.requires_grad)
def approximate_function(x_train, y_train, x_val, y_val, model, l_rate=0.001, batch_size=128,
                         max_iter=1000, path=None, device='cpu', verbose=100):
    '''Train MLP to approximate a function y(x).
    The training stops when the maximum number of training epochs is reached.
    Parameters
    ----------
    x_train - array-like : argument of the function; used for training
    y_train - array-like : function value at x; used for training
    x_val - array-like : argument of the function; used for validation
    y_val - array-like : function value at x; used for validation
    model - SimpleMLP : PyTorch model which is adjusted to approximate the function
    l_rate - Float : learning rate for weight optimization
    batch_size - Integer : batch size for training data
    max_iter - Integer : maximum number of allowed training epochs
    path - String : location to save model weights
    device - String : either cpu or gpu
    verbose - Integer : defines frequency for loss information output
    Returns
    -------
    model - SimpleMLP : trained version of the given model
    loss_train - array-like : training loss
    loss_val - array-like : validation loss
    '''
    # convert numpy arrays to torch tensors
    x_train_tensor = torch.from_numpy(x_train.astype(np.float32))
    y_train_tensor = torch.from_numpy(y_train.astype(np.float32))
    x_val_tensor = torch.from_numpy(x_val.astype(np.float32))
    y_val_tensor = torch.from_numpy(y_val.astype(np.float32))
    # define loss function
    criterion = torch.nn.MSELoss()
    # define optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=l_rate)
    # training loop
    history_train = []
    history_val = []
    # best (lowest) batch-averaged training loss seen so far
    best_loss = 1.0E5
    # number of epochs in which the training loss improved; gates console
    # output via the 'verbose' modulus below
    count = 0
    # move model and data to gpu if available
    model.to(device)
    x_val_device, y_val_device = x_val_tensor.to(device), y_val_tensor.to(device)
    # float on purpose: used for averaging; cast to int for the batch loop
    n_batches = np.ceil(x_train.shape[0] / batch_size)
    for e in range(1, max_iter+1):
        # backpropagation
        model = model.train()
        loss_sum_batches = 0.0
        for b in range(int(n_batches)):
            # min() clamps the last (possibly partial) batch to the data size
            x_batch = x_train_tensor[b*batch_size:min(x_train_tensor.shape[0], (b+1)*batch_size)].to(device)
            y_batch = y_train_tensor[b*batch_size:min(x_train_tensor.shape[0], (b+1)*batch_size)].to(device)
            optimizer.zero_grad()
            output_train = model(x_batch)
            loss_train = criterion(output_train.squeeze(dim=1), y_batch)
            loss_train.backward()
            optimizer.step()
            loss_sum_batches += loss_train.item()
        # record the batch-averaged training loss of this epoch
        history_train.append(loss_sum_batches / n_batches)
        # validation
        with torch.no_grad():
            model = model.eval()
            output_val = model.forward(x_val_device)
            loss_val = criterion(output_val.squeeze(dim=1), y_val_device)
            history_val.append(loss_val.item())
            # check maximum error for validation data
            diff_val = output_val.squeeze(dim=1) - y_val_device
            max_diff_val = np.amax(np.absolute(diff_val.cpu().detach().numpy()))
        # NOTE(review): the checkpoint/reporting criterion is the *training*
        # loss, not the validation loss - confirm this is intended
        if history_train[-1] < best_loss:
            count += 1
            best_loss = history_train[-1]
            if count % verbose == 0:
                print("Training loss decreased in epoch {}: {}".format(e, history_train[-1]))
                print("Validation loss/max. dev.: {}/{}".format(loss_val.item(), max_diff_val))
                print("--------------------------------")
            if path is not None:
                if count % verbose == 0:
                    print("Saving model as {}".format(path))
                    print("--------------------------------")
                torch.save(model.state_dict(), path)
    return model.eval(), np.asarray(history_train), np.asarray(history_val)
| 6,317 | 44.128571 | 118 | py |
adapt | adapt-master/tests/test_ccsa.py | import numpy as np
import tensorflow as tf
from adapt.utils import make_classification_da
from adapt.feature_based import CCSA
from tensorflow.keras.initializers import GlorotUniform
try:
from tensorflow.keras.optimizers.legacy import Adam
except:
from tensorflow.keras.optimizers import Adam
# Fix all RNG seeds so the accuracy thresholds below are reproducible.
np.random.seed(0)
tf.random.set_seed(0)
# Small softmax classifier shared by both CCSA fits in the test.
task = tf.keras.Sequential()
task.add(tf.keras.layers.Dense(50, activation="relu", kernel_initializer=GlorotUniform(seed=0)))
task.add(tf.keras.layers.Dense(2, activation="softmax", kernel_initializer=GlorotUniform(seed=0)))
# Indices of the few labeled target samples made available to CCSA.
ind = np.random.choice(100, 10)
# Toy domain-adaptation problem: source/target features and labels.
Xs, ys, Xt, yt = make_classification_da()
def test_ccsa():
    """CCSA adapts well with a small gamma and degrades when gamma dominates."""
    ys_onehot = tf.one_hot(ys, 2).numpy()
    yt_onehot = tf.one_hot(yt, 2).numpy()
    for gamma, passes in ((0.1, lambda acc: acc > 0.8),
                          (1., lambda acc: acc < 0.9)):
        model = CCSA(task=task, loss="categorical_crossentropy",
                     optimizer=Adam(), metrics=["acc"], gamma=gamma,
                     random_state=0)
        model.fit(Xs, ys_onehot, Xt=Xt[ind], yt=yt_onehot[ind],
                  epochs=100, verbose=0)
        accuracy = np.mean(model.predict(Xt).argmax(1) == yt)
        assert passes(accuracy)
adapt | adapt-master/tests/test_tradaboost.py | """
Test functions for tradaboost module.
"""
import copy
import numpy as np
import scipy
from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge, RidgeClassifier
from sklearn.metrics import r2_score, accuracy_score
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except:
from tensorflow.keras.optimizers import Adam
from adapt.instance_based import (TrAdaBoost,
TrAdaBoostR2,
TwoStageTrAdaBoostR2)
np.random.seed(0)
# Source: two clusters around 0 and 1; target: a single cluster around 0.
Xs = np.concatenate((
    np.random.randn(50)*0.1,
    np.random.randn(50)*0.1 + 1.,
)).reshape(-1, 1)
Xt = (np.random.randn(100) * 0.1).reshape(-1, 1)
# Regression targets: linear around 0, constant 10 for the shifted cluster.
ys_reg = np.array([1. * x if x<0.5 else
                   10 for x in Xs.ravel()]).reshape(-1, 1)
yt_reg = np.array([1. * x if x<0.5 else
                   10 for x in Xt.ravel()]).reshape(-1, 1)
# Classification targets derived from the same thresholds (0/1 floats).
ys_classif = np.array(
    [x<0 if x<0.5 else x<1 for x in Xs.ravel()]
).astype(float)
yt_classif = np.array(
    [x<0 if x<0.5 else x<1 for x in Xt.ravel()]
).astype(float)
def test_tradaboost_fit():
    """TrAdaBoost reweights source samples and fits the target task."""
    np.random.seed(0)
    model = TrAdaBoost(LogisticRegression(penalty='none',
                       solver='lbfgs'),
                       n_estimators=20)
    model.fit(Xs, ys_classif, Xt=Xt[:10], yt=yt_classif[:10])
    score = model.score(Xs, ys_classif)
    assert score == accuracy_score(ys_classif, model.predict(Xs))
    # initial source weights are uniform: both halves carry equal mass
    assert len(model.sample_weights_src_[0]) == 100
    assert (model.sample_weights_src_[0][:50].sum() ==
            model.sample_weights_src_[0][50:].sum())
    # target weights grow during boosting
    assert len(model.sample_weights_tgt_[-1]) == 10
    assert model.sample_weights_tgt_[-1].sum() > 0.3
    assert (model.predict(Xt).ravel() == yt_classif).sum() > 90
def test_tradaboost_fit_keras_model():
    """TrAdaBoost accepts keras estimators, for 1-d and 2-d outputs."""
    np.random.seed(0)
    est = tf.keras.Sequential()
    est.add(tf.keras.layers.Dense(1, activation="sigmoid"))
    est.compile(loss="bce", optimizer=Adam())
    model = TrAdaBoost(est, n_estimators=2, random_state=0)
    model.fit(Xs, ys_classif, Xt=Xt[:10], yt=yt_classif[:10])
    yp = model.predict(Xt)
    # second network: 2-unit softmax head with soft (random) labels
    est = tf.keras.Sequential()
    est.add(tf.keras.layers.Dense(2, activation="softmax"))
    est.compile(loss="mse", optimizer=Adam())
    model = TrAdaBoost(est, n_estimators=2, random_state=0)
    model.fit(Xs, np.random.random((100, 2)),
              Xt=Xt[:10], yt=np.random.random((10, 2)))
    score = model.score(Xs, ys_classif)
    assert score == accuracy_score(ys_classif, model.predict(Xs))
def test_tradaboostr2_fit():
    """TrAdaBoostR2 downweights the off-target source cluster and fits the target."""
    np.random.seed(0)
    model = TrAdaBoostR2(LinearRegression(fit_intercept=False),
                         n_estimators=100,
                         Xt=Xt[:10], yt=yt_reg[:10])
    model.fit(Xs, ys_reg)
    score = model.score(Xs, ys_reg)
    assert score == r2_score(ys_reg, model.predict(Xs))
    assert np.abs(model.estimators_[-1].coef_[0] - 1.) < 1
    # the source cluster matching the target keeps most of the mass
    assert np.abs(model.sample_weights_src_[-1][:50].sum() /
                  model.sample_weights_src_[-1][50:].sum()) > 10
    assert model.sample_weights_tgt_[-1].sum() > 0.7
    assert np.abs(model.predict(Xt) - yt_reg).sum() < 1
    # predict_weights exposes the final boosting-iteration weights
    assert np.all(model.predict_weights(domain="src") ==
                  model.sample_weights_src_[-1])
    assert np.all(model.predict_weights(domain="tgt") ==
                  model.sample_weights_tgt_[-1])
def test_twostagetradaboostr2_fit():
    """TwoStageTrAdaBoostR2 behaves like TrAdaBoostR2 but keeps the best stage."""
    np.random.seed(0)
    model = TwoStageTrAdaBoostR2(LinearRegression(fit_intercept=False),
                                 n_estimators=10)
    model.fit(Xs, ys_reg.ravel(), Xt=Xt[:10], yt=yt_reg[:10].ravel())
    score = model.score(Xs, ys_reg)
    assert score == r2_score(ys_reg, model.predict(Xs))
    # nested ensemble: each stage is itself a boosted regressor
    assert np.abs(model.estimators_[-1].estimators_[-1].coef_[0]
                  - 1.) < 1
    assert np.abs(model.sample_weights_src_[-1][:50].sum() /
                  model.sample_weights_src_[-1][50:].sum()) > 10
    assert model.sample_weights_tgt_[-1].sum() > 0.7
    assert np.abs(model.predict(Xt) - yt_reg).sum() < 1
    # predict_weights returns the weights of the lowest-error stage
    argmin = np.argmin(model.estimator_errors_)
    assert np.all(model.predict_weights(domain="src") ==
                  model.sample_weights_src_[argmin])
    assert np.all(model.predict_weights(domain="tgt") ==
                  model.sample_weights_tgt_[argmin])
def test_tradaboost_deepcopy():
    """A deep copy is a distinct object that predicts identically."""
    np.random.seed(0)
    base_clf = LogisticRegression(penalty='none', solver='lbfgs')
    original = TrAdaBoost(base_clf, n_estimators=20)
    original.fit(Xs, ys_classif, Xt=Xt[:10], yt=yt_classif[:10])
    duplicate = copy.deepcopy(original)
    assert np.all(original.predict(Xt) == duplicate.predict(Xt))
    assert duplicate is not original
def test_tradaboost_multiclass():
    """TrAdaBoost handles a three-class problem end to end."""
    np.random.seed(0)
    features = np.random.randn(10, 3)
    labels = np.random.choice(3, 10)
    booster = TrAdaBoost(
        LogisticRegression(penalty='none', solver='lbfgs'),
        Xt=features, yt=labels, n_estimators=20)
    booster.fit(features, labels)
    predictions = booster.predict(features)
    # all three classes survive in the predictions
    assert set(np.unique(predictions)) == {0, 1, 2}
    assert booster.score(features, labels) == accuracy_score(labels, predictions)
def test_tradaboost_multireg():
    """Both R2 variants support multi-output regression targets."""
    np.random.seed(0)
    X = np.random.randn(10, 3)
    y = np.random.randn(10, 5)
    model = TrAdaBoostR2(LinearRegression(),
                         Xt=X, yt=y,
                         n_estimators=20)
    model.fit(X, y)
    yp = model.predict(X)
    score = model.score(X, y)
    # predictions keep the (n_samples, n_outputs) shape
    assert np.all(yp.shape == (10, 5))
    assert score == r2_score(y, yp)
    model = TwoStageTrAdaBoostR2(LinearRegression(),
                                 Xt=X, yt=y,
                                 n_estimators=3,
                                 n_estimators_fs=3)
    model.fit(X, y)
    yp = model.predict(X)
    score = model.score(X, y)
    assert np.all(yp.shape == (10, 5))
    assert score == r2_score(y, yp)
def test_tradaboost_above_05():
    """Unregularized fits can exceed the 0.5 error threshold; ridge stays below."""
    np.random.seed(0)
    # X and y are never used below - presumably leftover from a copy-paste;
    # kept because removing them would shift the RNG stream (TODO confirm)
    X = np.random.randn(10, 3)
    y = np.random.randn(10, 5)
    model = TrAdaBoostR2(LinearRegression(),
                         Xt=Xt[:10], yt=yt_reg[:10],
                         n_estimators=20)
    model.fit(Xs, ys_reg)
    assert np.any(np.array(model.estimator_errors_)>0.5)
    model = TrAdaBoostR2(Ridge(1.),
                         Xt=Xt[:20], yt=yt_reg[:20],
                         n_estimators=20)
    model.fit(Xs, ys_reg)
    assert np.all(np.array(model.estimator_errors_)<0.5)
def test_tradaboost_lr():
    """A larger boosting learning rate drives estimator errors down faster."""
    np.random.seed(0)
    model = TrAdaBoost(LogisticRegression(penalty='none'),
                       Xt=Xt[:10], yt=yt_classif[:10],
                       n_estimators=20, lr=.1)
    model.fit(Xs, ys_classif)
    err1 = model.estimator_errors_
    model = TrAdaBoost(LogisticRegression(penalty='none'),
                       Xt=Xt[:10], yt=yt_classif[:10],
                       n_estimators=20, lr=2.)
    model.fit(Xs, ys_classif)
    err2 = model.estimator_errors_
    # high-lr run accumulates much less error than the low-lr run
    assert np.sum(err1) > 5 * np.sum(err2)
def test_tradaboost_sparse_matrix():
    """All three boosting variants accept scipy sparse feature matrices."""
    X = scipy.sparse.csr_matrix(np.eye(200))
    y = np.random.randn(100)
    yc = np.random.choice(["e", "p"], 100)
    Xt_sp = X[:100]
    Xs_sp = X[100:]
    cases = (
        (lambda: TrAdaBoost(RidgeClassifier(), Xt=Xt_sp[:10], yt=yc[:10]), yc),
        (lambda: TrAdaBoostR2(Ridge(), Xt=Xt_sp[:10], yt=y[:10]), y),
        (lambda: TwoStageTrAdaBoostR2(Ridge(), Xt=Xt_sp[:10], yt=y[:10],
                                      n_estimators=3), y),
    )
    for make_model, labels in cases:
        model = make_model()
        model.fit(Xs_sp, labels)
        model.score(Xt_sp, labels)
        model.predict(Xs_sp)
adapt | adapt-master/tests/test_iwc.py | """
Test functions for iwc module.
"""
import numpy as np
from sklearn.linear_model import RidgeClassifier
from adapt.utils import make_classification_da
from adapt.instance_based import IWC
from adapt.utils import get_default_discriminator
try:
from tensorflow.keras.optimizers.legacy import Adam
except:
from tensorflow.keras.optimizers import Adam
Xs, ys, Xt, yt = make_classification_da()
def test_iwn():
    """IWC with a ridge weighter: stored and recomputed weights agree."""
    adapt_model = IWC(
        RidgeClassifier(0.),
        classifier=RidgeClassifier(0.),
        Xt=Xt,
        random_state=0,
    )
    adapt_model.fit(Xs, ys)
    adapt_model.predict(Xt)
    adapt_model.score(Xt, yt)
    stored = adapt_model.predict_weights()
    recomputed = adapt_model.predict_weights(Xs)
    assert np.abs(stored - recomputed).sum() < 1e-5
def test_default_classif():
    """IWC falls back to its default domain classifier when none is given."""
    adapt_model = IWC(
        RidgeClassifier(0.),
        classifier=None,
        Xt=Xt,
        random_state=0,
    )
    adapt_model.fit(Xs, ys)
    adapt_model.predict(Xt)
    adapt_model.score(Xt, yt)
    stored = adapt_model.predict_weights()
    recomputed = adapt_model.predict_weights(Xs)
    assert np.abs(stored - recomputed).sum() < 1e-5
def test_nn_classif():
    """IWC accepts a neural-network domain classifier with fit parameters."""
    fit_params = dict(epochs=10, optimizer=Adam(), loss="bce", verbose=0)
    adapt_model = IWC(
        RidgeClassifier(0.),
        classifier=get_default_discriminator(),
        cl_params=fit_params,
        Xt=Xt,
        random_state=0,
    )
    adapt_model.fit(Xs, ys)
    adapt_model.predict(Xt)
    adapt_model.score(Xt, yt)
    stored = adapt_model.predict_weights()
    recomputed = adapt_model.predict_weights(Xs)
    assert np.abs(stored - recomputed).sum() < 1e-5
| 1,428 | 27.58 | 83 | py |
adapt | adapt-master/tests/test_adda.py | """
Test functions for adda module.
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.initializers import GlorotUniform
try:
from tensorflow.keras.optimizers.legacy import Adam
except:
from tensorflow.keras.optimizers import Adam
from adapt.feature_based import ADDA
# Source samples: x in [0, 1] with a constant domain-indicator feature of 0.
Xs = np.concatenate((
    np.linspace(0, 1, 100).reshape(-1, 1),
    np.zeros((100, 1))
), axis=1)
# Target samples: same x, domain-indicator feature of 1.
Xt = np.concatenate((
    np.linspace(0, 1, 100).reshape(-1, 1),
    np.ones((100, 1))
), axis=1)
# Labels depend only on the first feature.
ys = 0.2 * Xs[:, 0].ravel()
yt = 0.2 * Xt[:, 0].ravel()
def _get_encoder(input_shape=Xs.shape[1:]):
    """Single bias-free unit with an all-ones kernel (sums its inputs)."""
    layer = Dense(1, input_shape=input_shape,
                  kernel_initializer="ones",
                  use_bias=False)
    encoder = Sequential([layer])
    encoder.compile(loss="mse", optimizer="adam")
    return encoder
def _get_discriminator(input_shape=(1,)):
    """Two-layer domain critic: 10 elu units into one sigmoid output."""
    hidden = Dense(10, input_shape=input_shape,
                   kernel_initializer=GlorotUniform(seed=0),
                   activation="elu")
    head = Dense(1, kernel_initializer=GlorotUniform(seed=0),
                 activation="sigmoid")
    critic = Sequential([hidden, head])
    critic.compile(loss="mse", optimizer="adam")
    return critic
def _get_task(input_shape=(1,), output_shape=(1,)):
    """Bias-free linear head sized to output_shape, compiled with Adam(0.1)."""
    head = Dense(np.prod(output_shape),
                 use_bias=False,
                 kernel_initializer=GlorotUniform(seed=0),
                 input_shape=input_shape)
    task = Sequential([head])
    task.compile(loss="mse", optimizer=Adam(0.1))
    return task
def test_fit():
    """ADDA with pretraining aligns the encoder and fits the target task."""
    model = ADDA(_get_encoder(),
                 _get_task(), _get_discriminator(), pretrain__epochs=100,
                 loss="mse", optimizer_enc=Adam(0.005), optimizer_disc=Adam(0.01),
                 metrics=["mae"], random_state=0)
    model.fit(Xs, ys, Xt, yt,
              epochs=100, batch_size=34, verbose=0)
    assert isinstance(model, Model)
    # the adapted encoder should almost ignore the domain-indicator feature
    assert np.abs(model.encoder_.get_weights()[0][1][0]) < 0.2
    assert np.sum(np.abs(np.ravel(model.predict_task(Xs, domain="src")) - ys)) < 13
    assert np.sum(np.abs(model.predict(Xt).ravel() - yt)) < 25
def test_nopretrain():
    """ADDA without pretraining, starting from a manually pretrained source net."""
    tf.random.set_seed(0)
    np.random.seed(0)
    enc = _get_encoder()
    head = _get_task()
    # pretrain encoder + task on the source domain by hand
    source_net = Sequential()
    source_net.add(enc)
    source_net.add(head)
    source_net.compile(loss="mse", optimizer=Adam(0.01))
    source_net.fit(Xs, ys, epochs=100, batch_size=34, verbose=0)
    encoded_src = source_net.predict(Xs, verbose=0)
    adda = ADDA(enc, head, _get_discriminator(), pretrain=False,
                loss="mse", optimizer_enc=Adam(0.005), optimizer_disc=Adam(0.01),
                metrics=["mae"], random_state=0)
    adda.fit(encoded_src, ys, Xt, epochs=100, batch_size=34, verbose=0)
    # the domain-indicator weight shrinks; both domains stay reasonably fitted
    assert np.abs(adda.encoder_.get_weights()[0][1][0]) < 0.2
    assert np.sum(np.abs(np.ravel(adda.predict(Xs)) - ys)) < 25
    assert np.sum(np.abs(adda.predict(Xt).ravel() - yt)) < 25
adapt | adapt-master/tests/test_dann.py | """
Test functions for dann module.
"""
import pytest
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Dense
try:
from tensorflow.keras.optimizers.legacy import Adam
except:
from tensorflow.keras.optimizers import Adam
from adapt.feature_based import DANN
from adapt.utils import UpdateLambda
from tensorflow.keras.initializers import GlorotUniform
# Source samples: x in [0, 1] with a constant domain-indicator feature of 0.
Xs = np.concatenate((
    np.linspace(0, 1, 100).reshape(-1, 1),
    np.zeros((100, 1))
), axis=1)
# Target samples: same x, domain-indicator feature of 1.
Xt = np.concatenate((
    np.linspace(0, 1, 100).reshape(-1, 1),
    np.ones((100, 1))
), axis=1)
# Labels depend only on the first feature (column vectors here).
ys = 0.2 * Xs[:, 0].reshape(-1, 1)
yt = 0.2 * Xt[:, 0].reshape(-1, 1)
def _get_encoder(input_shape=Xs.shape[1:]):
    """Bias-free single unit initialized to sum its inputs (all-ones kernel)."""
    encoder = Sequential([
        Dense(1, input_shape=input_shape,
              kernel_initializer="ones",
              use_bias=False),
    ])
    encoder.compile(loss="mse", optimizer="adam")
    return encoder
def _get_discriminator(input_shape=(1,)):
    """Domain critic: 10 elu units followed by a single sigmoid unit."""
    critic = Sequential([
        Dense(10, input_shape=input_shape,
              kernel_initializer=GlorotUniform(seed=0),
              activation="elu"),
        Dense(1, kernel_initializer=GlorotUniform(seed=0),
              activation="sigmoid"),
    ])
    critic.compile(loss="mse", optimizer="adam")
    return critic
def _get_task(input_shape=(1,), output_shape=(1,)):
    """Bias-free linear head sized to output_shape, compiled with Adam(0.1)."""
    head = Dense(np.prod(output_shape),
                 kernel_initializer=GlorotUniform(seed=0),
                 use_bias=False,
                 input_shape=input_shape)
    task = Sequential([head])
    task.compile(loss="mse", optimizer=Adam(0.1))
    return task
def test_fit_lambda_zero():
    """With lambda=0, DANN trains on source only: good on source, bad on target."""
    tf.random.set_seed(0)
    np.random.seed(0)
    model = DANN(_get_encoder(), _get_task(), _get_discriminator(),
                 lambda_=0, loss="mse", optimizer=Adam(0.01), metrics=["mae"],
                 random_state=0)
    model.fit(Xs, ys, Xt=Xt, yt=yt,
              epochs=200, batch_size=32, verbose=0)
    assert isinstance(model, Model)
    # no adversarial signal: the domain-indicator weight stays at its init value
    assert model.encoder_.get_weights()[0][1][0] == 1.0
    assert np.sum(np.abs(model.predict(Xs) - ys)) < 0.01
    assert np.sum(np.abs(model.predict(Xt) - yt)) > 10
def test_fit_lambda_one():
    """With lambda=1, DANN aligns the domains and generalizes to the target."""
    tf.random.set_seed(0)
    np.random.seed(0)
    model = DANN(_get_encoder(), _get_task(), _get_discriminator(),
                 lambda_=1, loss="mse", optimizer=Adam(0.01), random_state=0)
    model.fit(Xs, ys, Xt, yt,
              epochs=100, batch_size=32, verbose=0)
    assert isinstance(model, Model)
    # the domain-indicator weight becomes small relative to the useful feature
    assert np.abs(model.encoder_.get_weights()[0][1][0] /
                  model.encoder_.get_weights()[0][0][0]) < 0.15
    assert np.sum(np.abs(model.predict(Xs) - ys)) < 1
    assert np.sum(np.abs(model.predict(Xt) - yt)) < 2
def test_fit_lambda_update():
    """The UpdateLambda callback ramps a tf.Variable lambda from 0 up to 1."""
    tf.random.set_seed(0)
    np.random.seed(0)
    model = DANN(_get_encoder(), _get_task(), _get_discriminator(),
                 lambda_=tf.Variable(0.), loss="mse", optimizer=Adam(0.01), random_state=0)
    model.fit(Xs, ys, Xt=Xt, yt=yt,
              epochs=100, batch_size=32, verbose=0,
              callbacks=UpdateLambda(max_steps=400, gamma=10.))
    assert isinstance(model, Model)
    assert np.abs(model.encoder_.get_weights()[0][1][0] /
                  model.encoder_.get_weights()[0][0][0]) < 0.2
    assert np.sum(np.abs(model.predict(Xs) - ys)) < 1
    assert np.sum(np.abs(model.predict(Xt) - yt)) < 5
    # after max_steps the schedule has saturated at 1
    assert model.lambda_.numpy() == 1
def test_optimizer_enc_disc():
    """Separate optimizers: a zero-lr encoder optimizer freezes the encoder only."""
    tf.random.set_seed(0)
    np.random.seed(0)
    encoder = _get_encoder()
    task = _get_task()
    disc = _get_discriminator()
    # predict calls build the networks' weights before DANN copies them
    X_enc = encoder.predict(Xs)
    task.predict(X_enc)
    disc.predict(X_enc)
    model = DANN(encoder, task, disc, copy=True,
                 optimizer_enc=Adam(0.0), optimizer_disc=Adam(0.001),
                 lambda_=tf.Variable(0.), loss="mse", optimizer=Adam(0.01), random_state=0)
    model.fit(Xs, ys, Xt=Xt, yt=yt,
              epochs=10, batch_size=32, verbose=0)
    # encoder untouched (lr=0); task and discriminator have moved
    assert np.all(model.encoder_.get_weights()[0] == encoder.get_weights()[0])
    assert np.any(model.task_.get_weights()[0] != task.get_weights()[0])
    assert np.any(model.discriminator_.get_weights()[0] != disc.get_weights()[0])
def test_warnings():
    """Constructing DANN with a ``gamma`` keyword emits exactly one warning."""
    with pytest.warns() as caught:
        DANN(gamma=10.)
    assert len(caught) == 1
adapt | adapt-master/tests/test_mdd.py | """
Test functions for mdd module.
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Dense
try:
from tensorflow.keras.optimizers.legacy import Adam
except:
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.initializers import GlorotUniform
from adapt.feature_based import MDD
# Source samples: x in [0, 1] with a constant domain-indicator feature of 0.
Xs = np.concatenate((
    np.linspace(0, 1, 100).reshape(-1, 1),
    np.zeros((100, 1))
), axis=1)
# Target samples: same x, domain-indicator feature of 1.
Xt = np.concatenate((
    np.linspace(0, 1, 100).reshape(-1, 1),
    np.ones((100, 1))
), axis=1)
# Labels depend only on the first feature.
ys = 0.2 * Xs[:, 0].ravel()
yt = 0.2 * Xt[:, 0].ravel()
def _get_encoder(input_shape=Xs.shape[1:]):
    """Bias-free single unit with an all-ones kernel (sums its inputs)."""
    summing_layer = Dense(1, input_shape=input_shape,
                          kernel_initializer="ones",
                          use_bias=False)
    encoder = Sequential([summing_layer])
    encoder.compile(loss="mse", optimizer="adam")
    return encoder
def _get_discriminator(input_shape=(1,)):
    """Domain critic: 10 relu units followed by a single sigmoid unit."""
    critic = Sequential([
        Dense(10, input_shape=input_shape,
              kernel_initializer=GlorotUniform(seed=0),
              activation="relu"),
        Dense(1, kernel_initializer=GlorotUniform(seed=0),
              activation="sigmoid"),
    ])
    critic.compile(loss="mse", optimizer="adam")
    return critic
def _get_task(input_shape=(1,), output_shape=(1,)):
    """Bias-free linear head sized to output_shape, compiled with Adam(0.1)."""
    head = Dense(np.prod(output_shape),
                 kernel_initializer=GlorotUniform(seed=0),
                 use_bias=False,
                 input_shape=input_shape)
    task = Sequential([head])
    task.compile(loss="mse", optimizer=Adam(0.1))
    return task
def test_fit():
    """MDD with lambda=1 shrinks the domain-indicator weight and fits both domains."""
    tf.random.set_seed(0)
    np.random.seed(0)
    model = MDD(_get_encoder(), _get_task(), lambda_=1.,
                loss="mse", optimizer=Adam(0.01), metrics=["mse"])
    model.fit(Xs, ys, Xt, yt,
              epochs=100, batch_size=34, verbose=0)
    assert isinstance(model, Model)
    # the domain-indicator weight becomes small relative to the useful feature
    assert np.abs(model.encoder_.get_weights()[0][1][0] /
                  model.encoder_.get_weights()[0][0][0]) < 0.3
    assert np.sum(np.abs(model.predict(Xs).ravel() - ys)) < 0.1
    assert np.sum(np.abs(model.predict(Xt).ravel() - yt)) < 7.
def test_not_same_weights():
    """With copy=False, MDD keeps the task's weights but the internal
    discriminator gets its own, different, weights."""
    tf.random.set_seed(0)
    np.random.seed(0)
    task = _get_task()
    encoder = _get_encoder()
    # predict calls build the networks' weights before handing them to MDD
    X_enc = encoder.predict(Xs)
    task.predict(X_enc)
    model = MDD(encoder, task, copy=False,
                loss="mse", optimizer=Adam(0.01), metrics=["mse"])
    model.fit(Xs, ys, Xt, yt,
              epochs=0, batch_size=34, verbose=0)
    assert np.any(model.task_.get_weights()[0] !=
                  model.discriminator_.get_weights()[0])
    assert np.all(model.task_.get_weights()[0] ==
                  task.get_weights()[0])
def test_cce():
    """Smoke test: MDD runs with a categorical-crossentropy loss on one-hot labels."""
    tf.random.set_seed(0)
    np.random.seed(0)
    task = _get_task(output_shape=(2,))
    encoder = _get_encoder()
    # one-hot labels from a 0.5 threshold on the first feature
    ys_2 = np.zeros((len(Xs), 2))
    ys_2[Xs[:, 0]<0.5, 0] = 1
    ys_2[Xs[:, 0]>=0.5, 1] = 1
    yt_2 = np.zeros((len(Xt), 2))
    yt_2[Xt[:, 0]<0.5, 0] = 1
    yt_2[Xt[:, 0]>=0.5, 1] = 1
    model = MDD(encoder, task, copy=False,
                loss="categorical_crossentropy", optimizer=Adam(0.01), metrics=["acc"])
    model.fit(Xs, ys_2, Xt, yt_2,
              epochs=10, batch_size=34, verbose=0)
| 3,369 | 30.495327 | 87 | py |
adapt | adapt-master/tests/test_iwn.py | """
Test functions for iwn module.
"""
from sklearn.linear_model import RidgeClassifier
from adapt.utils import make_classification_da
from adapt.instance_based import IWN
from adapt.utils import get_default_task
from sklearn.neighbors import KNeighborsClassifier
try:
from tensorflow.keras.optimizers.legacy import Adam
except:
from tensorflow.keras.optimizers import Adam
Xs, ys, Xt, yt = make_classification_da()
def test_iwn():
    """Smoke test: IWN with a ridge estimator runs fit/score/predict end to end."""
    iwn = IWN(RidgeClassifier(0.), Xt=Xt, sigma_init=0.1, random_state=0,
              pretrain=True, pretrain__epochs=100, pretrain__verbose=0)
    iwn.fit(Xs, ys, epochs=100, batch_size=256, verbose=0)
    iwn.score(Xt, yt)
    iwn.predict(Xs)
    iwn.predict_weights(Xs)
def test_iwn_fit_estim():
    """IWN runs end to end with a keras task and with a sklearn estimator."""
    task = get_default_task()
    task.compile(optimizer=Adam(), loss="mse", metrics=["mae"])
    for estimator in (task, KNeighborsClassifier()):
        iwn = IWN(estimator, Xt=Xt, sigma_init=0.1, random_state=0,
                  pretrain=True, pretrain__epochs=100, pretrain__verbose=0)
        iwn.fit(Xs, ys)
        iwn.score(Xt, yt)
        iwn.predict(Xs)
        iwn.predict_weights(Xs)
| 1,349 | 31.142857 | 78 | py |
adapt | adapt-master/tests/test_coral.py | """
Test functions for coral module.
"""
import numpy as np
from sklearn.linear_model import LogisticRegression
from scipy import linalg
import tensorflow as tf
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.initializers import GlorotUniform
from adapt.feature_based import CORAL, DeepCORAL
np.random.seed(0)
# Source: near-degenerate covariance along the first axis.
Xs = np.random.multivariate_normal(
         np.array([0, 0]),
         np.array([[0.001, 0], [0, 1]]),
         1000)
# Target: correlated features with a different covariance structure.
Xt = np.random.multivariate_normal(
         np.array([0, 0]),
         np.array([[0.1, 0.2], [0.2, 0.5]]),
         1000)
ys = np.zeros(1000)
yt = np.zeros(1000)
# Source boundary: sign of the second feature; target boundary is rotated.
ys[Xs[:, 1]>0] = 1
yt[(Xt[:, 1]-0.5*Xt[:, 0])>0] = 1
def _get_encoder(input_shape=Xs.shape[1:]):
    """Bias-free linear encoder mapping the 2-d inputs to 2-d features."""
    projection = Dense(2, input_shape=input_shape,
                       kernel_initializer=GlorotUniform(seed=0),
                       use_bias=False)
    encoder = Sequential([projection])
    encoder.compile(loss="mse", optimizer="adam")
    return encoder
def _get_task(input_shape=(2,), output_shape=(1,)):
    """Bias-free sigmoid head sized to output_shape."""
    head = Dense(np.prod(output_shape),
                 kernel_initializer=GlorotUniform(seed=0),
                 input_shape=input_shape,
                 use_bias=False,
                 activation="sigmoid")
    task = Sequential([head])
    task.compile(loss="mse", optimizer="adam")
    return task
def test_setup():
    """Sanity check: a plain logistic regression fits source but degrades on target."""
    clf = LogisticRegression()
    clf.fit(Xs, ys)
    assert clf.coef_[0][0] < 0.1 * clf.coef_[0][1]
    src_acc = (clf.predict(Xs) == ys).sum() / len(Xs)
    tgt_acc = (clf.predict(Xt) == yt).sum() / len(Xt)
    assert src_acc >= 0.99
    assert tgt_acc < 0.97
def test_fit_coral():
    """CORAL alignment recovers target accuracy for the logistic estimator."""
    np.random.seed(0)
    coral = CORAL(LogisticRegression(), lambda_=0.)
    coral.fit(Xs, ys, Xt=Xt)
    assert isinstance(coral.estimator_, LogisticRegression)
    assert len(coral.estimator_.coef_[0]) == Xs.shape[1]
    target_accuracy = (coral.predict(Xt) == yt).sum() / len(Xt)
    assert target_accuracy >= 0.99
def test_fit_coral_complex():
    """CORAL still runs when the covariance square roots are complex-valued."""
    np.random.seed(0)
    model = CORAL(LogisticRegression(), lambda_=0.)
    # more features than samples: singular covariances force complex sqrtm
    Xs_ = np.random.randn(10, 100)
    Xt_ = np.random.randn(10, 100)
    model.fit(Xs_, ys[:10], Xt=Xt_)
    assert np.iscomplexobj(linalg.inv(linalg.sqrtm(model.Cs_)))
    assert np.iscomplexobj(linalg.sqrtm(model.Ct_))
    model.predict(Xs_, domain="src")
def test_fit_deepcoral():
    """DeepCORAL aligns the second-order statistics of the encoded domains."""
    tf.random.set_seed(0)
    np.random.seed(0)
    dcoral = DeepCORAL(_get_encoder(), _get_task(), metrics=["mse"])
    dcoral.fit(Xs, ys, Xt,
               epochs=100, batch_size=64, verbose=0)
    assert isinstance(dcoral.encoder_, Model)
    assert isinstance(dcoral.task_, Model)
    assert len(dcoral.encoder_.get_weights()[0]) == Xs.shape[1]
    # raw domains have clearly different covariances...
    raw_gap = np.abs(np.cov(Xs, rowvar=False) -
                     np.cov(Xt, rowvar=False)).sum()
    assert raw_gap > 0.5
    # ...while the encoded domains are aligned
    encoded_gap = np.abs(np.cov(dcoral.encoder_.predict(Xs), rowvar=False) -
                         np.cov(dcoral.encoder_.predict(Xt), rowvar=False)).sum()
    assert encoded_gap < 0.3
    target_hits = (np.abs(dcoral.predict(Xt) - yt) < 0.5).sum() / len(Xt)
    assert target_hits >= 0.99
adapt | adapt-master/tests/test_mcd.py | """
Test functions for mcd module.
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Dense
try:
from tensorflow.keras.optimizers.legacy import Adam
except:
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.initializers import GlorotUniform
from adapt.feature_based import MCD
# Source samples: x in [0, 1] with a constant domain-indicator feature of 0.
Xs = np.concatenate((
    np.linspace(0, 1, 100).reshape(-1, 1),
    np.zeros((100, 1))
), axis=1)
# Target samples: same x, domain-indicator feature of 1.
Xt = np.concatenate((
    np.linspace(0, 1, 100).reshape(-1, 1),
    np.ones((100, 1))
), axis=1)
# Labels depend only on the first feature.
ys = 0.2 * Xs[:, 0].ravel()
yt = 0.2 * Xt[:, 0].ravel()
def _get_encoder(input_shape=Xs.shape[1:]):
    """Single bias-free unit initialized to sum its inputs."""
    encoder = Sequential([
        Dense(1, input_shape=input_shape,
              kernel_initializer="ones",
              use_bias=False),
    ])
    encoder.compile(loss="mse", optimizer="adam")
    return encoder
def _get_discriminator(input_shape=(1,)):
    """Domain critic: 10 relu units into a single sigmoid output."""
    hidden = Dense(10, input_shape=input_shape,
                   kernel_initializer=GlorotUniform(seed=0),
                   activation="relu")
    head = Dense(1, kernel_initializer=GlorotUniform(seed=0),
                 activation="sigmoid")
    critic = Sequential([hidden, head])
    critic.compile(loss="mse", optimizer="adam")
    return critic
def _get_task(input_shape=(1,), output_shape=(1,)):
    """Bias-free linear head sized to output_shape, compiled with Adam(0.1)."""
    head = Dense(np.prod(output_shape),
                 kernel_initializer=GlorotUniform(seed=0),
                 use_bias=False,
                 input_shape=input_shape)
    task = Sequential([head])
    task.compile(loss="mse", optimizer=Adam(0.1))
    return task
def test_fit():
    """MCD adapts the encoder; predict_avg averages the two classifier heads."""
    tf.random.set_seed(0)
    np.random.seed(0)
    model = MCD(_get_encoder(), _get_task(),
                loss="mse", optimizer=Adam(0.01), metrics=["mse"])
    model.fit(Xs, ys, Xt, yt,
              epochs=50, batch_size=34, verbose=0)
    assert isinstance(model, Model)
    # the domain-indicator weight shrinks
    assert np.abs(model.encoder_.get_weights()[0][1][0]) < 0.2
    assert np.sum(np.abs(model.predict(Xs).ravel() - ys)) > 5
    assert np.sum(np.abs(model.predict(Xt).ravel() - yt)) < 11
    # predict_avg is exactly the mean of task and discrepancy-head outputs
    yp_avg = model.predict_avg(Xt)
    ypt = model.predict(Xt)
    ypd = model.predict_disc(Xt)
    assert np.all(yp_avg == 0.5 * (ypt+ypd))
def test_n_steps():
    """MCD with several generator steps per iteration still adapts correctly."""
    tf.random.set_seed(0)
    np.random.seed(0)
    model = MCD(_get_encoder(), _get_task(), n_steps=4,
                loss="mse", optimizer=Adam(0.01), metrics=["mse"])
    model.fit(Xs, ys, Xt, yt,
              epochs=50, batch_size=34, verbose=0)
    assert isinstance(model, Model)
    assert np.abs(model.encoder_.get_weights()[0][1][0]) < 0.1
    assert np.sum(np.abs(model.predict(Xt).ravel() - yt)) < 11
| 2,758 | 30 | 66 | py |
adapt | adapt-master/tests/test_regular.py | """
Test functions for regular module.
"""
import pytest
import numpy as np
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.gaussian_process import GaussianProcessRegressor, GaussianProcessClassifier
from sklearn.gaussian_process.kernels import Matern, WhiteKernel
from sklearn.base import clone
import tensorflow as tf
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Dense
try:
from tensorflow.keras.optimizers.legacy import Adam
except:
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.initializers import GlorotUniform
from adapt.utils import make_classification_da, make_regression_da
from adapt.parameter_based import (RegularTransferLR,
RegularTransferLC,
RegularTransferNN,
RegularTransferGP)
np.random.seed(0)
# Source: two clusters around 0 and 1; target: a single cluster around 0.
Xs = np.concatenate((
    np.random.randn(50)*0.1,
    np.random.randn(50)*0.1 + 1.,
)).reshape(-1, 1)
Xt = (np.random.randn(100) * 0.1).reshape(-1, 1)
# Regression targets: 0.2*x near 0, constant 10 for the shifted cluster.
ys_reg = np.array([0.2 * x if x<0.5 else
                   10 for x in Xs.ravel()]).reshape(-1, 1)
yt_reg = np.array([0.2 * x if x<0.5 else
                   10 for x in Xt.ravel()]).reshape(-1, 1)
# Classification targets in {-1, +1} from the same thresholds.
ys_classif = np.sign(np.array(
    [x<0 if x<0.5 else x<1 for x in Xs.ravel()]
    ).astype(float) - 0.5).reshape(-1, 1)
yt_classif = np.sign(np.array(
    [x<0 if x<0.5 else x<1 for x in Xt.ravel()]
    ).astype(float) - 0.5).reshape(-1, 1)
def _get_network(input_shape=(1,), output_shape=(1,)):
    """Bias-free linear network sized to output_shape, compiled with Adam(0.1)."""
    linear = Dense(np.prod(output_shape),
                   input_shape=input_shape,
                   kernel_initializer=GlorotUniform(seed=0),
                   use_bias=False)
    network = Sequential([linear])
    network.compile(loss="mse", optimizer=Adam(0.1))
    return network
def test_setup():
    """Sanity check: source-only fits are dominated by the off-target cluster."""
    reg = LinearRegression(fit_intercept=False)
    reg.fit(Xs, ys_reg)
    # the constant-10 cluster drags the slope close to 10
    assert np.abs(reg.coef_[0][0] - 10) < 1
    clf = LogisticRegression(penalty='none', solver='lbfgs')
    clf.fit(Xs, ys_classif)
    target_hits = (clf.predict(Xt) == yt_classif.ravel()).sum()
    assert target_hits < 70
def test_regularlr_fit():
    """lambda_ interpolates between a fresh target fit (0) and the source model (inf)."""
    np.random.seed(0)
    lr = LinearRegression(fit_intercept=False)
    lr.fit(Xs, ys_reg)
    # lambda_=0: pure target fit, slope near the true 0.2
    model = RegularTransferLR(lr, lambda_=0.)
    model.fit(Xt, yt_reg)
    assert np.abs(model.estimator_.coef_[0] - 0.2) < 1
    assert np.abs(model.predict(Xt) - yt_reg).sum() < 2
    # very large lambda_: coefficients pinned to the source model
    model = RegularTransferLR(lr, lambda_=1000000)
    model.fit(Xt, yt_reg)
    assert np.abs(model.estimator_.coef_[0] - 10) < 1
    assert np.abs(model.estimator_.coef_[0] - lr.coef_[0]) < 0.001
    # intermediate lambda_: coefficients in between
    model = RegularTransferLR(lr, lambda_=1.)
    model.fit(Xt, yt_reg)
    assert np.abs(model.estimator_.coef_[0] - 4) < 1
def test_regularlr_multioutput():
    """RegularTransferLR supports multi-output regression and exposes coef_/intercept_."""
    np.random.seed(0)
    X = np.random.randn(100, 5)+2.
    # two outputs copied straight from the first two features
    y = X[:, :2]
    lr = LinearRegression()
    lr.fit(X, y)
    model = RegularTransferLR(lr, lambda_=1.)
    model.fit(X, y)
    assert np.abs(model.predict(X) - y).sum() < 2
    assert np.all(model.coef_.shape == (2, 5))
    assert np.all(model.intercept_.shape == (2,))
    assert model.score(X, y) > 0.9
def test_regularlr_error():
    """Mismatched feature or output dimensions raise informative ValueErrors."""
    np.random.seed(0)
    # local Xs/Xt/ys/yt deliberately shadow the module-level fixtures
    Xs = np.random.randn(100, 5)
    Xt = np.random.randn(100, 5)
    ys = np.random.randn(100)
    yt = np.random.randn(100)
    lr = LinearRegression()
    lr.fit(Xs, ys)
    model = RegularTransferLR(lr, lambda_=1.)
    model.fit(Xt, yt)
    with pytest.raises(ValueError) as excinfo:
        model.fit(np.random.randn(100, 4), yt)
    assert "expected 5, got 4" in str(excinfo.value)
    with pytest.raises(ValueError) as excinfo:
        model.fit(Xt, np.random.randn(100, 2))
    assert "expected 1, got 2" in str(excinfo.value)
def test_regularlc_fit():
    """lambda_ interpolates between a fresh target classifier (0) and the source one (inf)."""
    np.random.seed(0)
    lr = LogisticRegression(penalty='none', solver='lbfgs')
    lr.fit(Xs, ys_classif)
    # lambda_=0: fits the target well
    model = RegularTransferLC(lr, lambda_=0)
    model.fit(Xt, yt_classif)
    assert (model.predict(Xt) == yt_classif.ravel()).sum() > 90
    # huge lambda_: stays at the (poorly transferring) source solution
    model = RegularTransferLC(lr, lambda_=100000000)
    model.fit(Xt, yt_classif)
    assert (model.predict(Xt) == yt_classif.ravel()).sum() < 70
    assert np.abs(model.estimator_.coef_[0][0] - lr.coef_[0][0]) < 0.001
    assert np.abs(model.estimator_.intercept_ - lr.intercept_[0]) < 0.001
    # moderate lambda_: best of both worlds on the target
    model = RegularTransferLC(lr, lambda_=1.2)
    model.fit(Xt, yt_classif)
    assert (model.predict(Xt) == yt_classif.ravel()).sum() > 95
def test_regularlc_multiclass():
    """RegularTransferLC supports three-class problems and exposes
    coef_/intercept_ with one row per class."""
    np.random.seed(0)
    X = np.random.randn(100, 5)
    y = np.zeros(len(X))
    y[X[:, :2].sum(1)<0] = 1
    y[X[:, 3:].sum(1)>0] = 2
    lr = LogisticRegression(penalty='none', solver='lbfgs')
    lr.fit(X, y)
    model = RegularTransferLC(lr, lambda_=1.)
    model.fit(X, y)
    assert (model.predict(X) == y).sum() > 90
    assert np.all(model.coef_.shape == (3, 5))
    assert np.all(model.intercept_.shape == (3,))
    assert model.score(X, y) > 0.9
def test_regularnn_fit():
    """RegularTransferNN: lambdas=0 lets the weights move freely toward
    the target, a huge lambdas keeps them at the source values."""
    tf.random.set_seed(0)
    np.random.seed(0)
    network = _get_network()
    network.fit(Xs, ys_reg, epochs=100, batch_size=100, verbose=0)
    model = RegularTransferNN(network, lambdas=0., optimizer=Adam(0.1))
    model.fit(Xt, yt_reg, epochs=100, batch_size=100, verbose=0)
    # assert np.abs(network.predict(Xs) - ys_reg).sum() < 1
    assert np.sum(np.abs(network.get_weights()[0] - model.get_weights()[0])) > 4.
    assert np.abs(model.predict(Xt) - yt_reg).sum() < 10
    model = RegularTransferNN(network, lambdas=10000000., optimizer=Adam(0.1))
    model.fit(Xt, yt_reg, epochs=100, batch_size=100, verbose=0)
    assert np.sum(np.abs(network.get_weights()[0] - model.get_weights()[0])) < 0.001
    assert np.abs(model.predict(Xt) - yt_reg).sum() > 10
def test_regularnn_reg():
    """RegularTransferNN accepts regularizer='l1' and rejects an
    unknown regularizer name with a ValueError."""
    tf.random.set_seed(0)
    np.random.seed(0)
    network = _get_network()
    network.fit(Xs, ys_reg, epochs=100, batch_size=100, verbose=0)
    model = RegularTransferNN(network, regularizer="l1")
    model.fit(Xt, yt_reg, epochs=100, batch_size=100, verbose=0)
    with pytest.raises(ValueError) as excinfo:
        model = RegularTransferNN(network, regularizer="l3")
    assert "l1' or 'l2', got, l3" in str(excinfo.value)
def test_clone():
    """sklearn.clone produces an independent, refittable copy of
    RegularTransferLR and RegularTransferLC."""
    Xs = np.random.randn(100, 5)
    ys = np.random.choice(2, 100)
    lr = LinearRegression()
    lr.fit(Xs, ys)
    model = RegularTransferLR(lr, lambda_=1.)
    model.fit(Xs, ys)
    new_model = clone(model)
    new_model.fit(Xs, ys)
    new_model.predict(Xs);
    assert model is not new_model
    lr = LogisticRegression(penalty='none', solver='lbfgs')
    lr.fit(Xs, ys)
    model = RegularTransferLC(lr, lambda_=1.)
    model.fit(Xs, ys)
    new_model = clone(model)
    new_model.fit(Xs, ys)
    new_model.predict(Xs);
    assert model is not new_model
def test_regulargp_reg():
    """Transferring a fitted GP regressor with RegularTransferGP
    improves the score on the target domain."""
    X_src, y_src, X_tgt, y_tgt = make_regression_da()
    kern = Matern() + WhiteKernel()
    source = GaussianProcessRegressor(kern)
    source.fit(X_src, y_src)
    score_before = source.score(X_tgt, y_tgt)
    transferred = RegularTransferGP(source, lambda_=1.)
    # Only three labelled target points are needed for the adaptation.
    transferred.fit(X_tgt[:3], y_tgt[:3])
    score_after = transferred.score(X_tgt, y_tgt)
    assert score_before < score_after
def test_regulargp_classif():
    """Transferring a fitted GP classifier with RegularTransferGP
    improves the score on the target domain."""
    X_src, y_src, X_tgt, y_tgt = make_classification_da()
    kern = Matern() + WhiteKernel()
    source = GaussianProcessClassifier(kern)
    source.fit(X_src, y_src)
    score_before = source.score(X_tgt, y_tgt)
    transferred = RegularTransferGP(source, lambda_=1.)
    # Only three labelled target points are needed for the adaptation.
    transferred.fit(X_tgt[:3], y_tgt[:3])
    score_after = transferred.score(X_tgt, y_tgt)
    assert score_before < score_after
def test_regulargp_multi_classif():
    """RegularTransferGP also improves the target score when the source
    labels contain more than two classes."""
    Xs, ys, Xt, yt = make_classification_da()
    # Introduce a third class to exercise the multiclass path.
    ys[:5] = 3
    kernel = Matern() + WhiteKernel()
    src_model = GaussianProcessClassifier(kernel)
    src_model.fit(Xs, ys)
    score1 = src_model.score(Xt, yt)
    tgt_model = RegularTransferGP(src_model, lambda_=1.)
    tgt_model.fit(Xt[:3], yt[:3])
    score2 = tgt_model.score(Xt, yt)
    assert score1 < score2
adapt | adapt-master/tests/test_wann.py | """
Test functions for wann module.
"""
import numpy as np
from sklearn.linear_model import LinearRegression
try:
from tensorflow.keras.optimizers.legacy import Adam
except:
from tensorflow.keras.optimizers import Adam
import tensorflow as tf
from adapt.instance_based import WANN
# Module-level fixtures: two-cluster source vs single-cluster target,
# labelled by a rule that is linear below 0.5 and constant (10) above.
np.random.seed(0)
Xs = np.concatenate((
    np.random.randn(50)*0.1,
    np.random.randn(50)*0.1 + 1.,
)).reshape(-1, 1)
Xt = (np.random.randn(100) * 0.1).reshape(-1, 1)
ys = np.array([0.2 * x if x<0.5
               else 10 for x in Xs.ravel()]).reshape(-1, 1)
yt = np.array([0.2 * x if x<0.5
               else 10 for x in Xt.ravel()]).reshape(-1, 1)
def test_fit():
    """WANN fits on source data plus a few target labels and predicts
    the target outputs with small absolute error."""
    np.random.seed(0)
    tf.random.set_seed(0)
    model = WANN(random_state=0, optimizer=Adam(0.01))
    model.fit(Xs, ys, Xt, yt, epochs=200, verbose=0)
    assert np.abs(model.predict(Xt) - yt).sum() < 10
| 871 | 27.129032 | 59 | py |
adapt | adapt-master/tests/test_cdan.py | """
Test functions for cdan module.
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Dense
try:
from tensorflow.keras.optimizers.legacy import Adam
except:
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.initializers import GlorotUniform
from adapt.feature_based import CDAN
from adapt.utils import make_classification_da
# Module-level fixtures: binary classification DA data plus
# one-hot encoded versions of the source (yss) and target (ytt) labels.
Xs, ys, Xt, yt = make_classification_da()
yss = np.zeros((len(ys), 2))
yss[ys==0, 0] = 1
yss[ys==1, 1] = 1
ytt = np.zeros((len(yt), 2))
ytt[yt==0, 0] = 1
ytt[yt==1, 1] = 1
def _entropy(x):
    """Row-wise Shannon entropy of a 2D array of probabilities.

    Note: rows containing exact zeros yield NaN (0 * log(0)).
    """
    return -(x * np.log(x)).sum(axis=1)
def _get_encoder(input_shape=Xs.shape[1:], units=10):
    """Single Dense layer encoder with a seeded initializer for
    reproducibility."""
    model = Sequential()
    model.add(Dense(units, input_shape=input_shape,
                    kernel_initializer=GlorotUniform(seed=0),))
    model.compile(loss="mse", optimizer="adam")
    return model
def _get_discriminator(input_shape=(10*2,)):
    """Two-layer sigmoid discriminator; default input is the encoder
    width times the number of classes (10*2), as CDAN multiplies
    features by predictions."""
    model = Sequential()
    model.add(Dense(10,
                    input_shape=input_shape,
                    kernel_initializer=GlorotUniform(seed=0),
                    activation="relu"))
    model.add(Dense(1, activation="sigmoid", kernel_initializer=GlorotUniform(seed=0)))
    model.compile(loss="mse", optimizer="adam")
    return model
def _get_task(input_shape=(10,)):
    """Softmax task head producing two-class probabilities."""
    model = Sequential()
    model.add(Dense(2,
                    kernel_initializer=GlorotUniform(seed=0),
                    input_shape=input_shape,
                    activation="softmax"))
    model.compile(loss="mse", optimizer=Adam(0.1))
    return model
def test_fit_lambda_zero():
    """With lambda_=0 (no adaptation) CDAN fits the source well but
    does not transfer: high source accuracy, low target accuracy."""
    tf.random.set_seed(1)
    np.random.seed(1)
    model = CDAN(_get_encoder(), _get_task(), _get_discriminator(),
                 lambda_=0, loss="categorical_crossentropy",
                 optimizer=Adam(0.001), metrics=["acc"],
                 random_state=0, validation_data=(Xt, ytt))
    model.fit(Xs, yss, Xt, ytt,
              epochs=300, verbose=0)
    assert model.history_['acc'][-1] > 0.9
    assert model.history_['val_acc'][-1] < 0.9
def test_fit_lambda_one_no_entropy():
    """With lambda_=1 (adversarial adaptation on, entropy weighting off)
    CDAN reaches good accuracy on both domains."""
    tf.random.set_seed(1)
    np.random.seed(1)
    model = CDAN(_get_encoder(), _get_task(), _get_discriminator(),
                 lambda_=1., entropy=False, loss="categorical_crossentropy",
                 optimizer=Adam(0.001), metrics=["acc"],
                 random_state=0, validation_data=(Xt, ytt))
    model.fit(Xs, yss, Xt, ytt,
              epochs=300, verbose=0)
    assert model.history_['acc'][-1] > 0.8
    assert model.history_['val_acc'][-1] > 0.8
def test_fit_lambda_entropy():
    """With entropy=True the discriminator output correlates positively
    with prediction entropy on the target and negatively on the source.
    The encoder is frozen so only discriminator/task weights move."""
    tf.random.set_seed(1)
    np.random.seed(1)
    encoder = _get_encoder()
    encoder.trainable = False
    model = CDAN(encoder, _get_task(), _get_discriminator(),
                 lambda_=1., entropy=True, loss="categorical_crossentropy",
                 optimizer=Adam(0.01), metrics=["acc"],
                 random_state=0)
    model.fit(Xs, yss, Xt, ytt,
              epochs=40, verbose=0)
    ys_disc = model.predict_disc(Xs).ravel()
    ys_ent = _entropy(model.predict(Xs))
    yt_disc = model.predict_disc(Xt).ravel()
    yt_ent = _entropy(model.predict(Xt))
    assert np.corrcoef(yt_ent, yt_disc)[0, 1] > 0.
    assert np.corrcoef(ys_ent, ys_disc)[0, 1] < 0.
def test_fit_max_features():
    """With max_features=10 CDAN uses random projection matrices
    (_random_task, _random_enc) instead of the full outer product."""
    tf.random.set_seed(1)
    np.random.seed(1)
    model = CDAN(_get_encoder(), _get_task(), _get_discriminator((10,)), max_features=10,
                 lambda_=0., entropy=False, loss="categorical_crossentropy",
                 optimizer=Adam(0.01), metrics=["acc"],
                 random_state=0)
    model.fit(Xs, yss, Xt, ytt,
              epochs=30, verbose=0)
    assert model._random_task.shape == (2, 10)
    assert model._random_enc.shape == (10, 10)
    # With no adaptation the discriminator separates the domains.
    assert model.predict_disc(Xt).mean() < 0.5
    assert model.predict_disc(Xs).mean() > 0.5
adapt | adapt-master/tests/test_base.py | """
Test base
"""
import copy
import shutil
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Dense
try:
from tensorflow.keras.optimizers.legacy import Adam
except:
from tensorflow.keras.optimizers import Adam
from sklearn.utils.estimator_checks import check_estimator
from sklearn.base import clone
from adapt.base import BaseAdaptEstimator, BaseAdaptDeep
from adapt.metrics import normalized_linear_discrepancy
# Module-level fixtures: source/target differ only in the constant
# second column (0 vs 1); labels are linear in the first column.
Xs = np.concatenate((
    np.linspace(0, 1, 100).reshape(-1, 1),
    np.zeros((100, 1))
), axis=1)
Xt = np.concatenate((
    np.linspace(0, 1, 100).reshape(-1, 1),
    np.ones((100, 1))
), axis=1)
ys = 0.2 * Xs[:, 0].ravel()
yt = 0.2 * Xt[:, 0].ravel()
def _custom_metric(yt, yp):
    """Metric returning the number of rows in the batch; used by
    test_complete_batch to observe the effective batch size."""
    return tf.shape(yt)[0]
class DummyFeatureBased(BaseAdaptEstimator):
    """Minimal feature-based adapter: the identity transform."""
    def fit_transform(self, Xs, **kwargs):
        return Xs
    def transform(self, Xs):
        return Xs
class DummyInstanceBased(BaseAdaptEstimator):
    """Minimal instance-based adapter: uniform unit weights."""
    def fit_weights(self, Xs, **kwargs):
        return np.ones(len(Xs))
    def predict_weights(self):
        # NOTE: hard-coded to the 100-sample fixtures in this module.
        return np.ones(100)
class DummyParameterBased(BaseAdaptEstimator):
    """Minimal parameter-based adapter: just fits the inner estimator."""
    def fit(self, Xs, ys):
        return self.fit_estimator(Xs, ys)
def test_base_adapt_estimator():
    """Run sklearn's estimator checks on BaseAdaptEstimator, tolerating
    only the expected 'implement transform or predict_weights' failure."""
    base_adapt = BaseAdaptEstimator(Xt=Xt)
    # check_estimator here is sklearn.utils.estimator_checks.check_estimator.
    for check in check_estimator(base_adapt, generate_only=True):
        try:
            check[1](base_adapt)
        except Exception as e:
            if "The Adapt model should implement a transform or predict_weights methods" in str(e):
                print(str(e))
            else:
                raise
def test_base_adapt_score():
    """score/unsupervised_score work for all three adapter kinds;
    unsupervised_score matches normalized_linear_discrepancy computed
    on transformed (feature-based) or bootstrapped (instance-based) data."""
    model = DummyParameterBased(Xt=Xt, random_state=0)
    model.fit(Xs, ys)
    model.score(Xt, yt)
    model = DummyFeatureBased(Xt=Xt, random_state=0)
    model.fit(Xs, ys)
    model.score(Xt, yt)
    s1 = model.unsupervised_score(Xs, Xt)
    s2 = normalized_linear_discrepancy(model.transform(Xs), Xt)
    assert s1 == s2
    model = DummyInstanceBased(Xt=Xt, random_state=0)
    model.fit(Xs, ys)
    model.score(Xt, yt)
    s1 = model.unsupervised_score(Xs, Xt)
    # Reproduce the weighted bootstrap used internally (uniform weights).
    np.random.seed(0)
    bs_index = np.random.choice(len(Xs), len(Xs), p=np.ones(len(Xs))/len(Xs))
    s2 = normalized_linear_discrepancy(Xs[bs_index], Xt)
    assert s1 == s2
# def test_base_adapt_val_sample_size():
# model = DummyFeatureBased(Xt=Xt, random_state=0, val_sample_size=10)
# model.fit(Xs, ys)
# model.score(Xt, yt)
# assert len(model.Xs_) == 10
# assert len(model.Xt_) == 10
# assert np.all(model.Xs_ == Xs[model.src_index_])
def test_base_adapt_keras_estimator():
    """BaseAdaptEstimator wraps a keras model: loss/optimizer/learning_rate
    overrides are applied, score matches evaluate, and deepcopy preserves
    the fitted state."""
    est = Sequential()
    est.add(Dense(1, input_shape=Xs.shape[1:]))
    est.compile(loss="mse", optimizer=Adam(0.01))
    model = BaseAdaptEstimator(est, Xt=Xt)
    model.fit(Xs, ys)
    # Without overrides the compiled settings are kept.
    assert model.estimator_.loss == "mse"
    assert isinstance(model.estimator_.optimizer, Adam)
    assert model.estimator_.optimizer.learning_rate == 0.01
    model = BaseAdaptEstimator(est, Xt=Xt, loss="mae",
                               optimizer=Adam(0.01, beta_1=0.5),
                               learning_rate=0.1)
    model.fit(Xs, ys)
    # Overrides take effect; learning_rate wins over the optimizer's own.
    assert model.estimator_.loss == "mae"
    assert isinstance(model.estimator_.optimizer, Adam)
    assert model.estimator_.optimizer.learning_rate == 0.1
    assert model.estimator_.optimizer.beta_1 == 0.5
    model = BaseAdaptEstimator(est, Xt=Xt, optimizer="sgd")
    model.fit(Xs, ys)
    assert not isinstance(model.estimator_.optimizer, Adam)
    # Same overrides starting from an *uncompiled* network.
    est = Sequential()
    est.add(Dense(1, input_shape=Xs.shape[1:]))
    model = BaseAdaptEstimator(est, Xt=Xt, loss="mae",
                               optimizer=Adam(0.01, beta_1=0.5),
                               learning_rate=0.1)
    model.fit(Xs, ys)
    assert model.estimator_.loss == "mae"
    assert isinstance(model.estimator_.optimizer, Adam)
    assert model.estimator_.optimizer.learning_rate == 0.1
    assert model.estimator_.optimizer.beta_1 == 0.5
    s1 = model.score(Xt[:10], yt[:10])
    s2 = model.estimator_.evaluate(Xt[:10], yt[:10])
    assert s1 == s2
    copy_model = copy.deepcopy(model)
    assert s1 == copy_model.score(Xt[:10], yt[:10])
    assert hex(id(model)) != hex(id(copy_model))
def test_base_adapt_deep():
    """BaseAdaptDeep end-to-end: fit/predict/score/transform, exact
    reproducibility under sklearn.clone with random_state, and
    save/load round-trip in the tf SavedModel format."""
    model = BaseAdaptDeep(Xt=Xt, loss="mse",
                          epochs=2,
                          optimizer=Adam(),
                          learning_rate=0.1,
                          random_state=0)
    model.fit(Xs, ys)
    yp = model.predict(Xt)
    score = model.score(Xt, yt)
    score_adapt = model.unsupervised_score(Xs, Xt)
    X_enc = model.transform(Xs)
    ypt = model.predict_task(Xt)
    ypd = model.predict_disc(Xt)
    # A cloned model refitted with the same seed must match exactly.
    new_model = clone(model)
    new_model.fit(Xs, ys)
    yp2 = new_model.predict(Xt)
    score2 = new_model.score(Xt, yt)
    score_adapt2 = new_model.unsupervised_score(Xs, Xt)
    X_enc2 = new_model.transform(Xs)
    ypt2 = new_model.predict_task(Xt)
    ypd2 = new_model.predict_disc(Xt)
    # Save / reload round-trip.
    model.save("model.tf", save_format="tf")
    new_model = tf.keras.models.load_model("model.tf")
    shutil.rmtree("model.tf")
    yp3 = new_model.predict(Xt)
    assert isinstance(model.optimizer, Adam)
    assert np.abs(model.optimizer.learning_rate.numpy() - 0.1) < 1e-6
    assert hasattr(model, "encoder_")
    assert hasattr(model, "task_")
    assert hasattr(model, "discriminator_")
    assert np.all(yp == yp2)
    assert score == score2
    assert score_adapt == score_adapt2
    assert np.all(ypt == ypt2)
    assert np.all(ypd == ypd2)
    assert np.all(X_enc == X_enc2)
    assert np.mean(np.abs(yp - yp3)) < 1e-6
def test_base_deep_validation_data():
    """fit accepts validation_data and validation_split, with and
    without target labels yt."""
    for ctor_kwargs in ({"Xt": Xt}, {"Xt": Xt, "yt": yt}):
        adapt_model = BaseAdaptDeep(**ctor_kwargs)
        adapt_model.fit(Xs, ys, validation_data=(Xt, yt))
        adapt_model.fit(Xs, ys, validation_split=0.1)
def test_base_deep_dataset():
    """fit/predict/evaluate accept numpy arrays, zipped tf.data Datasets
    and generator-backed Datasets."""
    model = BaseAdaptDeep()
    model.fit(Xs, ys, Xt=Xt, validation_data=(Xs, ys))
    model.predict(Xs)
    model.evaluate(Xs, ys)
    # Zipped (X, y) dataset as both source and target.
    dataset = tf.data.Dataset.zip((tf.data.Dataset.from_tensor_slices(Xs),
                                   tf.data.Dataset.from_tensor_slices(ys.reshape(-1,1))
                                   ))
    model = BaseAdaptDeep()
    model.fit(dataset, Xt=dataset, validation_data=dataset.batch(10))
    model.predict(tf.data.Dataset.from_tensor_slices(Xs).batch(32))
    model.evaluate(dataset.batch(32))
    # Generator-backed dataset (unknown cardinality up-front).
    def gens():
        for i in range(40):
            yield Xs[i], ys[i]
    dataset = tf.data.Dataset.from_generator(gens,
                                             output_shapes=([2], []),
                                             output_types=("float32", "float32"))
    model = BaseAdaptDeep()
    model.fit(dataset, Xt=Xt, validation_data=dataset.batch(10))
    model.predict(tf.data.Dataset.from_tensor_slices(Xs).batch(32))
    model.evaluate(dataset.batch(32))
def _unpack_data_ms(self, data):
    """Replacement _unpack_data treating the source part of the batch
    as multi-source (indexed by domain); bound onto a BaseAdaptDeep
    instance in test_multisource.

    Returns (Xs, Xt, ys, yt) with yt=None when the target side is
    unlabelled (not a tuple).
    """
    data_src = data[0]
    data_tgt = data[1]
    # Keep only the first source domain's batch.
    Xs = data_src[0][0]
    ys = data_src[1][0]
    if isinstance(data_tgt, tuple):
        Xt = data_tgt[0]
        yt = data_tgt[1]
        return Xs, Xt, ys, yt
    else:
        Xt = data_tgt
        return Xs, Xt, ys, None
def test_multisource():
    """Passing a domains= vector with two distinct values yields
    n_sources_ == 2, using the multi-source _unpack_data above."""
    np.random.seed(0)
    model = BaseAdaptDeep()
    # Bind the custom unpacker as a method of this instance.
    model._unpack_data = _unpack_data_ms.__get__(model)
    model.fit(Xs, ys, Xt=Xt, domains=np.random.choice(2, len(Xs)))
    model.predict(Xs)
    model.evaluate(Xs, ys)
    assert model.n_sources_ == 2
def test_complete_batch():
    """Check the effective batch size, step count and dataset lengths
    inferred by BaseAdaptDeep, using _custom_metric (which reports the
    batch row count) as a probe."""
    model = BaseAdaptDeep(Xt=Xt[:3], metrics=[_custom_metric])
    model.fit(Xs, ys, batch_size=120)
    assert model.history_["cm"][0] == 120
    model = BaseAdaptDeep(Xt=Xt[:10], yt=yt[:10], metrics=[_custom_metric])
    model.fit(Xs[:23], ys[:23], batch_size=17, buffer_size=1024)
    assert model.history_["cm"][0] == 17
    assert model.total_steps_ == 2
    # Unbatched zipped dataset: fit's batch_size applies.
    dataset = tf.data.Dataset.zip((tf.data.Dataset.from_tensor_slices(Xs),
                                   tf.data.Dataset.from_tensor_slices(ys.reshape(-1,1))
                                   ))
    Xtt = tf.data.Dataset.from_tensor_slices(Xt)
    model = BaseAdaptDeep(Xt=Xtt, metrics=[_custom_metric])
    model.fit(dataset, batch_size=32, validation_data=dataset)
    assert model.history_["cm"][0] == 32
    # Pre-batched dataset: its own batching (100/4=25) wins over batch_size.
    model = BaseAdaptDeep(Xt=Xtt.batch(32), metrics=[_custom_metric])
    model.fit(dataset.batch(32), batch_size=48, validation_data=dataset.batch(32))
    assert model.history_["cm"][0] == 25
    # Generator datasets of different lengths (40 source vs 50 target).
    def gens():
        for i in range(40):
            yield Xs[i], ys[i]
    dataset = tf.data.Dataset.from_generator(gens,
                                             output_shapes=([2], []),
                                             output_types=("float32", "float32"))
    def gent():
        for i in range(50):
            yield Xs[i], ys[i]
    dataset2 = tf.data.Dataset.from_generator(gent,
                                              output_shapes=([2], []),
                                              output_types=("float32", "float32"))
    model = BaseAdaptDeep(metrics=[_custom_metric])
    model.fit(dataset, Xt=dataset2, validation_data=dataset, batch_size=22)
    assert model.history_["cm"][0] == 22
    assert model.total_steps_ == 3
    assert model.length_src_ == 40
    assert model.length_tgt_ == 50
    model.fit(dataset, Xt=dataset2, validation_data=dataset, batch_size=32)
    assert model.total_steps_ == 2
    assert model.history_["cm"][-1] == 32
adapt | adapt-master/tests/test_finetuning.py | import numpy as np
import tensorflow as tf
from sklearn.base import clone
from adapt.utils import make_classification_da
from adapt.parameter_based import FineTuning
from tensorflow.keras.initializers import GlorotUniform
try:
from tensorflow.keras.optimizers.legacy import Adam
except:
from tensorflow.keras.optimizers import Adam
# Module-level fixtures: a seeded two-layer relu encoder, a sigmoid
# task head, ten random target indices (ind) and classification DA data.
np.random.seed(0)
tf.random.set_seed(0)
encoder = tf.keras.Sequential()
encoder.add(tf.keras.layers.Dense(50, activation="relu", kernel_initializer=GlorotUniform(seed=0)))
encoder.add(tf.keras.layers.Dense(50, activation="relu", kernel_initializer=GlorotUniform(seed=0)))
task = tf.keras.Sequential()
task.add(tf.keras.layers.Dense(1, activation="sigmoid", kernel_initializer=GlorotUniform(seed=0)))
ind = np.random.choice(100, 10)
Xs, ys, Xt, yt = make_classification_da()
def test_finetune():
    """FineTuning `training` flag: False freezes the encoder, True
    unfreezes it, a list freezes/unfreezes individual layers."""
    model = FineTuning(encoder=encoder, task=task, loss="bce", optimizer=Adam(), random_state=0)
    model.fit(Xs, ys, epochs=100, verbose=0)
    # Source-only model does not transfer well to the target.
    assert np.mean((model.predict(Xt).ravel()>0.5) == yt) < 0.7
    # training=False: encoder weights unchanged after fine-tuning.
    fine_tuned = FineTuning(encoder=model.encoder_, task=model.task_,
                            training=False,
                            loss="bce", optimizer=Adam(), random_state=0)
    fine_tuned.fit(Xt[ind], yt[ind], epochs=100, verbose=0)
    assert np.abs(fine_tuned.encoder_.get_weights()[0] - model.encoder_.get_weights()[0]).sum() == 0.
    assert np.mean((fine_tuned.predict(Xt).ravel()>0.5) == yt) > 0.6
    assert np.mean((fine_tuned.predict(Xt).ravel()>0.5) == yt) < 0.8
    # training=True: encoder weights move and target accuracy improves.
    fine_tuned = FineTuning(encoder=model.encoder_, task=model.task_,
                            training=True,
                            loss="bce", optimizer=Adam(), random_state=0)
    fine_tuned.fit(Xt[ind], yt[ind], epochs=100, verbose=0)
    assert np.abs(fine_tuned.encoder_.get_weights()[0] - model.encoder_.get_weights()[0]).sum() > 1.
    assert np.mean((fine_tuned.predict(Xt).ravel()>0.5) == yt) > 0.9
    # training=[True, False]: first layer frozen, last layer trainable.
    fine_tuned = FineTuning(encoder=model.encoder_, task=model.task_,
                            training=[True, False],
                            loss="bce", optimizer=Adam(), random_state=0)
    fine_tuned.fit(Xt[ind], yt[ind], epochs=100, verbose=0)
    assert np.abs(fine_tuned.encoder_.get_weights()[0] - model.encoder_.get_weights()[0]).sum() == 0.
    assert np.abs(fine_tuned.encoder_.get_weights()[-1] - model.encoder_.get_weights()[-1]).sum() > 1.
    # training=[False]: whole encoder frozen.
    fine_tuned = FineTuning(encoder=model.encoder_, task=model.task_,
                            training=[False],
                            loss="bce", optimizer=Adam(), random_state=0)
    fine_tuned.fit(Xt[ind], yt[ind], epochs=100, verbose=0)
    assert np.abs(fine_tuned.encoder_.get_weights()[0] - model.encoder_.get_weights()[0]).sum() == 0.
    assert np.abs(fine_tuned.encoder_.get_weights()[-1] - model.encoder_.get_weights()[-1]).sum() == 0
def test_finetune_pretrain():
    """FineTuning runs its pretraining phase (pretrain=True) without error."""
    finetune = FineTuning(
        encoder=encoder,
        task=task,
        pretrain=True,
        pretrain__epochs=2,
        loss="bce",
        optimizer=Adam(),
        random_state=0,
    )
    finetune.fit(Xs, ys, epochs=1, verbose=0)
def test_clone():
    """sklearn.clone produces an independent, refittable FineTuning copy."""
    model = FineTuning(encoder=encoder, task=task,
                       loss="bce", optimizer=Adam(), random_state=0)
    model.fit(Xs, ys, epochs=1, verbose=0)
    new_model = clone(model)
    new_model.fit(Xs, ys, epochs=1, verbose=0)
    new_model.predict(Xs);
    assert model is not new_model
adapt | adapt-master/tests/test_wdgrl.py | """
Test functions for wdgrl module.
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Dense
try:
from tensorflow.keras.optimizers.legacy import Adam
except:
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.initializers import GlorotUniform
from adapt.feature_based import WDGRL
# Module-level fixtures: source/target differ only in the constant
# second column (0 vs 1); labels are linear in the first column.
Xs = np.concatenate((
    np.linspace(0, 1, 100).reshape(-1, 1),
    np.zeros((100, 1))
), axis=1)
Xt = np.concatenate((
    np.linspace(0, 1, 100).reshape(-1, 1),
    np.ones((100, 1))
), axis=1)
ys = 0.2 * Xs[:, 0].ravel()
yt = 0.2 * Xt[:, 0].ravel()
def _get_encoder(input_shape=Xs.shape[1:]):
    """One-unit linear encoder with all-ones kernel and no bias, so the
    kernel weights directly reveal which input column is used."""
    model = Sequential()
    model.add(Dense(1, input_shape=input_shape,
                    kernel_initializer="ones",
                    use_bias=False))
    model.compile(loss="mse", optimizer="adam")
    return model
def _get_discriminator(input_shape=(1,)):
    """Two-layer critic with linear output (no sigmoid), as required by
    the Wasserstein objective."""
    model = Sequential()
    model.add(Dense(10,
                    input_shape=input_shape,
                    kernel_initializer=GlorotUniform(seed=0),
                    activation="elu"))
    model.add(Dense(1,
                    kernel_initializer=GlorotUniform(seed=0),
                    activation=None))
    model.compile(loss="mse", optimizer="adam")
    return model
def _get_task(input_shape=(1,), output_shape=(1,)):
    """Bias-free linear task head with seeded initializer."""
    model = Sequential()
    model.add(Dense(np.prod(output_shape),
                    kernel_initializer=GlorotUniform(seed=0),
                    use_bias=False,
                    input_shape=input_shape))
    model.compile(loss="mse", optimizer=Adam(0.1))
    return model
def test_fit_lambda_zero():
    """With lambda_=0 WDGRL does not adapt: the encoder keeps using the
    domain column (weight stays 1.0), so source error is tiny and
    target error is large."""
    tf.random.set_seed(1)
    np.random.seed(1)
    model = WDGRL(_get_encoder(), _get_task(), _get_discriminator(),
                  lambda_=0, loss="mse", optimizer=Adam(0.01), metrics=["mse"],
                  random_state=0)
    model.fit(Xs, ys, Xt, yt,
              epochs=300, verbose=0)
    assert isinstance(model, Model)
    assert model.encoder_.get_weights()[0][1][0] == 1.0
    assert np.sum(np.abs(model.predict(Xs).ravel() - ys)) < 0.01
    assert np.sum(np.abs(model.predict(Xt).ravel() - yt)) > 10
def test_fit_lambda_one():
    """With lambda_=1 WDGRL adapts: the encoder downweights the domain
    column relative to the informative one, and both domains are
    predicted with small error."""
    tf.random.set_seed(1)
    np.random.seed(1)
    model = WDGRL(_get_encoder(), _get_task(), _get_discriminator(),
                  lambda_=1, gamma=0, loss="mse", optimizer=Adam(0.01),
                  metrics=["mse"], random_state=0)
    model.fit(Xs, ys, Xt, yt,
              epochs=300, verbose=0)
    assert isinstance(model, Model)
    # Ratio domain-column weight / informative-column weight is small.
    assert np.abs(model.encoder_.get_weights()[0][1][0] /
                  model.encoder_.get_weights()[0][0][0]) < 0.2
    assert np.sum(np.abs(model.predict(Xs).ravel() - ys)) < 2
    assert np.sum(np.abs(model.predict(Xt).ravel() - yt)) < 5
adapt | adapt-master/tests/test_utils.py | """
Test functions for utils module.
"""
import copy
import numpy as np
import pytest
import tensorflow as tf
import tensorflow.keras.backend as K
from sklearn.linear_model import LinearRegression, LogisticRegression, Ridge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.multioutput import MultiOutputRegressor
from sklearn.compose import TransformedTargetRegressor
from sklearn.base import BaseEstimator, RegressorMixin, ClassifierMixin
from sklearn.tree._tree import Tree
# from tensorflow.keras.wrappers.scikit_learn import KerasClassifier, KerasRegressor
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Input, Dense, Flatten, Reshape
from tensorflow.python.keras.engine.input_layer import InputLayer
from adapt.utils import *
def is_equal_estimator(v1, v2):
    """Recursively assert that two estimator-like objects are equal.

    Handles numpy arrays, sklearn BaseEstimator (compares __dict__),
    keras Model (compares get_config()), dicts (ignoring keras layer
    names and input_shape entries, which change on copy), lists/tuples
    (element-wise), and sklearn Tree (currently skipped).
    Always returns True; inequality surfaces as an AssertionError.
    """
    assert type(v2) == type(v1)
    if isinstance(v1, np.ndarray):
        assert np.array_equal(v1, v2)
    elif isinstance(v1, BaseEstimator): # KerasClassifier, KerasRegressor
        assert is_equal_estimator(v1.__dict__, v2.__dict__)
    elif isinstance(v1, Model):
        assert is_equal_estimator(v1.get_config(),
                                  v2.get_config())
    elif isinstance(v1, dict):
        # Keys present in only one dict are tolerated only for input_shape.
        diff_keys = ((set(v1.keys())-set(v2.keys())) |
                     (set(v2.keys())-set(v1.keys())))
        for k in diff_keys:
            assert "input_shape" in k
        for k1_i, v1_i in v1.items():
            # Avoid exception due to new input layer name
            if k1_i != "name" and not "input_shape" in str(k1_i):
                v2_i = v2[k1_i]
                assert is_equal_estimator(v1_i, v2_i)
    elif isinstance(v1, (list, tuple)):
        assert len(v1) == len(v2)
        for v1_i, v2_i in zip(v1, v2):
            assert is_equal_estimator(v1_i, v2_i)
    elif isinstance(v1, Tree):
        pass # TODO create a function to check if two tree are equal
    else:
        # Skip keras-internal values mentioning "input" (names change on copy).
        if not "input" in str(v1):
            assert v1 == v2
    return True
class CustomEstimator(BaseEstimator):
    """Bare-bones estimator with a no-op fit, used in the estimators list."""
    def __init__(self):
        pass
    def fit(self, X, y):
        pass
class DummyModel(Model):
    """Empty keras Model subclass used by the (commented-out) force-copy test."""
    def __init__(self):
        super().__init__()
class CantBeDeepCopied(BaseEstimator):
    """Estimator stub whose deep-copy always fails.

    Used to exercise the force_copy error/warning paths of
    ``check_estimator``.
    """
    def __init__(self):
        pass
    def __deepcopy__(self, memo):
        # ``copy.deepcopy`` calls ``__deepcopy__(memo)``; without the
        # ``memo`` parameter the call itself raised a TypeError instead
        # of this intended ValueError.
        raise ValueError("Can not be deep copied!")
def _get_model_Model(compiled=True, custom_loss=False):
    """Functional-API model factory; optionally adds a custom loss via
    add_loss and/or compiles with mse/adam."""
    inputs = Input((10,))
    output = Dense(1)(inputs)
    model = Model(inputs, output)
    if custom_loss:
        loss = K.mean(output)
        model.add_loss(loss)
    if compiled:
        model.compile(loss="mse", optimizer="adam")
    return model
def _get_model_Sequential(input_shape=None, compiled=True):
    """Sequential model factory; input_shape=None leaves the model
    unbuilt until first call, exercising the deferred-build path."""
    model = Sequential()
    if input_shape is not None:
        model.add(Dense(1, input_shape=input_shape))
    else:
        model.add(Dense(1))
    if compiled:
        model.compile(loss="mse", optimizer="adam")
    return model
# Arrays of various ranks/values used to parametrize the nd checks.
arrays_nd = [np.ones((10, 1)), np.zeros((10, 10)),
             np.zeros((10, 5, 1)), np.full((10, 20), -5.5),
             np.ones((1, 1)), np.random.randn(1, 5, 5, 1)]
@pytest.mark.parametrize("z", arrays_nd)
def test_check_arrays_nd(z):
    """check_arrays passes nd arrays through unchanged."""
    Xs, ys = check_arrays(z, z)
    assert np.array_equal(Xs, z)
    assert np.array_equal(ys, z)
def test_check_arrays_length_error():
    """check_arrays rejects X and y of different lengths."""
    z = arrays_nd[0]
    with pytest.raises(ValueError) as excinfo:
        Xs, ys = check_arrays(z, z[:5])
    assert "Length of X and y mismatch: 10 != 5" in str(excinfo.value)
def test_check_arrays_no_array():
    """check_arrays rejects a non-array first argument with ValueError."""
    z = np.array([1, 2, 3])
    # The captured exception info and the unpacked results were unused;
    # a bare raises-context is sufficient here.
    with pytest.raises(ValueError):
        check_arrays("lala", z)
# Networks covering the compiled/uncompiled, built/unbuilt and
# custom-loss combinations of both model factories.
networks = [
    _get_model_Model(compiled=True, custom_loss=False),
    _get_model_Sequential(compiled=True, input_shape=(10,)),
    _get_model_Sequential(compiled=True, input_shape=None),
    _get_model_Model(compiled=False, custom_loss=False),
    _get_model_Model(compiled=False, custom_loss=True),
    _get_model_Sequential(compiled=False, input_shape=(10,)),
    _get_model_Sequential(compiled=False, input_shape=None)
]
@pytest.mark.parametrize("net", networks)
def test_check_network_network(net):
    """check_network returns an equivalent network with equal weights,
    both before and after the original is built by predict."""
    new_net = check_network(net)
    assert is_equal_estimator(new_net, net)
    if net.built:
        for i in range(len(net.get_weights())):
            assert np.array_equal(net.get_weights()[i],
                                  new_net.get_weights()[i])
    net.predict(np.ones((10, 10)))
    new_net = check_network(net)
    assert is_equal_estimator(new_net, net)
    for i in range(len(net.get_weights())):
        assert np.array_equal(net.get_weights()[i],
                              new_net.get_weights()[i])
@pytest.mark.parametrize("net", networks)
def test_check_network_copy(net):
    """copy=True returns a new object; copy=False returns the same one."""
    duplicated = check_network(net, copy=True)
    assert duplicated is not net
    aliased = check_network(net, copy=False)
    assert aliased is net
# Objects that are not keras Models, used to check the error path.
no_networks = ["lala", Ridge(), 123, np.ones((10, 10))]
@pytest.mark.parametrize("no_net", no_networks)
def test_check_network_no_model(no_net):
    """check_network raises ValueError on non-Model inputs."""
    with pytest.raises(ValueError) as excinfo:
        new_net = check_network(no_net)
    assert ("Expected `network` argument "
            "to be a `Model` instance,"
            " got: %s"%str(no_net) in str(excinfo.value))
#def test_check_network_force_copy():
# model = DummyModel()
# with pytest.raises(ValueError) as excinfo:
# new_net = check_network(model, copy=True, force_copy=True)
# assert ("`network` argument can't be duplicated. "
# "Recorded exception: " in str(excinfo.value))
#
# new_net = check_network(model, copy=False, force_copy=True)
# def test_check_network_high_dataset():
# Xs, ys, Xt, yt = make_regression_da(100000, 1001)
# net = _get_model_Sequential(compiled=True)
# new_net = check_network(net, copy=True)
# new_net.predict(Xs)
# sklearn estimators of various kinds (regressors, classifiers,
# meta-estimators, pipelines and a custom one) for the copy checks.
estimators = [
    Ridge(),
    Ridge(alpha=10, fit_intercept=False, tol=0.1),
    DecisionTreeClassifier(max_depth=10),
    AdaBoostRegressor(Ridge(alpha=0.01)),
    TransformedTargetRegressor(regressor=Ridge(alpha=25), transformer=StandardScaler()),
    MultiOutputRegressor(Ridge(alpha=0.3)),
    make_pipeline(StandardScaler(), Ridge(alpha=0.2)),
    # KerasClassifier(_get_model_Sequential, input_shape=(1,)),
    CustomEstimator()
]
@pytest.mark.parametrize("est", estimators)
def test_check_estimator_estimators(est):
    """check_estimator returns an equal copy both before and after the
    original estimator is fitted."""
    new_est = check_estimator(est, copy=True, force_copy=True)
    assert is_equal_estimator(est, new_est)
    if isinstance(est, MultiOutputRegressor):
        # MultiOutputRegressor needs a 2D target.
        est.fit(np.linspace(0, 1, 10).reshape(-1, 1),
                np.stack([np.linspace(0, 1, 10)<0.5]*2, -1).astype(float))
    else:
        est.fit(np.linspace(0, 1, 10).reshape(-1, 1),
                (np.linspace(0, 1, 10)<0.5).astype(float))
    # if isinstance(est, KerasClassifier):
    #     new_est = check_estimator(est, copy=False)
    # else:
    new_est = check_estimator(est, copy=True, force_copy=True)
    assert is_equal_estimator(est, new_est)
@pytest.mark.parametrize("est", networks[:3])
def test_check_estimator_networks(est):
    """check_estimator also accepts keras networks (the compiled ones)."""
    new_est = check_estimator(est)
    assert is_equal_estimator(est, new_est)
# Objects that are neither sklearn estimators nor keras Models.
no_estimators = ["lala", 123, np.ones((10, 10))]
@pytest.mark.parametrize("no_est", no_estimators)
def test_check_estimator_no_estimators(no_est):
    """check_estimator raises ValueError on invalid inputs, naming the
    argument via display_name in the message."""
    with pytest.raises(ValueError) as excinfo:
        new_est = check_estimator(no_est)
    assert ("`estimator` argument is neither a sklearn `BaseEstimator` "
            "instance nor a tensorflow Model instance. "
            "Given argument, %s"%str(no_est) in str(excinfo.value))
    with pytest.raises(ValueError) as excinfo:
        new_est = check_estimator(no_est, display_name="tireli")
    assert ("`tireli` argument is neither a sklearn `BaseEstimator` "
            "instance nor a tensorflow Model instance. "
            "Given argument, %s"%str(no_est) in str(excinfo.value))
@pytest.mark.parametrize("est", estimators)
def test_check_estimator_copy(est):
    """copy=True duplicates the estimator; copy=False aliases it."""
    duplicated = check_estimator(est, copy=True)
    assert duplicated is not est
    aliased = check_estimator(est, copy=False)
    assert aliased is est
def test_check_estimator_force_copy():
    """When deep-copy fails: force_copy=True raises ValueError,
    force_copy=False only warns, and copy=False skips copying entirely.
    display_name is reflected in the message."""
    est = CantBeDeepCopied()
    with pytest.raises(ValueError) as excinfo:
        new_est = check_estimator(est, copy=True, force_copy=True)
    assert ("`estimator` argument can't be duplicated. "
            "Recorded exception: " in str(excinfo.value))
    with pytest.raises(ValueError) as excinfo:
        new_est = check_estimator(est, copy=True, force_copy=True,
                                  display_name="tireli")
    assert ("`tireli` argument can't be duplicated. "
            "Recorded exception: " in str(excinfo.value))
    with pytest.warns(UserWarning) as record:
        new_est = check_estimator(est, copy=True, force_copy=False)
    assert ("`estimator` argument can't be duplicated. "
            "Recorded exception: " in str(record[0].message))
    with pytest.warns(UserWarning) as record:
        new_est = check_estimator(est, copy=True, force_copy=False,
                                  display_name="tireli")
    assert ("`tireli` argument can't be duplicated. "
            "Recorded exception: " in str(record[0].message))
    new_est = check_estimator(est, copy=False, force_copy=True)
def test_check_estimator_task():
    """check_estimator's task argument: defaults (LinearRegression /
    LogisticRegression), pass-through of compatible estimators, and
    ValueError when the estimator kind contradicts the task."""
    new_est = check_estimator()
    assert isinstance(new_est, LinearRegression)
    new_est = check_estimator(task="class")
    assert isinstance(new_est, LogisticRegression)
    new_est = check_estimator(DecisionTreeClassifier(),
                              task="class")
    assert isinstance(new_est, DecisionTreeClassifier)
    new_est = check_estimator(Ridge(),
                              task="reg")
    assert isinstance(new_est, Ridge)
    with pytest.raises(ValueError) as excinfo:
        new_est = check_estimator(DecisionTreeClassifier(), task="reg")
    assert ("`estimator` argument is a sklearn `ClassifierMixin` instance "
            "whereas the considered object handles only regression task. "
            "Please provide a sklearn `RegressionMixin` instance or a "
            "tensorflow Model instance." in str(excinfo.value))
    with pytest.raises(ValueError) as excinfo:
        new_est = check_estimator(DecisionTreeClassifier(), task="reg",
                                  display_name="tireli")
    assert ("`tireli` argument is a sklearn"
            " `ClassifierMixin` instance " in str(excinfo.value))
    with pytest.raises(ValueError) as excinfo:
        new_est = check_estimator(Ridge(), task="class")
    assert ("`estimator` argument is a sklearn `RegressionMixin` instance "
            "whereas the considered object handles only classification task. "
            "Please provide a sklearn `ClassifierMixin` instance or a "
            "tensorflow Model instance." in str(excinfo.value))
    with pytest.raises(ValueError) as excinfo:
        new_est = check_estimator(Ridge(), task="class",
                                  display_name="tireli")
    assert ("`tireli` argument is a sklearn"
            " `RegressionMixin` instance " in str(excinfo.value))
def test_get_default_encoder():
    """Default encoder is a Flatten followed by a 10-unit relu Dense."""
    net = get_default_encoder()
    assert isinstance(net.layers[0], Flatten)
    assert isinstance(net.layers[1], Dense)
    dense_cfg = net.layers[1].get_config()
    assert dense_cfg["units"] == 10
    assert dense_cfg["activation"] == "relu"
def test_get_default_task():
    """Default task: Flatten, two 10-unit relu layers, 1-unit linear output."""
    net = get_default_task()
    assert isinstance(net.layers[0], Flatten)
    expected = [(10, "relu"), (10, "relu"), (1, "linear")]
    for idx, (units, activation) in enumerate(expected, start=1):
        layer = net.layers[idx]
        assert isinstance(layer, Dense)
        cfg = layer.get_config()
        assert cfg["units"] == units
        assert cfg["activation"] == activation
def test_get_default_discriminator():
    # The default discriminator mirrors the default task network but
    # ends with a sigmoid unit (binary domain classification).
    model = get_default_discriminator()
    assert isinstance(model.layers[0], Flatten)
    expected = [(10, "relu"), (10, "relu"), (1, "sigmoid")]
    for layer, (units, activation) in zip(model.layers[1:4], expected):
        assert isinstance(layer, Dense)
        cfg = layer.get_config()
        assert cfg["units"] == units
        assert cfg["activation"] == activation
scales = [-1, 0, 1., 0.1]
@pytest.mark.parametrize("lambda_", scales)
def test_gradienthandler(lambda_):
    # Forward pass of GradientHandler must be the identity...
    handler = GradientHandler(lambda_)
    x = K.variable([1, 2, 3])
    assert np.all(handler(x) == x)
    # ...while the backward pass scales gradients by lambda_.
    with tf.GradientTape() as tape:
        out = handler(x)
    grad = tape.gradient(out, x)
    assert np.all(grad == lambda_ * np.ones(3))
    # lambda_ must be stored in the layer config for serialization.
    assert handler.get_config()['lambda_init'] == lambda_
def test_make_classification_da():
    # Shapes of the generated source/target classification datasets
    # must follow (n_samples, n_features), both defaulting to (100, 2).
    def _check(Xs, ys, Xt, yt, n, p):
        assert Xs.shape == (n, p)
        assert len(ys) == n
        assert Xt.shape == (n, p)
        assert len(yt) == n
    _check(*make_classification_da(), 100, 2)
    _check(*make_classification_da(1000, 10), 1000, 10)
def test_make_regression_da():
    # Shapes of the generated source/target regression datasets must
    # follow (n_samples, n_features), defaulting to (100, 1).
    def _check(Xs, ys, Xt, yt, n, p):
        assert Xs.shape == (n, p)
        assert len(ys) == n
        assert Xt.shape == (n, p)
        assert len(yt) == n
    _check(*make_regression_da(), 100, 1)
    _check(*make_regression_da(1000, 10), 1000, 10)
def test_accuracy():
    # Multi-class case: per-sample accuracy compares argmax of the
    # one-hot labels against argmax of the predicted scores.
    labels = tf.Variable([[0, 1, 0],
                          [1, 0, 0],
                          [1, 0, 0],
                          [0, 0, 1]],
                         dtype="float32")
    preds = tf.Variable([[0.5, 0.3, 0.2],
                         [0.9, 0.1, 0.],
                         [0.6, 0.1, 0.3],
                         [0.1, 0.7, 0.2]],
                        dtype="float32")
    assert np.all(np.array([0, 1, 1, 0]) == accuracy(labels, preds).numpy())
    # Binary case: a single score column is thresholded.
    labels = tf.Variable([[0], [1], [0]], dtype="float32")
    preds = tf.Variable([[0.6], [0.3], [0.2]], dtype="float32")
    assert np.all(np.array([0, 0, 1]) == accuracy(labels, preds).numpy())
def test_updatelambda():
    # UpdateLambda should ramp the model's lambda_ variable up to 1.
    # over the course of many training batches.
    callback = UpdateLambda()
    model = DummyModel()
    model.lambda_ = tf.Variable(0.)
    callback.model = model
    for _ in range(1000):
        callback.on_batch_end(0, None)
    assert model.lambda_.numpy() == 1.
def test_check_fitted_estimator():
    X = np.random.randn(10, 3)
    y = np.random.randn(10)
    base = LinearRegression()
    base.fit(X, y)
    # Wrapping a fitted sklearn estimator yields a new "Fitted*" object...
    wrapped = check_fitted_estimator(base)
    assert wrapped is not base
    assert wrapped.__class__.__name__ == "FittedLinearRegression"
    # ...and wrapping is idempotent on an already-wrapped estimator.
    rewrapped = check_fitted_estimator(wrapped)
    assert rewrapped is wrapped
    # Fresh instances of the fitted class keep the fitted attributes.
    clone = wrapped.__class__(fit_intercept=False)
    assert clone is not wrapped
    assert np.all(clone.coef_ == base.coef_)
    # NOTE(review): fit_intercept is asserted truthy even though the
    # constructor received False — presumably the fitted class restores
    # the original params as well; confirm against check_fitted_estimator.
    assert clone.fit_intercept
def test_check_fitted_network():
    X = np.random.randn(10, 3)
    y = np.random.randn(10)
    net = _get_model_Sequential()
    net.fit(X, y)
    # A fitted keras network passes through unchanged.
    assert check_fitted_network(net) is net
    # deepcopy is expected to return the very same object
    # (NOTE(review): presumably deepcopy is patched for fitted networks;
    # confirm in adapt.utils).
    duplicate = copy.deepcopy(net)
    assert duplicate is net
    # None is also passed through untouched.
    assert check_fitted_network(None) is None
| 16,182 | 34.567033 | 88 | py |
adapt | adapt-master/adapt/base.py | """
Base for adapt
"""
import warnings
import inspect
from copy import deepcopy
import numpy as np
import tensorflow as tf
from sklearn.base import BaseEstimator
from sklearn.utils import check_array
from sklearn.metrics.pairwise import KERNEL_PARAMS
from sklearn.exceptions import NotFittedError
from tensorflow.keras import Model
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier, KerasRegressor
try:
from tensorflow.keras.optimizers.legacy import RMSprop
except:
from tensorflow.keras.optimizers import RMSprop
from adapt.utils import (check_estimator,
check_network,
check_arrays,
set_random_seed,
check_sample_weight,
accuracy,
get_default_encoder,
get_default_task,
get_default_discriminator)
from adapt.metrics import normalized_linear_discrepancy
base_doc_est = dict(
estimator="""
estimator : sklearn estimator or tensorflow Model (default=None)
Estimator used to learn the task.
If estimator is ``None``, a ``LinearRegression``
instance is used as estimator.
""",
encoder="""
encoder : tensorflow Model (default=None)
Encoder netwok. If ``None``, a shallow network with 10
neurons and ReLU activation is used as encoder network.
""",
task="""
task : tensorflow Model (default=None)
Task netwok. If ``None``, a two layers network with 10
neurons per layer and ReLU activation is used as task network.
""",
discriminator="""
discriminator : tensorflow Model (default=None)
Discriminator netwok. If ``None``, a two layers network with 10
neurons per layer and ReLU activation is used as discriminator
network. Note that the output shape of the discriminator should
be ``(None, 1)`` and a ``sigmoid`` activation should be used.
""",
weighter="""
weighter : tensorflow Model (default=None)
Encoder netwok. If ``None``, a two layers network with 10
neurons per layer and ReLU activation is used as
weighter network.
"""
)
base_doc_Xt = """
Xt : numpy array (default=None)
Target input data.
"""
base_doc_Xt_yt = """
Xt : numpy array (default=None)
Target input data.
yt : numpy array (default=None)
Target output data.
"""
base_doc_2 ="""
copy : boolean (default=True)
Whether to make a copy of ``estimator`` or not.
verbose : int (default=1)
Verbosity level.
random_state : int (default=None)
Seed of random generator.
params : key, value arguments
Arguments given at the different level of the adapt object.
It can be, for instance, compile or fit parameters of the
estimator or kernel parameters etc...
Accepted parameters can be found by calling the method
``_get_legal_params(params)``.
"""
base_doc_other_params="""
Yields
------
optimizer : str or instance of tf.keras.optimizers (default="rmsprop")
Optimizer for the task. It should be an
instance of tf.keras.optimizers as:
``tf.keras.optimizers.SGD(0.001)`` or
``tf.keras.optimizers.Adam(lr=0.001, beta_1=0.5)``.
A string can also be given as ``"adam"``.
Default optimizer is ``rmsprop``.
loss : str or instance of tf.keras.losses (default="mse")
Loss for the task. It should be an
instance of tf.keras.losses as:
``tf.keras.losses.MeanSquaredError()`` or
``tf.keras.losses.CategoricalCrossentropy()``.
A string can also be given as ``"mse"`` or
``categorical_crossentropy``.
Default loss is ``mse``.
metrics : list of str or list of tf.keras.metrics.Metric instance
List of metrics to be evaluated by the model during training
and testing. Typically you will use ``metrics=['accuracy']``.
optimizer_enc : str or instance of tf.keras.optimizers
If the Adapt Model has an ``encoder`` attribute,
a specific optimizer for the ``encoder`` network can
be given. Typically, this parameter can be used to
give a smaller learning rate to the encoder.
If not specified, ``optimizer_enc=optimizer``.
optimizer_disc : str or instance of tf.keras.optimizers
If the Adapt Model has a ``discriminator`` attribute,
a specific optimizer for the ``discriminator`` network can
be given. If not specified, ``optimizer_disc=optimizer``.
kwargs : key, value arguments
Any arguments of the ``fit`` method from the Tensorflow
Model can be given, as ``epochs`` and ``batch_size``.
Specific arguments from ``optimizer`` can also be given
as ``learning_rate`` or ``beta_1`` for ``Adam``.
This allows to perform ``GridSearchCV`` from scikit-learn
on these arguments.
"""
def make_insert_doc(estimators=["estimator"], supervised=False):
    """
    Abstract for adding common parameters
    to the docstring
    Parameters
    ----------
    estimators : list (default=['estimator'])
        list of estimators docstring to add.
    Returns
    -------
    func
    """
    # Class decorator: splices the shared docstring fragments defined above
    # into the decorated class's "Parameters" section, and appends the
    # common trailing parameters (copy/verbose/...) just before the next
    # section header. ``estimators`` is only read, never mutated, so the
    # mutable default list is harmless here.
    def insert_base_doc(func):
        # Change signature of Deep Model
        # (drop `self` so the exposed __init__ signature reads like a
        # plain constructor in the generated docs).
        if "BaseAdaptDeep" in func.__bases__[0].__name__:
            sign = inspect.signature(func.__init__)
            parameters = dict(sign.parameters)
            parameters.pop("self", None)
            sign = sign.replace(parameters=list(parameters.values()))
            func.__signature__ = sign
        doc = func.__doc__
        if "Parameters" in doc:
            # splits[1] is everything after the "Parameters" header.
            splits = doc.split("Parameters")
            # Advance i past two newlines: the end of the "----------"
            # underline, i.e. the start of the first documented parameter.
            n_count = 0
            i = 0
            while (i<len(splits[1])) and (n_count<2):
                char = splits[1][i]
                if char == "\n":
                    n_count+=1
                i+=1
            # Scan j forward to the next "---" run, which marks the
            # underline of the following section (Returns/Attributes/...).
            j = i
            word = ""
            while (j<len(splits[1])) and (word != "---"):
                j+=1
                word = splits[1][j:j+3]
            if word == "---":
                # Back j up over two newlines so it lands just before the
                # next section's title line.
                n_count = 0
                while (j<len(splits[1])) and (n_count<2):
                    char = splits[1][j]
                    if char == "\n":
                        n_count+=1
                    j-=1
            # Assemble the fragments: estimator/network args + Xt(/yt)
            # go before the class's own parameters; the common trailing
            # args (and, for deep models, the "Yields" section) go after.
            doc_est = ""
            for est in estimators:
                doc_est += base_doc_est[est]
            if supervised:
                doc_1 = base_doc_Xt_yt
            else:
                doc_1 = base_doc_Xt
            doc_2 = base_doc_2
            if "BaseAdaptDeep" in func.__bases__[0].__name__:
                doc_2 += base_doc_other_params
            splits[1] = (
                splits[1][:i-1]+
                doc_est+doc_1+
                splits[1][i-1:j+1]+
                doc_2+
                splits[1][j+1:]
            )
            new_doc = splits[0]+"Parameters"+splits[1]
        else:
            # No "Parameters" section: leave the docstring untouched.
            new_doc = doc
        func.__doc__ = new_doc
        return func
    return insert_base_doc
class BaseAdapt:
    """
    Mixin shared by all Adapt models (shallow sklearn-style estimators
    and deep tensorflow models).
    Provides scikit-learn compatible parameter handling
    (``get_params``/``set_params`` backed by ``_get_legal_params``) and an
    unsupervised model-selection score based on the normalized linear
    discrepancy between the adapted source data and the target data.
    """
    def get_params(self, deep=True):
        """
        Get parameters for this estimator.
        Parameters
        ----------
        deep : bool, default=True
            Not used, here for scikit-learn compatibility.
        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        # Report only the instance attributes that are recognized as
        # legal parameters (constructor args, estimator fit/compile args,
        # kernel params...), not every attribute set on the object.
        out = dict()
        legal_params = self._get_legal_params(self.__dict__)
        params_names = set(self.__dict__) & set(legal_params)
        for key in params_names:
            value = getattr(self, key)
            out[key] = value
        return out
    def set_params(self, **params):
        """
        Set the parameters of this estimator.
        Parameters
        ----------
        **params : dict
            Estimator parameters.
        Returns
        -------
        self : estimator instance
            Estimator instance.
        """
        if not params:
            return self
        # Reject unknown parameter names before mutating anything.
        self._check_params(params)
        for key, value in params.items():
            setattr(self, key, value)
        return self
    def unsupervised_score(self, Xs, Xt):
        """
        Return unsupervised score.
        The normalized discrepancy distance is computed
        between the reweighted/transformed source input
        data and the target input data.
        Parameters
        ----------
        Xs : array
            Source input data.
        Xt : array
            Source input data.
        Returns
        -------
        score : float
            Unsupervised score.
        """
        Xs = check_array(Xs, accept_sparse=True)
        Xt = check_array(Xt, accept_sparse=True)
        if hasattr(self, "transform"):
            # Feature-based model: map both domains through the learned
            # transformation (domain-specific if `transform` accepts a
            # `domain` keyword).
            args = [
                p.name
                for p in inspect.signature(self.transform).parameters.values()
                if p.name != "self" and p.kind != p.VAR_KEYWORD
            ]
            if "domain" in args:
                Xt = self.transform(Xt, domain="tgt")
                Xs = self.transform(Xs, domain="src")
            else:
                Xt = self.transform(Xt)
                Xs = self.transform(Xs)
        elif hasattr(self, "predict_weights"):
            # Instance-based model: bootstrap-resample the source data
            # according to the learned importance weights.
            sample_weight = self.predict_weights()
            sample_weight = sample_weight  # no-op, kept as-is
            sample_weight = check_sample_weight(sample_weight, Xs)
            sample_weight /= sample_weight.sum()
            set_random_seed(self.random_state)
            bootstrap_index = np.random.choice(
            Xs.shape[0], size=Xs.shape[0], replace=True, p=sample_weight)
            Xs = Xs[bootstrap_index]
        else:
            raise ValueError("The Adapt model should implement"
                             " a transform or predict_weights methods")
        return normalized_linear_discrepancy(Xs, Xt)
    def _check_params(self, params):
        # Raise if any given key is not accepted by this model
        # (see _get_legal_params for what counts as legal).
        legal_params = self._get_legal_params(params)
        for key in params:
            if not key in legal_params:
                raise ValueError("%s is not a legal params for %s model. "
                                 "Legal params are: %s"%
                                 (key, self.__class__.__name__, str(legal_params)))
    def _get_param_names(self):
        """Get parameter names for the estimator"""
        # Adapted from scikit-learn's BaseEstimator._get_param_names:
        # introspect the __init__ signature to list constructor params.
        # fetch the constructor or the original constructor before
        # deprecation wrapping if any
        init = getattr(self.__init__, "deprecated_original", self.__init__)
        if init is object.__init__:
            # No explicit constructor to introspect
            return []
        # introspect the constructor arguments to find the model parameters
        # to represent
        init_signature = inspect.signature(init)
        # Consider the constructor parameters excluding 'self'
        parameters = [
            p
            for p in init_signature.parameters.values()
            if p.name != "self" and p.kind != p.VAR_KEYWORD
        ]
        for p in parameters:
            if p.kind == p.VAR_POSITIONAL:
                raise RuntimeError(
                    "scikit-learn estimators should always "
                    "specify their parameters in the signature"
                    " of their __init__ (no varargs)."
                    " %s with constructor %s doesn't "
                    " follow this convention." % ("dummy", init_signature)
                )
        # Extract and sort argument names excluding 'self'
        return sorted([p.name for p in parameters])
    def _filter_params(self, func, override={}, prefix=""):
        # Collect the instance attributes whose names match `func`'s
        # signature and return them as kwargs for `func`.
        # With a prefix (e.g. "pretrain"), attributes named
        # "<prefix>__<arg>" are selected and un-prefixed instead.
        # NOTE: `override` (mutable default) is only read, never mutated.
        kwargs = {}
        args = [
            p.name
            for p in inspect.signature(func).parameters.values()
            if p.name != "self" and p.kind != p.VAR_KEYWORD
        ]
        for key, value in self.__dict__.items():
            new_key = key.replace(prefix+"__", "")
            if new_key in args and prefix in key:
                kwargs[new_key] = value
        kwargs.update(override)
        return kwargs
    def _get_target_data(self, X, y):
        # Fall back on the Xt/yt given at construction time when the
        # fit call does not provide them.
        if X is None:
            X = self.Xt
        if y is None:
            y = self.yt
        if X is None:
            raise ValueError("Argument `Xt` should be given in `fit`"
                             " when `self.Xt` is None.")
        return X, y
    def _check_domains(self, domains):
        # Remap arbitrary domain labels to contiguous integers 0..k-1,
        # recording the original labels in self._domains_dict.
        domains = check_array(domains, ensure_2d=False)
        if len(domains.shape) > 1:
            raise ValueError("`domains` should be 1D array")
        self._domains_dict = {}
        new_domains = np.zeros(len(domains))
        unique = np.unique(domains)
        for dom, i in zip(unique, range(len(unique))):
            new_domains[domains==dom] = i
            self._domains_dict[i] = dom
        return new_domains
class BaseAdaptEstimator(BaseAdapt, BaseEstimator):
    """
    Base class for shallow (scikit-learn style) Adapt models.
    Subclasses implement either ``fit_weights`` (instance-based adaptation)
    or ``fit_transform`` (feature-based adaptation); ``fit`` then trains the
    wrapped ``estimator`` on the reweighted or transformed source data.
    The wrapped estimator may be a scikit-learn estimator or a compiled
    tensorflow ``Model``.
    """
    def __init__(self,
                 estimator=None,
                 Xt=None,
                 yt=None,
                 verbose=1,
                 copy=True,
                 random_state=None,
                 **params):
        # Extra **params (estimator fit/compile args, kernel params...)
        # are validated then stored as attributes; they are later routed
        # to the right calls by _filter_params.
        set_random_seed(random_state)
        self.estimator = estimator
        self.Xt = Xt
        self.yt = yt
        self.verbose = verbose
        self.random_state = random_state
        self.copy = copy
        self._check_params(params)
        for key, value in params.items():
            setattr(self, key, value)
    def fit(self, X, y, Xt=None, yt=None, domains=None, **fit_params):
        """
        Fit Adapt Model.
        For feature-based models, the transformation of the
        input features ``Xs`` and ``Xt`` is first fitted. In a second
        stage, the ``estimator_`` is fitted on the transformed features.
        For instance-based models, source importance weights are
        first learned based on ``Xs, ys`` and ``Xt``. In a second
        stage, the ``estimator_`` is fitted on ``Xs, ys`` with the learned
        importance weights.
        Parameters
        ----------
        X : numpy array
            Source input data.
        y : numpy array
            Source output data.
        Xt : array (default=None)
            Target input data. If None, the `Xt` argument
            given in `init` is used.
        yt : array (default=None)
            Target input data. Only needed for supervised
            and semi-supervised Adapt model.
            If None, the `yt` argument given in `init` is used.
        domains : array (default=None)
            Vector giving the domain for each source
            data. Can be used for multisource purpose.
        fit_params : key, value arguments
            Arguments given to the fit method of
            the estimator.
        Returns
        -------
        self : returns an instance of self
        """
        Xt, yt = self._get_target_data(Xt, yt)
        X, y = check_arrays(X, y)
        self.n_features_in_ = X.shape[1]
        if yt is not None:
            Xt, yt = check_arrays(Xt, yt)
        else:
            Xt = check_array(Xt, ensure_2d=True, allow_nd=True)
        set_random_seed(self.random_state)
        self.n_features_in_ = X.shape[1]
        # Instance-based branch: learn importance weights and pass them
        # as sample_weight to the estimator (multiplied into any
        # user-provided sample_weight).
        if hasattr(self, "fit_weights"):
            if self.verbose:
                print("Fit weights...")
            out = self.fit_weights(Xs=X, Xt=Xt,
                                   ys=y, yt=yt,
                                   domains=domains)
            # fit_weights may also return modified (X, y) along with
            # the weights.
            if isinstance(out, tuple):
                self.weights_ = out[0]
                X = out[1]
                y = out[2]
            else:
                self.weights_ = out
            if "sample_weight" in fit_params:
                fit_params["sample_weight"] *= self.weights_
            else:
                fit_params["sample_weight"] = self.weights_
        # Feature-based branch: fit the transformation and train the
        # estimator on the transformed source data.
        elif hasattr(self, "fit_transform"):
            if self.verbose:
                print("Fit transform...")
            out = self.fit_transform(Xs=X, Xt=Xt,
                                     ys=y, yt=yt,
                                     domains=domains)
            if isinstance(out, tuple):
                X = out[0]
                y = out[1]
            else:
                X = out
        if self.verbose:
            print("Fit Estimator...")
        self.fit_estimator(X, y, **fit_params)
        return self
    def fit_estimator(self, X, y, sample_weight=None,
                      random_state=None, warm_start=True,
                      **fit_params):
        """
        Fit estimator on X, y.
        Parameters
        ----------
        X : array
            Input data.
        y : array
            Output data.
        sample_weight : array
            Importance weighting.
        random_state : int (default=None)
            Seed of the random generator
        warm_start : bool (default=True)
            If True, continue to fit ``estimator_``,
            else, a new estimator is fitted based on
            a copy of ``estimator``. (Be sure to set
            ``copy=True`` to use ``warm_start=False``)
        fit_params : key, value arguments
            Arguments given to the fit method of
            the estimator and to the compile method
            for tensorflow estimator.
        Returns
        -------
        estimator_ : fitted estimator
        """
        X, y = check_arrays(X, y, accept_sparse=True)
        set_random_seed(random_state)
        if (not warm_start) or (not hasattr(self, "estimator_")):
            estimator = self.estimator
            self.estimator_ = check_estimator(estimator,
                                              copy=self.copy,
                                              force_copy=True)
            # Tensorflow estimators need to be (re)compiled: take loss
            # and optimizer from self's stored params, falling back on
            # the original estimator's compiled configuration.
            if isinstance(self.estimator_, Model):
                compile_params = self._filter_params(self.estimator_.compile)
                if not "loss" in compile_params:
                    if estimator._is_compiled:
                        compile_params["loss"] = deepcopy(estimator.loss)
                    else:
                        raise ValueError("The given `estimator` argument"
                                         " is not compiled yet. "
                                         "Please give a compiled estimator or "
                                         "give a `loss` and `optimizer` arguments.")
                if not "optimizer" in compile_params:
                    if estimator._is_compiled:
                        compile_params["optimizer"] = deepcopy(estimator.optimizer)
                else:
                    # An optimizer was provided: rebuild it with any
                    # optimizer-specific params (e.g. learning_rate)
                    # stored on self.
                    if not isinstance(compile_params["optimizer"], str):
                        optim_params = self._filter_params(
                            compile_params["optimizer"].__init__)
                        if len(optim_params) > 0:
                            kwargs = compile_params["optimizer"].get_config()
                            kwargs.update(optim_params)
                            optimizer = compile_params["optimizer"].__class__(**kwargs)
                        else:
                            optimizer = compile_params["optimizer"]
                        compile_params["optimizer"] = optimizer
                self.estimator_.compile(**compile_params)
        fit_params = self._filter_params(self.estimator_.fit, fit_params)
        fit_args = [
            p.name
            for p in inspect.signature(self.estimator_.fit).parameters.values()
            if p.name != "self" and p.kind != p.VAR_KEYWORD
        ]
        if "sample_weight" in fit_args:
            # Estimator supports sample_weight natively.
            sample_weight = check_sample_weight(sample_weight, X)
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                self.estimator_.fit(X, y,
                                    sample_weight=sample_weight,
                                    **fit_params)
        else:
            if sample_weight is None:
                self.estimator_.fit(X, y, **fit_params)
            else:
                # No sample_weight support: emulate weighting with a
                # weighted bootstrap resampling of the training data.
                sample_weight = check_sample_weight(sample_weight, X)
                sample_weight /= sample_weight.sum()
                bootstrap_index = np.random.choice(
                len(X), size=len(X), replace=True,
                p=sample_weight)
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    self.estimator_.fit(X[bootstrap_index],
                                        y[bootstrap_index],
                                        **fit_params)
        return self.estimator_
    def predict_estimator(self, X, **predict_params):
        """
        Return estimator predictions for X.
        Parameters
        ----------
        X : array
            input data
        Returns
        -------
        y_pred : array
            prediction of estimator.
        """
        X = check_array(X, ensure_2d=True, allow_nd=True, accept_sparse=True)
        predict_params = self._filter_params(self.estimator_.predict,
                                             predict_params)
        return self.estimator_.predict(X, **predict_params)
    def predict(self, X, domain=None, **predict_params):
        """
        Return estimator predictions after
        adaptation.
        For feature-based method (object which implements
        a ``transform`` method), the input feature ``X``
        are first transformed. Then the ``predict`` method
        of the fitted estimator ``estimator_`` is applied
        on the transformed ``X``.
        Parameters
        ----------
        X : array
            input data
        domain : str (default=None)
            For antisymetric feature-based method,
            different transformation of the input X
            are applied for different domains. The domain
            should then be specified between "src" and "tgt".
            If ``None`` the default transformation is the
            target one.
        Returns
        -------
        y_pred : array
            prediction of the Adapt Model.
        """
        X = check_array(X, ensure_2d=True, allow_nd=True, accept_sparse=True)
        if hasattr(self, "transform"):
            if domain is None:
                domain = "tgt"
            args = [
                p.name
                for p in inspect.signature(self.transform).parameters.values()
                if p.name != "self" and p.kind != p.VAR_KEYWORD
            ]
            if "domain" in args:
                X = self.transform(X, domain=domain)
            else:
                X = self.transform(X)
        return self.predict_estimator(X, **predict_params)
    def score(self, X, y, sample_weight=None, domain=None):
        """
        Return the estimator score.
        If the object has a ``transform`` method, the
        estimator is applied on the transformed
        features X. For antisymetric transformation,
        a parameter domain can be set to specified
        between source and target transformation.
        Call `score` on sklearn estimator and
        `evaluate` on tensorflow Model.
        Parameters
        ----------
        X : array
            input data
        y : array
            output data
        sample_weight : array (default=None)
            Sample weights
        domain : str (default=None)
            This parameter specifies for antisymetric
            feature-based method which transformation
            will be applied between "source" and "target".
            If ``None`` the transformation by default is
            the target one.
        Returns
        -------
        score : float
            estimator score.
        """
        X, y = check_arrays(X, y, accept_sparse=True)
        if domain is None:
            # NOTE(review): default is "target" here while `predict`
            # defaults to "tgt" — presumably subclasses accept both;
            # confirm against the transform implementations.
            domain = "target"
        if hasattr(self, "transform"):
            args = [
                p.name
                for p in inspect.signature(self.transform).parameters.values()
                if p.name != "self" and p.kind != p.VAR_KEYWORD
            ]
            if "domain" in args:
                X = self.transform(X, domain=domain)
            else:
                X = self.transform(X)
        if hasattr(self.estimator_, "score"):
            score = self.estimator_.score(X, y, sample_weight)
        elif hasattr(self.estimator_, "evaluate"):
            # Evaluate in a single batch when X is small enough to fit
            # comfortably in memory; otherwise use the default batching.
            if np.prod(X.shape) <= 10**8:
                score = self.estimator_.evaluate(
                    X, y,
                    sample_weight=sample_weight,
                    batch_size=len(X)
                )
            else:
                score = self.estimator_.evaluate(
                    X, y,
                    sample_weight=sample_weight
                )
            # evaluate returns [loss, *metrics] when metrics are set:
            # keep only the loss.
            if isinstance(score, (tuple, list)):
                score = score[0]
        else:
            raise ValueError("Estimator does not implement"
                             " score or evaluate method")
        return score
    def _get_legal_params(self, params):
        # Build the set of accepted parameter names: own __init__ args,
        # the estimator's fit/predict (and compile) args, the optimizer's
        # __init__ args and, for kernel methods, the kernel's params.
        # Warning: additional fit and compile parameters can be given in set_params
        # thus, we should also check estimator, optimizer in __dict__
        legal_params_fct = [self.__init__]
        if "estimator" in params:
            estimator = params["estimator"]
        else:
            estimator = self.estimator
        if hasattr(estimator, "fit"):
            legal_params_fct.append(estimator.fit)
        if hasattr(estimator, "predict"):
            legal_params_fct.append(estimator.predict)
        if isinstance(estimator, Model):
            legal_params_fct.append(estimator.compile)
        if "optimizer" in params:
            optimizer = params["optimizer"]
        elif hasattr(self, "optimizer"):
            optimizer = self.optimizer
        else:
            optimizer = None
        if (optimizer is not None) and (not isinstance(optimizer, str)):
            legal_params_fct.append(optimizer.__init__)
        legal_params = ["domain"]
        for func in legal_params_fct:
            args = [
                p.name
                for p in inspect.signature(func).parameters.values()
                if p.name != "self" and p.kind != p.VAR_KEYWORD
            ]
            legal_params = legal_params + args
        # Add kernel params for kernel based algorithm
        if "kernel" in params:
            kernel = params["kernel"]
        elif hasattr(self, "kernel"):
            kernel = self.kernel
        else:
            kernel = None
        if kernel is not None:
            legal_params += list(KERNEL_PARAMS[kernel])
        legal_params = set(legal_params)
        legal_params.discard("self")
        return legal_params
    def __getstate__(self):
        # Pickling support: keras Models are not picklable directly, so
        # replace them with a config dict (see _get_config_keras_model).
        dict_ = {k: v for k, v in self.__dict__.items()}
        if "estimator_" in dict_:
            if isinstance(dict_["estimator_"], Model):
                dict_["estimator_"] = self._get_config_keras_model(
                    dict_["estimator_"]
                )
        if "estimators_" in dict_:
            for i in range(len(dict_["estimators_"])):
                if isinstance(dict_["estimators_"][i], Model):
                    dict_["estimators_"][i] = self._get_config_keras_model(
                        dict_["estimators_"][i]
                    )
        if "estimator" in dict_:
            if isinstance(dict_["estimator"], Model):
                dict_["estimator"] = self._get_config_keras_model(
                    dict_["estimator"]
                )
        return dict_
    def __setstate__(self, dict_):
        # Unpickling: rebuild any keras Model previously serialized as a
        # config dict by __getstate__.
        if "estimator_" in dict_:
            if isinstance(dict_["estimator_"], dict):
                dict_["estimator_"] = self._from_config_keras_model(
                    dict_["estimator_"]
                )
        if "estimators_" in dict_:
            for i in range(len(dict_["estimators_"])):
                if isinstance(dict_["estimators_"][i], dict):
                    dict_["estimators_"][i] = self._from_config_keras_model(
                        dict_["estimators_"][i]
                    )
        if "estimator" in dict_:
            if isinstance(dict_["estimator"], dict):
                dict_["estimator"] = self._from_config_keras_model(
                    dict_["estimator"]
                )
        self.__dict__ = {k: v for k, v in dict_.items()}
    def _get_config_keras_model(self, model):
        # Serialize a keras Model into a picklable dict: weights (if the
        # model is built), architecture config, class, and — when
        # available — its loss and optimizer configuration.
        if hasattr(model, "input_shape") or model.built:
            weights = model.get_weights()
        else:
            weights = None
        config = model.get_config()
        klass = model.__class__
        config = dict(weights=weights,
                      config=config,
                      klass=klass)
        if hasattr(model, "loss"):
            config["loss"] = model.loss
        if hasattr(model, "optimizer"):
            # Some optimizers cannot report a config; skip them silently.
            try:
                config["optimizer_klass"] = model.optimizer.__class__
                config["optimizer_config"] = model.optimizer.get_config()
            except:
                pass
        return config
    def _from_config_keras_model(self, dict_):
        # Inverse of _get_config_keras_model: rebuild the model, restore
        # weights, and try to re-compile it with the saved loss/optimizer.
        weights = dict_["weights"]
        config = dict_["config"]
        klass = dict_["klass"]
        model = klass.from_config(config)
        if weights is not None:
            model.set_weights(weights)
        if "loss" in dict_ and "optimizer_klass" in dict_:
            loss = dict_["loss"]
            optimizer = dict_["optimizer_klass"].from_config(
                dict_["optimizer_config"])
            try:
                model.compile(loss=loss, optimizer=optimizer)
            except:
                print("Unable to compile model")
        return model
class BaseAdaptDeep(Model, BaseAdapt):
    """
    Base class for deep Adapt models, combining a tensorflow ``Model``
    with the ``BaseAdapt`` parameter-handling mixin. Subclasses hold
    ``encoder``/``task``/``discriminator`` sub-networks and are trained
    jointly on zipped source and target ``tf.data`` pipelines.
    """
    def __init__(self,
                 encoder=None,
                 task=None,
                 discriminator=None,
                 Xt=None,
                 yt=None,
                 verbose=1,
                 copy=True,
                 random_state=None,
                 **params):
        super().__init__()
        # Temporarily disable keras attribute tracking so that plain
        # config attributes set below are not registered as sub-layers
        # or weights of the Model.
        self._self_setattr_tracking = False
        self.encoder = encoder
        self.task = task
        self.discriminator = discriminator
        self.Xt = Xt
        self.yt = yt
        self.verbose = verbose
        self.copy = copy
        self.random_state = random_state
        self._check_params(params)
        for key, value in params.items():
            # Store user "metrics" under a private name to avoid
            # clashing with keras' read-only Model.metrics property.
            if key == "metrics":
                key = "_adapt_metrics"
            setattr(self, key, value)
        self._self_setattr_tracking = True
    def fit(self, X, y=None, Xt=None, yt=None, domains=None, **fit_params):
        """
        Fit Model. Note that ``fit`` does not reset
        the model but extend the training.
        Notice also that the compile method will be called
        if the model has not been compiled yet.
        Parameters
        ----------
        X : array or Tensor
            Source input data.
        y : array or Tensor (default=None)
            Source output data.
        Xt : array (default=None)
            Target input data. If None, the `Xt` argument
            given in `init` is used.
        yt : array (default=None)
            Target input data. Only needed for supervised
            and semi-supervised Adapt model.
            If None, the `yt` argument given in `init` is used.
        domains : array (default=None)
            Vector giving the domain for each source
            data. Can be used for multisource purpose.
        fit_params : key, value arguments
            Arguments given to the fit method of the model
            (epochs, batch_size, callbacks...).
        Returns
        -------
        self : returns an instance of self
        """
        set_random_seed(self.random_state)
        # 1. Get fit params: keep only arguments accepted by keras
        # Model.fit and pop the ones handled manually below.
        fit_params = self._filter_params(super().fit, fit_params)
        verbose = fit_params.get("verbose", 1)
        epochs = fit_params.get("epochs", 1)
        batch_size = fit_params.pop("batch_size", 32)
        shuffle = fit_params.pop("shuffle", True)
        buffer_size = fit_params.pop("buffer_size", None)
        validation_data = fit_params.pop("validation_data", None)
        validation_split = fit_params.pop("validation_split", 0.)
        validation_batch_size = fit_params.get("validation_batch_size", batch_size)
        # 2. Prepare datasets
        ### 2.1 Source
        if not isinstance(X, tf.data.Dataset):
            check_arrays(X, y)
            if len(y.shape) <= 1:
                y = y.reshape(-1, 1)
            # Single source
            if domains is None:
                self.n_sources_ = 1
                dataset_Xs = tf.data.Dataset.from_tensor_slices(X)
                dataset_ys = tf.data.Dataset.from_tensor_slices(y)
            # Multisource: zip one sub-dataset per domain, each repeated
            # so that all domains reach the size of the largest one.
            else:
                domains = self._check_domains(domains)
                self.n_sources_ = int(np.max(domains)+1)
                sizes = [np.sum(domains==dom)
                         for dom in range(self.n_sources_)]
                max_size = np.max(sizes)
                repeats = np.ceil(max_size/sizes)
                dataset_Xs = tf.data.Dataset.zip(tuple(
                    tf.data.Dataset.from_tensor_slices(X[domains==dom]).repeat(repeats[dom])
                    for dom in range(self.n_sources_))
                )
                dataset_ys = tf.data.Dataset.zip(tuple(
                    tf.data.Dataset.from_tensor_slices(y[domains==dom]).repeat(repeats[dom])
                    for dom in range(self.n_sources_))
                )
            dataset_src = tf.data.Dataset.zip((dataset_Xs, dataset_ys))
        else:
            dataset_src = X
        ### 2.2 Target (labels optional)
        Xt, yt = self._get_target_data(Xt, yt)
        if not isinstance(Xt, tf.data.Dataset):
            if yt is None:
                check_array(Xt, ensure_2d=True, allow_nd=True)
                dataset_tgt = tf.data.Dataset.from_tensor_slices(Xt)
            else:
                check_arrays(Xt, yt)
                if len(yt.shape) <= 1:
                    yt = yt.reshape(-1, 1)
                dataset_Xt = tf.data.Dataset.from_tensor_slices(Xt)
                dataset_yt = tf.data.Dataset.from_tensor_slices(yt)
                dataset_tgt = tf.data.Dataset.zip((dataset_Xt, dataset_yt))
        else:
            dataset_tgt = Xt
        # 3. Initialize networks on the first call to fit, inferring the
        # input shape from the target data (dropping the batch dimension
        # when the dataset is already batched).
        if not hasattr(self, "_is_fitted"):
            self._is_fitted = True
            self._initialize_networks()
            if isinstance(Xt, tf.data.Dataset):
                first_elem = next(iter(Xt))
                if not isinstance(first_elem, tuple):
                    shape = first_elem.shape
                else:
                    shape = first_elem[0].shape
                if self._check_for_batch(Xt):
                    shape = shape[1:]
            else:
                shape = Xt.shape[1:]
            self._initialize_weights(shape)
        # 3.5 Get datasets length
        self.length_src_ = self._get_length_dataset(dataset_src, domain="src")
        self.length_tgt_ = self._get_length_dataset(dataset_tgt, domain="tgt")
        # 4. Prepare validation dataset: carve a fraction of the source
        # data out when validation_split is used without validation_data.
        if validation_data is None and validation_split>0.:
            if shuffle:
                # Non-reshuffling so take/skip give a fixed split.
                dataset_src = dataset_src.shuffle(buffer_size=self.length_src_,
                                                  reshuffle_each_iteration=False)
            frac = int(self.length_src_*validation_split)
            validation_data = dataset_src.take(frac)
            dataset_src = dataset_src.skip(frac)
            if not self._check_for_batch(validation_data):
                validation_data = validation_data.batch(validation_batch_size)
        if validation_data is not None:
            if isinstance(validation_data, tf.data.Dataset):
                if not self._check_for_batch(validation_data):
                    validation_data = validation_data.batch(validation_batch_size)
        # 5. Set datasets
        # Same length for src and tgt + complete last batch + shuffle
        if shuffle:
            if buffer_size is None:
                dataset_src = dataset_src.shuffle(buffer_size=self.length_src_,
                                                  reshuffle_each_iteration=True)
                dataset_tgt = dataset_tgt.shuffle(buffer_size=self.length_tgt_,
                                                  reshuffle_each_iteration=True)
            else:
                dataset_src = dataset_src.shuffle(buffer_size=buffer_size,
                                                  reshuffle_each_iteration=True)
                dataset_tgt = dataset_tgt.shuffle(buffer_size=buffer_size,
                                                  reshuffle_each_iteration=True)
        # Pad both datasets (via repeat) up to a common size that is a
        # multiple of batch_size so every batch is complete.
        # NOTE(review): max_size/repeat_* are numpy floats here and are
        # passed to repeat/take — presumably cast by tf.data; confirm.
        max_size = max(self.length_src_, self.length_tgt_)
        max_size = np.ceil(max_size / batch_size) * batch_size
        repeat_src = np.ceil(max_size/self.length_src_)
        repeat_tgt = np.ceil(max_size/self.length_tgt_)
        dataset_src = dataset_src.repeat(repeat_src).take(max_size)
        dataset_tgt = dataset_tgt.repeat(repeat_tgt).take(max_size)
        self.total_steps_ = float(np.ceil(max_size/batch_size)*epochs)
        # 6. Pretraining (optional): run a first training phase with the
        # "pretrain__" prefixed compile/fit parameters, accumulating its
        # history in pretrain_history_.
        if not hasattr(self, "pretrain_"):
            if not hasattr(self, "pretrain"):
                self.pretrain_ = False
            else:
                self.pretrain_ = self.pretrain
        if self.pretrain_:
            if self._is_compiled:
                warnings.warn("The model has already been compiled. "
                              "To perform pretraining, the model will be "
                              "compiled again. Please make sure to pass "
                              "the compile parameters in __init__ to avoid errors.")
            compile_params = self._filter_params(super().compile, prefix="pretrain")
            self.compile(**compile_params)
            if not hasattr(self, "pretrain_history_"):
                self.pretrain_history_ = {}
            prefit_params = self._filter_params(super().fit, prefix="pretrain")
            pre_verbose = prefit_params.pop("verbose", verbose)
            pre_epochs = prefit_params.pop("epochs", epochs)
            pre_batch_size = prefit_params.pop("batch_size", batch_size)
            prefit_params.pop("validation_data", None)
            # !!! shuffle is already done
            dataset = tf.data.Dataset.zip((dataset_src, dataset_tgt))
            if not self._check_for_batch(dataset):
                dataset = dataset.batch(pre_batch_size)
            hist = super().fit(dataset, validation_data=validation_data,
                               epochs=pre_epochs, verbose=pre_verbose, **prefit_params)
            for k, v in hist.history.items():
                self.pretrain_history_[k] = self.pretrain_history_.get(k, []) + v
            self._initialize_pretain_networks()
        # 7. Compile (or re-compile after pretraining)
        if (not self._is_compiled) or (self.pretrain_):
            self.compile()
        if not hasattr(self, "history_"):
            self.history_ = {}
        # 8. Training on the zipped (source, target) dataset; history is
        # accumulated across successive fit calls.
        dataset = tf.data.Dataset.zip((dataset_src, dataset_tgt))
        if not self._check_for_batch(dataset):
            dataset = dataset.batch(batch_size)
        self.pretrain_ = False
        hist = super().fit(dataset, validation_data=validation_data, **fit_params)
        for k, v in hist.history.items():
            self.history_[k] = self.history_.get(k, []) + v
        return self
def compile(self,
            optimizer=None,
            loss=None,
            metrics=None,
            loss_weights=None,
            weighted_metrics=None,
            run_eagerly=None,
            steps_per_execution=None,
            **kwargs):
    """
    Configures the model for training.

    Adapt-specific behaviour on top of ``tf.keras.Model.compile``:

    - ``metrics`` may be a dict with keys ``"disc"`` and ``"task"`` to give
      the discriminator and the task separate metric lists; a plain list is
      applied to both.
    - Discriminator metrics are resolved and stored in ``self.disc_metrics``
      under short abbreviated names.
    - When no optimizer / loss is given, ``RMSprop()`` and ``"mse"`` are used.
    - ``optimizer_enc`` and ``optimizer_disc`` attributes default to the
      task optimizer if not already set.

    Parameters
    ----------
    optimizer : str or `tf.keras.optimizers.Optimizer` instance (default=None)
        Optimizer (default ``RMSprop``).

    loss : str or `tf.keras.losses.Loss` instance (default=None)
        Task loss (default ``"mse"``).

    metrics : list or dict (default=None)
        Metrics, either one list for both networks or a dict
        ``{"disc": [...], "task": [...]}``.

    loss_weights, weighted_metrics, run_eagerly, steps_per_execution :
        Forwarded unchanged to ``tf.keras.Model.compile``; see the Keras
        documentation for details.

    **kwargs : key, value arguments
        Arguments supported for backwards compatibility only.

    Returns
    -------
    None
    """
    # Metrics recorded by a previous `_adapt_metrics` assignment take
    # precedence when the caller gives none.
    if hasattr(self, "_adapt_metrics") and metrics is None:
        metrics = self._adapt_metrics
    if not isinstance(metrics, (list, dict, type(None))):
        raise ValueError("`metrics` argument should be an instance "
                         "of dict or list")
    if isinstance(metrics, dict):
        metrics_disc = metrics.get("disc")
        metrics_task = metrics.get("task")
    else:
        metrics_disc = metrics
        metrics_task = metrics
    if metrics_disc is None:
        metrics_disc = []
    self.disc_metrics = [tf.keras.metrics.get(m) for m in metrics_disc]
    # Abbreviate each discriminator metric name: initials of each
    # underscore-separated word, or the first 3 letters otherwise.
    for metric, i in zip(self.disc_metrics,
                         range(len(self.disc_metrics))):
        if hasattr(metric, "name"):
            name = metric.name
        elif hasattr(metric, "__name__"):
            name = metric.__name__
        elif hasattr(metric, "__class__"):
            name = metric.__class__.__name__
        else:
            name = "met"
        if "_" in name:
            new_name = ""
            for split in name.split("_"):
                if len(split) > 0:
                    new_name += split[0]
            name = new_name
        else:
            name = name[:3]
        metric.name = name
        # Accuracy variants are replaced by the custom `accuracy` helper,
        # which handles proba vectors in binary and multiclass settings.
        # NOTE(review): this mutates the shared module-level `accuracy`
        # function object (sets its `name` attribute).
        if metric.name in ["acc", "Acc", "accuracy", "Accuracy"]:
            self.disc_metrics[i] = accuracy
            self.disc_metrics[i].name = "acc"
    compile_params = dict(
        optimizer=optimizer,
        loss=loss,
        metrics=metrics_task,
        loss_weights=loss_weights,
        weighted_metrics=weighted_metrics,
        run_eagerly=run_eagerly,
        steps_per_execution=steps_per_execution,
    )
    # Drop unset arguments, then merge in any params recorded on `self`
    # that match the keras `compile` signature.
    compile_params = {k: v for k, v in compile_params.items() if v is not None}
    compile_params = self._filter_params(super().compile, compile_params)
    if ((not "optimizer" in compile_params) or
            (compile_params["optimizer"] is None)):
        compile_params["optimizer"] = RMSprop()
    else:
        if optimizer is None:
            # Rebuild the recorded optimizer with any optimizer-specific
            # params stored on `self` (e.g. learning rate overrides).
            if not isinstance(compile_params["optimizer"], str):
                optim_params = self._filter_params(
                    compile_params["optimizer"].__init__)
                if len(optim_params) > 0:
                    kwargs = compile_params["optimizer"].get_config()
                    kwargs.update(optim_params)
                    optimizer = compile_params["optimizer"].__class__(**kwargs)
                else:
                    optimizer = compile_params["optimizer"]
                compile_params["optimizer"] = optimizer
    if not "loss" in compile_params:
        compile_params["loss"] = "mse"
    self.task_loss_ = tf.keras.losses.get(compile_params["loss"])
    super().compile(
        **compile_params
    )
    # Set optimizer for encoder and discriminator
    if not hasattr(self, "optimizer_enc"):
        self.optimizer_enc = self.optimizer
    if not hasattr(self, "optimizer_disc"):
        self.optimizer_disc = self.optimizer
def call(self, inputs):
    """Forward pass: encode the inputs, then apply the task network."""
    return self.task_(self.encoder_(inputs))
def train_step(self, data):
    """
    Perform one gradient step on the source batch only.

    ``data`` is a pair of (source, target) batches as zipped in ``fit``.
    The target batch ``(Xt, yt)`` is unpacked but intentionally unused:
    this base implementation trains the task on source labels alone,
    and adaptation subclasses override this method.
    """
    # Unpack the data.
    Xs, Xt, ys, yt = self._unpack_data(data)
    # Run forward pass.
    with tf.GradientTape() as tape:
        y_pred = self(Xs, training=True)
        loss = self.compiled_loss(
            ys, y_pred, regularization_losses=self.losses)
        loss = tf.reduce_mean(loss)
    # Run backwards pass.
    gradients = tape.gradient(loss, self.trainable_variables)
    self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
    self.compiled_metrics.update_state(ys, y_pred)
    # Collect metrics to return
    return_metrics = {}
    for metric in self.metrics:
        result = metric.result()
        if isinstance(result, dict):
            return_metrics.update(result)
        else:
            return_metrics[metric.name] = result
    return return_metrics
def predict(self,
            x,
            batch_size=None,
            verbose=0,
            steps=None,
            callbacks=None,
            max_queue_size=10,
            workers=1,
            use_multiprocessing=False):
    """
    Generates output predictions for the input samples.

    Thin wrapper forwarding every argument unchanged to
    ``tf.keras.Model.predict``; see the Keras documentation for the full
    semantics. For small inputs that fit in one batch, calling the model
    directly (``model(x)``) is faster.

    Parameters
    ----------
    x : array
        Input samples.

    batch_size : int (default=None)
        Number of samples per batch (Keras default: 32). Do not specify
        for dataset/generator/`keras.utils.Sequence` inputs.

    verbose : int (default=0)
        Verbosity mode, 0 or 1.

    steps : int (default=None)
        Total number of prediction steps; ``None`` runs until the input
        dataset is exhausted.

    callbacks : list of `keras.callbacks.Callback` (default=None)
        Callbacks to apply during prediction.

    max_queue_size : int (default=10)
        Generator queue size (generator/Sequence input only).

    workers : int (default=1)
        Max number of processes (generator/Sequence input only).

    use_multiprocessing : bool (default=False)
        Use process-based threading (generator/Sequence input only).

    Returns
    -------
    y_pred : array
        Numpy array(s) of predictions.
    """
    return super().predict(x,
                           batch_size=batch_size,
                           verbose=verbose,
                           steps=steps,
                           callbacks=callbacks,
                           max_queue_size=max_queue_size,
                           workers=workers,
                           use_multiprocessing=use_multiprocessing)
def transform(self, X):
    """
    Return the encoded features of X.

    Parameters
    ----------
    X : array
        input data

    Returns
    -------
    X_enc : array
        predictions of encoder network
    """
    # Uses the encoder's (possibly overridden fast) `predict`.
    return self.encoder_.predict(X)
def predict_disc(self, X):
    """
    Return predictions of the discriminator on the encoded features.

    Parameters
    ----------
    X : array
        input data

    Returns
    -------
    y_disc : array
        predictions of discriminator network
    """
    # Encode first, then apply the discriminator head.
    return self.discriminator_.predict(self.transform(X))
def predict_task(self, X):
    """
    Return predictions of the task on the encoded features.

    Parameters
    ----------
    X : array
        input data

    Returns
    -------
    y_task : array
        predictions of task network
    """
    # Encode first, then apply the task head.
    return self.task_.predict(self.transform(X))
def score(self, X, y, sample_weight=None):
    """
    Return the evaluation of the model on X, y.

    Calls ``evaluate`` on the tensorflow Model; when X is small enough
    (at most 1e8 elements) the whole set is evaluated as a single batch.
    If ``evaluate`` returns several values (loss + metrics), only the
    first one (the loss) is returned.

    Parameters
    ----------
    X : array
        input data

    y : array
        output data

    sample_weight : array (default=None)
        Sample weights

    Returns
    -------
    score : float
        Score.
    """
    eval_kwargs = {"sample_weight": sample_weight}
    if hasattr(X, "shape") and np.prod(X.shape) <= 10**8:
        # Small input: evaluate in a single batch.
        eval_kwargs["batch_size"] = len(X)
    result = self.evaluate(X, y, **eval_kwargs)
    return result[0] if isinstance(result, (tuple, list)) else result
# def _check_validation_data(self, validation_data, batch_size, shuffle):
# if isinstance(validation_data, tuple):
# X_val = validation_data[0]
# y_val = validation_data[1]
# validation_data = tf.data.Dataset.zip(
# (tf.data.Dataset.from_tensor_slices(X_val),
# tf.data.Dataset.from_tensor_slices(y_val))
# )
# if shuffle:
# validation_data = validation_data.shuffle(buffer_size=1024).batch(batch_size)
# else:
# validation_data = validation_data.batch(batch_size)
# return validation_data
def _get_legal_params(self, params):
    """
    Build the list of parameter names this object accepts.

    Names are collected from the signatures of ``__init__``,
    ``Model.fit`` and ``Model.compile``, plus the optimizer constructor
    when a non-string optimizer is available. When ``pretrain`` is a
    legal parameter, fit/compile (and the pretrain optimizer) parameter
    names are also accepted with a ``"pretrain__"`` prefix.
    """
    legal_params_fct = [self.__init__, super().fit, super().compile]
    # The optimizer given in `params` takes precedence over `self.optimizer`.
    if "optimizer" in params:
        optimizer = params["optimizer"]
    elif hasattr(self, "optimizer"):
        optimizer = self.optimizer
    else:
        optimizer = None
    if (optimizer is not None) and (not isinstance(optimizer, str)):
        legal_params_fct.append(optimizer.__init__)
    # Extra adapt-specific parameters, always legal.
    legal_params = ["domain", "val_sample_size", "optimizer_enc", "optimizer_disc"]
    for func in legal_params_fct:
        # Keep named parameters only (drop `self` and **kwargs).
        args = [
            p.name
            for p in inspect.signature(func).parameters.values()
            if p.name != "self" and p.kind != p.VAR_KEYWORD
        ]
        legal_params = legal_params + args
    if "pretrain" in legal_params:
        legal_params_fct = [super().fit, super().compile]
        if "pretrain__optimizer" in params:
            if not isinstance(params["pretrain__optimizer"], str):
                legal_params_fct.append(params["pretrain__optimizer"].__init__)
        for func in legal_params_fct:
            args = [
                p.name
                for p in inspect.signature(func).parameters.values()
                if p.name != "self" and p.kind != p.VAR_KEYWORD
            ]
            legal_params = legal_params + ["pretrain__"+name for name in args]
    return legal_params
def _initialize_weights(self, shape_X):
    """Build the sub-networks by passing a zero batch of shape (1, *shape_X)."""
    # Calling the model triggers `call`, building encoder_ and task_.
    self(np.zeros((1,) + shape_X))
    if hasattr(self, "encoder_"):
        X_enc = self.encoder_(np.zeros((1,) + shape_X))
        # The discriminator operates on the encoded space, so it is
        # built from the encoder output.
        if hasattr(self, "discriminator_"):
            self.discriminator_(X_enc)
def _get_length_dataset(self, dataset, domain="src"):
try:
length = len(dataset)
except:
if self.verbose:
print("Computing %s dataset size..."%domain)
if not hasattr(self, "length_%s_"%domain):
length = 0
for _ in dataset:
length += 1
else:
length = getattr(self, "length_%s_"%domain)
if self.verbose:
print("Done!")
return length
def _check_for_batch(self, dataset):
if dataset.__class__.__name__ == "BatchDataset":
return True
if hasattr(dataset, "_input_dataset"):
return self._check_for_batch(dataset._input_dataset)
elif hasattr(dataset, "_datasets"):
checks = []
for data in dataset._datasets:
checks.append(self._check_for_batch(data))
return np.all(checks)
else:
return False
def _unpack_data(self, data):
data_src = data[0]
data_tgt = data[1]
Xs = data_src[0]
ys = data_src[1]
if isinstance(data_tgt, tuple):
Xt = data_tgt[0]
yt = data_tgt[1]
return Xs, Xt, ys, yt
else:
Xt = data_tgt
return Xs, Xt, ys, None
def _get_disc_metrics(self, ys_disc, yt_disc):
disc_dict = {}
return disc_dict
def _initialize_networks(self):
    """Instantiate (or check and copy) the encoder, task and discriminator networks."""
    # For each sub-network: fall back to the library default when no
    # custom network is given; otherwise validate (and optionally copy)
    # the user-provided one.
    if self.encoder is None:
        self.encoder_ = get_default_encoder(name="encoder", state=self.random_state)
    else:
        self.encoder_ = check_network(self.encoder,
                                      copy=self.copy,
                                      name="encoder")
    if self.task is None:
        self.task_ = get_default_task(name="task", state=self.random_state)
    else:
        self.task_ = check_network(self.task,
                                   copy=self.copy,
                                   name="task")
    if self.discriminator is None:
        self.discriminator_ = get_default_discriminator(name="discriminator", state=self.random_state)
    else:
        self.discriminator_ = check_network(self.discriminator,
                                            copy=self.copy,
                                            name="discriminator")
def _initialize_pretain_networks(self):
    # Hook called at the end of pretraining; subclasses may rebuild or
    # reset sub-networks here. NOTE(review): "pretain" looks like a typo
    # for "pretrain", but renaming would break overriding subclasses.
    pass
| 60,667 | 35.111905 | 106 | py |
adapt | adapt-master/adapt/utils.py | """
Utility functions for adapt package.
"""
import warnings
import inspect
from copy import deepcopy
import numpy as np
from sklearn.datasets import make_classification
from sklearn.utils import check_array
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier, KerasRegressor
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras import Sequential, Model
from tensorflow.keras.layers import Layer, Dense, Flatten, Input
from tensorflow.keras.models import clone_model
from tensorflow.keras.initializers import GlorotUniform
class UpdateLambda(tf.keras.callbacks.Callback):
    """
    Update Lambda trade-off

    This Callback increases the ``lambda_`` trade-off parameter
    of the attached model at each batch.

    The trade-off is increased from ``lambda_init`` to ``lambda_max``
    in ``max_steps`` number of gradient steps according to the
    following formula:

    ``lambda_`` = A * [ 2/(1 + exp(-``gamma`` * p)) - 1. ] + B

    With p increasing from 0 to 1 and A, B two constants.

    Parameters
    ----------
    lambda_init : float (default=0.)
        Initial trade-off

    lambda_max : float (default=1.)
        Trade-off after ``max_steps`` gradient updates.

    max_steps : int (default=1000)
        Number of gradient updates before getting ``lambda_max``

    gamma : float (default=1.)
        Speed factor. High ``gamma`` will increase the speed of
        ``lambda_`` increment.
    """

    def __init__(self, lambda_init=0., lambda_max=1., max_steps=1000, gamma=1.):
        # BUG FIX: the base Callback.__init__ was never called, leaving
        # Callback-internal state uninitialized.
        super().__init__()
        self.lambda_init = lambda_init
        self.lambda_max = lambda_max
        self.max_steps = max_steps
        self.gamma = gamma
        self.steps = 0.

    def on_batch_end(self, batch, logs=None):
        """Advance the schedule one step and write the value to ``model.lambda_``."""
        self.steps += 1.
        progress = min(self.steps / self.max_steps, 1.)
        # Sigmoid ramp, normalized so it goes from 0 (progress=0) to 1
        # (progress=1) ...
        lambda_ = 2. / (1. + tf.exp(-self.gamma * progress)) - 1.
        lambda_ /= (2 / (1. + tf.exp(-self.gamma)) - 1.)
        # ... then rescaled to the [lambda_init, lambda_max] range.
        lambda_ *= (self.lambda_max - self.lambda_init)
        lambda_ += self.lambda_init
        self.model.lambda_.assign(lambda_)
def accuracy(y_true, y_pred):
    """
    Custom accuracy function which can handle
    probas vector in both binary and multi classification

    Parameters
    ----------
    y_true : Tensor
        True tensor.

    y_pred : Tensor
        Predicted tensor.

    Returns
    -------
    Boolean Tensor
    """
    # TODO: accuracy can't handle 1D ys.
    # Reduce y_true to a 1D label vector: threshold at 0.5 when there is
    # a single proba column, argmax when there are several.
    multi_columns_t = K.cast(K.greater(K.shape(y_true)[1], 1),
                             "float32")
    binary_t = K.reshape(K.sum(K.cast(K.greater(y_true, 0.5),
                                      "float32"), axis=-1), (-1,))
    multi_t = K.reshape(K.cast(K.argmax(y_true, axis=-1),
                               "float32"), (-1,))
    y_true = ((1 - multi_columns_t) * binary_t +
              multi_columns_t * multi_t)
    # Same reduction for y_pred.
    multi_columns_p = K.cast(K.greater(K.shape(y_pred)[1], 1),
                             "float32")
    binary_p = K.reshape(K.sum(K.cast(K.greater(y_pred, 0.5),
                                      "float32"), axis=-1), (-1,))
    multi_p = K.reshape(K.cast(K.argmax(y_pred, axis=-1),
                               "float32"), (-1,))
    y_pred = ((1 - multi_columns_p) * binary_p +
              multi_columns_p * multi_p)
    # Delegate the final comparison to the standard keras accuracy.
    return tf.keras.metrics.get("acc")(y_true, y_pred)
def predict(self, x, **kwargs):
    """
    Fast predict method bound onto checked networks (see ``check_network``).

    For inputs with at most 1e8 elements the network is called directly,
    avoiding the overhead of ``keras.Model.predict``; larger or
    shape-less inputs fall back to the standard batched predict.
    """
    if hasattr(x, "shape") and (np.prod(x.shape) <= 10**8):
        pred = self.__call__(tf.identity(x)).numpy()
    else:
        pred = Sequential.predict(self, x, **kwargs)
    return pred
def check_arrays(X, y, **kwargs):
    """
    Validate input and output arrays and check that their
    lengths match.

    X is validated with ``ensure_2d=True`` (scalar or 1D inputs are
    rejected by sklearn's ``check_array`` rather than reshaped) and
    y with ``ensure_2d=False``.

    Parameters
    ----------
    X : numpy array
        Input data.

    y : numpy array
        Output data.

    Returns
    -------
    X, y

    Raises
    ------
    ValueError
        If X and y have different lengths or fail sklearn validation.
    """
    X = check_array(X, ensure_2d=True, allow_nd=True, **kwargs)
    y = check_array(y, ensure_2d=False, allow_nd=True, dtype=None, **kwargs)
    if X.shape[0] != y.shape[0]:
        raise ValueError("Length of X and y mismatch: %i != %i"%
                         (X.shape[0],y.shape[0]))
    return X, y
def check_estimator(estimator=None, copy=True,
                    name=None,
                    display_name="estimator",
                    task=None,
                    force_copy=False):
    """
    Check estimator.

    Check that ``estimator`` is a sklearn ``BaseEstimator``, a keras
    scikit-learn wrapper or a tensorflow ``Model``, and return it
    (optionally as a copy).

    Parameters
    ----------
    estimator : sklearn BaseEstimator or tensorflow Model (default=None)
        Estimator. If ``None`` a LinearRegression instance
        or a LogisticRegression instance is returned
        depending on the ``task`` argument.

    copy : boolean (default=True)
        Whether to return a copy of the estimator or not.
        If cloning fail, a warning is raised.

    name : str (default=None)
        Name given to the network when ``estimator`` is a
        tensorflow Model (forwarded to ``check_network``).

    display_name: str (default="estimator")
        Name to display if an error or warning is raised

    task : str (default=None)
        Task at hand. Possible value :
        (``None``, ``"reg"``, ``"class"``)

    force_copy : boolean (default=False)
        If True, an error is raised if the cloning failed.

    Returns
    -------
    estimator : checked (and possibly copied) estimator
    """
    if estimator is None:
        # Default estimators depending on the requested task.
        if task == "class":
            estimator = LogisticRegression()
        else:
            estimator = LinearRegression()
    # TODO, add KerasWrappers in doc and error message
    if isinstance(estimator, (BaseEstimator, KerasClassifier, KerasRegressor)):
        # Reject estimators whose sklearn mixin contradicts the task.
        if (isinstance(estimator, ClassifierMixin) and task=="reg"):
            raise ValueError("`%s` argument is a sklearn `ClassifierMixin` instance "
                             "whereas the considered object handles only regression task. "
                             "Please provide a sklearn `RegressionMixin` instance or a "
                             "tensorflow Model instance."%display_name)
        if (isinstance(estimator, RegressorMixin) and task=="class"):
            raise ValueError("`%s` argument is a sklearn `RegressionMixin` instance "
                             "whereas the considered object handles only classification task. "
                             "Please provide a sklearn `ClassifierMixin` instance or a "
                             "tensorflow Model instance."%display_name)
        if copy:
            try:
                if isinstance(estimator, (KerasClassifier, KerasRegressor)):
                    # TODO, copy fitted parameters and Model
                    new_estimator = clone(estimator)
                else:
                    new_estimator = deepcopy(estimator)
            except Exception as e:
                # Cloning failure is fatal only with force_copy; otherwise
                # fall back to the original object with a warning.
                if force_copy:
                    raise ValueError("`%s` argument can't be duplicated. "
                                     "Recorded exception: %s. "%
                                     (display_name, e))
                else:
                    warnings.warn("`%s` argument can't be duplicated. "
                                  "Recorded exception: %s. "
                                  "The current estimator will be used. "
                                  "Use `copy=False` to hide this warning."%
                                  (display_name, e))
                    new_estimator = estimator
        else:
            new_estimator = estimator
    elif isinstance(estimator, Model):
        # Tensorflow networks have their own check/copy path.
        new_estimator = check_network(network=estimator,
                                      copy=copy,
                                      name=name,
                                      force_copy=force_copy)
    else:
        raise ValueError("`%s` argument is neither a sklearn `BaseEstimator` "
                         "instance nor a tensorflow Model instance. "
                         "Given argument, %s"%
                         (display_name, str(estimator)))
    return new_estimator
def check_network(network, copy=True,
                  name=None,
                  force_copy=False):
    """
    Check if the given network is a tensorflow Model.
    If ``copy`` is ``True``, a copy of the network is
    returned if possible.

    The returned network also gets the fast module-level ``predict``
    bound onto it.

    Parameters
    ----------
    network : tensorflow Model
        Network to check.

    copy : boolean (default=True)
        Whether to return a copy of the network or not.
        If cloning fail, a warning is raised.

    name : str (default=None)
        Name for the network.

    force_copy : boolean (default=False)
        If True, an error is raised if the cloning failed.

    Returns
    -------
    network : tensorflow Model
        Checked (and possibly copied) network.
    """
    if not isinstance(network, Model):
        raise ValueError('Expected `network` argument '
                         'to be a `Model` instance, got: %s'%str(network))
    if copy:
        try:
            # TODO, be carefull of network with weights
            # but no input_shape
            if hasattr(network, "input_shape"):
                # Built model: clone the architecture on a fresh Input
                # and transfer the weights.
                shape = network.input_shape[1:]
                new_network = clone_model(network, input_tensors=Input(shape))
                new_network.set_weights(network.get_weights())
            elif network.built:
                # Built but without `input_shape`: recover the shape from
                # the private build record.
                shape = network._build_input_shape[1:]
                new_network = clone_model(network, input_tensors=Input(shape))
                new_network.set_weights(network.get_weights())
            else:
                # Unbuilt model: clone the architecture only.
                new_network = clone_model(network)
        except Exception as e:
            if force_copy:
                raise ValueError("`network` argument can't be duplicated. "
                                 "Recorded exception: %s. "%str(e))
            else:
                warnings.warn("`network` argument can't be duplicated. "
                              "Recorded exception: %s. "
                              "The current network will be used. "
                              "Use `copy=False` to hide this warning."%str(e))
                new_network = network
    else:
        new_network = network
    if name is not None:
        new_network._name = name
    # Override the predict method to speed the prediction for small dataset
    new_network.predict = predict.__get__(new_network)
    return new_network
def get_default_encoder(name=None, state=None):
    """
    Build the default encoder: Flatten followed by one
    Dense(10, relu) layer.

    Parameters
    ----------
    name : str (default=None)
        Model name.

    state : int (default=None)
        Optional seed for the GlorotUniform kernel initializer.

    Returns
    -------
    tensorflow Model
    """
    model = Sequential(name=name)
    model.add(Flatten())
    if state is None:
        model.add(Dense(10, activation="relu"))
    else:
        # Seeded initializer for reproducible weights.
        model.add(Dense(10, activation="relu",
                        kernel_initializer=GlorotUniform(seed=state)))
    return model
def get_default_task(activation=None, name=None, state=None):
    """
    Return the default task network: Flatten, two hidden
    Dense(10, relu) layers and a one-unit output layer.

    Parameters
    ----------
    activation : str (default=None)
        Final activation

    name : str (default=None)
        Model name.

    state : int (default=None)
        Optional seed for the GlorotUniform kernel initializers.

    Returns
    -------
    tensorflow Model
    """
    model = Sequential(name=name)
    model.add(Flatten())
    if state is not None:
        model.add(Dense(10, activation="relu",
                        kernel_initializer=GlorotUniform(seed=state)))
        model.add(Dense(10, activation="relu",
                        kernel_initializer=GlorotUniform(seed=state)))
        # BUG FIX: the seeded branch previously dropped `activation` on
        # the output layer, unlike the unseeded branch below.
        model.add(Dense(1, activation=activation,
                        kernel_initializer=GlorotUniform(seed=state)))
    else:
        model.add(Dense(10, activation="relu"))
        model.add(Dense(10, activation="relu"))
        model.add(Dense(1, activation=activation))
    return model
def get_default_discriminator(name=None, state=None):
    """
    Build the default discriminator: Flatten, two hidden
    Dense(10, relu) layers and a Dense(1, sigmoid) output.

    Parameters
    ----------
    name : str (default=None)
        Model name.

    state : int (default=None)
        Optional seed for the GlorotUniform kernel initializers.

    Returns
    -------
    tensorflow Model
    """
    model = Sequential(name=name)
    model.add(Flatten())
    if state is None:
        model.add(Dense(10, activation="relu"))
        model.add(Dense(10, activation="relu"))
        model.add(Dense(1, activation="sigmoid"))
    else:
        # Each layer gets its own seeded initializer for reproducibility.
        for units, act in ((10, "relu"), (10, "relu"), (1, "sigmoid")):
            model.add(Dense(units, activation=act,
                            kernel_initializer=GlorotUniform(seed=state)))
    return model
@tf.custom_gradient
def _grad_handler(x, lambda_):
    # Identity in the forward pass; in the backward pass the incoming
    # gradient is scaled by `lambda_` (and no gradient flows to
    # `lambda_` itself).
    y = tf.identity(x)
    def custom_grad(dy):
        return (lambda_ * dy, 0. * lambda_)
    return y, custom_grad
class GradientHandler(Layer):
    """
    Multiply gradients with a scalar during backpropagation.
    Act as identity in forward step.

    Parameters
    ----------
    lambda_init : float (default=1.)
        Scalar multiplier
    """
    def __init__(self, lambda_init=1., name="g_handler"):
        super().__init__(name=name)
        self.lambda_init=lambda_init
        # Non-trainable variable so the multiplier can be updated
        # externally without being touched by the optimizer.
        self.lambda_ = tf.Variable(lambda_init,
                                   trainable=False,
                                   dtype="float32")

    def call(self, x):
        """
        Call gradient handler.

        Parameters
        ----------
        x: object
            Inputs

        Returns
        -------
        x, custom gradient function
        """
        return _grad_handler(x, self.lambda_)

    def get_config(self):
        """
        Return config dictionnary.

        Returns
        -------
        dict
        """
        # Only `lambda_init` is serialized; the current value of
        # `lambda_` is not part of the config.
        config = super().get_config().copy()
        config.update({
            'lambda_init': self.lambda_init
        })
        return config
def make_classification_da(n_samples=100,
                           n_features=2,
                           random_state=2):
    """
    Generate a classification dataset for DA.

    Two 2-class datasets are drawn with sklearn's ``make_classification``
    (``shuffle=False`` keeps each class contiguous); the target domain is
    shifted on its two first features and its labels re-assigned by
    position, then each feature is min-max scaled per domain.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        Size of source and target samples.

    n_features : int, optional (default=2)
        Number of features.

    random_state: int, optional (default=2)
        Random state number.

    Returns
    -------
    Xs : numpy array
        Source input data

    ys : numpy array
        Source output data

    Xt : numpy array
        Target input data

    yt : numpy array
        Target output data
    """
    np.random.seed(random_state)
    Xs, ys = make_classification(n_samples=n_samples, n_features=n_features,
                                 n_informative=n_features,
                                 n_redundant=0, n_repeated=0,
                                 n_clusters_per_class=1, n_classes=2,
                                 shuffle=False)
    Xt, yt = make_classification(n_samples=n_samples, n_features=n_features,
                                 n_informative=n_features,
                                 n_redundant=0, n_repeated=0,
                                 n_clusters_per_class=1, n_classes=2,
                                 shuffle=False)
    # Relabel the target by position (classes are contiguous since
    # shuffle=False) and shift its first two features to create the
    # domain gap.
    yt[:int(n_samples/2)] = 1; yt[int(n_samples/2):] = 0
    Xt[:, 0] += 1; Xt[:, 1] += 0.5;
    # Min-max scale every feature, independently in each domain.
    for i in range(n_features):
        Xs[:, i] = (Xs[:, i]-np.min(Xs[:, i]))/np.max(Xs[:, i]-np.min(Xs[:, i]))
        Xt[:, i] = (Xt[:, i]-np.min(Xt[:, i]))/np.max(Xt[:, i]-np.min(Xt[:, i]))
    return Xs, ys, Xt, yt
def make_regression_da(n_samples=100,
                       n_features=1,
                       random_state=0):
    """
    Generate a regression dataset pair for DA.

    Source inputs are drawn uniformly on [-2, 2] (and sorted) and target
    inputs on [2, 4.5]; outputs follow two different polynomial trends
    plus Gaussian noise. The target is standardized with the source
    statistics, then both domains are jointly centered and scaled.

    Parameters
    ----------
    n_samples : int (default=100)
        Size of source and target samples.

    n_features : int (default=1)
        Sample dimension.

    random_state: int (default=0)
        Random state number.

    Returns
    -------
    Xs : numpy array
        Source input data

    ys : numpy array
        Source output data

    Xt : numpy array
        Target input data

    yt : numpy array
        Target output data
    """
    np.random.seed(random_state)
    # The order of the random draws is part of the contract: it fixes
    # the generated values for a given seed.
    Xs = np.sort(np.random.uniform(size=(n_samples, n_features)) * 4 - 2)
    Xt = np.random.uniform(size=(n_samples, n_features)) * 2.5 + 2
    ys = Xs[:, 0] + 0.1 * Xs[:, 0] ** 5 + np.random.randn(n_samples) * 0.2 + 1
    yt = Xt[:, 0] + 0.1 * (Xt[:, 0] - 2) ** 4 + np.random.randn(n_samples) * 0.4 + 1
    # Standardize the target (then the source) with the source statistics.
    x_mean, x_std = Xs.mean(0), Xs.std(0)
    y_mean, y_std = ys.mean(), ys.std()
    Xt = (Xt - x_mean) / x_std
    yt = (yt - y_mean) / (2 * y_std)
    Xs = (Xs - x_mean) / x_std
    ys = (ys - y_mean) / (2 * y_std)
    # Finally, center and scale both domains together.
    X = np.concatenate((Xs, Xt))
    mu, sd = X.mean(0), X.std(0)
    Xs = ((Xs - mu) / sd) / 3
    Xt = ((Xt - mu) / sd) / 3
    return Xs, ys, Xt, yt
def check_sample_weight(sample_weight, X):
    """
    Check sample weights.

    Validates that ``sample_weight`` has the same length as X and
    contains only non-negative values. Weights summing to zero (or
    less) are replaced by uniform weights. ``None`` passes through
    unchanged.

    Parameters
    ----------
    sample_weight : array or None
        Sample weights.

    X : array
        Input array

    Returns
    -------
    sample_weight : array or None

    Raises
    ------
    ValueError
        If lengths mismatch or any weight is negative.
    """
    if not sample_weight is None:
        sample_weight = check_array(
            sample_weight,
            accept_sparse=False,
            ensure_2d=False,
        )
        if len(sample_weight) != X.shape[0]:
            raise ValueError("`sample_weight` and X should have"
                             " same length, got %i, %i"%
                             (len(sample_weight), X.shape[0]))
        if np.any(sample_weight<0):
            raise ValueError("All weights from `sample_weight`"
                             " should be positive.")
        if sample_weight.sum() <= 0:
            # Degenerate (all-zero) weights: fall back to uniform.
            sample_weight = np.ones(X.shape[0])
    return sample_weight
def set_random_seed(random_state):
    """
    Seed both the numpy and tensorflow random generators.

    Parameters
    ----------
    random_state : int or None
        Random state; when None the current random
        generators remain unchanged.
    """
    if random_state is not None:
        np.random.seed(random_state)
        tf.random.set_seed(random_state)
def new_init(self, **kwargs):
    """Replacement ``__init__`` restoring the frozen attributes of a fitted estimator."""
    for attr, value in self.__frozen_dict__.items():
        setattr(self, attr, value)
def __deepcopy__(self, memo):
    # Bound onto fitted networks (see `check_fitted_network`) so that
    # deepcopy returns the very same fitted model instead of a copy.
    return self
def check_fitted_estimator(estimator):
    """
    Check Fitted Estimator

    Embed a fitted estimator in a clonable "Fitted*" subclass: the
    returned object is an instance of a dynamically created class named
    ``"Fitted" + <class name>`` whose ``__init__`` (``new_init``)
    restores the captured attributes, so cloning preserves the fitted
    state. Already-embedded estimators are returned unchanged.

    Parameters
    ----------
    estimator : sklearn estimator
        Fitted estimator

    Returns
    -------
    estimator : instance of "Fitted" + estimator class name
        Embedded fitted estimator
    """
    cls_name = estimator.__class__.__name__
    if cls_name.startswith("Fitted"):
        return estimator
    frozen = dict(estimator.__dict__)
    fitted_cls = type("Fitted" + cls_name,
                      (estimator.__class__,),
                      {"__init__": new_init,
                       "__frozen_dict__": frozen})
    return fitted_cls()
def check_fitted_network(estimator):
    """
    Check Fitted Network

    Overwrite the ``__deepcopy__`` method from network
    such that deepcopy returns the same estimator.

    Parameters
    ----------
    estimator : tensorflow Model
        Fitted network

    Returns
    -------
    estimator : tensorflow Model
        Modified fitted network (non-Model inputs pass through unchanged)
    """
    if isinstance(estimator, Model):
        # Bind the module-level identity __deepcopy__ onto this instance.
        estimator.__deepcopy__ = __deepcopy__.__get__(estimator)
    return estimator
# Try to save the initial estimator if it is a Keras Model
# This is required for cloning the adapt method.
# if isinstance(self.estimator, Model):
# self._has_keras_estimator = True
# try:
# self._deepcopy_estimator = check_estimator(estimator,
# copy=True,
# task=None,
# force_copy=True)
# except BaseException as err:
# if "The current network will be used" in str(err):
# warnings.warn("The Tensorflow model used as estimator"
# " can't be deep copied. "
# "This may provoke some undesired behaviour"
# " when cloning the object.")
# else:
# raise
# else:
# self._has_keras_estimator = False | 20,777 | 30.52959 | 95 | py |
adapt | adapt-master/adapt/metrics.py | import inspect
import copy
import numpy as np
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from scipy import linalg
from sklearn.metrics import pairwise
from sklearn.base import clone
from sklearn.model_selection import train_test_split
from sklearn.utils import check_array
from adapt.utils import get_default_discriminator, check_sample_weight
EPS = np.finfo(float).eps
def _estimator_predict(estimator, Xs, Xt, X):
    """
    Project Xs and Xt into the comparison space of ``estimator``.

    Feature-based adapt models (exposing ``transform``) return the
    encoded Xs and Xt. Instance-based models (exposing
    ``predict_weights``) return a weighted bootstrap resample of X as
    the "source" set, leaving Xt unchanged.
    """
    if hasattr(estimator, "transform"):
        # Some transform implementations take a `domain` argument
        # ("src"/"tgt"); detect it from the signature.
        args = [
            p.name
            for p in inspect.signature(estimator.transform).parameters.values()
            if p.name != "self" and p.kind != p.VAR_KEYWORD
        ]
        if "domain" in args:
            Xt = estimator.transform(Xt, domain="tgt")
            Xs = estimator.transform(Xs, domain="src")
        else:
            Xt = estimator.transform(Xt)
            Xs = estimator.transform(Xs)
    elif hasattr(estimator, "predict_weights"):
        X = check_array(X, ensure_2d=True, allow_nd=True, accept_sparse=True)
        sample_weight = estimator.predict_weights()
        # Weights that do not match X (e.g. estimator fitted on other
        # data) fall back to uniform.
        if len(X) != len(sample_weight):
            sample_weight = np.ones(len(X))
        sample_weight = check_sample_weight(sample_weight, X)
        sample_weight /= sample_weight.sum()
        bootstrap_index = np.random.choice(
            X.shape[0], size=X.shape[0], replace=True, p=sample_weight)
        Xs = X[bootstrap_index]
    else:
        raise ValueError("The Adapt model should implement"
                         " a transform or predict_weights methods")
    return Xs, Xt
def _fit_alpha(Xs, Xt, centers, sigma):
    """
    Fit alpha coeficients to compute J-score

    Appears to perform projected gradient ascent on the mean
    log-likelihood of the target points under an RBF mixture centered
    on ``centers``, with the constraint b^T alpha = 1 and alpha >= 0
    (b being the mean source kernel values). ``sigma`` is passed as the
    rbf ``gamma`` parameter.
    """
    A = pairwise.rbf_kernel(Xt, centers, sigma)
    b = np.mean(pairwise.rbf_kernel(centers, Xs, sigma), axis=1)
    b = b.reshape(-1, 1)
    # Start from uniform coefficients.
    alpha = np.ones((len(centers), 1)) / len(centers)
    previous_objective = -np.inf
    objective = np.mean(np.log(np.dot(A, alpha) + EPS))
    k = 0
    # Iterate until improvement falls below 1e-6 or 5000 iterations.
    while k < 5000 and objective-previous_objective > 1e-6:
        previous_objective = objective
        alpha_p = np.copy(alpha)  # NOTE(review): kept but never used afterwards
        # Gradient step on the log-likelihood.
        alpha += 1e-4 * np.dot(
            np.transpose(A), 1./(np.dot(A, alpha) + EPS)
        )
        # Project back towards the constraint b^T alpha = 1, alpha >= 0.
        alpha += b * ((((1-np.dot(np.transpose(b), alpha)) /
                        (np.dot(np.transpose(b), b) + EPS))))
        alpha = np.maximum(0, alpha)
        alpha /= (np.dot(np.transpose(b), alpha) + EPS)
        objective = np.mean(np.log(np.dot(A, alpha) + EPS))
        k += 1
    return alpha
def make_uda_scorer(func, Xs, Xt, greater_is_better=False, **kwargs):
    """
    Make a scorer function from an adapt metric.

    The goal of an adapt metric is to measure the closeness between
    a source input dataset `Xs` and a target input dataset `Xt`.
    If `Xs` is close from `Xt`, it can be expected that a good
    model trained on source will perform well on target.

    The returned score function applies `func` on a transformation
    of `Xs` and `Xt`. If the estimator given to the score function is
    a feature-based method, the metric is applied on the encoded
    `Xs` and `Xt`. If the estimator is an instance-based method, a
    weighted bootstrap sample of `Xs` is compared to `Xt`.

    **IMPORTANT NOTE** : when the returned score function is used
    with ``GridSearchCV`` from sklearn, the parameter
    ``return_train_score`` must be set to ``True``.
    The adapt score then corresponds to the train scores.

    Parameters
    ----------
    func : callable
        Adapt metric with signature ``func(Xs, Xt, **kwargs)``.

    Xs : array
        Source input dataset

    Xt : array
        Target input dataset

    greater_is_better : bool, default=True
        Whether the best outputs of ``func`` are the greatest
        ot the lowest. For all adapt metrics, the low values
        mean closeness between Xs and Xt.

    kwargs : key, value arguments
        Parameters given to ``func``.

    Returns
    -------
    scorer : callable
        A scorer function with signature
        ``scorer(estimator, X, y_true=None)``.

    Notes
    -----
    When the returned score function is used
    with ``GridSearchCV`` from sklearn, the parameter
    ``return_train_score`` must be set to ``True``.
    The adapt score then corresponds to the train scores.
    """

    def scorer(estimator, X, y_true=None):
        """
        Scorer function for unsupervised domain adaptation.

        For a feature-based method, applies the ``transform`` method of
        the fitted ``estimator`` to `Xs` and `Xt` and computes a metric
        between the two transformed datasets. For an instance-based
        method, a weighted bootstrap of `X` is compared to `Xt`.

        Parameters
        ----------
        estimator : Adapt estimator
            A fitted adapt estimator which should implements
            a ``predict_weights`` or ``transform`` method.

        X : array
            Input source data

        y_true : array (default=None)
            Not used. Here for compatibility with sklearn.
        """
        # BUGFIX: the previous implementation declared ``nonlocal Xs, Xt``
        # and rebound them to the transformed data, so every subsequent
        # call to the scorer (e.g. across GridSearchCV candidates) would
        # re-transform already-transformed arrays. Use local names and
        # keep the closure variables read-only.
        Xs_transformed, Xt_transformed = _estimator_predict(
            estimator, Xs=Xs, Xt=Xt, X=X)
        score = func(Xs_transformed, Xt_transformed, **kwargs)
        if not greater_is_better:
            score *= -1
        return score

    return scorer
def cov_distance(Xs, Xt):
    """
    Mean absolute difference between the covariance
    matrices of Xs and Xt.

    Parameters
    ----------
    Xs : array
        Source array

    Xt : array
        Target array

    Returns
    -------
    score : float

    See also
    --------
    frechet_distance
    CORAL

    References
    ----------
    .. [1] `[1] <https://arxiv.org/pdf/1511.05547.pdf>`_ Sun B., Feng J., Saenko K. \
"Return of frustratingly easy domain adaptation". In AAAI, 2016.
    """
    # Rows are observations, columns are variables.
    delta = np.cov(Xs, rowvar=False) - np.cov(Xt, rowvar=False)
    return np.abs(delta).mean()
def frechet_distance(Xs, Xt):
    """
    Frechet distance between Xs and Xt.

    .. math::

        \\Delta = ||\\mu_S - \\mu_T||_2^2 + Tr\\left(\\Sigma_S + \\Sigma_T
        - 2 (\\Sigma_S \\cdot \\Sigma_T)^{\\frac{1}{2}} \\right)

    Where :math:`\\mu_S, \\mu_T` are the means along the first axis and
    :math:`\\Sigma_S, \\Sigma_T` the covariance matrices of Xs and Xt.

    Parameters
    ----------
    Xs : array
        Source array

    Xt : array
        Target array

    Returns
    -------
    score : float

    See also
    --------
    normalized_frechet_distance
    linear_discrepancy
    normalized_linear_discrepancy

    References
    ----------
    .. [1] `[1] <https://www.sciencedirect.com/science/article/pii/00\
47259X8290077X?via%3Dihub>`_ Dowson, D. C; Landau, B. V. "The Fréchet \
distance between multivariate normal distributions". JMVA. 1982
    """
    mean_src = np.mean(Xs, axis=0)
    mean_tgt = np.mean(Xt, axis=0)
    cov_src = np.cov(Xs, rowvar=False)
    cov_tgt = np.cov(Xt, rowvar=False)

    mean_term = np.sum((mean_src - mean_tgt) ** 2.0)

    prod = np.array(cov_src.dot(cov_tgt))
    # sqrtm needs a 2-D input: single-variable data yields a 0-d product.
    if prod.ndim < 2:
        prod = prod.reshape(-1, 1)
    sqrt_prod = linalg.sqrtm(prod)
    # Numerical noise can produce tiny imaginary parts; drop them.
    if np.iscomplexobj(sqrt_prod):
        sqrt_prod = sqrt_prod.real
    return mean_term + np.trace(cov_src + cov_tgt - 2.0 * sqrt_prod)
def linear_discrepancy(Xs, Xt, power_method=False, n_iter=20):
    """
    Linear discrepancy between Xs and Xt.

    .. math::

        \\Delta = \\max_{u \\in \\mathbb{R}^p} u^T (X_S^T X_S - X_T^T X_T) u

    Where :math:`p` is the number of features of Xs and Xt.

    Parameters
    ----------
    Xs : array
        Source array

    Xt : array
        Target array

    power_method : bool (default=False)
        Weither to use the power method
        approximation or not.

    n_iter : int (default=20)
        Number of iteration for power method

    Returns
    -------
    score : float

    See also
    --------
    normalized_linear_discrepancy
    frechet_distance
    normalized_frechet_distance

    References
    ----------
    .. [1] `[1] <https://arxiv.org/pdf/0902.3430.pdf>`_ \
Y. Mansour, M. Mohri, and A. Rostamizadeh. "Domain \
adaptation: Learning bounds and algorithms". In COLT, 2009.
    """
    # Difference of the (normalized) second-moment matrices.
    moment_diff = ((1/len(Xs)) * np.dot(np.transpose(Xs), Xs)
                   - (1/len(Xt)) * np.dot(np.transpose(Xt), Xt))
    if power_method:
        # Power iteration: the normalization factor converges to the
        # largest absolute eigenvalue.
        vec = np.ones(len(moment_diff))
        for _ in range(n_iter):
            vec = moment_diff.dot(vec)
            largest = np.max(np.abs(vec))
            vec = (1 / (largest + EPS)) * vec
    else:
        eigvals, _ = linalg.eig(moment_diff)
        largest = np.max(np.abs(eigvals))
    return largest
def normalized_linear_discrepancy(Xs, Xt, power_method=False, n_iter=20):
    """
    Normalized linear discrepancy between Xs and Xt.

    Xs and Xt are first scaled by ``(std(Xs) + std(Xt)) / 2`` and
    centered around ``(mean(Xs) + mean(Xt)) / 2``; the linear
    discrepancy of the result is then divided by the number of features.

    Parameters
    ----------
    Xs : array
        Source array

    Xt : array
        Target array

    Returns
    -------
    score : float

    See also
    --------
    linear_discrepancy
    frechet_distance
    normalized_frechet_distance

    References
    ----------
    .. [1] `[1] <https://arxiv.org/pdf/0902.3430.pdf>`_ \
Y. Mansour, M. Mohri, and A. Rostamizadeh. "Domain \
adaptation: Learning bounds and algorithms". In COLT, 2009.
    """
    scale = (np.std(Xs) + np.std(Xt) + EPS) / 2
    center = (np.mean(Xs) + np.mean(Xt)) / 2
    discrepancy = linear_discrepancy((Xs - center) / scale,
                                     (Xt - center) / scale,
                                     power_method, n_iter)
    return discrepancy / Xs.shape[1]
def normalized_frechet_distance(Xs, Xt):
    """
    Normalized frechet distance between Xs and Xt.

    Xs and Xt are first scaled by ``(std(Xs) + std(Xt)) / 2`` and
    centered around ``(mean(Xs) + mean(Xt)) / 2``; the frechet
    distance of the result is then divided by the number of features.

    Parameters
    ----------
    Xs : array
        Source array

    Xt : array
        Target array

    Returns
    -------
    score : float

    See also
    --------
    frechet_distance
    linear_discrepancy
    normalized_linear_discrepancy

    References
    ----------
    .. [1] `[1] <https://www.sciencedirect.com/science/article/pii/00\
47259X8290077X?via%3Dihub>`_ Dowson, D. C; Landau, B. V. "The Fréchet \
distance between multivariate normal distributions". JMVA. 1982
    """
    scale = (np.std(Xs) + np.std(Xt) + EPS) / 2
    center = (np.mean(Xs) + np.mean(Xt)) / 2
    distance = frechet_distance((Xs - center) / scale, (Xt - center) / scale)
    return distance / Xs.shape[1]
def neg_j_score(Xs, Xt, max_centers=100, sigma=None):
    """
    Negative J-score between Xs and Xt.

    .. math::

        \\Delta = -\\int_{\\mathcal{X}} P(X_T) \\log(P(X_T) / P(X_S))

    The source and target densities are approximated with a mixture of
    gaussian kernels of bandwidth ``sigma`` centered on at most
    ``max_centers`` random points of Xt. The mixture coefficients are
    obtained by convex optimization (see [1]).

    Parameters
    ----------
    Xs : array
        Source array

    Xt : array
        Target array

    max_centers : int (default=100)
        Maximum number of centers from Xt

    sigma : float (default=None)
        Kernel bandwidth. If ``None``, the mean
        of pairwise distances between data from
        Xt is used.

    Returns
    -------
    score : float

    See also
    --------
    KLIEP

    References
    ----------
    .. [1] `[1] <https://papers.nips.cc/paper/3248-direct-importance-estimation\
-with-model-selection-and-its-application-to-covariate-shift-adaptation.pdf>`_ \
M. Sugiyama, S. Nakajima, H. Kashima, P. von Bünau and M. Kawanabe. \
"Direct importance estimation with model selection and its application \
to covariateshift adaptation". In NIPS 2007
    """
    Xs = check_array(Xs, ensure_2d=True, allow_nd=True, accept_sparse=True)
    Xt = check_array(Xt, ensure_2d=True, allow_nd=True, accept_sparse=True)

    # Sample at most ``max_centers`` kernel centers from the target set.
    if len(Xt) > max_centers:
        center_index = np.random.choice(
            len(Xt), size=max_centers, replace=False)
        centers = Xt[center_index]
    else:
        centers = Xt

    # Default bandwidth: mean pairwise distance within Xt.
    if sigma is None:
        sigma = pairwise.euclidean_distances(Xt, Xt).mean()

    alphas = _fit_alpha(Xs, Xt, centers, sigma)
    target_kernel = pairwise.rbf_kernel(Xt, centers, sigma)
    j_score_ = np.mean(np.log(np.dot(target_kernel, alphas) + EPS))
    return -j_score_
def domain_classifier(Xs, Xt, classifier=None, **fit_params):
    """
    1 minus the mean square error of a classifier
    discriminating between Xs and Xt.

    .. math::

        \\Delta = 1 - \\min_{h \\in H} || h(X_S) - 1 ||^2 +
        || h(X_T) ||^2

    Where :math:`H` is a class of classifier.

    Parameters
    ----------
    Xs : array
        Source array

    Xt : array
        Target array

    classifier : sklearn estimator or tensorflow Model instance
        Classifier

    fit_params : key, value arguments
        Parameters for the fit method of the classifier.

    Returns
    -------
    score : float

    See also
    --------
    reverse_validation
    DANN

    References
    ----------
    .. [1] `[1] <http://jmlr.org/papers/volume17/15-239/15-239.pdf>`_ Y. Ganin, \
E. Ustinova, H. Ajakan, P. Germain, H. Larochelle, F. Laviolette, M. Marchand, \
and V. Lempitsky. "Domain-adversarial training of neural networks". In JMLR, 2016.
    """
    # 80/20 split of each domain, source labeled 0 and target labeled 1.
    src_train, src_test = train_test_split(Xs, train_size=0.8)
    tgt_train, tgt_test = train_test_split(Xt, train_size=0.8)

    X_train = np.concatenate((src_train, tgt_train))
    y_train = np.concatenate((np.zeros(len(src_train)),
                              np.ones(len(tgt_train))))
    X_test = np.concatenate((src_test, tgt_test))
    y_test = np.concatenate((np.zeros(len(src_test)),
                             np.ones(len(tgt_test))))

    if classifier is None:
        classifier = get_default_discriminator()
        classifier.compile(optimizer=Adam(0.001), loss="bce")

    # Default training budget: roughly 3000 gradient steps.
    if not fit_params:
        fit_params = dict(epochs=max(1, int(3000 * 64 / len(X_train))),
                          batch_size=64,
                          verbose=0)

    classifier.fit(X_train, y_train, **fit_params)
    predictions = classifier.predict(X_test)
    mse = np.mean(np.square(predictions - y_test.reshape(predictions.shape)))
    return 1 - mse
def reverse_validation(model, Xs, ys, Xt, **fit_params):
    """
    Reverse validation.

    The reverse validation score is computed as a source error
    by inversing the role of the source and the target domains:
    a clone of the model is trained to adapt from the target to
    the source using the model's target predictions as pseudo
    target labels; the final score is the cloned model's score
    on the source groundtruth.

    Parameters
    ----------
    model : BaseAdapt instance
        Adaptation model

    Xs : array
        Source input array

    ys : array
        Source output array

    Xt : array
        Target input array

    fit_params : key, value arguments
        Parameters for the fit method of the cloned model.

    Returns
    -------
    score : float

    See also
    --------
    domain_classifier
    DANN

    References
    ----------
    .. [1] `[1] <http://jmlr.org/papers/volume17/15-239/15-239.pdf>`_ Y. Ganin, \
E. Ustinova, H. Ajakan, P. Germain, H. Larochelle, F. Laviolette, M. Marchand, \
and V. Lempitsky. "Domain-adversarial training of neural networks". In JMLR, 2016.
    """
    pseudo_yt = model.predict(Xt)
    # Align pseudo-label dimensionality with the source labels.
    if pseudo_yt.ndim == 1 and ys.ndim > 1:
        pseudo_yt = pseudo_yt.reshape(-1, 1)
    if ys.ndim == 1 and pseudo_yt.ndim > 1:
        pseudo_yt = pseudo_yt.ravel()
    reversed_model = clone(model)
    reversed_model.fit(Xt, pseudo_yt, Xs, **fit_params)
    return reversed_model.score(Xs, ys)
"""
Weighting Adversarial Neural Network (WANN)
"""
import numpy as np
import tensorflow as tf
from adapt.base import BaseAdaptDeep, make_insert_doc
from adapt.utils import check_network, get_default_task
EPS = np.finfo(np.float32).eps
@make_insert_doc(["task", "weighter"], supervised=True)
class WANN(BaseAdaptDeep):
    """
    WANN : Weighting Adversarial Neural Network

    WANN is an instance-based domain adaptation method suited for regression tasks.
    It supposes the supervised setting where some labeled target data are available.

    The goal of WANN is to compute a source instances reweighting which correct
    "shifts" between source and target domain. This is done by minimizing the
    Y-discrepancy distance between source and target distributions

    WANN involves three networks:

    - the weighting network which learns the source weights.
    - the task network which learns the task.
    - the discrepancy network which is used to estimate a distance
      between the reweighted source and target distributions: the Y-discrepancy

    Parameters
    ----------
    pretrain : bool (default=True)
        Weither to perform pretraining of the ``weighter``
        network or not. If True, the ``weighter`` is
        pretrained in order to predict 1 for each source.

    C : float (default=1.)
        Clipping constant for the weighting networks
        regularization. Low value of ``C`` produce smoother
        weighting map. If ``C<=0``, No regularization is added.

    Attributes
    ----------
    weighter_ : tensorflow Model
        weighting network.

    task_ : tensorflow Model
        task network.

    discriminator_ : tensorflow Model
        discriminator network.

    history_ : dict
        history of the losses and metrics across the epochs.

    Examples
    --------
    >>> from adapt.utils import make_regression_da
    >>> from adapt.instance_based import WANN
    >>> Xs, ys, Xt, yt = make_regression_da()
    >>> model = WANN(Xt=Xt[:10], yt=yt[:10], random_state=0)
    >>> model.fit(Xs, ys, epochs=100, verbose=0)
    >>> model.score(Xt, yt)
    1/1 [==============================] - 0s 106ms/step - loss: 0.1096
    0.10955706238746643

    References
    ----------
    .. [1] `[1] <https://arxiv.org/pdf/2006.08251.pdf>`_ A. de Mathelin, \
G. Richard, F. Deheeger, M. Mougeot and N. Vayatis "Adversarial Weighting \
for Domain Adaptation in Regression". In ICTAI, 2021.
    """

    def __init__(self,
                 task=None,
                 weighter=None,
                 Xt=None,
                 yt=None,
                 pretrain=True,
                 C=1.,
                 verbose=1,
                 copy=True,
                 random_state=None,
                 **params):
        # Collect the arguments declared in the signature (plus any
        # extra **params) and forward them to BaseAdaptDeep, which
        # stores them as attributes.
        names = self._get_param_names()
        kwargs = {k: v for k, v in locals().items() if k in names}
        kwargs.update(params)
        super().__init__(**kwargs)

    def _initialize_networks(self):
        """Build ``weighter_``, ``task_`` and ``discriminator_``.

        Missing networks are replaced by ``get_default_task``. The
        max-norm regularization is only applied to the weighter when
        ``C > 0``.
        """
        if self.weighter is None:
            self.weighter_ = get_default_task(name="weighter", state=self.random_state)
            if self.C > 0.:
                self.weighter_ = self._add_regularization(self.weighter_)
        else:
            # NOTE(review): ``_add_regularization`` mutates layers in
            # place, so the constraints are set on the user-provided
            # network *before* it is (optionally) copied — confirm this
            # side effect on ``self.weighter`` is intended.
            if self.C > 0.:
                self.weighter_ = self._add_regularization(self.weighter)
            self.weighter_ = check_network(self.weighter,
                                           copy=self.copy,
                                           name="weighter")
        if self.task is None:
            self.task_ = get_default_task(name="task", state=self.random_state)
        else:
            self.task_ = check_network(self.task,
                                       copy=self.copy,
                                       name="task")
        # The discriminator is built from the ``task`` argument (same
        # architecture as the task network).
        # NOTE(review): with ``copy=False`` the task and discriminator
        # would share the exact same network object — confirm intended.
        if self.task is None:
            self.discriminator_ = get_default_task(name="discriminator", state=self.random_state)
        else:
            self.discriminator_ = check_network(self.task,
                                                copy=self.copy,
                                                name="discriminator")

    def _add_regularization(self, weighter):
        """Set MaxNorm(C) kernel/bias constraints on every layer that
        supports them, in place, and return the network."""
        for i in range(len(weighter.layers)):
            if hasattr(weighter.layers[i], "kernel_constraint"):
                setattr(weighter.layers[i],
                        "kernel_constraint",
                        tf.keras.constraints.MaxNorm(self.C))
            if hasattr(weighter.layers[i], "bias_constraint"):
                setattr(weighter.layers[i],
                        "bias_constraint",
                        tf.keras.constraints.MaxNorm(self.C))
        return weighter

    def pretrain_step(self, data):
        """Pretraining step: push the weighter outputs toward 1 for
        every source instance (uniform initial weighting)."""
        # Unpack the data.
        Xs, Xt, ys, yt = self._unpack_data(data)

        # loss
        with tf.GradientTape() as tape:
            # Forward pass
            weights = tf.math.abs(self.weighter_(Xs, training=True))
            loss = tf.reduce_mean(
                tf.square(weights - tf.ones_like(weights)))

            # Compute the loss value
            loss += sum(self.weighter_.losses)

        # Compute gradients
        trainable_vars = self.weighter_.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)

        # Update weights
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))

        logs = {"loss": loss}
        return logs

    def call(self, X):
        """Forward pass: predictions of the task network."""
        return self.task_(X)

    def train_step(self, data):
        """One adversarial training step.

        The task and discriminator are trained on the weighted source
        loss; the weighter minimizes the weighted task loss minus the
        (source - target) discriminator gap.
        """
        # Unpack the data.
        Xs, Xt, ys, yt = self._unpack_data(data)

        # Delegate to the pretraining objective while pretraining.
        if self.pretrain_:
            return self.pretrain_step(data)
        else:
            # loss
            with tf.GradientTape() as task_tape, tf.GradientTape() as weight_tape, tf.GradientTape() as disc_tape:
                # Forward pass
                weights = tf.abs(self.weighter_(Xs, training=True))
                ys_pred = self.task_(Xs, training=True)
                ys_disc = self.discriminator_(Xs, training=True)

                yt_pred = self.task_(Xt, training=True)
                yt_disc = self.discriminator_(Xt, training=True)

                # Reshape
                ys_pred = tf.reshape(ys_pred, tf.shape(ys))
                ys_disc = tf.reshape(ys_disc, tf.shape(ys))
                yt_pred = tf.reshape(yt_pred, tf.shape(yt))
                yt_disc = tf.reshape(yt_disc, tf.shape(yt))

                # Compute the loss value
                task_loss = self.task_loss_(ys, ys_pred)
                disc_src = self.task_loss_(ys, ys_disc)
                disc_tgt = self.task_loss_(yt, yt_disc)

                # Per-sample losses are weighted by the learned source weights.
                weights = tf.reshape(weights, tf.shape(task_loss))

                task_loss = weights * task_loss
                disc_src = weights * disc_src

                task_loss = tf.reduce_mean(task_loss)
                disc_src = tf.reduce_mean(disc_src)
                disc_tgt = tf.reduce_mean(disc_tgt)

                # Y-discrepancy estimate: source/target gap of the discriminator.
                disc_loss = disc_src - disc_tgt
                # The weighter trades task accuracy against discrepancy.
                weight_loss = task_loss - disc_loss

                task_loss += sum(self.task_.losses)
                disc_loss += sum(self.discriminator_.losses)
                weight_loss += sum(self.weighter_.losses)

            # Compute gradients
            trainable_vars_task = self.task_.trainable_variables
            trainable_vars_weight = self.weighter_.trainable_variables
            trainable_vars_disc = self.discriminator_.trainable_variables

            gradients_task = task_tape.gradient(task_loss, trainable_vars_task)
            gradients_weight = weight_tape.gradient(weight_loss, trainable_vars_weight)
            gradients_disc = disc_tape.gradient(disc_loss, trainable_vars_disc)

            # Update weights
            self.optimizer.apply_gradients(zip(gradients_task, trainable_vars_task))
            self.optimizer.apply_gradients(zip(gradients_weight, trainable_vars_weight))
            self.optimizer.apply_gradients(zip(gradients_disc, trainable_vars_disc))

            # Update metrics
            self.compiled_metrics.update_state(ys, ys_pred)
            self.compiled_loss(ys, ys_pred)
            # Return a dict mapping metric names to current value
            logs = {m.name: m.result() for m in self.metrics}
            return logs

    def predict_weights(self, X):
        """
        Return the predictions of weighting network

        Parameters
        ----------
        X: array
            input data

        Returns
        -------
        array:
            weights
        """
        return np.abs(self.weighter_.predict(X))

    def predict_disc(self, X):
        """
        Return predictions of the discriminator.

        Parameters
        ----------
        X : array
            input data

        Returns
        -------
        y_disc : array
            predictions of discriminator network
        """
        return self.discriminator_.predict(X)
"""
Importance Weighting Network (IWN)
"""
import warnings
import inspect
from copy import deepcopy
import numpy as np
from sklearn.utils import check_array
import tensorflow as tf
from tensorflow.keras import Model
from adapt.base import BaseAdaptDeep, make_insert_doc
from adapt.utils import (check_arrays, check_network, get_default_task,
set_random_seed, check_estimator, check_sample_weight)
EPS = np.finfo(np.float32).eps
def pairwise_euclidean(X, Y):
    """Matrix of pairwise *squared* euclidean distances between the
    rows of X and the rows of Y (shape ``len(X) x len(Y)``),
    computed as ``||x||^2 + ||y||^2 - 2 <x, y>``.
    """
    cross = tf.matmul(X, tf.transpose(Y))
    row_sq = tf.tile(tf.reduce_sum(tf.square(X), axis=1, keepdims=True),
                     [1, tf.shape(Y)[0]])
    col_sq = tf.tile(tf.reduce_sum(tf.square(Y), axis=1, keepdims=True),
                     [1, tf.shape(X)[0]])
    return row_sq + tf.transpose(col_sq) - 2 * cross
def weighted_mmd(Xs, Xt, weights, gamma=1.):
    """Weighted Maximum Mean Discrepancy between Xs and Xt with a
    gaussian kernel of bandwidth ``gamma``.

    The source kernel terms are weighted by ``weights`` (normalized
    to mean one); the target terms are unweighted.

    Parameters
    ----------
    Xs : Tensor
        Source input data.

    Xt : Tensor
        Target input data.

    weights : Tensor
        Source importance weights, one per row of Xs.

    gamma : float (default=1.)
        Gaussian kernel bandwidth.

    Returns
    -------
    Tensor
        Scalar weighted-MMD value.
    """
    # NOTE: the unused sample counts ``n`` and ``m`` of the original
    # implementation were dead code and have been removed.
    gamma = tf.cast(gamma, Xt.dtype)
    weights = tf.cast(weights, Xt.dtype)
    weights = tf.reshape(weights, (-1, 1))
    # Normalize the weights to mean one.
    weights /= (tf.reduce_mean(weights))
    # Outer product of the weights for the source-source term.
    Wij = tf.matmul(weights, tf.reshape(weights, (1, -1)))
    mxx = tf.reduce_mean(Wij * tf.exp(-gamma * pairwise_euclidean(Xs, Xs)))
    myy = tf.reduce_mean(tf.exp(-gamma * pairwise_euclidean(Xt, Xt)))
    mxy = tf.reduce_mean(weights * tf.exp(-gamma * pairwise_euclidean(Xs, Xt)))
    return mxx + myy - 2*mxy
@make_insert_doc(["estimator", "weighter"])
class IWN(BaseAdaptDeep):
    r"""
    IWN : Importance Weighting Network

    IWN is an instance-based method for unsupervised domain adaptation.

    The goal of IWN is to reweight the source instances in order to
    minimize the Maximum Mean Discreancy (MMD) between the reweighted
    source and the target distributions.

    IWN uses a weighting network to parameterized the weights of the
    source instances. The MMD is computed with gaussian kernels
    parameterized by the bandwidth :math:`\sigma`. The :math:`\sigma`
    parameter is updated during the IWN optimization in order to
    maximize the discriminative power of the MMD.

    Parameters
    ----------
    pretrain : bool (default=True)
        Weither to perform pretraining of the ``weighter``
        network or not. If True, the ``weighter`` is
        pretrained in order to predict 1 for each source data.

    sigma_init : float (default=.1)
        Initialization for the kernel bandwidth

    update_sigma : bool (default=True)
        Weither to update the kernel bandwidth or not.
        If `False`, the bandwidth stay equal to `sigma_init`.

    Attributes
    ----------
    weighter_ : tensorflow Model
        weighting network.

    history_ : dict
        history of the losses and metrics across the epochs.

    sigma_ : tf.Variable
        fitted kernel bandwidth.

    Examples
    --------
    >>> from sklearn.linear_model import RidgeClassifier
    >>> from adapt.utils import make_classification_da
    >>> from adapt.instance_based import IWN
    >>> Xs, ys, Xt, yt = make_classification_da()
    >>> model = IWN(RidgeClassifier(0.), Xt=Xt, sigma_init=0.1, random_state=0,
    ...             pretrain=True, pretrain__epochs=100, pretrain__verbose=0)
    >>> model.fit(Xs, ys, epochs=100, batch_size=256, verbose=1)
    >>> model.score(Xt, yt)
    0.78

    See also
    --------
    KMM
    WANN

    References
    ----------
    .. [1] `[1] <https://arxiv.org/pdf/2209.04215.pdf>`_ A. de Mathelin, F. Deheeger, \
M. Mougeot and N. Vayatis "Fast and Accurate Importance Weighting for \
Correcting Sample Bias" In ECML-PKDD, 2022.
    """

    def __init__(self,
                 estimator=None,
                 weighter=None,
                 Xt=None,
                 yt=None,
                 pretrain=True,
                 sigma_init=.1,
                 update_sigma=True,
                 verbose=1,
                 copy=True,
                 random_state=None,
                 **params):
        # Collect the arguments declared in the signature (plus any
        # extra **params) and forward them to BaseAdaptDeep, which
        # stores them as attributes.
        names = self._get_param_names()
        kwargs = {k: v for k, v in locals().items() if k in names}
        kwargs.update(params)
        super().__init__(**kwargs)

    def _initialize_networks(self):
        """Build the weighting network and the trainable bandwidth."""
        if self.weighter is None:
            self.weighter_ = get_default_task(name="weighter", state=self.random_state)
        else:
            self.weighter_ = check_network(self.weighter,
                                           copy=self.copy,
                                           name="weighter")
        # The kernel bandwidth is frozen when update_sigma is False.
        self.sigma_ = tf.Variable(self.sigma_init,
                                  trainable=self.update_sigma)

    def pretrain_step(self, data):
        """Pretraining step: push the weighter outputs toward 1 for
        every source instance (uniform initial weighting)."""
        # Unpack the data.
        Xs, Xt, ys, yt = self._unpack_data(data)

        # loss
        with tf.GradientTape() as tape:
            # Forward pass
            weights = tf.math.abs(self.weighter_(Xs, training=True))
            loss = tf.reduce_mean(
                tf.square(weights - tf.ones_like(weights)))

            # Compute the loss value
            loss += sum(self.weighter_.losses)

        # Compute gradients
        trainable_vars = self.weighter_.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)

        # Update weights
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))

        logs = {"loss": loss}
        return logs

    def call(self, X):
        """Forward pass: raw outputs of the weighting network."""
        return self.weighter_(X)

    def train_step(self, data):
        """One training step: the weighter minimizes the weighted MMD
        while the bandwidth ``sigma_`` is updated to maximize it."""
        # Unpack the data.
        Xs, Xt, ys, yt = self._unpack_data(data)

        # Delegate to the pretraining objective while pretraining.
        if self.pretrain_:
            return self.pretrain_step(data)
        else:
            # loss
            with tf.GradientTape() as weight_tape, tf.GradientTape() as sigma_tape:
                # Forward pass
                weights = tf.abs(self.weighter_(Xs, training=True))
                loss = weighted_mmd(Xs, Xt, weights, self.sigma_)
                # Adversarial bandwidth: sigma ascends the MMD.
                loss_sigma = -loss
                loss += sum(self.weighter_.losses)

            # Compute gradients
            trainable_vars = self.weighter_.trainable_variables

            gradients = weight_tape.gradient(loss, trainable_vars)
            gradients_sigma = sigma_tape.gradient(loss_sigma, [self.sigma_])

            # Update weights
            self.optimizer.apply_gradients(zip(gradients, trainable_vars))
            self.optimizer.apply_gradients(zip(gradients_sigma, [self.sigma_]))

            # Return a dict mapping metric names to current value
            logs = {"loss": loss, "sigma": self.sigma_}
            return logs

    def fit(self, X, y=None, Xt=None, yt=None, domains=None,
            fit_params_estimator=None, **fit_params):
        """
        Fit the importance weighting, then fit the estimator on the
        weighted source data.

        Parameters
        ----------
        X : array
            Source input data.

        y : array (default=None)
            Source output data.

        Xt : array (default=None)
            Target input data.

        yt : array (default=None)
            Not used here; accepted for API compatibility.

        domains : array (default=None)
            Not used here; accepted for API compatibility.

        fit_params_estimator : dict (default=None)
            Parameters given to the fit method of the estimator.
            (Defaults to an empty dict.)

        fit_params : key, value arguments
            Parameters given to the fit method of the weighting model.

        Returns
        -------
        self : returns an instance of self
        """
        # BUGFIX: ``fit_params_estimator`` previously defaulted to a
        # mutable ``{}`` shared across calls; use a None sentinel.
        if fit_params_estimator is None:
            fit_params_estimator = {}
        weights = self.fit_weights(X, Xt, **fit_params)
        self.fit_estimator(X, y, sample_weight=weights, **fit_params_estimator)
        return self

    def fit_weights(self, Xs, Xt, **fit_params):
        """
        Fit importance weighting.

        Parameters
        ----------
        Xs : array
            Input source data.

        Xt : array
            Input target data.

        fit_params : key, value arguments
            Arguments given to the fit method of the model
            (epochs, batch_size, callbacks...).

        Returns
        -------
        weights_ : sample weights
        """
        # Dummy zero labels: the weighting objective is unsupervised.
        super().fit(Xs, np.zeros(len(Xs)), Xt, None, None, **fit_params)
        return self.predict_weights(Xs)

    def fit_estimator(self, X, y, sample_weight=None,
                      random_state=None, warm_start=True,
                      **fit_params):
        """
        Fit estimator on X, y.

        Parameters
        ----------
        X : array
            Input data.

        y : array
            Output data.

        sample_weight : array
            Importance weighting.

        random_state : int (default=None)
            Seed of the random generator

        warm_start : bool (default=True)
            If True, continue to fit ``estimator_``,
            else, a new estimator is fitted based on
            a copy of ``estimator``. (Be sure to set
            ``copy=True`` to use ``warm_start=False``)

        fit_params : key, value arguments
            Arguments given to the fit method of
            the estimator and to the compile method
            for tensorflow estimator.

        Returns
        -------
        estimator_ : fitted estimator
        """
        X, y = check_arrays(X, y, accept_sparse=True)
        set_random_seed(random_state)

        if (not warm_start) or (not hasattr(self, "estimator_")):
            estimator = self.estimator
            self.estimator_ = check_estimator(estimator,
                                              copy=self.copy,
                                              force_copy=True)
            # Keras models lose their compile state when copied;
            # restore loss and optimizer from the original.
            if isinstance(self.estimator_, Model):
                compile_params = {}
                if estimator._is_compiled:
                    compile_params["loss"] = deepcopy(estimator.loss)
                    compile_params["optimizer"] = deepcopy(estimator.optimizer)
                else:
                    raise ValueError("The given `estimator` argument"
                                     " is not compiled yet. "
                                     "Please give a compiled estimator or "
                                     "give a `loss` and `optimizer` arguments.")
                self.estimator_.compile(**compile_params)

        # Detect whether the estimator's fit accepts sample_weight
        # (ignoring self and **kwargs).
        fit_args = [
            p.name
            for p in inspect.signature(self.estimator_.fit).parameters.values()
            if p.name != "self" and p.kind != p.VAR_KEYWORD
        ]

        if "sample_weight" in fit_args:
            sample_weight = check_sample_weight(sample_weight, X)
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                self.estimator_.fit(X, y,
                                    sample_weight=sample_weight,
                                    **fit_params)
        else:
            if sample_weight is None:
                self.estimator_.fit(X, y, **fit_params)
            else:
                # Estimator does not support weights: emulate them
                # with a weighted bootstrap resample.
                sample_weight = check_sample_weight(sample_weight, X)
                sample_weight /= sample_weight.sum()
                bootstrap_index = np.random.choice(
                    len(X), size=len(X), replace=True,
                    p=sample_weight)
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    self.estimator_.fit(X[bootstrap_index],
                                        y[bootstrap_index],
                                        **fit_params)
        return self.estimator_

    def predict_weights(self, X):
        """
        Return the predictions of weighting network

        Parameters
        ----------
        X: array
            input data

        Returns
        -------
        array:
            weights
        """
        return np.abs(self.weighter_.predict(X)).ravel()

    def predict(self, X, domain=None, **predict_params):
        """
        Return estimator predictions

        Parameters
        ----------
        X : array
            input data

        domain : str (default=None)
            Not used. For compatibility with `adapt` objects

        Returns
        -------
        y_pred : array
            prediction of the Adapt Model.
        """
        X = check_array(X, ensure_2d=True, allow_nd=True, accept_sparse=True)
        return self.estimator_.predict(X, **predict_params)

    def score(self, X, y, sample_weight=None, domain=None):
        """
        Return the estimator score.

        Call `score` on sklearn estimator and
        `evaluate` on tensorflow Model.

        Parameters
        ----------
        X : array
            input data

        y : array
            output data

        sample_weight : array (default=None)
            Sample weights

        domain : str (default=None)
            Not used.

        Returns
        -------
        score : float
            estimator score.
        """
        X, y = check_arrays(X, y, accept_sparse=True)
        if hasattr(self.estimator_, "score"):
            score = self.estimator_.score(X, y, sample_weight)
        elif hasattr(self.estimator_, "evaluate"):
            # Evaluate in one batch when the data is small enough.
            if np.prod(X.shape) <= 10**8:
                score = self.estimator_.evaluate(
                    X, y,
                    sample_weight=sample_weight,
                    batch_size=len(X)
                )
            else:
                score = self.estimator_.evaluate(
                    X, y,
                    sample_weight=sample_weight
                )
            # Keras evaluate may return [loss, *metrics]; keep the loss.
            if isinstance(score, (tuple, list)):
                score = score[0]
        else:
            raise ValueError("Estimator does not implement"
                             " score or evaluate method")
        return score
"""
CDAN
"""
import numpy as np
import tensorflow as tf
from adapt.base import BaseAdaptDeep, make_insert_doc
from tensorflow.keras.initializers import GlorotUniform
from adapt.utils import (check_network,
get_default_encoder,
get_default_discriminator)
EPS = np.finfo(np.float32).eps
def _get_default_classifier(name=None, state=None):
    """Build the default classifier: Flatten, two Dense(10, relu)
    layers and a Dense(2, softmax) head.

    When ``state`` is given, each Dense layer gets its own seeded
    GlorotUniform kernel initializer for reproducible builds.
    """
    def _dense(units, activation):
        # A fresh initializer instance per layer, as in the default build.
        if state is None:
            return tf.keras.layers.Dense(units, activation=activation)
        return tf.keras.layers.Dense(units, activation=activation,
                                     kernel_initializer=GlorotUniform(seed=state))

    network = tf.keras.Sequential(name=name)
    network.add(tf.keras.layers.Flatten())
    network.add(_dense(10, "relu"))
    network.add(_dense(10, "relu"))
    network.add(_dense(2, "softmax"))
    return network
@make_insert_doc(["encoder"])
class CDAN(BaseAdaptDeep):
"""
CDAN: Conditional Adversarial Domain Adaptation
CDAN is an unsupervised domain adaptation method on the model of the
:ref:`DANN <adapt.feature_based.DANN>`. In CDAN the discriminator
is conditioned on the prediction of the task network for
    source and target data. This should, in theory, focus the
source-target matching of instances belonging to the same class.
To condition the **discriminator** network on each class, a
multilinear map of shape: ``nb_class * encoder.output_shape[1]``
is given as input. If the shape is too large (>4096), a random
sub-multilinear map of lower dimension is considered.
The optimization formulation of CDAN is the following:
.. math::
\min_{\phi, F} & \; \mathcal{L}_{task}(F(\phi(X_S)), y_S) -
\lambda \\left( \log(1 - D(\phi(X_S) \\otimes F(X_S)) +
\log(D(\phi(X_T) \\otimes F(X_T)) \\right) \\\\
\max_{D} & \; \log(1 - D(\phi(X_S) \\otimes F(X_S)) +
\log(D(\phi(X_T) \\otimes F(X_T))
Where:
- :math:`(X_S, y_S), (X_T)` are respectively the labeled source data
and the unlabeled target data.
- :math:`\phi, F, D` are respectively the **encoder**, the **task**
and the **discriminator** networks
- :math:`\lambda` is the trade-off parameter.
- :math:`\phi(X_S) \\otimes F(X_S)` is the multilinear map between
the encoded sources and the task predictions.
In CDAN+E, an entropy regularization is added to prioritize the
transfer of easy-to-transfer exemples. The optimization formulation
of CDAN+E is the following:
.. math::
\min_{\phi, F} & \; \mathcal{L}_{task}(F(\phi(X_S)), y_S) -
\lambda \\left( \log(1 - W_S D(\phi(X_S) \\otimes F(X_S)) +
W_T \log(D(\phi(X_T) \\otimes F(X_T)) \\right) \\\\
\max_{D} & \; \log(1 - W_S D(\phi(X_S) \\otimes F(X_S)) +
W_T \log(D(\phi(X_T) \\otimes F(X_T))
Where:
- :math:`W_S = 1+\exp^{-\\text{ent}(F(X_S))}`
- :math:`\\text{ent}(F(X_S)) = - \sum_{i < C} F(X_S)_i \log(F(X_S)_i)`
with :math:`C` the number of classes.
.. figure:: ../_static/images/cdan.png
:align: center
CDAN architecture (source: [1])
Notes
-----
CDAN is specific for multi-class classification tasks. Be sure to add a
softmax activation at the end of the task network.
Parameters
----------
task : tensorflow Model (default=None)
        Task network. If ``None``, a two-layer network with 10
neurons per layer and ReLU activation is used as task network.
``task`` should end with a softmax activation.
discriminator : tensorflow Model (default=None)
        Discriminator network. If ``None``, a two-layer network with 10
neurons per layer and ReLU activation is used as discriminator
network. Note that the output shape of the discriminator should
be ``(None, 1)`` and the input shape:
``(None, encoder.output_shape[1] * nb_class)``.
lambda_ : float or None (default=1)
Trade-off parameter. This parameter gives the trade-off
for the encoder between learning the task and matching
the source and target distribution. If `lambda_`is small
the encoder will focus on the task. If `lambda_=0`, CDAN
is equivalent to a "source only" method.
entropy : boolean (default=True)
Whether to use or not the entropy regularization.
Adding this regularization will prioritize the
``discriminator`` on easy-to-transfer examples.
This, in theory, should make the transfer "safer".
max_features : int (default=4096)
If ``encoder.output_shape[1] * nb_class)`` is higer than
``max_features`` the multilinear map is produced with
considering random sub vectors of the encoder and task outputs.
Attributes
----------
encoder_ : tensorflow Model
encoder network.
task_ : tensorflow Model
task network.
discriminator_ : tensorflow Model
discriminator network.
history_ : dict
history of the losses and metrics across the epochs.
If ``yt`` is given in ``fit`` method, target metrics
and losses are recorded too.
See also
--------
DANN
ADDA
WDGRL
Examples
--------
>>> import numpy as np
>>> from adapt.utils import make_classification_da
>>> from adapt.feature_based import CDAN
>>> Xs, ys, Xt, yt = make_classification_da()
>>> ys = np.stack([ys, np.abs(1-ys)], 1)
>>> yt = np.stack([yt, np.abs(1-yt)], 1)
>>> model = CDAN(lambda_=0.1, Xt=Xt, metrics=["acc"], random_state=0)
>>> model.fit(Xs, ys, epochs=100, verbose=0)
>>> model.score(Xt, yt)
1/1 [==============================] - 0s 106ms/step - loss: 0.1081 - acc: 0.8400
0.10809497535228729
References
----------
.. [1] `[1] <https://arxiv.org/pdf/1705.10667.pdf>`_ Long, M., Cao, \
Z., Wang, J., and Jordan, M. I. "Conditional adversarial domain adaptation". \
In NIPS, 2018
"""
    def __init__(self,
                 encoder=None,
                 task=None,
                 discriminator=None,
                 Xt=None,
                 yt=None,
                 lambda_=1.,
                 entropy=True,
                 max_features=4096,
                 verbose=1,
                 copy=True,
                 random_state=None,
                 **params):
        # Collect the constructor arguments whose names are declared
        # parameters of this estimator and forward them, together with
        # any extra **params, to the adapt base class.
        names = self._get_param_names()
        kwargs = {k: v for k, v in locals().items() if k in names}
        kwargs.update(params)
        super().__init__(**kwargs)
    def train_step(self, data):
        # One adversarial training step: jointly update the task,
        # encoder and discriminator networks on a source/target batch.
        # Unpack the data.
        Xs, Xt, ys, yt = self._unpack_data(data)
        # loss
        with tf.GradientTape() as task_tape, tf.GradientTape() as enc_tape, tf.GradientTape() as disc_tape:
            # Forward pass
            Xs_enc = self.encoder_(Xs, training=True)
            ys_pred = self.task_(Xs_enc, training=True)
            Xt_enc = self.encoder_(Xt, training=True)
            yt_pred = self.task_(Xt_enc, training=True)
            if self.is_overloaded_:
                # Full multilinear map too large: use the fixed random
                # projections drawn in ``_initialize_weights`` to build a
                # randomized sub-multilinear map of size ``max_features``.
                mapping_task_src = tf.matmul(ys_pred, self._random_task)
                mapping_enc_src = tf.matmul(Xs_enc, self._random_enc)
                mapping_src = tf.multiply(mapping_enc_src, mapping_task_src)
                mapping_src /= (tf.math.sqrt(tf.cast(self.max_features, tf.float32)) + EPS)
                mapping_task_tgt = tf.matmul(yt_pred, self._random_task)
                mapping_enc_tgt = tf.matmul(Xt_enc, self._random_enc)
                mapping_tgt = tf.multiply(mapping_enc_tgt, mapping_task_tgt)
                mapping_tgt /= (tf.math.sqrt(tf.cast(self.max_features, tf.float32)) + EPS)
            else:
                # Exact multilinear map: outer product of encoded features
                # and task predictions, flattened per sample.
                mapping_src = tf.matmul(
                    tf.expand_dims(Xs_enc, 2),
                    tf.expand_dims(ys_pred, 1))
                mapping_tgt = tf.matmul(
                    tf.expand_dims(Xt_enc, 2),
                    tf.expand_dims(yt_pred, 1))
                dim = int(np.prod(mapping_src.get_shape()[1:]))
                mapping_src = tf.reshape(mapping_src, (-1, dim))
                mapping_tgt = tf.reshape(mapping_tgt, (-1, dim))
            # Discriminator is conditioned on task predictions via the map.
            ys_disc = self.discriminator_(mapping_src)
            yt_disc = self.discriminator_(mapping_tgt)
            if self.entropy:
                # CDAN+E: weight each sample by 1 + exp(-entropy) so that
                # confident (easy-to-transfer) samples dominate; weights are
                # normalized to mean 1 then halved so src + tgt average to 1.
                entropy_src = -tf.reduce_sum(ys_pred *
                                             tf.math.log(ys_pred+EPS),
                                             axis=1, keepdims=True)
                entropy_tgt = -tf.reduce_sum(yt_pred *
                                             tf.math.log(yt_pred+EPS),
                                             axis=1, keepdims=True)
                weight_src = 1.+tf.exp(-entropy_src)
                weight_tgt = 1.+tf.exp(-entropy_tgt)
                weight_src /= (tf.reduce_mean(weight_src) + EPS)
                weight_tgt /= (tf.reduce_mean(weight_tgt) + EPS)
                weight_src *= .5
                weight_tgt *= .5
                # Shape guard: weights must broadcast one-to-one with the
                # discriminator outputs.
                assert str(weight_src.shape) == str(ys_disc.shape)
                assert str(weight_tgt.shape) == str(yt_disc.shape)
                disc_loss = (-weight_src*tf.math.log(ys_disc + EPS)
                             -weight_tgt*tf.math.log(1-yt_disc + EPS))
            else:
                # Plain GAN-style discriminator loss (src=1, tgt=0).
                disc_loss = (-tf.math.log(ys_disc + EPS)
                             -tf.math.log(1-yt_disc + EPS))
            # Reshape
            ys_pred = tf.reshape(ys_pred, tf.shape(ys))
            # Compute the loss value
            task_loss = self.task_loss_(ys, ys_pred)
            task_loss = tf.reduce_mean(task_loss)
            disc_loss = tf.reduce_mean(disc_loss)
            # Adversarial objective: the encoder MINIMIZES the task loss
            # while MAXIMIZING the discriminator loss (minus sign).
            enc_loss = task_loss - self.lambda_ * disc_loss
            # Add the networks' own regularization losses.
            task_loss += sum(self.task_.losses)
            disc_loss += sum(self.discriminator_.losses)
            enc_loss += sum(self.encoder_.losses)
        # Compute gradients
        trainable_vars_task = self.task_.trainable_variables
        trainable_vars_enc = self.encoder_.trainable_variables
        trainable_vars_disc = self.discriminator_.trainable_variables
        gradients_task = task_tape.gradient(task_loss, trainable_vars_task)
        gradients_enc = enc_tape.gradient(enc_loss, trainable_vars_enc)
        gradients_disc = disc_tape.gradient(disc_loss, trainable_vars_disc)
        # Update weights
        self.optimizer.apply_gradients(zip(gradients_task, trainable_vars_task))
        self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))
        self.optimizer_disc.apply_gradients(zip(gradients_disc, trainable_vars_disc))
        # Update metrics
        self.compiled_metrics.update_state(ys, ys_pred)
        self.compiled_loss(ys, ys_pred)
        # Return a dict mapping metric names to current value
        logs = {m.name: m.result() for m in self.metrics}
        disc_metrics = self._get_disc_metrics(ys_disc, yt_disc)
        logs.update({"disc_loss": disc_loss})
        logs.update(disc_metrics)
        return logs
def _get_disc_metrics(self, ys_disc, yt_disc):
disc_dict = {}
for m in self.disc_metrics:
disc_dict["disc_%s"%m.name] = tf.reduce_mean(0.5 * (
m(tf.ones_like(ys_disc), ys_disc)+
m(tf.zeros_like(yt_disc), yt_disc)
))
return disc_dict
    def _initialize_weights(self, shape_X):
        # Build all sub-network weights with dummy forward passes, then
        # decide whether the exact multilinear map (encoder_dim * task_dim)
        # fits within ``max_features``.
        self(np.zeros((1,) + shape_X))
        Xs_enc = self.encoder_(np.zeros((1,) + shape_X), training=True)
        ys_pred = self.task_(Xs_enc, training=True)
        if Xs_enc.get_shape()[1] * ys_pred.get_shape()[1] > self.max_features:
            # Map too large: draw fixed random projections once; the
            # discriminator then takes inputs of size ``max_features``.
            self.is_overloaded_ = True
            self._random_task = tf.random.normal([ys_pred.get_shape()[1],
                                                  self.max_features])
            self._random_enc = tf.random.normal([Xs_enc.get_shape()[1],
                                                 self.max_features])
            self.discriminator_(np.zeros((1, self.max_features)))
        else:
            # Exact map: discriminator input is the flattened outer product.
            self.is_overloaded_ = False
            self.discriminator_(np.zeros((1, Xs_enc.get_shape()[1] * ys_pred.get_shape()[1])))
def _initialize_networks(self):
if self.encoder is None:
self.encoder_ = get_default_encoder(name="encoder", state=self.random_state)
else:
self.encoder_ = check_network(self.encoder,
copy=self.copy,
name="encoder")
if self.task is None:
self.task_ = _get_default_classifier(name="task", state=self.random_state)
else:
self.task_ = check_network(self.task,
copy=self.copy,
name="task")
if self.discriminator is None:
self.discriminator_ = get_default_discriminator(name="discriminator", state=self.random_state)
else:
self.discriminator_ = check_network(self.discriminator,
copy=self.copy,
name="discriminator")
# def _initialize_networks(self, shape_Xt):
# Call predict to avoid strange behaviour with
# Sequential model whith unspecified input_shape
# zeros_enc_ = self.encoder_.predict(np.zeros((1,) + shape_Xt));
# zeros_task_ = self.task_.predict(zeros_enc_);
# if zeros_task_.shape[1] * zeros_enc_.shape[1] > self.max_features:
# self.discriminator_.predict(np.zeros((1, self.max_features)))
# else:
# zeros_mapping_ = np.matmul(np.expand_dims(zeros_enc_, 2),
# np.expand_dims(zeros_task_, 1))
# zeros_mapping_ = np.reshape(zeros_mapping_, (1, -1))
# self.discriminator_.predict(zeros_mapping_);
def predict_disc(self, X):
X_enc = self.encoder_.predict(X)
X_task = self.task_.predict(X_enc)
if X_enc.shape[1] * X_task.shape[1] > self.max_features:
X_enc = X_enc.dot(self._random_enc.numpy())
X_task = X_task.dot(self._random_task.numpy())
X_disc = X_enc * X_task
X_disc /= np.sqrt(self.max_features)
else:
X_disc = np.matmul(np.expand_dims(X_enc, 2),
np.expand_dims(X_task, 1))
# X_disc = X_disc.transpose([0, 2, 1])
X_disc = X_disc.reshape(-1, X_enc.shape[1] * X_task.shape[1])
y_disc = self.discriminator_.predict(X_disc)
return y_disc | 15,371 | 40.433962 | 118 | py |
"""
ADDA
"""
import numpy as np
import tensorflow as tf
from adapt.base import BaseAdaptDeep, make_insert_doc
from adapt.utils import check_network
EPS = np.finfo(np.float32).eps
# class SetEncoder(tf.keras.callbacks.Callback):
# def __init__(self):
# self.pretrain = True
# def on_epoch_end(self, epoch, logs=None):
# if (not logs.get("pretrain")) and self.pretrain:
# self.pretrain = False
# self.model.encoder_.set_weights(
# self.model.encoder_src_.get_weights())
@make_insert_doc(["encoder", "task", "discriminator"])
class ADDA(BaseAdaptDeep):
"""
ADDA: Adversarial Discriminative Domain Adaptation
ADDA is a feature-based domain adaptation method.
The purpose of ADDA is to build a new feature representation
in which source and target data could not be distinguished by
any **discriminator** network. This feature representation is
built with two **encoder** networks:
- a **source encoder** trained to provide good features in order
to learn the task on the source domain. The task is learned
through a **task** network trained with the **source encoder**.
- a **target encoder** trained to fool a **discriminator** network
which tries to classify source and target data in the encoded space.
The **target encoder** and the **discriminator** are trained
in an adversarial fashion in the same way as GAN.
The parameters of the four networks are optimized in a two stage
algorithm where **source encoder** and **task** networks are first
fitted according to the following optimization problem:
.. math::
\min_{\phi_S, F} \mathcal{L}_{task}(F(\phi_S(X_S)), y_S)
In the second stage, **target encoder** and **discriminator**
networks are fitted according to:
.. math::
\min_{\phi_T} & \; - \log(D(\phi_T(X_T)))) \\\\
\min_{D} & \; - \log(D(\phi_S(X_S))) - \log(1 - D(\phi_T(X_T)))
Where:
- :math:`(X_S, y_S), (X_T)` are respectively the labeled source data
and the unlabeled target data.
- :math:`\phi_S, \phi_T, F, D` are respectively the **source encoder**,
the **target encoder**, the **task** and the **discriminator** networks.
The method has been originally introduced for **unsupervised**
classification DA but it could be widen to other task in **supervised**
DA straightforwardly.
.. figure:: ../_static/images/adda.png
:align: center
Overview of the ADDA approach (source: [1])
Parameters
----------
pretrain : bool (default=True)
        Whether to perform pretraining of the ``encoder_src_``
and ``task_`` networks on source data or not.
separated compile and fit arguments for the
pretraining can be given by using the prefix
``pretrain__`` as ``pretrain__epochs=10`` or
``pretrain__learning_rate=0.1`` for instance.
If no pretrain arguments are given, the training
arguments are used by default
tol : float (default=0.001)
Tolerance on the loss for early stopping of
pretraining.
Attributes
----------
encoder_ : tensorflow Model
encoder network.
task_ : tensorflow Model
task network.
discriminator_ : tensorflow Model
discriminator network.
encoder_src_ : tensorflow Model
Source encoder network
history_ : dict
history of the losses and metrics across the epochs.
If ``yt`` is given in ``fit`` method, target metrics
and losses are recorded too.
Examples
--------
>>> from adapt.utils import make_classification_da
>>> from adapt.feature_based import ADDA
>>> Xs, ys, Xt, yt = make_classification_da()
>>> model = ADDA(Xt=Xt, metrics=["acc"], random_state=0)
>>> model.fit(Xs, ys, epochs=100, verbose=0)
>>> model.score(Xt, yt)
1/1 [==============================] - 0s 153ms/step - loss: 0.0960 - acc: 0.9300
0.09596743434667587
See also
--------
DANN
DeepCORAL
References
----------
.. [1] `[1] <https://arxiv.org/pdf/1702.05464.pdf>`_ E. Tzeng, J. Hoffman, \
K. Saenko, and T. Darrell. "Adversarial discriminative domain adaptation". \
In CVPR, 2017.
"""
    def __init__(self,
                 encoder=None,
                 task=None,
                 discriminator=None,
                 Xt=None,
                 pretrain=True,
                 tol=0.001,
                 copy=True,
                 verbose=1,
                 random_state=None,
                 **params):
        # Collect the constructor arguments whose names are declared
        # parameters of this estimator and forward them, together with
        # any extra **params, to the adapt base class.
        names = self._get_param_names()
        kwargs = {k: v for k, v in locals().items() if k in names}
        kwargs.update(params)
        super().__init__(**kwargs)
    def _initialize_pretain_networks(self):
        # Copy the pretrained source-encoder weights into the target
        # encoder, so adversarial training starts from the source solution.
        # NOTE(review): the name misspells "pretrain" as "pretain";
        # presumably the base class invokes this hook by name around the
        # pretraining phase, so it is kept as-is — confirm before renaming.
        self.encoder_.set_weights(
            self.encoder_src_.get_weights())
    def pretrain_step(self, data):
        # First ADDA stage: fit the SOURCE encoder and the task network
        # on labeled source data only (no adversarial terms).
        # Unpack the data.
        Xs, Xt, ys, yt = self._unpack_data(data)
        # loss
        with tf.GradientTape() as task_tape, tf.GradientTape() as enc_tape:
            # Forward pass
            Xs_enc = self.encoder_src_(Xs, training=True)
            ys_pred = self.task_(Xs_enc, training=True)
            # Reshape
            ys_pred = tf.reshape(ys_pred, tf.shape(ys))
            # Compute the loss value
            loss = tf.reduce_mean(self.task_loss_(ys, ys_pred))
            # Add each network's own regularization losses.
            task_loss = loss + sum(self.task_.losses)
            enc_loss = loss + sum(self.encoder_src_.losses)
        # Compute gradients
        trainable_vars_task = self.task_.trainable_variables
        trainable_vars_enc = self.encoder_src_.trainable_variables
        gradients_task = task_tape.gradient(task_loss, trainable_vars_task)
        gradients_enc = enc_tape.gradient(enc_loss, trainable_vars_enc)
        # Update weights
        self.optimizer.apply_gradients(zip(gradients_task, trainable_vars_task))
        self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))
        # Update metrics
        self.compiled_metrics.update_state(ys, ys_pred)
        self.compiled_loss(ys, ys_pred)
        # Return a dict mapping metric names to current value
        logs = {m.name: m.result() for m in self.metrics}
        return logs
    def train_step(self, data):
        # Pretrain
        # While the pretraining phase is active, delegate to the
        # source-only step; afterwards run the adversarial stage below.
        if self.pretrain_:
            return self.pretrain_step(data)
        else:
            # Second ADDA stage: the target encoder tries to fool the
            # discriminator, which classifies source vs target encodings.
            # Unpack the data.
            Xs, Xt, ys, yt = self._unpack_data(data)
            # loss
            with tf.GradientTape() as enc_tape, tf.GradientTape() as disc_tape:
                # Forward pass
                if self.pretrain:
                    # Source features come from the frozen pretrained
                    # source encoder (no training=True here).
                    Xs_enc = self.encoder_src_(Xs, training=False)
                else:
                    # encoder src is not needed if pretrain=False
                    Xs_enc = Xs
                ys_disc = self.discriminator_(Xs_enc, training=True)
                Xt_enc = self.encoder_(Xt, training=True)
                yt_disc = self.discriminator_(Xt_enc, training=True)
                # Compute the loss value
                # GAN-style objectives: discriminator labels src=1, tgt=0;
                # the encoder maximizes the tgt score (non-saturating loss).
                disc_loss = (-tf.math.log(ys_disc + EPS)
                             -tf.math.log(1-yt_disc + EPS))
                enc_loss = -tf.math.log(yt_disc + EPS)
                disc_loss = tf.reduce_mean(disc_loss)
                enc_loss = tf.reduce_mean(enc_loss)
                disc_loss += sum(self.discriminator_.losses)
                enc_loss += sum(self.encoder_.losses)
            # Compute gradients
            trainable_vars_enc = self.encoder_.trainable_variables
            trainable_vars_disc = self.discriminator_.trainable_variables
            gradients_enc = enc_tape.gradient(enc_loss, trainable_vars_enc)
            gradients_disc = disc_tape.gradient(disc_loss, trainable_vars_disc)
            # Update weights
            self.optimizer_enc.apply_gradients(zip(gradients_enc, trainable_vars_enc))
            self.optimizer_disc.apply_gradients(zip(gradients_disc, trainable_vars_disc))
            # Update metrics
            # Task metrics are not updated in this stage: the task
            # network is frozen and no labels are used.
            # self.compiled_metrics.update_state(ys, ys_pred)
            # self.compiled_loss(ys, ys_pred)
            # Return a dict mapping metric names to current value
            # logs = {m.name: m.result() for m in self.metrics}
            logs = self._get_disc_metrics(ys_disc, yt_disc)
            return logs
def _get_disc_metrics(self, ys_disc, yt_disc):
disc_dict = {}
disc_dict["disc_loss"] = tf.reduce_mean(
(-tf.math.log(ys_disc + EPS)
-tf.math.log(1-yt_disc + EPS))
)
for m in self.disc_metrics:
disc_dict["disc_%s"%m.name] = tf.reduce_mean(0.5 * (
m(tf.ones_like(ys_disc), ys_disc)+
m(tf.zeros_like(yt_disc), yt_disc)
))
return disc_dict
    def _initialize_weights(self, shape_X):
        # Build target encoder/task weights with a dummy forward pass.
        # Init weights encoder
        self(np.zeros((1,) + shape_X))
        # Set same weights to encoder_src
        if self.pretrain:
            # encoder src is not needed if pretrain=False
            self.encoder_(np.zeros((1,) + shape_X))
            # The source encoder starts as an independent copy of the
            # target encoder; it is trained alone during pretraining.
            self.encoder_src_ = check_network(self.encoder_,
                                              copy=True,
                                              name="encoder_src")
def transform(self, X, domain="tgt"):
"""
Return the encoded features of X.
Parameters
----------
X : array
input data
domain: str (default="tgt")
If domain is ``"tgt"`` or ``"target"``,
the target encoder is used.
If domain is ``"src"`` or ``"source"``,
the source encoder is used.
Returns
-------
X_enc : array
predictions of encoder network
"""
if domain in ["tgt", "target"]:
return self.encoder_.predict(X)
elif domain in ["src", "source"]:
return self.encoder_src_.predict(X)
else:
raise ValueError("`domain `argument "
"should be `tgt` or `src`, "
"got, %s"%domain)
    def predict_disc(self, X, domain="tgt"):
        """
        Return predictions of the discriminator on the encoded features.
        Parameters
        ----------
        X : array
            input data
        domain: str (default="tgt")
            If domain is ``"tgt"`` or ``"target"``,
            the target encoder is used.
            If domain is ``"src"`` or ``"source"``,
            the source encoder is used.
        Returns
        -------
        y_disc : array
            predictions of discriminator network
        """
        # Encode with the requested domain's encoder, then discriminate.
        return self.discriminator_.predict(self.transform(X, domain=domain))
    def predict_task(self, X, domain="tgt"):
        """
        Return predictions of the task on the encoded features.
        Parameters
        ----------
        X : array
            input data
        domain: str (default="tgt")
            If domain is ``"tgt"`` or ``"target"``,
            the target encoder is used.
            If domain is ``"src"`` or ``"source"``,
            the source encoder is used.
        Returns
        -------
        y_task : array
            predictions of task network
        """
        # Encode with the requested domain's encoder, then apply the task.
        return self.task_.predict(self.transform(X, domain=domain))
"""
Regular Transfer
"""
import numpy as np
from sklearn.preprocessing import LabelBinarizer
from scipy.sparse.linalg import lsqr
from sklearn.gaussian_process import GaussianProcessRegressor, GaussianProcessClassifier
from sklearn.linear_model import LinearRegression
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Flatten, Dense
from adapt.base import BaseAdaptEstimator, BaseAdaptDeep, make_insert_doc
from adapt.utils import (check_arrays,
set_random_seed,
check_estimator,
check_network,
check_fitted_estimator,
get_default_task)
@make_insert_doc(supervised=True)
class RegularTransferLR(BaseAdaptEstimator):
"""
Regular Transfer with Linear Regression
RegularTransferLR is a parameter-based domain adaptation method.
The method is based on the assumption that a good target estimator
can be obtained by adapting the parameters of a pre-trained source
estimator using a few labeled target data.
The approach consist in fitting a linear estimator on target data
according to an objective function regularized by the euclidean
distance between source and target parameters:
.. math::
\\beta_T = \\underset{\\beta \in \\mathbb{R}^p}{\\text{argmin}}
\\, ||X_T\\beta - y_T||^2 + \\lambda ||\\beta - \\beta_S||^2
Where:
- :math:`\\beta_T` are the target model parameters.
- :math:`\\beta_S = \\underset{\\beta \\in \\mathbb{R}^p}{\\text{argmin}}
\\, ||X_S\\beta - y_S||^2` are the source model parameters.
- :math:`(X_S, y_S), (X_T, y_T)` are respectively the source and
the target labeled data.
- :math:`p` is the number of features in :math:`X_T`
(:math:`+1` if ``intercept`` is True).
- :math:`\\lambda` is a trade-off parameter.
Parameters
----------
lambda_ : float (default=1.0)
Trade-Off parameter.
Attributes
----------
estimator_ : Same class as estimator
Fitted Estimator.
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> from adapt.utils import make_regression_da
>>> from adapt.parameter_based import RegularTransferLR
>>> Xs, ys, Xt, yt = make_regression_da()
>>> src_model = Ridge()
>>> src_model.fit(Xs, ys)
>>> print(src_model.score(Xt, yt))
0.6771931378706197
>>> tgt_model = RegularTransferLR(src_model, lambda_=1.)
>>> tgt_model.fit(Xt[:3], yt[:3])
>>> tgt_model.score(Xt, yt)
0.6454964910964297
See also
--------
RegularTransferLC, RegularTransferNN
References
----------
.. [1] `[1] <https://www.microsoft.com/en-us/research/wp-\
content/uploads/2004/07/2004-chelba-emnlp.pdf>`_ C. Chelba and \
A. Acero. "Adaptation of maximum entropy classifier: Little data \
can help a lot". In EMNLP, 2004.
"""
    def __init__(self,
                 estimator=None,
                 Xt=None,
                 yt=None,
                 lambda_=1.,
                 copy=True,
                 verbose=1,
                 random_state=None,
                 **params):
        # The source estimator must already be fitted: its ``coef_``
        # (and possibly ``intercept_``) define the regularization center.
        if not hasattr(estimator, "coef_"):
            raise ValueError("`estimator` argument has no ``coef_`` attribute, "
                             "please call `fit` on `estimator` or use "
                             "another estimator as `LinearRegression` or "
                             "`RidgeClassifier`.")
        estimator = check_fitted_estimator(estimator)
        # Collect the constructor arguments matching the declared
        # parameter names and forward them to the adapt base class.
        names = self._get_param_names()
        kwargs = {k: v for k, v in locals().items() if k in names}
        kwargs.update(params)
        super().__init__(**kwargs)
    def fit(self, Xt=None, yt=None, **fit_params):
        """
        Fit RegularTransferLR.
        Parameters
        ----------
        Xt : numpy array (default=None)
            Target input data.
        yt : numpy array (default=None)
            Target output data.
        fit_params : key, value arguments
            Not used. Here for sklearn compatibility.
        Returns
        -------
        self : returns an instance of self
        """
        Xt, yt = self._get_target_data(Xt, yt)
        Xt, yt = check_arrays(Xt, yt)
        set_random_seed(self.random_state)
        self.estimator_ = check_estimator(self.estimator,
                                          copy=self.copy,
                                          force_copy=True)
        if self.estimator_.fit_intercept:
            # Fold the intercept into the coefficient vector and prepend
            # a column of ones to Xt, so intercept and coefficients are
            # regularized together by the damped least squares below.
            # The reshape gives ``intercept_`` the same leading shape as
            # ``coef_`` with a single trailing dimension.
            intercept_ = np.reshape(
                self.estimator_.intercept_,
                np.ones(self.estimator_.coef_.shape).mean(-1, keepdims=True).shape)
            beta_src = np.concatenate((
                intercept_,
                self.estimator_.coef_
            ), axis=-1)
            Xt = np.concatenate(
                (np.ones((len(Xt), 1)), Xt),
                axis=-1)
        else:
            beta_src = self.estimator_.coef_
        # Normalize yt and beta_src to 2D (multi-output form); remember
        # whether yt was 1D to restore the original shapes at the end.
        yt_ndim_below_one_ = False
        if yt.ndim <= 1:
            yt = yt.reshape(-1, 1)
            yt_ndim_below_one_ = True
        if beta_src.ndim <= 1:
            beta_src = beta_src.reshape(1, -1)
        if beta_src.shape[0] != yt.shape[1]:
            raise ValueError("The number of features of `yt`"
                             " does not match the number of coefs in 'estimator', "
                             "expected %i, got %i"%(beta_src.shape[0], yt.shape[1]))
        if beta_src.shape[1] != Xt.shape[1]:
            beta_shape = beta_src.shape[1]; Xt_shape = Xt.shape[1]
            if self.estimator_.fit_intercept:
                # Report feature counts without the intercept column.
                beta_shape -= 1; Xt_shape -= 1
            raise ValueError("The number of features of `Xt`"
                             " does not match the number of coefs in 'estimator', "
                             "expected %i, got %i"%(beta_shape, Xt_shape))
        # One damped least-squares problem per output. With ``x0`` given,
        # scipy's lsqr solves the damped problem in the shifted variable,
        # i.e. it regularizes the solution toward the source coefficients.
        # NOTE(review): lsqr squares ``damp``, so the effective trade-off
        # is lambda_**2, not lambda_ as in the class docstring — confirm.
        beta_tgt = []
        for i in range(yt.shape[1]):
            sol = lsqr(A=Xt, b=yt[:, i], damp=self.lambda_, x0=beta_src[i, :])
            beta_tgt.append(sol[0])
        beta_tgt = np.stack(beta_tgt, axis=0)
        # Split the solution back into intercept / coefficients and write
        # them into the copied estimator.
        if self.estimator_.fit_intercept:
            self.coef_ = beta_tgt[:, 1:]
            self.intercept_ = beta_tgt[:, 0]
        else:
            self.coef_ = beta_tgt
        if yt_ndim_below_one_:
            self.coef_ = self.coef_.reshape(-1)
            if self.estimator_.fit_intercept:
                self.intercept_ = self.intercept_[0]
        self.estimator_.coef_ = self.coef_
        if self.estimator_.fit_intercept:
            self.estimator_.intercept_ = self.intercept_
        return self
@make_insert_doc(supervised=True)
class RegularTransferLC(RegularTransferLR):
"""
Regular Transfer for Linear Classification
RegularTransferLC is a parameter-based domain adaptation method.
This classifier first converts the target values into ``{-1, 1}``
and then treats the problem as a regression task
(multi-output regression in the multiclass case). It then fits
the target data as a ``RegularTransferLR`` regressor, i.e it
performs the following optimization:
.. math::
\\beta_T = \\underset{\\beta \in \\mathbb{R}^p}{\\text{argmin}}
\\, ||X_T\\beta - y_T||^2 + \\lambda ||\\beta - \\beta_S||^2
Where:
- :math:`\\beta_T` are the target model parameters.
- :math:`\\beta_S = \\underset{\\beta \\in \\mathbb{R}^p}{\\text{argmin}}
\\, ||X_S\\beta - y_S||^2` are the source model parameters.
- :math:`(X_S, y_S), (X_T, y_T)` are respectively the source and
the target labeled data.
- :math:`p` is the number of features in :math:`X_T`
(:math:`+1` if ``intercept`` is True).
- :math:`\\lambda` is a trade-off parameter.
Parameters
----------
lambda_ : float (default=1.0)
Trade-Off parameter.
Attributes
----------
estimator_ : Same class as estimator
Fitted Estimator.
Examples
--------
>>> from sklearn.linear_model import RidgeClassifier
>>> from adapt.utils import make_classification_da
>>> from adapt.parameter_based import RegularTransferLC
>>> Xs, ys, Xt, yt = make_classification_da()
>>> src_model = RidgeClassifier()
>>> src_model.fit(Xs, ys)
>>> print(src_model.score(Xt, yt))
0.88
>>> tgt_model = RegularTransferLC(src_model, lambda_=10.)
>>> tgt_model.fit(Xt[:3], yt[:3])
>>> tgt_model.score(Xt, yt)
0.92
See also
--------
RegularTransferLR, RegularTransferNN
References
----------
.. [1] `[1] <https://www.microsoft.com/en-us/research/wp-\
content/uploads/2004/07/2004-chelba-emnlp.pdf>`_ C. Chelba and \
A. Acero. "Adaptation of maximum entropy classifier: Little data \
can help a lot". In EMNLP, 2004.
"""
### TODO reshape yt for multiclass.
def fit(self, Xt=None, yt=None, **fit_params):
Xt, yt = self._get_target_data(Xt, yt)
Xt, yt = check_arrays(Xt, yt)
_label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
_label_binarizer.fit(self.estimator.classes_)
yt = _label_binarizer.transform(yt)
print(yt.shape)
return super().fit(Xt, yt, **fit_params)
@make_insert_doc(["task"], supervised=True)
class RegularTransferNN(BaseAdaptDeep):
"""
Regular Transfer with Neural Network
RegularTransferNN is a parameter-based domain adaptation method.
The method is based on the assumption that a good target estimator
can be obtained by adapting the parameters of a pre-trained source
estimator using a few labeled target data.
The approach consist in fitting a neural network on target data
according to an objective function regularized by the euclidean
distance between source and target parameters:
.. math::
\\beta_T = \\underset{\\beta=(\\beta_1, ... , \\beta_D)}{\\text{argmin}}
\\, ||f(X_T, \\beta) - y_T||^2 + \sum_{i=1}^{D}
\\lambda_i ||\\beta_i - {\\beta_S}_i||^2
Where:
- :math:`f` is a neural network with :math:`D` layers.
- :math:`\\beta_T` are the parameters of the target neural network.
- :math:`\\beta_S = \\underset{\\beta}{\\text{argmin}}
\\, ||f(X_S,\\beta) - y_S||^2` are the source neural network parameters.
- :math:`(X_S, y_S), (X_T, y_T)` are respectively the source and
the target labeled data.
- :math:`\\lambda_i` is the trade-off parameter of layer :math:`i`.
Different trade-off can be given to the layer of the
neural network through the ``lambdas`` parameter.
Parameters
----------
lambdas : float or list of float, optional (default=1.0)
Trade-off parameters.
If a list is given, values from ``lambdas`` are assigned
successively to the list of ``network`` layers with
weights parameters going from the last layer to the first one.
If the length of ``lambdas`` is smaller than the length of
``network`` layers list, the last trade-off value will be
        assigned to the remaining layers.
Attributes
----------
task_ : tensorflow Model
Network.
history_ : dict
history of the losses and metrics across the epochs
of the network training.
Examples
--------
>>> from adapt.utils import make_regression_da
>>> from adapt.parameter_based import RegularTransferNN
>>> Xs, ys, Xt, yt = make_regression_da()
>>> src_model = RegularTransferNN(loss="mse", lambdas=0., random_state=0)
>>> src_model.fit(Xs, ys, epochs=100, verbose=0)
>>> print(src_model.score(Xt, yt))
1/1 [==============================] - 0s 127ms/step - loss: 0.2744
0.27443504333496094
>>> model = RegularTransferNN(src_model.task_, loss="mse", lambdas=1., random_state=0)
>>> model.fit(Xt[:3], yt[:3], epochs=100, verbose=0)
>>> model.score(Xt, yt)
1/1 [==============================] - 0s 109ms/step - loss: 0.0832
0.08321201056241989
See also
--------
RegularTransferLR, RegularTransferLC
References
----------
.. [1] `[1] <https://www.microsoft.com/en-us/research/wp-\
content/uploads/2004/07/2004-chelba-emnlp.pdf>`_ C. Chelba and \
A. Acero. "Adaptation of maximum entropy classifier: Little data \
can help a lot". In EMNLP, 2004.
"""
    def __init__(self,
                 task=None,
                 Xt=None,
                 yt=None,
                 lambdas=1.0,
                 regularizer="l2",
                 verbose=1,
                 copy=True,
                 random_state=None,
                 **params):
        # Validate the regularization norm early; it is consumed later
        # by ``_get_regularizer``.
        if not regularizer in ["l1", "l2"]:
            raise ValueError("`regularizer` argument should be "
                             "'l1' or 'l2', got, %s"%str(regularizer))
        # Collect the constructor arguments matching the declared
        # parameter names and forward them to the adapt base class.
        names = self._get_param_names()
        kwargs = {k: v for k, v in locals().items() if k in names}
        kwargs.update(params)
        super().__init__(**kwargs)
    def fit(self, Xt=None, yt=None, **fit_params):
        """
        Fit RegularTransferNN.
        Parameters
        ----------
        Xt : numpy array (default=None)
            Target input data.
        yt : numpy array (default=None)
            Target output data.
        fit_params : key, value arguments
            Arguments given to the fit method of the model
            (epochs, batch_size, callbacks...).
        Returns
        -------
        self : returns an instance of self
        """
        Xt, yt = self._get_target_data(Xt, yt)
        # Training uses target data only; the generic deep-fit API
        # expects source data, so the target data is passed as both.
        Xs = Xt
        ys = yt
        return super().fit(Xs, ys, Xt=Xt, yt=yt, **fit_params)
def _initialize_networks(self):
if self.task is None:
self.task_ = get_default_task(name="task")
else:
self.task_ = check_network(self.task,
copy=self.copy,
name="task")
self._add_regularization()
def _get_regularizer(self, old_weight, weight, lambda_=1.):
if self.regularizer == "l2":
def regularizer():
return lambda_ * tf.reduce_mean(tf.square(old_weight - weight))
if self.regularizer == "l1":
def regularizer():
return lambda_ * tf.reduce_mean(tf.abs(old_weight - weight))
return regularizer
    def _add_regularization(self):
        # Attach one regularization loss per weight tensor, penalizing
        # the distance to the current (source) weights. Values from
        # ``lambdas`` are assigned layer by layer starting from the LAST
        # layer; when fewer values than layers are given, the last value
        # is reused for the remaining (earlier) layers.
        i = 0  # index of the next trade-off value in ``lambdas``
        if not hasattr(self.lambdas, "__iter__"):
            lambdas = [self.lambdas]
        else:
            lambdas = self.lambdas
        for layer in reversed(self.task_.layers):
            # Skip layers without trainable parameters (e.g. Flatten).
            if (hasattr(layer, "weights") and
                layer.weights is not None and
                len(layer.weights) != 0):
                if i >= len(lambdas):
                    lambda_ = lambdas[-1]
                else:
                    lambda_ = lambdas[i]
                for weight in reversed(layer.weights):
                    # Frozen snapshot of the current (source) weight.
                    old_weight = tf.identity(weight)
                    old_weight.trainable = False
                    self.add_loss(self._get_regularizer(
                        old_weight, weight, lambda_))
                i += 1
    def call(self, inputs):
        # Forward pass: delegate directly to the task network.
        return self.task_(inputs)
def transform(self, X):
"""
Return X
Parameters
----------
X : array
input data
Returns
-------
X_enc : array
predictions of encoder network
"""
return X
def predict_disc(self, X):
"""
Not used.
"""
pass
@make_insert_doc(supervised=True)
class RegularTransferGP(BaseAdaptEstimator):
"""
Regular Transfer with Gaussian Process
RegularTransferGP is a parameter-based domain adaptation method.
The method is based on the assumption that a good target estimator
can be obtained by adapting the parameters of a pre-trained source
estimator using a few labeled target data.
The approach consist in fitting the `alpha` coeficients of a
Gaussian Process estimator on target data according to an
objective function regularized by the euclidean distance between
the source and target `alpha`:
.. math::
\\alpha_T = \\underset{\\alpha \in \\mathbb{R}^n}{\\text{argmin}}
\\, ||K_{TS} \\alpha - y_T||^2 + \\lambda ||\\alpha - \\alpha_S||^2
Where:
- :math:`\\alpha_T` are the target model coeficients.
- :math:`\\alpha_S = \\underset{\\alpha \\in \\mathbb{R}^n}{\\text{argmin}}
\\, ||K_{SS} \\alpha - y_S||^2` are the source model coeficients.
- :math:`y_S, y_T` are respectively the source and
the target labels.
- :math:`K_{SS}` is the pariwise kernel distance matrix between source
input data.
- :math:`K_{TS}` is the pariwise kernel distance matrix between target
and source input data.
- :math:`n` is the number of source data in :math:`X_S`
- :math:`\\lambda` is a trade-off parameter. The larger :math:`\\lambda`
the closer the target model will be from the source model.
The ``estimator`` given to ``RegularTransferGP`` should be from classes
``sklearn.gaussian_process.GaussianProcessRegressor`` or
``sklearn.gaussian_process.GaussianProcessClassifier``
Parameters
----------
lambda_ : float (default=1.0)
Trade-Off parameter. For large ``lambda_``, the
target model will be similar to the source model.
Attributes
----------
estimator_ : Same class as estimator
Fitted Estimator.
Examples
--------
>>> from sklearn.gaussian_process import GaussianProcessRegressor
>>> from sklearn.gaussian_process.kernels import Matern, WhiteKernel
>>> from adapt.utils import make_regression_da
>>> from adapt.parameter_based import RegularTransferGP
>>> Xs, ys, Xt, yt = make_regression_da()
>>> kernel = Matern() + WhiteKernel()
>>> src_model = GaussianProcessRegressor(kernel)
>>> src_model.fit(Xs, ys)
>>> print(src_model.score(Xt, yt))
-2.3409379221035382
>>> tgt_model = RegularTransferGP(src_model, lambda_=1.)
>>> tgt_model.fit(Xt[:3], yt[:3])
>>> tgt_model.score(Xt, yt)
-0.21947435769240653
See also
--------
RegularTransferLR, RegularTransferNN
References
----------
.. [1] `[1] <https://www.microsoft.com/en-us/research/wp-\
content/uploads/2004/07/2004-chelba-emnlp.pdf>`_ C. Chelba and \
A. Acero. "Adaptation of maximum entropy classifier: Little data \
can help a lot". In EMNLP, 2004.
"""
    def __init__(self,
                 estimator=None,
                 Xt=None,
                 yt=None,
                 lambda_=1.,
                 copy=True,
                 verbose=1,
                 random_state=None,
                 **params):
        # The estimator must already be fitted: ``kernel_`` only exists on a
        # fitted GaussianProcessRegressor/Classifier.
        if not hasattr(estimator, "kernel_"):
            raise ValueError("`estimator` argument has no ``kernel_`` attribute, "
                             "please call `fit` on `estimator` or use "
                             "another estimator as `GaussianProcessRegressor` or "
                             "`GaussianProcessClassifier`.")
        estimator = check_fitted_estimator(estimator)
        # Collect constructor arguments by name from ``locals()`` and forward
        # them to the base initializer; do not rename these locals.
        names = self._get_param_names()
        kwargs = {k: v for k, v in locals().items() if k in names}
        kwargs.update(params)
        super().__init__(**kwargs)
    def fit(self, Xt=None, yt=None, **fit_params):
        """
        Fit RegularTransferGP.

        Parameters
        ----------
        Xt : numpy array (default=None)
            Target input data.

        yt : numpy array (default=None)
            Target output data.

        fit_params : key, value arguments
            Not used. Here for sklearn compatibility.

        Returns
        -------
        self : returns an instance of self
        """
        Xt, yt = self._get_target_data(Xt, yt)
        Xt, yt = check_arrays(Xt, yt)
        set_random_seed(self.random_state)
        self.estimator_ = check_estimator(self.estimator,
                                          copy=self.copy,
                                          force_copy=True)
        if isinstance(self.estimator, GaussianProcessRegressor):
            # Regression: the GP dual coefficients ``alpha_`` are refit on the
            # target kernel features with an L2 pull toward the source values.
            src_linear_model = LinearRegression(fit_intercept=False)
            src_linear_model.coef_ = self.estimator_.alpha_.transpose()
            Kt = self.estimator_.kernel_(Xt, self.estimator_.X_train_)
            tgt_linear_model = RegularTransferLR(src_linear_model, lambda_=self.lambda_)
            tgt_linear_model.fit(Kt, yt)
            self.estimator_.alpha_ = np.copy(tgt_linear_model.coef_).transpose()
        elif isinstance(self.estimator, GaussianProcessClassifier):
            # Classification: adapt each one-vs-rest binary estimator that has
            # at least one target example of its class.
            if hasattr(self.estimator_.base_estimator_, "estimators_"):
                for i in range(len(self.estimator_.base_estimator_.estimators_)):
                    c = self.estimator_.classes_[i]
                    if sum(yt == c) > 0:
                        yt_c = np.zeros(yt.shape[0])
                        yt_c[yt == c] = 1
                        self.estimator_.base_estimator_.estimators_[i] = self._fit_one_vs_one_classifier(
                        self.estimator_.base_estimator_.estimators_[i], Xt, yt_c)
            else:
                # Binary problem: a single underlying estimator.
                self.estimator_.base_estimator_ = self._fit_one_vs_one_classifier(
                self.estimator_.base_estimator_, Xt, yt)
        return self
    def _fit_one_vs_one_classifier(self, estimator, Xt, yt):
        # Adapt one binary Laplace-GP classifier: its dual coefficients
        # (y_train_ - pi_) are refit on the target kernel features with an
        # L2 pull toward the source coefficients.
        src_linear_model = LinearRegression(fit_intercept=False)
        src_linear_model.coef_ = (estimator.y_train_ - estimator.pi_)
        src_linear_model.classes_ = estimator.classes_
        Kt = estimator.kernel_(Xt, estimator.X_train_)
        tgt_linear_model = RegularTransferLC(src_linear_model, lambda_=self.lambda_)
        tgt_linear_model.fit(Kt, yt)
        # Write the adapted coefficients back as updated ``pi_`` probabilities.
        estimator.pi_ = (estimator.y_train_ - np.copy(tgt_linear_model.coef_).ravel())
return estimator | 22,633 | 33.821538 | 105 | py |
SCKD | SCKD-main/main.py | import argparse
import random
from sampler import data_sampler
from config import Config
import torch
from model.bert_encoder import Bert_Encoder
from model.dropout_layer import Dropout_Layer
from model.classifier import Softmax_Layer, Proto_Softmax_Layer
from data_loader import get_data_loader
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import numpy as np
from sklearn.cluster import KMeans
import collections
from copy import deepcopy
import os
# os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
def train_simple_model(config, encoder, dropout_layer, classifier, training_data, epochs, map_relid2tempid):
    """Supervised warm-up: plain cross-entropy training of encoder + dropout layer + classifier."""
    data_loader = get_data_loader(config, training_data, shuffle=True)
    encoder.train()
    dropout_layer.train()
    classifier.train()
    criterion = nn.CrossEntropyLoss()
    # Pre-trained parts (encoder/dropout) use a small LR; the fresh
    # classifier head a larger one.
    optimizer = optim.Adam([
        {'params': encoder.parameters(), 'lr': 0.00001},
        {'params': dropout_layer.parameters(), 'lr': 0.00001},
        {'params': classifier.parameters(), 'lr': 0.001}
    ])
    for epoch_i in range(epochs):
        losses = []
        for step, batch_data in enumerate(data_loader):
            optimizer.zero_grad()
            labels, _, tokens = batch_data
            labels = labels.to(config.device)
            # Remap global relation ids to the contiguous classifier indices.
            labels = [map_relid2tempid[x.item()] for x in labels]
            labels = torch.tensor(labels).to(config.device)
            tokens = torch.stack([x.to(config.device) for x in tokens],dim=0)
            reps = encoder(tokens)
            reps, _ = dropout_layer(reps)
            logits = classifier(reps)
            loss = criterion(logits, labels)
            losses.append(loss.item())
            loss.backward()
            optimizer.step()
        print(f"loss is {np.array(losses).mean()}")
def compute_jsd_loss(m_input):
    """Jensen-Shannon-style consistency loss across the ``m`` dropout passes.

    ``m_input`` holds the classifier logits of every pass, shape (m, B, C).
    Each pass's softmax distribution is compared (KL) against the softmax of
    the mean logits, and the divergences are averaged over the passes.
    """
    n_passes = m_input.shape[0]
    mean_logits = torch.mean(m_input, dim=0)
    total = 0
    for single_pass in m_input:
        kl = F.kl_div(F.log_softmax(mean_logits, dim=-1),
                      F.softmax(single_pass, dim=-1),
                      reduction='none').sum()
        total += kl / n_passes
    return total
def contrastive_loss(hidden, labels):
    """Soft cross-entropy: negative log-probability mass on the (possibly
    soft) ``labels``, normalized by the total label mass."""
    log_probs = F.log_softmax(hidden, dim=-1)
    return -(log_probs * labels).sum() / labels.sum()
def construct_hard_triplets(output, labels, relation_data):
    """Mine one hard triplet per anchor representation.

    For every row of ``output`` the hardest positive is the *farthest* sample
    of the same relation in ``relation_data`` and the hardest negative is the
    *closest* sample of any other relation (L2 distance, computed on CPU).
    Returns two lists (positives, negatives) aligned with ``output``.
    """
    hard_positives = []
    hard_negatives = []
    distance = nn.PairwiseDistance(p=2)
    for anchor, label in zip(output, labels):
        rel_id = label.item()
        same_class = relation_data[rel_id]
        other_class = []
        for key, samples in relation_data.items():
            if key != rel_id:
                other_class.extend(samples)
        anchor_cpu = anchor.cpu()
        pos_dists = torch.stack([distance(anchor_cpu, s) for s in same_class])
        neg_dists = torch.stack([distance(anchor_cpu, s) for s in other_class])
        hard_positives.append(same_class[torch.argmax(pos_dists).item()])
        hard_negatives.append(other_class[torch.argmin(neg_dists).item()])
    return hard_positives, hard_negatives
def train_first(config, encoder, dropout_layer, classifier, training_data, epochs, map_relid2tempid, new_relation_data):
    """First-task training: multi-pass cross-entropy + JSD consistency + hard triplet loss."""
    data_loader = get_data_loader(config, training_data, shuffle=True)
    encoder.train()
    dropout_layer.train()
    classifier.train()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam([
        {'params': encoder.parameters(), 'lr': 0.00001},
        {'params': dropout_layer.parameters(), 'lr': 0.00001},
        {'params': classifier.parameters(), 'lr': 0.001}
    ])
    triplet_loss = nn.TripletMarginLoss(margin=1.0, p=2)
    for epoch_i in range(epochs):
        losses = []
        for step, (labels, _, tokens) in enumerate(data_loader):
            optimizer.zero_grad()
            logits_all = []
            tokens = torch.stack([x.to(config.device) for x in tokens], dim=0)
            labels = labels.to(config.device)
            origin_labels = labels[:]
            # Remap global relation ids to the contiguous classifier indices.
            labels = [map_relid2tempid[x.item()] for x in labels]
            labels = torch.tensor(labels).to(config.device)
            reps = encoder(tokens)
            outputs,_ = dropout_layer(reps)
            # Hardest positive/negative per anchor, mined from generated pools.
            positives,negatives = construct_hard_triplets(outputs, origin_labels, new_relation_data)
            # ``f_pass`` stochastic dropout passes give an ensemble of logits.
            for _ in range(config.f_pass):
                output, output_embedding = dropout_layer(reps)
                logits = classifier(output)
                logits_all.append(logits)
            positives = torch.cat(positives, 0).to(config.device)
            negatives = torch.cat(negatives, 0).to(config.device)
            anchors = outputs
            logits_all = torch.stack(logits_all)
            m_labels = labels.expand((config.f_pass, labels.shape[0]))  # m,B
            # CE over every pass + consistency across passes + triplet margin.
            loss1 = criterion(logits_all.reshape(-1, logits_all.shape[-1]), m_labels.reshape(-1))
            loss2 = compute_jsd_loss(logits_all)
            tri_loss = triplet_loss(anchors, positives, negatives)
            loss = loss1 + loss2 + tri_loss
            loss.backward()
            losses.append(loss.item())
            optimizer.step()
        print(f"loss is {np.array(losses).mean()}")
def train_mem_model(config, encoder, dropout_layer, classifier, training_data, epochs, map_relid2tempid, new_relation_data,
                    prev_encoder, prev_dropout_layer, prev_classifier, prev_relation_index):
    """Memory-replay training with knowledge distillation from the previous task's models.

    Optimizes encoder/dropout/classifier on ``training_data`` with:
      * cross-entropy over ``config.f_pass`` stochastic dropout passes,
      * a JSD-style consistency loss across those passes,
      * a hard triplet loss mined from ``new_relation_data``,
      * and, when previous-task models are given, feature-, prediction- and
        hidden-level distillation losses pulling the new model toward the old one.

    Args:
        config: experiment configuration (device, f_pass, kl_temp, ...).
        encoder / dropout_layer / classifier: modules being trained in place.
        training_data: samples fed to ``get_data_loader``.
        epochs: number of passes over ``training_data``.
        map_relid2tempid: maps global relation ids to contiguous class indices.
        new_relation_data: per-relation feature pools for hard-triplet mining.
        prev_encoder / prev_dropout_layer / prev_classifier: frozen models from
            the previous task (may be None on the first task).
        prev_relation_index: tensor of class indices seen in previous tasks.
    """
    data_loader = get_data_loader(config, training_data, shuffle=True)
    encoder.train()
    dropout_layer.train()
    classifier.train()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam([
        {'params': encoder.parameters(), 'lr': 0.00001},
        {'params': dropout_layer.parameters(), 'lr': 0.00001},
        {'params': classifier.parameters(), 'lr': 0.001}
    ])
    triplet_loss = nn.TripletMarginLoss(margin=1.0, p=2)
    distill_criterion = nn.CosineEmbeddingLoss()
    T = config.kl_temp
    for epoch_i in range(epochs):
        losses = []
        for step, (labels, _, tokens) in enumerate(data_loader):
            optimizer.zero_grad()
            logits_all = []
            tokens = torch.stack([x.to(config.device) for x in tokens], dim=0)
            labels = labels.to(config.device)
            origin_labels = labels[:]
            # Remap global relation ids to the contiguous classifier indices.
            labels = [map_relid2tempid[x.item()] for x in labels]
            labels = torch.tensor(labels).to(config.device)
            reps = encoder(tokens)
            normalized_reps_emb = F.normalize(reps.view(-1, reps.size()[1]), p=2, dim=1)
            outputs,_ = dropout_layer(reps)
            # Mine hard triplets in the previous model's feature space when
            # available, so anchors stay comparable across tasks.
            if prev_dropout_layer is not None:
                prev_outputs, _ = prev_dropout_layer(reps)
                positives,negatives = construct_hard_triplets(prev_outputs, origin_labels, new_relation_data)
            else:
                positives, negatives = construct_hard_triplets(outputs, origin_labels, new_relation_data)
            # ``f_pass`` stochastic dropout passes give an ensemble of logits.
            for _ in range(config.f_pass):
                output, output_embedding = dropout_layer(reps)
                logits = classifier(output)
                logits_all.append(logits)
            positives = torch.cat(positives, 0).to(config.device)
            negatives = torch.cat(negatives, 0).to(config.device)
            anchors = outputs
            logits_all = torch.stack(logits_all)
            m_labels = labels.expand((config.f_pass, labels.shape[0]))  # m,B
            loss1 = criterion(logits_all.reshape(-1, logits_all.shape[-1]), m_labels.reshape(-1))
            loss2 = compute_jsd_loss(logits_all)
            tri_loss = triplet_loss(anchors, positives, negatives)
            loss = loss1 + loss2 + tri_loss
            if prev_encoder is not None:
                # Feature-level distillation: keep encoder representations
                # cosine-close to the frozen previous encoder.
                prev_reps = prev_encoder(tokens).detach()
                normalized_prev_reps_emb = F.normalize(prev_reps.view(-1, prev_reps.size()[1]), p=2, dim=1)
                feature_distill_loss = distill_criterion(normalized_reps_emb, normalized_prev_reps_emb,
                                                         torch.ones(tokens.size(0)).to(
                                                             config.device))
                loss += feature_distill_loss
            if prev_dropout_layer is not None and prev_classifier is not None:
                prediction_distill_loss = None
                dropout_output_all = []
                prev_dropout_output_all = []
                for i in range(config.f_pass):
                    output, _ = dropout_layer(reps)
                    prev_output, _ = prev_dropout_layer(reps)
                    dropout_output_all.append(output)
                    # BUGFIX: collect the previous model's pass here. The
                    # original appended ``output`` again, which made the hidden
                    # distillation below compare the new model to itself.
                    prev_dropout_output_all.append(prev_output)
                    # Prediction-level distillation (temperature-scaled KL) on
                    # the classes seen in previous tasks only.
                    pre_logits = prev_classifier(output).detach()
                    pre_logits = F.softmax(pre_logits.index_select(1, prev_relation_index) / T, dim=1)
                    log_logits = F.log_softmax(logits_all[i].index_select(1, prev_relation_index) / T, dim=1)
                    if i == 0:
                        prediction_distill_loss = -torch.mean(torch.sum(pre_logits * log_logits, dim=1))
                    else:
                        prediction_distill_loss += -torch.mean(torch.sum(pre_logits * log_logits, dim=1))
                prediction_distill_loss /= config.f_pass
                loss += prediction_distill_loss
                # Hidden-level distillation on the mean dropout output.
                dropout_output_all = torch.stack(dropout_output_all)
                prev_dropout_output_all = torch.stack(prev_dropout_output_all)
                mean_dropout_output_all = torch.mean(dropout_output_all, dim=0)
                mean_prev_dropout_output_all = torch.mean(prev_dropout_output_all,dim=0)
                normalized_output = F.normalize(mean_dropout_output_all.view(-1, mean_dropout_output_all.size()[1]), p=2, dim=1)
                normalized_prev_output = F.normalize(mean_prev_dropout_output_all.view(-1, mean_prev_dropout_output_all.size()[1]), p=2, dim=1)
                hidden_distill_loss = distill_criterion(normalized_output, normalized_prev_output,
                                                        torch.ones(tokens.size(0)).to(
                                                            config.device))
                loss += hidden_distill_loss
            loss.backward()
            losses.append(loss.item())
            optimizer.step()
        print(f"loss is {np.array(losses).mean()}")
def batch2device(batch_tuple, device):
    """Recursively move every tensor in a (possibly nested) batch to ``device``.

    Tensors are moved with ``.to(device)``; lists and tuples are traversed
    recursively (tuples stay tuples); any other value is kept as-is.
    Always returns a list mirroring ``batch_tuple``.

    BUGFIX: the original recursive calls omitted ``device`` and raised a
    TypeError as soon as a nested list or tuple was encountered.
    """
    ans = []
    for var in batch_tuple:
        if isinstance(var, torch.Tensor):
            ans.append(var.to(device))
        elif isinstance(var, list):
            ans.append(batch2device(var, device))
        elif isinstance(var, tuple):
            ans.append(tuple(batch2device(var, device)))
        else:
            ans.append(var)
    return ans
def evaluate_strict_model(config, encoder, dropout_layer, classifier, test_data, seen_relations, map_relid2tempid):
    """Accuracy over ``test_data``: a sample counts as correct when its gold
    label's logit is >= the max logit among all seen relations.
    NOTE(review): uses the module-level ``rel2id`` built in ``__main__`` --
    this function only works after the main loop has defined it."""
    data_loader = get_data_loader(config, test_data, batch_size=1)
    encoder.eval()
    dropout_layer.eval()
    classifier.eval()
    n = len(test_data)
    correct = 0
    for step, batch_data in enumerate(data_loader):
        labels, _, tokens = batch_data
        labels = labels.to(config.device)
        # Remap global relation ids to the contiguous classifier indices.
        labels = [map_relid2tempid[x.item()] for x in labels]
        labels = torch.tensor(labels).to(config.device)
        tokens = torch.stack([x.to(config.device) for x in tokens],dim=0)
        reps = encoder(tokens)
        reps, _ = dropout_layer(reps)
        logits = classifier(reps)
        # Restrict the comparison to the relations seen so far.
        seen_relation_ids = [rel2id[relation] for relation in seen_relations]
        seen_relation_ids = [map_relid2tempid[relation] for relation in seen_relation_ids]
        seen_sim = logits[:,seen_relation_ids].cpu().data.numpy()
        max_smi = np.max(seen_sim,axis=1)
        label_smi = logits[:,labels].cpu().data.numpy()
        # batch_size is 1, so this is a scalar comparison; ties count correct.
        if label_smi >= max_smi:
            correct += 1
    return correct/n
def select_data(config, encoder, dropout_layer, relation_dataset):
    """Pick up to ``config.num_protos`` memory exemplars for one relation.

    Hidden features are extracted for every sample, clustered with K-Means,
    and the sample closest to each cluster centroid is kept.
    """
    loader = get_data_loader(config, relation_dataset, shuffle=False, drop_last=False, batch_size=1)
    encoder.eval()
    dropout_layer.eval()
    feats = []
    for step, (labels, _, tokens) in enumerate(loader):
        tokens = torch.stack([x.to(config.device) for x in tokens], dim=0)
        with torch.no_grad():
            feats.append(dropout_layer(encoder(tokens))[1].cpu())
    feats = np.concatenate(feats)
    n_clusters = min(config.num_protos, len(relation_dataset))
    # fit_transform yields sample-to-centroid distances, shape (N, n_clusters).
    centroid_dist = KMeans(n_clusters=n_clusters, random_state=0).fit_transform(feats)
    return [relation_dataset[np.argmin(centroid_dist[:, k])] for k in range(n_clusters)]
def get_proto(config, encoder, dropout_layer, relation_dataset):
    """Return (prototype, std) for one relation.

    The prototype is the mean of the dropout-layer hidden features over all
    samples (kept with shape (1, D)); the second value is the per-dimension
    standard deviation. Both are moved to CPU.
    """
    loader = get_data_loader(config, relation_dataset, shuffle=False, drop_last=False, batch_size=1)
    encoder.eval()
    dropout_layer.eval()
    feats = []
    for step, (labels, _, tokens) in enumerate(loader):
        tokens = torch.stack([x.to(config.device) for x in tokens], dim=0)
        with torch.no_grad():
            feats.append(dropout_layer(encoder(tokens))[1])
    feats = torch.cat(feats, dim=0)
    proto = torch.mean(feats, dim=0, keepdim=True).cpu()
    standard = torch.sqrt(torch.var(feats, dim=0)).cpu()
    return proto, standard
def generate_relation_data(protos, relation_standard):
    """Sample 10 pseudo-features per relation around its prototype.

    Each pseudo-feature is ``proto + eps * std`` with ``eps ~ N(0, 1)`` drawn
    once per relation (one vector of 10 draws). Returns a dict mapping each
    relation id to its list of generated features.
    """
    n_samples = 10
    augmented = {}
    for rel_id, proto in protos.items():
        offsets = np.random.normal(loc=0, scale=1, size=n_samples)
        augmented[rel_id] = [proto + off * relation_standard[rel_id] for off in offsets]
    return augmented
def generate_current_relation_data(config, encoder, dropout_layer, relation_dataset):
    """Return the list of hidden features (on CPU), one per sample of the relation."""
    loader = get_data_loader(config, relation_dataset, shuffle=False, drop_last=False, batch_size=1)
    encoder.eval()
    dropout_layer.eval()
    collected = []
    for step, (labels, _, tokens) in enumerate(loader):
        tokens = torch.stack([x.to(config.device) for x in tokens], dim=0)
        with torch.no_grad():
            collected.append(dropout_layer(encoder(tokens))[1].cpu())
    return collected
from transformers import BertTokenizer
def data_augmentation(config, encoder, train_data, prev_train_data):
    """Augment samples by swapping entity mentions between highly similar entities.

    For every pair of entity occurrences whose encoder features have cosine
    similarity >= 0.95, a copy of the sample is created with the mention
    replaced. Returns (expanded_train_data, expanded_prev_train_data)."""
    expanded_train_data = train_data[:]
    expanded_prev_train_data = prev_train_data[:]
    encoder.eval()
    all_data = train_data + prev_train_data
    # NOTE(review): ``tokenizer`` is built but never used below.
    tokenizer = BertTokenizer.from_pretrained(config.bert_path, additional_special_tokens=["[E11]", "[E12]", "[E21]", "[E22]"])
    entity_index = []
    entity_mention = []
    # 30522-30525 are the ids of the four added entity-marker tokens
    # ([E11]/[E12]/[E21]/[E22] appended after BERT's 30522-token vocab --
    # TODO confirm against the tokenizer setup).
    # Two entries are appended per sample: head entity then tail entity.
    for sample in all_data:
        e11 = sample['tokens'].index(30522)
        e12 = sample['tokens'].index(30523)
        e21 = sample['tokens'].index(30524)
        e22 = sample['tokens'].index(30525)
        entity_index.append([e11,e12])
        entity_mention.append(sample['tokens'][e11+1:e12])
        entity_index.append([e21,e22])
        entity_mention.append(sample['tokens'][e21+1:e22])
    data_loader = get_data_loader(config, all_data, shuffle=False, drop_last=False, batch_size=1)
    features = []
    encoder.eval()
    for step, batch_data in enumerate(data_loader):
        labels, _, tokens = batch_data
        tokens = torch.stack([x.to(config.device) for x in tokens],dim=0)
        with torch.no_grad():
            feature = encoder(tokens)
        # The encoder output concatenates head- and tail-entity features;
        # split so features[2*i] / features[2*i+1] align with entity_mention.
        feature1, feature2 = torch.split(feature, [config.encoder_output_size,config.encoder_output_size], dim=1)
        features.append(feature1)
        features.append(feature2)
    features = torch.cat(features, dim=0)
    # similarity_matrix = F.cosine_similarity(features.unsqueeze(1), features.unsqueeze(0), dim=-1)
    # NOTE(review): this pairwise Python loop is O(n^2) element-wise; the
    # commented vectorized form above would compute the same matrix at once.
    similarity_matrix = []
    for i in range(len(features)):
        similarity_matrix.append([0]*len(features))
    for i in range(len(features)):
        for j in range(i,len(features)):
            similarity = F.cosine_similarity(features[i],features[j],dim=0)
            similarity_matrix[i][j] = similarity
            similarity_matrix[j][i] = similarity
    similarity_matrix = torch.tensor(similarity_matrix).to(config.device)
    # Zero out the diagonal and everything below the 0.95 threshold.
    zero = torch.zeros_like(similarity_matrix).to(config.device)
    diag = torch.diag_embed(torch.diag(similarity_matrix))
    similarity_matrix -= diag
    similarity_matrix = torch.where(similarity_matrix<0.95, zero, similarity_matrix)
    nonzero_index = torch.nonzero(similarity_matrix)
    expanded_train_count = 0
    for origin, replace in nonzero_index:
        # Entity features come in pairs per sample, hence origin // 2.
        sample_index = int(origin/2)
        sample = all_data[sample_index]
        if entity_mention[origin] == entity_mention[replace]:
            continue
        # Splice the replacement mention between the original entity markers,
        # then pad/truncate to config.max_length.
        new_tokens = sample['tokens'][:entity_index[origin][0]+1] + entity_mention[replace] + sample['tokens'][entity_index[origin][1]:]
        if len(new_tokens) < config.max_length:
            new_tokens = new_tokens + [0]*(config.max_length-len(new_tokens))
        else:
            new_tokens = new_tokens[:config.max_length]
        new_sample = {
            'relation': sample['relation'],
            'neg_labels': sample['neg_labels'],
            'tokens': new_tokens
        }
        # Cap the current-task augmentation at 5x; the rest goes to memory.
        if sample_index < len(train_data) and expanded_train_count < 5 * len(train_data):
            expanded_train_data.append(new_sample)
            expanded_train_count += 1
        else:
            expanded_prev_train_data.append(new_sample)
    return expanded_train_data, expanded_prev_train_data
if __name__ == '__main__':
    # ---- CLI arguments and experiment configuration ----
    parser = argparse.ArgumentParser()
    parser.add_argument("--task", default="tacred", type=str)
    # NOTE(review): type=str only converts command-line values; the default
    # stays the int 10, which the ``config.shot == 5`` checks below rely on.
    parser.add_argument("--shot", default=10, type=str)
    parser.add_argument('--config', default='config.ini')
    args = parser.parse_args()
    config = Config(args.config)
    config.device = torch.device(config.device)
    config.n_gpu = torch.cuda.device_count()
    config.batch_size_per_step = int(config.batch_size / config.gradient_accumulation_steps)
    config.task = args.task
    config.shot = args.shot
    config.step1_epochs = 5
    config.step2_epochs = 15
    config.step3_epochs = 20
    config.temperature = 0.08
    # ---- Dataset file paths per task/shot setting ----
    if config.task == "FewRel":
        config.relation_file = "data/fewrel/relation_name.txt"
        config.rel_index = "data/fewrel/rel_index.npy"
        config.rel_feature = "data/fewrel/rel_feature.npy"
        config.rel_des_file = "data/fewrel/relation_description.txt"
        config.num_of_relation = 80
        if config.shot == 5:
            config.rel_cluster_label = "data/fewrel/CFRLdata_10_100_10_5/rel_cluster_label_0.npy"
            config.training_file = "data/fewrel/CFRLdata_10_100_10_5/train_0.txt"
            config.valid_file = "data/fewrel/CFRLdata_10_100_10_5/valid_0.txt"
            config.test_file = "data/fewrel/CFRLdata_10_100_10_5/test_0.txt"
        elif config.shot == 10:
            config.rel_cluster_label = "data/fewrel/CFRLdata_10_100_10_10/rel_cluster_label_0.npy"
            config.training_file = "data/fewrel/CFRLdata_10_100_10_10/train_0.txt"
            config.valid_file = "data/fewrel/CFRLdata_10_100_10_10/valid_0.txt"
            config.test_file = "data/fewrel/CFRLdata_10_100_10_10/test_0.txt"
        else:
            config.rel_cluster_label = "data/fewrel/CFRLdata_10_100_10_2/rel_cluster_label_0.npy"
            config.training_file = "data/fewrel/CFRLdata_10_100_10_2/train_0.txt"
            config.valid_file = "data/fewrel/CFRLdata_10_100_10_2/valid_0.txt"
            config.test_file = "data/fewrel/CFRLdata_10_100_10_2/test_0.txt"
    else:
        config.relation_file = "data/tacred/relation_name.txt"
        config.rel_index = "data/tacred/rel_index.npy"
        config.rel_feature = "data/tacred/rel_feature.npy"
        config.num_of_relation = 41
        if config.shot == 5:
            config.rel_cluster_label = "data/tacred/CFRLdata_10_100_10_5/rel_cluster_label_0.npy"
            config.training_file = "data/tacred/CFRLdata_10_100_10_5/train_0.txt"
            config.valid_file = "data/tacred/CFRLdata_10_100_10_5/valid_0.txt"
            config.test_file = "data/tacred/CFRLdata_10_100_10_5/test_0.txt"
        else:
            config.rel_cluster_label = "data/tacred/CFRLdata_10_100_10_10/rel_cluster_label_0.npy"
            config.training_file = "data/tacred/CFRLdata_10_100_10_10/train_0.txt"
            config.valid_file = "data/tacred/CFRLdata_10_100_10_10/valid_0.txt"
            config.test_file = "data/tacred/CFRLdata_10_100_10_10/test_0.txt"
    # ---- Accumulators across restarts (rounds) ----
    result_cur_test = []
    result_whole_test = []
    bwt_whole = []
    fwt_whole = []
    X = []
    Y = []
    relation_divides = []
    for i in range(10):
        relation_divides.append([])
    # ---- One continual-learning run per round, with a different seed ----
    for rou in range(config.total_round):
        test_cur = []
        test_total = []
        random.seed(config.seed+rou*100)
        sampler = data_sampler(config=config, seed=config.seed+rou*100)
        id2rel = sampler.id2rel
        rel2id = sampler.rel2id
        id2sentence = sampler.get_id2sent()
        encoder = Bert_Encoder(config=config).to(config.device)
        dropout_layer = Dropout_Layer(config=config).to(config.device)
        num_class = len(sampler.id2rel)
        memorized_samples = {}
        memory = collections.defaultdict(list)
        history_relations = []
        history_data = []
        prev_relations = []
        classifier = None
        prev_classifier = None
        prev_encoder = None
        prev_dropout_layer = None
        relation_standard = {}
        forward_accs = []
        # ---- Sequence of tasks produced by the sampler ----
        for steps, (training_data, valid_data, test_data, current_relations, historic_test_data, seen_relations) in enumerate(sampler):
            print(current_relations)
            prev_relations = history_relations[:]
            train_data_for_initial = []
            count = 0
            for relation in current_relations:
                history_relations.append(relation)
                train_data_for_initial += training_data[relation]
                relation_divides[count].append(float(rel2id[relation]))
                count += 1
            # Contiguous class indices for all relations seen so far.
            temp_rel2id = [rel2id[x] for x in seen_relations]
            map_relid2tempid = {k: v for v, k in enumerate(temp_rel2id)}
            prev_relation_index = []
            prev_samples = []
            for relation in prev_relations:
                prev_relation_index.append(map_relid2tempid[rel2id[relation]])
                prev_samples += memorized_samples[relation]
            prev_relation_index = torch.tensor(prev_relation_index).to(config.device)
            # Fresh classifier head sized to every relation seen so far.
            classifier = Softmax_Layer(input_size=encoder.output_size, num_class=len(history_relations)).to(
                config.device)
            temp_protos = {}
            for relation in current_relations:
                proto, _ = get_proto(config, encoder, dropout_layer, training_data[relation])
                temp_protos[rel2id[relation]] = proto
            for relation in prev_relations:
                proto, _ = get_proto(config, encoder, dropout_layer, memorized_samples[relation])
                temp_protos[rel2id[relation]] = proto
            test_data_1 = []
            for relation in current_relations:
                test_data_1 += test_data[relation]
            # Forward transfer: accuracy of the *previous* model on the new task.
            if steps != 0:
                forward_acc = evaluate_strict_model(config, prev_encoder, prev_dropout_layer, classifier, test_data_1, seen_relations, map_relid2tempid)
                forward_accs.append(forward_acc)
            train_simple_model(config, encoder, dropout_layer, classifier, train_data_for_initial, config.step1_epochs, map_relid2tempid)
            print(f"simple finished")
            # Recompute prototypes/stds after the warm-up pass.
            temp_protos = {}
            for relation in current_relations:
                proto, standard = get_proto(config,encoder,dropout_layer,training_data[relation])
                temp_protos[rel2id[relation]] = proto
                relation_standard[rel2id[relation]] = standard
            for relation in prev_relations:
                proto, _ = get_proto(config,encoder,dropout_layer,memorized_samples[relation])
                temp_protos[rel2id[relation]] = proto
            new_relation_data = generate_relation_data(temp_protos, relation_standard)
            for relation in current_relations:
                new_relation_data[rel2id[relation]].extend(generate_current_relation_data(config, encoder,dropout_layer,training_data[relation]))
            expanded_train_data_for_initial, expanded_prev_samples = data_augmentation(config, encoder,
                                                                   train_data_for_initial,
                                                                   prev_samples)
            torch.cuda.empty_cache()
            print(len(train_data_for_initial))
            print(len(expanded_train_data_for_initial))
            # Stage 2: train on the new task with distillation from the previous model.
            train_mem_model(config, encoder, dropout_layer, classifier, train_data_for_initial, config.step2_epochs, map_relid2tempid, new_relation_data,
                        prev_encoder, prev_dropout_layer, prev_classifier, prev_relation_index)
            print(f"first finished")
            # Select memory exemplars for the new relations.
            for relation in current_relations:
                memorized_samples[relation] = select_data(config, encoder, dropout_layer, training_data[relation])
                memory[rel2id[relation]] = select_data(config, encoder, dropout_layer, training_data[relation])
            train_data_for_memory = []
            # train_data_for_memory += expanded_prev_samples
            train_data_for_memory += prev_samples
            for relation in current_relations:
                train_data_for_memory += memorized_samples[relation]
            print(len(seen_relations))
            print(len(train_data_for_memory))
            temp_protos = {}
            for relation in seen_relations:
                proto, _ = get_proto(config, encoder, dropout_layer, memorized_samples[relation])
                temp_protos[rel2id[relation]] = proto
            # Stage 3: replay training on the memory buffer.
            train_mem_model(config, encoder, dropout_layer, classifier, train_data_for_memory, config.step3_epochs, map_relid2tempid, new_relation_data,
                        prev_encoder, prev_dropout_layer, prev_classifier, prev_relation_index)
            print(f"memory finished")
            test_data_1 = []
            for relation in current_relations:
                test_data_1 += test_data[relation]
            test_data_2 = []
            for relation in seen_relations:
                test_data_2 += historic_test_data[relation]
            history_data.append(test_data_1)
            print(len(test_data_1))
            print(len(test_data_2))
            # cur_acc = evaluate_strict_model(config, encoder, classifier, test_data_1, seen_relations, map_relid2tempid)
            # total_acc = evaluate_strict_model(config, encoder, classifier, test_data_2, seen_relations, map_relid2tempid)
            cur_acc = evaluate_strict_model(config, encoder,dropout_layer,classifier, test_data_1, seen_relations, map_relid2tempid)
            total_acc = evaluate_strict_model(config, encoder, dropout_layer, classifier, test_data_2, seen_relations, map_relid2tempid)
            print(f'Restart Num {rou + 1}')
            print(f'task--{steps + 1}:')
            print(f'current test acc:{cur_acc}')
            print(f'history test acc:{total_acc}')
            test_cur.append(cur_acc)
            test_total.append(total_acc)
            print(test_cur)
            print(test_total)
            accuracy = []
            temp_rel2id = [rel2id[x] for x in history_relations]
            map_relid2tempid = {k: v for v, k in enumerate(temp_rel2id)}
            for data in history_data:
                # accuracy.append(
                #     evaluate_strict_model(config, encoder, classifier, data, history_relations, map_relid2tempid))
                accuracy.append(evaluate_strict_model(config, encoder, dropout_layer, classifier, data, seen_relations, map_relid2tempid))
            print(accuracy)
            # Snapshot the current models as the "previous" models for the next task.
            prev_encoder = deepcopy(encoder)
            prev_dropout_layer = deepcopy(dropout_layer)
            prev_classifier = deepcopy(classifier)
            torch.cuda.empty_cache()
        # ---- Per-round metrics: accuracies, BWT and FWT ----
        result_cur_test.append(np.array(test_cur))
        result_whole_test.append(np.array(test_total)*100)
        print("result_whole_test")
        print(result_whole_test)
        avg_result_cur_test = np.average(result_cur_test, 0)
        avg_result_all_test = np.average(result_whole_test, 0)
        print("avg_result_cur_test")
        print(avg_result_cur_test)
        print("avg_result_all_test")
        print(avg_result_all_test)
        std_result_all_test = np.std(result_whole_test, 0)
        print("std_result_all_test")
        print(std_result_all_test)
        accuracy = []
        temp_rel2id = [rel2id[x] for x in history_relations]
        map_relid2tempid = {k: v for v, k in enumerate(temp_rel2id)}
        for data in history_data:
            accuracy.append(evaluate_strict_model(config, encoder, dropout_layer, classifier, data, history_relations, map_relid2tempid))
        print(accuracy)
        # Backward transfer: final accuracy minus accuracy right after learning.
        bwt = 0.0
        for k in range(len(accuracy)-1):
            bwt += accuracy[k]-test_cur[k]
        bwt /= len(accuracy)-1
        bwt_whole.append(bwt)
        fwt_whole.append(np.average(np.array(forward_accs)))
        print("bwt_whole")
        print(bwt_whole)
        print("fwt_whole")
        print(fwt_whole)
        avg_bwt = np.average(np.array(bwt_whole))
        print("avg_bwt_whole")
        print(avg_bwt)
        avg_fwt = np.average(np.array(fwt_whole))
        print("avg_fwt_whole")
        print(avg_fwt)
| 30,103 | 42.377522 | 153 | py |
SCKD | SCKD-main/data_loader.py | import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
class data_set(Dataset):
    """Thin ``Dataset`` wrapper around a list of relation-sample dicts."""

    def __init__(self, data, config=None):
        self.data = data
        self.config = config

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]

    def collate_fn(self, data):
        """Collate sample dicts into (labels tensor, list of neg-label
        tensors, list of token-id tensors)."""
        labels = torch.tensor([sample['relation'] for sample in data])
        negatives = [torch.tensor(sample['neg_labels']) for sample in data]
        token_ids = [torch.tensor(sample['tokens']) for sample in data]
        return labels, negatives, token_ids
def get_data_loader(config, data, shuffle = False, drop_last = False, batch_size = None):
    """Build a DataLoader over ``data``.

    The batch size defaults to ``config.batch_size_per_step`` and is always
    capped at ``len(data)`` so small datasets still produce one full batch.
    (Idiom fix: ``is None`` instead of ``== None``.)
    """
    dataset = data_set(data, config)
    if batch_size is None:
        batch_size = min(config.batch_size_per_step, len(data))
    else:
        batch_size = min(batch_size, len(data))
    data_loader = DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        pin_memory=True,
        num_workers=config.num_workers,
        collate_fn=dataset.collate_fn,
        drop_last=drop_last)
return data_loader | 1,197 | 25.622222 | 89 | py |
SCKD | SCKD-main/model/base_model.py | import torch
import torch.nn as nn
import os
import json
class base_model(nn.Module):
    # Common checkpoint / parameter (de)serialization helpers shared by models.
    def __init__(self):
        super(base_model, self).__init__()
        # Constant parameters available to subclasses; excluded from gradients.
        self.zero_const = nn.Parameter(torch.Tensor([0]))
        self.zero_const.requires_grad = False
        self.pi_const = nn.Parameter(torch.Tensor([3.14159265358979323846]))
        self.pi_const.requires_grad = False
    def save_checkpoint(self, path):
        # Persist the full state dict to ``path``.
        torch.save(self.state_dict(), path)
    def load_checkpoint(self, path):
        # Restore weights from ``path`` and switch to eval mode.
        self.load_state_dict(torch.load(os.path.join(path)))
        self.eval()
    def save_parameters(self, path):
        # Dump parameters to ``path`` as JSON (nested lists of floats).
        f = open(path, "w")
        f.write(json.dumps(self.get_parameters("list")))
        f.close()
    def load_parameters(self, path):
        # Load JSON parameters written by ``save_parameters``; missing keys
        # are tolerated (strict=False).
        f = open(path, "r")
        parameters = json.loads(f.read())
        f.close()
        for i in parameters:
            parameters[i] = torch.Tensor(parameters[i])
        self.load_state_dict(parameters, strict=False)
        self.eval()
    def get_parameters(self, mode="numpy", param_dict=None):
        # Export the state dict (or the subset named by ``param_dict``) as
        # numpy arrays ("numpy"), plain lists ("list") or raw tensors.
        all_param_dict = self.state_dict()
        if param_dict == None:
            param_dict = all_param_dict.keys()
        res = {}
        for param in param_dict:
            if mode == "numpy":
                res[param] = all_param_dict[param].cpu().numpy()
            elif mode == "list":
                res[param] = all_param_dict[param].cpu().numpy().tolist()
            else:
                res[param] = all_param_dict[param]
        return res
    def set_parameters(self, parameters):
        # Load an in-memory parameter dict (lists -> tensors), non-strict.
        for i in parameters:
            parameters[i] = torch.Tensor(parameters[i])
        self.load_state_dict(parameters, strict = False)
self.eval() | 1,737 | 31.185185 | 76 | py |
SCKD | SCKD-main/model/classifier.py | from torch import nn, optim
from model.base_model import base_model
import torch
class Softmax_Layer(base_model):
    """Bias-free linear classification head producing relation logits."""

    def __init__(self, input_size, num_class):
        """
        Args:
            input_size: dimensionality of the input representation.
            num_class: number of relation classes.
        """
        super(Softmax_Layer, self).__init__()
        self.input_size = input_size
        self.num_class = num_class
        self.fc = nn.Linear(self.input_size, self.num_class, bias=False)

    def forward(self, input):
        """Map a (B, input_size) batch to (B, num_class) logits."""
        return self.fc(input)
class Proto_Softmax_Layer(base_model):
    """Nearest-prototype classifier scoring inputs by cosine similarity."""

    def __init__(self, config):
        """
        Args:
            config: experiment config; only `config.device` is used, when
                prototypes are registered via `set_prototypes`.
        """
        super(Proto_Softmax_Layer, self).__init__()
        self.config = config

    def set_prototypes(self, protos):
        """Store the per-relation prototype matrix on the configured device."""
        self.prototypes = protos.to(self.config.device)

    def forward(self, rep):
        """Return the (B, num_prototypes) cosine-similarity score matrix."""
        return self.__distance__(rep, self.prototypes)

    def __distance__(self, rep, rel):
        # Cosine similarity: L2-normalize the rows of both matrices, then
        # take the full pairwise dot-product table.
        rep_unit = rep / rep.norm(dim=1, keepdim=True)
        rel_unit = rel / rel.norm(dim=1, keepdim=True)
        return torch.mm(rep_unit, rel_unit.transpose(0, 1))
SCKD | SCKD-main/model/dropout_layer.py | from torch import nn
import torch
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from model.base_model import base_model
class LayerNorm(nn.Module):
    """Layer normalization with an optional conditional (FiLM-style) path.

    When `conditional` is True, the per-feature shift (`beta`) and scale
    (`gamma`) are offset by zero-initialized linear projections of a
    conditioning vector `cond`, so the module starts out as a plain
    LayerNorm and learns the conditioning contribution.

    Args:
        input_dim: size of the normalized (last) axis, i.e. inputs.shape[-1].
        cond_dim: size of the conditioning vector, i.e. cond.shape[-1].
        center: subtract the mean and add `beta`.
        scale: divide by the std and multiply by `gamma`.
        epsilon: numerical-stability constant (default 1e-12).
        conditional: enable the conditional path.
        hidden_units: optional bottleneck size applied to `cond` first.
        hidden_activation: kept for API compatibility (unused).
        hidden_initializer: 'normal' or 'xavier' init for the bottleneck.
    """

    # BUG FIX: default was the typo 'xaiver', which matched neither init
    # branch and silently left the bottleneck at its default init.
    def __init__(self, input_dim, cond_dim=0, center=True, scale=True, epsilon=None, conditional=False,
                 hidden_units=None, hidden_activation='linear', hidden_initializer='xavier', **kwargs):
        super(LayerNorm, self).__init__()
        self.center = center
        self.scale = scale
        self.conditional = conditional
        self.hidden_units = hidden_units
        self.hidden_initializer = hidden_initializer
        self.epsilon = epsilon or 1e-12
        self.input_dim = input_dim
        self.cond_dim = cond_dim
        if self.center:
            self.beta = Parameter(torch.zeros(input_dim))
        if self.scale:
            self.gamma = Parameter(torch.ones(input_dim))
        if self.conditional:
            if self.hidden_units is not None:
                self.hidden_dense = nn.Linear(in_features=self.cond_dim, out_features=self.hidden_units, bias=False)
            if self.center:
                self.beta_dense = nn.Linear(in_features=self.cond_dim, out_features=input_dim, bias=False)
            if self.scale:
                self.gamma_dense = nn.Linear(in_features=self.cond_dim, out_features=input_dim, bias=False)
        self.initialize_weights()

    def initialize_weights(self):
        """Init the conditional projections (zeros) and the bottleneck."""
        if self.conditional:
            if self.hidden_units is not None:
                if self.hidden_initializer == 'normal':
                    # in-place init API; torch.nn.init.normal is deprecated
                    torch.nn.init.normal_(self.hidden_dense.weight)
                elif self.hidden_initializer == 'xavier':  # glorot_uniform
                    torch.nn.init.xavier_uniform_(self.hidden_dense.weight)
            # Zero init => the conditioning contributes nothing at step 0.
            if self.center:
                torch.nn.init.constant_(self.beta_dense.weight, 0)
            if self.scale:
                torch.nn.init.constant_(self.gamma_dense.weight, 0)

    def forward(self, inputs, cond=None):
        """Normalize `inputs` over the last axis, optionally conditioned."""
        if self.conditional:
            if self.hidden_units is not None:
                cond = self.hidden_dense(cond)
            # Broadcast cond across all non-feature axes of inputs.
            for _ in range(len(inputs.shape) - len(cond.shape)):
                cond = cond.unsqueeze(1)  # cond = K.expand_dims(cond, 1)
            if self.center:
                beta = self.beta_dense(cond) + self.beta
            if self.scale:
                gamma = self.gamma_dense(cond) + self.gamma
        else:
            if self.center:
                beta = self.beta
            if self.scale:
                gamma = self.gamma
        outputs = inputs
        if self.center:
            mean = torch.mean(outputs, dim=-1).unsqueeze(-1)
            outputs = outputs - mean
        if self.scale:
            # biased variance, matching the standard LayerNorm definition
            variance = torch.mean(outputs ** 2, dim=-1).unsqueeze(-1)
            std = (variance + self.epsilon) ** 0.5
            outputs = outputs / std
            outputs = outputs * gamma
        if self.center:
            outputs = outputs + beta
        return outputs
class Dropout_Layer(base_model):
    """Dropout -> Linear -> GELU -> LayerNorm projection head.

    Maps a (B, hidden_size * 2) representation to (B, output_size) and also
    returns the raw (pre-activation) linear projection.
    """

    def __init__(self, config):
        super(Dropout_Layer, self).__init__()
        self.drop = nn.Dropout(config.drop_out)
        self.linear_transform = nn.Linear(config.hidden_size * 2, config.output_size, bias=True)
        self.layer_normalization = nn.LayerNorm([config.output_size])

    def forward(self, input):
        """Return (normalized GELU activation, raw linear projection)."""
        projected = self.linear_transform(self.drop(input))
        activated = self.layer_normalization(F.gelu(projected))
        return activated, projected
| 3,674 | 36.886598 | 116 | py |
SCKD | SCKD-main/model/bert_encoder.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from model.base_model import base_model
from transformers import BertModel, BertConfig
class Bert_Encoder(base_model):
    """BERT-based sentence encoder producing relation representations.

    Two encoding strategies are supported:
      * 'standard':      use the pooled [CLS] representation.
      * 'entity_marker': concatenate the hidden states at the [E11]/[E21]
                         entity-marker tokens of the head and tail entities.
    """

    def __init__(self, config):
        super(Bert_Encoder, self).__init__()
        # load model (moved to GPU; this class assumes CUDA is available)
        self.encoder = BertModel.from_pretrained(config.bert_path).cuda()
        self.bert_config = BertConfig.from_pretrained(config.bert_path)
        # the dimension for the final outputs
        self.output_size = config.encoder_output_size
        self.drop = nn.Dropout(config.drop_out)
        # find which encoding is used
        if config.pattern in ['standard', 'entity_marker']:
            self.pattern = config.pattern
        else:
            raise Exception('Wrong encoding.')
        # propagate the actual BERT sizes back into the shared config so
        # downstream modules (e.g. Dropout_Layer) can size themselves
        config.hidden_size = self.bert_config.hidden_size
        config.output_size = config.encoder_output_size
        if self.pattern == 'entity_marker':
            # grow the embedding table to cover the added marker tokens
            self.encoder.resize_token_embeddings(config.vocab_size + config.marker_size)
            self.linear_transform = nn.Linear(self.bert_config.hidden_size * 2, self.output_size, bias=True)
        else:
            self.linear_transform = nn.Linear(self.bert_config.hidden_size, self.output_size, bias=True)
        self.layer_normalization = nn.LayerNorm([self.output_size])

    def get_output_size(self):
        # Size the projection head would emit; note forward() currently
        # returns the raw BERT features (projection is commented out below).
        return self.output_size

    def forward(self, inputs):
        '''
        :param inputs: of dimension [B, N]
        :return: a result of size [B, H*2] or [B, H], according to different strategy
        '''
        # generate representation under a certain encoding strategy
        if self.pattern == 'standard':
            # in the standard mode, the representation is generated according to
            # the representation of [CLS] mark (pooler output).
            output = self.encoder(inputs)[1]
        else:
            # in the entity_marker mode, the representation is generated from the
            # representations of marks [E11] and [E21] of the head and tail entities.
            e11 = []
            e21 = []
            # for each sample in the batch, acquire the positions of its [E11] and [E21]
            for i in range(inputs.size()[0]):
                tokens = inputs[i].cpu().numpy()
                # 30522 / 30524 look like the ids of the [E11]/[E21] markers
                # appended after BERT's 30522-token base vocab
                # NOTE(review): confirm these ids against the tokenizer setup
                e11.append(np.argwhere(tokens == 30522)[0][0])
                e21.append(np.argwhere(tokens == 30524)[0][0])
            # input the sample to BERT
            tokens_output = self.encoder(inputs)[0]  # [B,N] --> [B,N,H]
            output = []
            # for each sample in the batch, acquire its representations for [E11] and [E21]
            for i in range(len(e11)):
                instance_output = torch.index_select(tokens_output, 0, torch.tensor(i).cuda())
                instance_output = torch.index_select(instance_output, 1, torch.tensor([e11[i], e21[i]]).cuda())
                output.append(instance_output)  # [B,N] --> [B,2,H]
            # concatenate the representations of [E11] and [E21], then flatten
            output = torch.cat(output, dim=0)
            output = output.view(output.size()[0], -1)  # [B,N] --> [B,H*2]
        # the output dimension is [B, H*2], B: batchsize, H: hiddensize
        # projection head deliberately disabled; raw BERT features are returned
        # output = self.drop(output)
        # output = self.linear_transform(output)
        # output = F.gelu(output)
        # output = self.layer_normalization(output)
        return output
TaNP | TaNP-main/TaNP/TaNP_training.py | import os
import torch
import pickle
import random
from eval import testing
def training(trainer, opt, train_dataset, test_dataset, batch_size, num_epoch, model_save=True, model_filename=None, logger=None):
    """Meta-train `trainer`, evaluating on `test_dataset` after every epoch.

    Logs precision/NDCG/MAP at cut-offs 5/7/10 per epoch, dumps the
    per-batch task cluster distributions of the final epoch to 'output_att',
    and optionally saves the trained weights to `model_filename`.
    """
    n_tasks = len(train_dataset)
    for epoch in range(num_epoch):
        random.shuffle(train_dataset)
        n_batches = int(n_tasks / batch_size)
        sup_x_all, sup_y_all, qry_x_all, qry_y_all = zip(*train_dataset)
        trainer.train()
        all_C_distribs = []
        for b in range(n_batches):
            lo, hi = batch_size * b, batch_size * (b + 1)
            try:
                supp_xs = list(sup_x_all[lo:hi])
                supp_ys = list(sup_y_all[lo:hi])
                query_xs = list(qry_x_all[lo:hi])
                query_ys = list(qry_y_all[lo:hi])
            except IndexError:
                continue
            train_loss, batch_C_distribs = trainer.global_update(supp_xs, supp_ys, query_xs, query_ys)
            all_C_distribs.append(batch_C_distribs)
        P5, NDCG5, MAP5, P7, NDCG7, MAP7, P10, NDCG10, MAP10 = testing(trainer, opt, test_dataset)
        logger.log(
            "{}\t{:.6f}\t TOP-5 {:.4f}\t{:.4f}\t{:.4f}\t TOP-7: {:.4f}\t{:.4f}\t{:.4f}"
            "\t TOP-10: {:.4f}\t{:.4f}\t{:.4f}".
            format(epoch, train_loss, P5, NDCG5, MAP5, P7, NDCG7, MAP7, P10, NDCG10, MAP10))
        if epoch == (num_epoch - 1):
            # persist the last epoch's task-to-cluster assignment weights
            with open('output_att', 'wb') as fp:
                pickle.dump(all_C_distribs, fp)
    if model_save:
        torch.save(trainer.state_dict(), model_filename)
| 1,561 | 41.216216 | 130 | py |
TaNP | TaNP-main/TaNP/embeddings_TaNP.py | import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
class Item(torch.nn.Module):
    """Two-layer ReLU MLP embedding raw item features into a dense vector."""

    def __init__(self, config):
        super(Item, self).__init__()
        self.feature_dim = config['if_dim']
        self.first_embedding_dim = config['first_embedding_dim']
        self.second_embedding_dim = config['second_embedding_dim']
        # feature_dim -> first -> second, each layer followed by ReLU
        self.first_embedding_layer = torch.nn.Linear(
            in_features=self.feature_dim,
            out_features=self.first_embedding_dim,
            bias=True,
        )
        self.second_embedding_layer = torch.nn.Linear(
            in_features=self.first_embedding_dim,
            out_features=self.second_embedding_dim,
            bias=True,
        )

    def forward(self, x, vars=None):
        """Return the (B, second_embedding_dim) non-negative item embedding."""
        hidden = F.relu(self.first_embedding_layer(x))
        return F.relu(self.second_embedding_layer(hidden))
class Movie_item(torch.nn.Module):
    """Embedding module for MovieLens item features.

    Concatenates a rate embedding with mean-pooled genre / director / actor
    embeddings into a single (B, 4 * embedding_dim) vector.
    """

    def __init__(self, config):
        # BUG FIX: the original called super(Moive_item, ...) — a misspelled,
        # undefined name — which raised NameError on every instantiation.
        super(Movie_item, self).__init__()
        self.num_rate = config['num_rate']
        self.num_genre = config['num_genre']
        self.num_director = config['num_director']
        self.num_actor = config['num_actor']
        self.embedding_dim = config['embedding_dim']
        self.embedding_rate = torch.nn.Embedding(
            num_embeddings=self.num_rate,
            embedding_dim=self.embedding_dim
        )
        # bias-free Linear over a multi-hot vector == summing the selected
        # embedding rows
        self.embedding_genre = torch.nn.Linear(
            in_features=self.num_genre,
            out_features=self.embedding_dim,
            bias=False
        )
        self.embedding_director = torch.nn.Linear(
            in_features=self.num_director,
            out_features=self.embedding_dim,
            bias=False
        )
        self.embedding_actor = torch.nn.Linear(
            in_features=self.num_actor,
            out_features=self.embedding_dim,
            bias=False
        )

    def forward(self, rate_idx, genre_idx, director_idx, actors_idx, vars=None):
        """
        Args:
            rate_idx: (B,) long tensor of rating-category ids.
            genre_idx / director_idx / actors_idx: (B, num_*) multi-hot
                tensors; each projection is divided by the row count, i.e.
                mean-pooled over the active fields.
                NOTE(review): an all-zero row would divide by zero — assumed
                not to occur in the data.
        """
        rate_emb = self.embedding_rate(rate_idx)
        genre_emb = self.embedding_genre(genre_idx.float()) / torch.sum(genre_idx.float(), 1).view(-1, 1)
        director_emb = self.embedding_director(director_idx.float()) / torch.sum(director_idx.float(), 1).view(-1, 1)
        actors_emb = self.embedding_actor(actors_idx.float()) / torch.sum(actors_idx.float(), 1).view(-1, 1)
        return torch.cat((rate_emb, genre_emb, director_emb, actors_emb), 1)
class User(torch.nn.Module):
    """Two-layer ReLU MLP embedding raw user features into a dense vector."""

    def __init__(self, config):
        super(User, self).__init__()
        self.feature_dim = config['uf_dim']
        self.first_embedding_dim = config['first_embedding_dim']
        self.second_embedding_dim = config['second_embedding_dim']
        # feature_dim -> first -> second, each layer followed by ReLU
        self.first_embedding_layer = torch.nn.Linear(
            in_features=self.feature_dim,
            out_features=self.first_embedding_dim,
            bias=True,
        )
        self.second_embedding_layer = torch.nn.Linear(
            in_features=self.first_embedding_dim,
            out_features=self.second_embedding_dim,
            bias=True,
        )

    def forward(self, x, vars=None):
        """Return the (B, second_embedding_dim) non-negative user embedding."""
        hidden = F.relu(self.first_embedding_layer(x))
        return F.relu(self.second_embedding_layer(hidden))
class Movie_user(torch.nn.Module):
    """Embedding module for MovieLens user features (gender/age/job/area)."""

    def __init__(self, config):
        super(Movie_user, self).__init__()
        self.num_gender = config['num_gender']
        self.num_age = config['num_age']
        self.num_occupation = config['num_occupation']
        self.num_zipcode = config['num_zipcode']
        self.embedding_dim = config['embedding_dim']
        # one embedding table per categorical field, all of the same width
        self.embedding_gender = torch.nn.Embedding(
            num_embeddings=self.num_gender, embedding_dim=self.embedding_dim)
        self.embedding_age = torch.nn.Embedding(
            num_embeddings=self.num_age, embedding_dim=self.embedding_dim)
        self.embedding_occupation = torch.nn.Embedding(
            num_embeddings=self.num_occupation, embedding_dim=self.embedding_dim)
        self.embedding_area = torch.nn.Embedding(
            num_embeddings=self.num_zipcode, embedding_dim=self.embedding_dim)

    def forward(self, gender_idx, age_idx, occupation_idx, area_idx):
        """Concatenate the four field embeddings into (B, 4 * embedding_dim)."""
        pieces = (
            self.embedding_gender(gender_idx),
            self.embedding_age(age_idx),
            self.embedding_occupation(occupation_idx),
            self.embedding_area(area_idx),
        )
        return torch.cat(pieces, 1)
class Encoder(nn.Module):
    """Latent-path encoder: maps each (x_i, y_i) pair to a representation.

    Dropout follows each hidden layer (added to the encoder on 03.31).
    """

    def __init__(self, x_dim, y_dim, h1_dim, h2_dim, z1_dim, dropout_rate):
        super(Encoder, self).__init__()
        self.x_dim = x_dim
        self.y_dim = y_dim
        self.h1_dim = h1_dim
        self.h2_dim = h2_dim
        self.z1_dim = z1_dim
        self.dropout_rate = dropout_rate
        # (x ++ y) -> h1 -> h2 -> z1, with dropout + ReLU between layers
        self.input_to_hidden = nn.Sequential(
            nn.Linear(self.x_dim + self.y_dim, self.h1_dim),
            torch.nn.Dropout(self.dropout_rate),
            nn.ReLU(inplace=True),
            nn.Linear(self.h1_dim, self.h2_dim),
            torch.nn.Dropout(self.dropout_rate),
            nn.ReLU(inplace=True),
            nn.Linear(self.h2_dim, self.z1_dim),
        )

    def forward(self, x, y):
        """Encode each row; y is reshaped into a column before concatenation."""
        pairs = torch.cat((x, y.view(-1, 1)), dim=1)
        return self.input_to_hidden(pairs)
class MuSigmaEncoder(nn.Module):
    """Maps an aggregated representation to a Gaussian latent (mu, sigma, z).

    z is drawn with the reparameterization trick: z = mu + eps * std.
    """

    def __init__(self, z1_dim, z2_dim, z_dim):
        super(MuSigmaEncoder, self).__init__()
        self.z1_dim = z1_dim
        self.z2_dim = z2_dim
        self.z_dim = z_dim
        self.z_to_hidden = nn.Linear(self.z1_dim, self.z2_dim)
        self.hidden_to_mu = nn.Linear(self.z2_dim, z_dim)
        self.hidden_to_logsigma = nn.Linear(self.z2_dim, z_dim)

    def forward(self, z_input):
        """Return (mu, log_sigma, sampled z) for the latent distribution."""
        hidden = torch.relu(self.z_to_hidden(z_input))
        mu = self.hidden_to_mu(hidden)
        log_sigma = self.hidden_to_logsigma(hidden)
        # reparameterization: eps ~ N(0, I), scaled by std and shifted by mu
        std = torch.exp(0.5 * log_sigma)
        eps = torch.randn_like(std)
        z = eps * std + mu
        return mu, log_sigma, z
class TaskEncoder(nn.Module):
    """Deterministic-path encoder producing a task embedding per pair."""

    def __init__(self, x_dim, y_dim, h1_dim, h2_dim, final_dim, dropout_rate):
        super(TaskEncoder, self).__init__()
        self.x_dim = x_dim
        self.y_dim = y_dim
        self.h1_dim = h1_dim
        self.h2_dim = h2_dim
        self.final_dim = final_dim
        self.dropout_rate = dropout_rate
        # (x ++ y) -> h1 -> h2 -> final, with dropout + ReLU between layers
        self.input_to_hidden = nn.Sequential(
            nn.Linear(self.x_dim + self.y_dim, self.h1_dim),
            torch.nn.Dropout(self.dropout_rate),
            nn.ReLU(inplace=True),
            nn.Linear(self.h1_dim, self.h2_dim),
            torch.nn.Dropout(self.dropout_rate),
            nn.ReLU(inplace=True),
            nn.Linear(self.h2_dim, self.final_dim),
        )

    def forward(self, x, y):
        """Encode each row; y is reshaped into a column before concatenation."""
        pairs = torch.cat((x, y.view(-1, 1)), dim=1)
        return self.input_to_hidden(pairs)
class MemoryUnit(nn.Module):
    """Differentiable task memory holding `clusters_k` learned key vectors.

    Given a task embedding, computes soft assignment weights C over the keys
    with a Student's t-style kernel, then adds the attended key mixture back
    onto the task embedding (residual update).
    """

    def __init__(self, clusters_k, emb_size, temperature):
        super(MemoryUnit, self).__init__()
        self.clusters_k = clusters_k
        self.embed_size = emb_size
        self.temperature = temperature
        # (k, d) matrix of cluster keys, Xavier-initialized
        self.array = nn.Parameter(init.xavier_uniform_(torch.FloatTensor(self.clusters_k, self.embed_size)))

    def forward(self, task_embed):
        # L2 distance of the task embedding to every key: (k, 1)
        dist = torch.norm(task_embed - self.array, p=2, dim=1, keepdim=True)
        # Student's t kernel over the tempered distances
        kernel = torch.pow((dist / self.temperature) + 1, (self.temperature + 1) / -2)
        # normalized soft assignment over the k keys, shaped (1, k)
        C = torch.transpose(kernel / kernel.sum(), 0, 1)
        # attended mixture of keys: (1, k) x (k, d) -> (1, d)
        value = torch.mm(C, self.array)
        # simple residual add onto the original task embedding
        new_task_embed = value + task_embed
        # C doubles as the target distribution for the clustering loss
        return C, new_task_embed
class Decoder(nn.Module):
    """FiLM-modulated decoder: predicts y_target from x_target, z and task.

    Every hidden layer is modulated as h <- gamma * h + beta, where gamma
    and beta are tanh-squashed linear projections of the task embedding.
    """

    def __init__(self, x_dim, z_dim, task_dim, h1_dim, h2_dim, h3_dim, y_dim, dropout_rate):
        super(Decoder, self).__init__()
        self.x_dim = x_dim
        self.z_dim = z_dim
        self.task_dim = task_dim
        self.h1_dim = h1_dim
        self.h2_dim = h2_dim
        self.h3_dim = h3_dim
        self.y_dim = y_dim
        self.dropout_rate = dropout_rate
        self.dropout = nn.Dropout(self.dropout_rate)
        self.hidden_layer_1 = nn.Linear(self.x_dim + self.z_dim, self.h1_dim)
        self.hidden_layer_2 = nn.Linear(self.h1_dim, self.h2_dim)
        self.hidden_layer_3 = nn.Linear(self.h2_dim, self.h3_dim)
        # per-layer FiLM projections of the task embedding (bias-free)
        self.film_layer_1_beta = nn.Linear(self.task_dim, self.h1_dim, bias=False)
        self.film_layer_1_gamma = nn.Linear(self.task_dim, self.h1_dim, bias=False)
        self.film_layer_2_beta = nn.Linear(self.task_dim, self.h2_dim, bias=False)
        self.film_layer_2_gamma = nn.Linear(self.task_dim, self.h2_dim, bias=False)
        self.film_layer_3_beta = nn.Linear(self.task_dim, self.h3_dim, bias=False)
        self.film_layer_3_gamma = nn.Linear(self.task_dim, self.h3_dim, bias=False)
        self.final_projection = nn.Linear(self.h3_dim, self.y_dim)

    def forward(self, x, z, task):
        """Predict y for every row of x, conditioned on z and the task."""
        n_rows, _ = x.size()
        # tile the single latent z so it can be concatenated onto every row
        tiled_z = z.unsqueeze(0).repeat(n_rows, 1)
        h = self.hidden_layer_1(torch.cat((x, tiled_z), dim=1))
        beta = torch.tanh(self.film_layer_1_beta(task))
        gamma = torch.tanh(self.film_layer_1_gamma(task))
        h = self.dropout(gamma * h + beta)
        h = self.hidden_layer_2(F.relu(h))
        beta = torch.tanh(self.film_layer_2_beta(task))
        gamma = torch.tanh(self.film_layer_2_gamma(task))
        h = self.dropout(gamma * h + beta)
        h = self.hidden_layer_3(F.relu(h))
        beta = torch.tanh(self.film_layer_3_beta(task))
        gamma = torch.tanh(self.film_layer_3_gamma(task))
        h = self.dropout(gamma * h + beta)
        return self.final_projection(F.relu(h))
class Gating_Decoder(nn.Module):
    """Decoder with gated FiLM modulation.

    Like `Decoder`, but each layer's (gamma, beta) pair is blended with an
    extra candidate `eta` through a sigmoid gate `delta`:
        gamma <- delta * gamma + (1 - delta) * eta
        beta  <- delta * beta  + (1 - delta) * eta
    """

    def __init__(self, x_dim, z_dim, task_dim, h1_dim, h2_dim, h3_dim, y_dim, dropout_rate):
        super(Gating_Decoder, self).__init__()
        self.x_dim = x_dim
        self.z_dim = z_dim
        self.task_dim = task_dim
        self.h1_dim = h1_dim
        self.h2_dim = h2_dim
        self.h3_dim = h3_dim
        self.y_dim = y_dim
        self.dropout_rate = dropout_rate
        self.dropout = nn.Dropout(self.dropout_rate)
        self.hidden_layer_1 = nn.Linear(self.x_dim + self.z_dim, self.h1_dim)
        self.hidden_layer_2 = nn.Linear(self.h1_dim, self.h2_dim)
        self.hidden_layer_3 = nn.Linear(self.h2_dim, self.h3_dim)
        # per-layer FiLM projections: beta/gamma plus the gated eta/delta pair
        self.film_layer_1_beta = nn.Linear(self.task_dim, self.h1_dim, bias=False)
        self.film_layer_1_gamma = nn.Linear(self.task_dim, self.h1_dim, bias=False)
        self.film_layer_1_eta = nn.Linear(self.task_dim, self.h1_dim, bias=False)
        self.film_layer_1_delta = nn.Linear(self.task_dim, self.h1_dim, bias=False)
        self.film_layer_2_beta = nn.Linear(self.task_dim, self.h2_dim, bias=False)
        self.film_layer_2_gamma = nn.Linear(self.task_dim, self.h2_dim, bias=False)
        self.film_layer_2_eta = nn.Linear(self.task_dim, self.h2_dim, bias=False)
        self.film_layer_2_delta = nn.Linear(self.task_dim, self.h2_dim, bias=False)
        self.film_layer_3_beta = nn.Linear(self.task_dim, self.h3_dim, bias=False)
        self.film_layer_3_gamma = nn.Linear(self.task_dim, self.h3_dim, bias=False)
        self.film_layer_3_eta = nn.Linear(self.task_dim, self.h3_dim, bias=False)
        self.film_layer_3_delta = nn.Linear(self.task_dim, self.h3_dim, bias=False)
        self.final_projection = nn.Linear(self.h3_dim, self.y_dim)

    def _modulate(self, hidden, task, gamma_layer, beta_layer, eta_layer, delta_layer):
        # Gated FiLM: blend (gamma, beta) with eta under gate delta, apply
        # the affine modulation, then dropout.
        beta = torch.tanh(beta_layer(task))
        gamma = torch.tanh(gamma_layer(task))
        eta = torch.tanh(eta_layer(task))
        delta = torch.sigmoid(delta_layer(task))
        gamma = gamma * delta + eta * (1 - delta)
        beta = beta * delta + eta * (1 - delta)
        return self.dropout(gamma * hidden + beta)

    def forward(self, x, z, task):
        """Predict y for every row of x, conditioned on z and the task."""
        n_rows, _ = x.size()
        # tile the single latent z so it can be concatenated onto every row
        tiled_z = z.unsqueeze(0).repeat(n_rows, 1)
        h = self.hidden_layer_1(torch.cat((x, tiled_z), dim=1))
        h = self._modulate(h, task, self.film_layer_1_gamma, self.film_layer_1_beta,
                           self.film_layer_1_eta, self.film_layer_1_delta)
        h = self.hidden_layer_2(F.relu(h))
        h = self._modulate(h, task, self.film_layer_2_gamma, self.film_layer_2_beta,
                           self.film_layer_2_eta, self.film_layer_2_delta)
        h = self.hidden_layer_3(F.relu(h))
        h = self._modulate(h, task, self.film_layer_3_gamma, self.film_layer_3_beta,
                           self.film_layer_3_eta, self.film_layer_3_delta)
        return self.final_projection(F.relu(h))
| 14,501 | 38.194595 | 117 | py |
TaNP | TaNP-main/TaNP/eval.py | """
Run evaluation with saved models.
"""
import random
import argparse
from tqdm import tqdm
import torch
from utils.scorer import *
def testing(trainer, opt, test_dataset):
    """Evaluate `trainer` on every test task and aggregate ranking metrics.

    Each task is scored individually (minibatch of one). Returns the mean
    precision / NDCG / MAP at cut-offs 5, 7 and 10, in that order.
    """
    sup_xs, sup_ys, qry_xs, qry_ys = zip(*test_dataset)
    trainer.eval()
    all_loss = 0
    cutoffs = (5, 7, 10)
    # per-cutoff accumulators: (precision list, AP list, NDCG list)
    acc = {k: ([], [], []) for k in cutoffs}
    for idx in range(len(test_dataset)):
        try:
            supp_xs = [sup_xs[idx]]
            supp_ys = [sup_ys[idx]]
            query_xs = [qry_xs[idx]]
            query_ys = [qry_ys[idx]]
        except IndexError:
            continue
        test_loss, recommendation_list = trainer.query_rec(supp_xs, supp_ys, query_xs, query_ys)
        all_loss += test_loss
        truth = query_ys[0].cpu().detach().numpy()
        for k in cutoffs:
            pre, ap, ndcg = acc[k]
            add_metric(recommendation_list, truth, pre, ap, ndcg, k)
    results = []
    for k in cutoffs:
        pre, ap, ndcg = acc[k]
        # cal_metric returns (mean precision, mean NDCG, MAP)
        results.extend(cal_metric(pre, ap, ndcg))
    return tuple(results)
| 1,551 | 32.021277 | 100 | py |
TaNP | TaNP-main/TaNP/train_TaNP.py | import os
from datetime import datetime
import time
import numpy as np
import random
import argparse
import pickle
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import json
from utils.loader import Preprocess
from TaNP import Trainer
from TaNP_training import training
from utils import helper
from eval import testing
# ---- Command-line configuration for TaNP training ----
parser = argparse.ArgumentParser()
# Paths and bookkeeping
parser.add_argument('--data_dir', type=str, default='data/lastfm_20')
parser.add_argument('--model_save_dir', type=str, default='save_model_dir')
parser.add_argument('--id', type=str, default='1', help='used for save hyper-parameters.')
# Embedding / latent-path dimensions
parser.add_argument('--first_embedding_dim', type=int, default=32, help='Embedding dimension for item and user.')
parser.add_argument('--second_embedding_dim', type=int, default=16, help='Embedding dimension for item and user.')
parser.add_argument('--z1_dim', type=int, default=32, help='The dimension of z1 in latent path.')
parser.add_argument('--z2_dim', type=int, default=32, help='The dimension of z2 in latent path.')
parser.add_argument('--z_dim', type=int, default=32, help='The dimension of z in latent path.')
# Encoder / task-encoder hidden sizes
parser.add_argument('--enc_h1_dim', type=int, default=64, help='The hidden first dimension of encoder.')
parser.add_argument('--enc_h2_dim', type=int, default=64, help='The hidden second dimension of encoder.')
parser.add_argument('--taskenc_h1_dim', type=int, default=128, help='The hidden first dimension of task encoder.')
parser.add_argument('--taskenc_h2_dim', type=int, default=64, help='The hidden second dimension of task encoder.')
parser.add_argument('--taskenc_final_dim', type=int, default=64, help='The hidden second dimension of task encoder.')
# Task-memory (clustering) hyper-parameters
parser.add_argument('--clusters_k', type=int, default=7, help='Cluster numbers of tasks.')
parser.add_argument('--temperature', type=float, default=1.0, help='used for student-t distribution.')
parser.add_argument('--lambda', type=float, default=0.1, help='used to balance the clustering loss and NP loss.')
# Decoder hidden sizes
parser.add_argument('--dec_h1_dim', type=int, default=128, help='The hidden first dimension of encoder.')
parser.add_argument('--dec_h2_dim', type=int, default=128, help='The hidden second dimension of encoder.')
parser.add_argument('--dec_h3_dim', type=int, default=128, help='The hidden third dimension of encoder.')
# used for movie datasets
#parser.add_argument('--num_gender', type=int, default=2, help='User information.')
#parser.add_argument('--num_age', type=int, default=7, help='User information.')
#parser.add_argument('--num_occupation', type=int, default=21, help='User information.')
#parser.add_argument('--num_zipcode', type=int, default=3402, help='User information.')
#parser.add_argument('--num_rate', type=int, default=6, help='Item information.')
#parser.add_argument('--num_genre', type=int, default=25, help='Item information.')
#parser.add_argument('--num_director', type=int, default=2186, help='Item information.')
#parser.add_argument('--num_actor', type=int, default=8030, help='Item information.')
# Optimization
parser.add_argument('--dropout_rate', type=float, default=0, help='used in encoder and decoder.')
parser.add_argument('--lr', type=float, default=1e-4, help='Applies to SGD and Adagrad.')
parser.add_argument('--optim', type=str, default='adam', help='sgd, adagrad, adam or adamax.')
parser.add_argument('--num_epoch', type=int, default=150)
parser.add_argument('--batch_size', type=int, default=32)
# Data splits and episode sizes
parser.add_argument('--train_ratio', type=float, default=0.7, help='Warm user ratio for training.')
parser.add_argument('--valid_ratio', type=float, default=0.1, help='Cold user ratio for validation.')
parser.add_argument('--seed', type=int, default=2020)
parser.add_argument('--save', type=int, default=0)
# NOTE(review): argparse `type=bool` treats any non-empty string as True;
# the default below is the only reliable way this flag is set.
parser.add_argument('--use_cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')
parser.add_argument('--support_size', type=int, default=20)
parser.add_argument('--query_size', type=int, default=10)
parser.add_argument('--max_len', type=int, default=200, help='The max length of interactions for each user.')
parser.add_argument('--context_min', type=int, default=20, help='Minimum size of context range.')
args = parser.parse_args()
def seed_everything(seed=1023):
    """Seed every RNG in play (hash, python, numpy, torch, CUDA) and make
    cuDNN deterministic so runs are reproducible."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # trade cuDNN autotuning for determinism
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
# ---- Script body: seed, build the trainer, load episodes, train or load ----
seed = args.seed
seed_everything(seed)
if args.cpu:
    args.use_cuda = False
elif args.use_cuda:
    torch.cuda.manual_seed(args.seed)
# all downstream code consumes the config as a plain dict
opt = vars(args)
# print model info
helper.print_config(opt)
helper.ensure_dir(opt["model_save_dir"], verbose=True)
# save model config
helper.save_config(opt, opt["model_save_dir"] + "/" + opt["id"] + '.config', verbose=True)
# record training log
file_logger = helper.FileLogger(opt["model_save_dir"] + '/' + opt['id'] + ".log",
                                header="# epoch\ttrain_loss\tprecision5\tNDCG5\tMAP5\tprecision7"
                                       "\tNDCG7\tMAP7\tprecision10\tNDCG10\tMAP10")
preprocess = Preprocess(opt)
print("Preprocess is done.")
print("Create model TaNP...")
# feature dimensions are only known after preprocessing
opt['uf_dim'] = preprocess.uf_dim
opt['if_dim'] = preprocess.if_dim
trainer = Trainer(opt)
if opt['use_cuda']:
    trainer.cuda()
model_filename = "{}/{}.pt".format(opt['model_save_dir'], opt["id"])
# /4 since sup_x, sup_y, query_x, query_y
training_set_size = int(len(os.listdir("{}/{}/{}".format(opt["data_dir"], "training", "log"))) / 4)
supp_xs_s = []
supp_ys_s = []
query_xs_s = []
query_ys_s = []
# load every pre-pickled training episode (support/query x/y quadruples)
for idx in range(training_set_size):
    supp_xs_s.append(pickle.load(open("{}/{}/{}/supp_x_{}.pkl".format(opt["data_dir"], "training", "log", idx), "rb")))
    supp_ys_s.append(pickle.load(open("{}/{}/{}/supp_y_{}.pkl".format(opt["data_dir"], "training", "log", idx), "rb")))
    query_xs_s.append(pickle.load(open("{}/{}/{}/query_x_{}.pkl".format(opt["data_dir"], "training", "log", idx), "rb")))
    query_ys_s.append(pickle.load(open("{}/{}/{}/query_y_{}.pkl".format(opt["data_dir"], "training", "log", idx), "rb")))
train_dataset = list(zip(supp_xs_s, supp_ys_s, query_xs_s, query_ys_s))
# free the intermediate lists; only the zipped dataset is needed below
del (supp_xs_s, supp_ys_s, query_xs_s, query_ys_s)
testing_set_size = int(len(os.listdir("{}/{}/{}".format(opt["data_dir"], "testing", "log"))) / 4)
supp_xs_s = []
supp_ys_s = []
query_xs_s = []
query_ys_s = []
# load every pre-pickled testing episode
for idx in range(testing_set_size):
    supp_xs_s.append(
        pickle.load(open("{}/{}/{}/supp_x_{}.pkl".format(opt["data_dir"], "testing", "log", idx), "rb")))
    supp_ys_s.append(
        pickle.load(open("{}/{}/{}/supp_y_{}.pkl".format(opt["data_dir"], "testing", "log", idx), "rb")))
    query_xs_s.append(
        pickle.load(open("{}/{}/{}/query_x_{}.pkl".format(opt["data_dir"], "testing", "log", idx), "rb")))
    query_ys_s.append(
        pickle.load(open("{}/{}/{}/query_y_{}.pkl".format(opt["data_dir"], "testing", "log", idx), "rb")))
test_dataset = list(zip(supp_xs_s, supp_ys_s, query_xs_s, query_ys_s))
del (supp_xs_s, supp_ys_s, query_xs_s, query_ys_s)
print("# epoch\ttrain_loss\tprecision5\tNDCG5\tMAP5\tprecision7\tNDCG7\tMAP7\tprecision10\tNDCG10\tMAP10")
# train from scratch unless a saved model already exists for this id
if not os.path.exists(model_filename):
    print("Start training...")
    training(trainer, opt, train_dataset, test_dataset, batch_size=opt['batch_size'], num_epoch=opt['num_epoch'],
             model_save=opt["save"], model_filename=model_filename, logger=file_logger)
else:
    print("Load pre-trained model...")
    # model_filename ends in ".pt"; strip "pt" and append "config"
    opt = helper.load_config(model_filename[:-2] + "config")
    helper.print_config(opt)
    trained_state_dict = torch.load(model_filename)
    trainer.load_state_dict(trained_state_dict)
| 7,850 | 47.462963 | 121 | py |
TaNP | TaNP-main/TaNP/TaNP.py | import torch
import numpy as np
from random import randint
from copy import deepcopy
from torch.autograd import Variable
from torch.nn import functional as F
from collections import OrderedDict
from embeddings_TaNP import Item, User, Encoder, MuSigmaEncoder, Decoder, Gating_Decoder, TaskEncoder, MemoryUnit
import torch.nn as nn
class NP(nn.Module):
    """Task-adaptive Neural Process (TaNP).

    Combines a latent encoder (z path), a deterministic task encoder backed
    by a cluster memory, and a FiLM-modulated decoder. In training mode the
    latent is sampled from the *target* set (with the context posterior also
    returned for the KL term); in eval mode only the context set is used.
    """

    def __init__(self, config):
        super(NP, self).__init__()
        # x is the concatenation of the item and user embeddings
        self.x_dim = config['second_embedding_dim'] * 2
        # use one-hot or not?
        self.y_dim = 1
        self.z1_dim = config['z1_dim']
        self.z2_dim = config['z2_dim']
        # z is the dimension size of mu and sigma.
        self.z_dim = config['z_dim']
        # the dimension size of rc.
        self.enc_h1_dim = config['enc_h1_dim']
        self.enc_h2_dim = config['enc_h2_dim']
        self.dec_h1_dim = config['dec_h1_dim']
        self.dec_h2_dim = config['dec_h2_dim']
        self.dec_h3_dim = config['dec_h3_dim']
        self.taskenc_h1_dim = config['taskenc_h1_dim']
        self.taskenc_h2_dim = config['taskenc_h2_dim']
        self.taskenc_final_dim = config['taskenc_final_dim']
        self.clusters_k = config['clusters_k']
        self.temperture = config['temperature']
        self.dropout_rate = config['dropout_rate']
        # Initialize networks
        self.item_emb = Item(config)
        self.user_emb = User(config)
        # This encoder is used to generated z actually, it is a latent encoder in ANP.
        self.xy_to_z = Encoder(self.x_dim, self.y_dim, self.enc_h1_dim, self.enc_h2_dim, self.z1_dim, self.dropout_rate)
        self.z_to_mu_sigma = MuSigmaEncoder(self.z1_dim, self.z2_dim, self.z_dim)
        # This encoder is used to generated r actually, it is a deterministic encoder in ANP.
        self.xy_to_task = TaskEncoder(self.x_dim, self.y_dim, self.taskenc_h1_dim, self.taskenc_h2_dim, self.taskenc_final_dim,
                                      self.dropout_rate)
        self.memoryunit = MemoryUnit(self.clusters_k, self.taskenc_final_dim, self.temperture)
        # alternative gated decoder, currently disabled:
        #self.xz_to_y = Gating_Decoder(self.x_dim, self.z_dim, self.taskenc_final_dim, self.dec_h1_dim, self.dec_h2_dim, self.dec_h3_dim, self.y_dim, self.dropout_rate)
        self.xz_to_y = Decoder(self.x_dim, self.z_dim, self.taskenc_final_dim, self.dec_h1_dim, self.dec_h2_dim, self.dec_h3_dim, self.y_dim, self.dropout_rate)

    def aggregate(self, z_i):
        # Mean-pool per-interaction representations into one vector.
        return torch.mean(z_i, dim=0)

    def xy_to_mu_sigma(self, x, y):
        """Encode (x, y) pairs into the latent Gaussian (mu, log_sigma, z)."""
        # Encode each point into a representation r_i
        z_i = self.xy_to_z(x, y)
        # Aggregate representations r_i into a single representation r
        z = self.aggregate(z_i)
        # Return parameters of distribution
        return self.z_to_mu_sigma(z)

    # embedding each (item, user) as the x for np
    def embedding(self, x):
        """Split the raw feature row into item/user parts and embed both."""
        if_dim = self.item_emb.feature_dim
        # the first if_dim columns are item features, the rest are user features
        item_x = Variable(x[:, 0:if_dim], requires_grad=False).float()
        user_x = Variable(x[:, if_dim:], requires_grad=False).float()
        item_emb = self.item_emb(item_x)
        user_emb = self.user_emb(user_x)
        x = torch.cat((item_emb, user_emb), 1)
        return x

    def forward(self, x_context, y_context, x_target, y_target):
        """Run the NP; the returned tuple differs between train and eval.

        Training: (predictions, target mu/log_sigma, context mu/log_sigma,
        cluster distribution C) — everything the KL + clustering losses need.
        Eval: predictions only, conditioned on the context set.
        """
        x_context_embed = self.embedding(x_context)
        x_target_embed = self.embedding(x_target)
        if self.training:
            # sigma is log_sigma actually
            mu_target, sigma_target, z_target = self.xy_to_mu_sigma(x_target_embed, y_target)
            mu_context, sigma_context, z_context = self.xy_to_mu_sigma(x_context_embed, y_context)
            task = self.xy_to_task(x_context_embed, y_context)
            mean_task = self.aggregate(task)
            C_distribution, new_task_embed = self.memoryunit(mean_task)
            # decode using the latent sampled from the *target* set
            p_y_pred = self.xz_to_y(x_target_embed, z_target, new_task_embed)
            return p_y_pred, mu_target, sigma_target, mu_context, sigma_context, C_distribution
        else:
            mu_context, sigma_context, z_context = self.xy_to_mu_sigma(x_context_embed, y_context)
            task = self.xy_to_task(x_context_embed, y_context)
            mean_task = self.aggregate(task)
            C_distribution, new_task_embed = self.memoryunit(mean_task)
            # at test time only the context posterior is available
            p_y_pred = self.xz_to_y(x_target_embed, z_context, new_task_embed)
            return p_y_pred
class Trainer(torch.nn.Module):
    """Training/evaluation wrapper around the TaNP ``NP`` model.

    Owns the NP network and its Adam optimizer, splits each user's
    interactions into context/target sets, and optimizes the NP objective
    (MSE + Gaussian KL) plus a clustering regularizer weighted by ``lambda``.
    """
    def __init__(self, config):
        # NOTE: setting a plain attribute before Module.__init__ is fine for
        # non-Parameter/non-Module values such as this config dict.
        self.opt = config
        super(Trainer, self).__init__()
        self.use_cuda = config['use_cuda']
        self.np = NP(self.opt)
        # Weight of the clustering KL regularizer in global_update().
        self._lambda = config['lambda']
        self.optimizer = torch.optim.Adam(self.np.parameters(), lr=config['lr'])
    # our kl divergence
    def kl_div(self, mu_target, logsigma_target, mu_context, logsigma_context):
        """KL( N(mu_t, sigma_t) || N(mu_c, sigma_c) ), summed over dimensions.

        Inputs are means and *log* standard deviations of diagonal Gaussians.
        """
        target_sigma = torch.exp(logsigma_target)
        context_sigma = torch.exp(logsigma_context)
        # BUGFIX: the quadratic term must be divided by (2 * sigma_c^2).
        # The previous expression `... / 2 * context_sigma ** 2` multiplied
        # by sigma_c^2 / 2 due to operator precedence.
        kl_div = (logsigma_context - logsigma_target) - 0.5 + (((target_sigma ** 2) + (mu_target - mu_context) ** 2) / (2 * context_sigma ** 2))
        kl_div = kl_div.sum()
        return kl_div
    # new kl divergence -- kl(st|sc)
    def new_kl_div(self, prior_mu, prior_var, posterior_mu, posterior_var):
        """KL divergence between diagonal Gaussians parameterized by mean and log-variance."""
        kl_div = (torch.exp(posterior_var) + (posterior_mu-prior_mu) ** 2) / torch.exp(prior_var) - 1. + (prior_var - posterior_var)
        kl_div = 0.5 * kl_div.sum()
        return kl_div
    def loss(self, p_y_pred, y_target, mu_target, sigma_target, mu_context, sigma_context):
        """MSE between predictions and targets plus KL(target || context) regularizer."""
        regression_loss = F.mse_loss(p_y_pred, y_target.view(-1, 1))
        # KL divergence between the target and context latent distributions.
        kl = self.new_kl_div(mu_context, sigma_context, mu_target, sigma_target)
        return regression_loss + kl
    def context_target_split(self, support_set_x, support_set_y, query_set_x, query_set_y):
        """Randomly split the pooled support+query interactions into context and target.

        The target set always contains the context points (standard NP setup).
        Sizes are drawn from the bounds configured in ``self.opt``.
        """
        total_x = torch.cat((support_set_x, query_set_x), 0)
        total_y = torch.cat((support_set_y, query_set_y), 0)
        total_size = total_x.size(0)
        context_min = self.opt['context_min']
        context_max = self.opt['context_max']
        extra_tar_min = self.opt['target_extra_min']
        # here we simply use the total_size as the maximum of target size.
        num_context = randint(context_min, context_max)
        num_target = randint(extra_tar_min, total_size - num_context)
        sampled = np.random.choice(total_size, num_context+num_target, replace=False)
        x_context = total_x[sampled[:num_context], :]
        y_context = total_y[sampled[:num_context]]
        x_target = total_x[sampled, :]
        y_target = total_y[sampled]
        return x_context, y_context, x_target, y_target
    def new_context_target_split(self, support_set_x, support_set_y, query_set_x, query_set_y):
        """Like context_target_split, but only the minimum context size is configured."""
        total_x = torch.cat((support_set_x, query_set_x), 0)
        total_y = torch.cat((support_set_y, query_set_y), 0)
        total_size = total_x.size(0)
        context_min = self.opt['context_min']
        num_context = np.random.randint(context_min, total_size)
        num_target = np.random.randint(0, total_size - num_context)
        sampled = np.random.choice(total_size, num_context+num_target, replace=False)
        x_context = total_x[sampled[:num_context], :]
        y_context = total_y[sampled[:num_context]]
        x_target = total_x[sampled, :]
        y_target = total_y[sampled]
        return x_context, y_context, x_target, y_target
    def global_update(self, support_set_xs, support_set_ys, query_set_xs, query_set_ys):
        """One optimization step over a batch of tasks (users).

        Returns the scalar total loss and the batch of memory-cluster
        assignment distributions as a numpy array.
        """
        batch_sz = len(support_set_xs)
        losses = []
        C_distribs = []
        if self.use_cuda:
            for i in range(batch_sz):
                support_set_xs[i] = support_set_xs[i].cuda()
                support_set_ys[i] = support_set_ys[i].cuda()
                query_set_xs[i] = query_set_xs[i].cuda()
                query_set_ys[i] = query_set_ys[i].cuda()
        for i in range(batch_sz):
            x_context, y_context, x_target, y_target = self.new_context_target_split(support_set_xs[i], support_set_ys[i],
                                                                                    query_set_xs[i], query_set_ys[i])
            p_y_pred, mu_target, sigma_target, mu_context, sigma_context, C_distribution = self.np(x_context, y_context, x_target,
                                                                                                   y_target)
            C_distribs.append(C_distribution)
            loss = self.loss(p_y_pred, y_target, mu_target, sigma_target, mu_context, sigma_context)
            losses.append(loss)
        # Calculate the sharpened target distribution for clustering
        # (DEC-style self-training target), batch-wise.
        # batchsize * k
        C_distribs = torch.stack(C_distribs)
        # batchsize * k
        C_distribs_sq = torch.pow(C_distribs, 2)
        # 1 * k
        C_distribs_sum = torch.sum(C_distribs, dim=0, keepdim=True)
        # batchsize * k
        temp = C_distribs_sq / C_distribs_sum
        # batchsize * 1
        temp_sum = torch.sum(temp, dim=1, keepdim=True)
        target_distribs = temp / temp_sum
        # KL between soft assignments and the sharpened targets.
        clustering_loss = self._lambda * F.kl_div(C_distribs.log(), target_distribs, reduction='batchmean')
        np_losses_mean = torch.stack(losses).mean(0)
        total_loss = np_losses_mean + clustering_loss
        self.optimizer.zero_grad()
        total_loss.backward()
        self.optimizer.step()
        return total_loss.item(), C_distribs.cpu().detach().numpy()
    def query_rec(self, support_set_xs, support_set_ys, query_set_xs, query_set_ys):
        """Evaluate one task: predict query ratings conditioned on the support set.

        Returns the query MSE and the item indices ranked by predicted score.
        """
        batch_sz = 1
        # used for calculating the rmse.
        losses_q = []
        if self.use_cuda:
            for i in range(batch_sz):
                support_set_xs[i] = support_set_xs[i].cuda()
                support_set_ys[i] = support_set_ys[i].cuda()
                query_set_xs[i] = query_set_xs[i].cuda()
                query_set_ys[i] = query_set_ys[i].cuda()
        for i in range(batch_sz):
            # NOTE: assumes self.np is in eval mode so forward() returns predictions only.
            query_set_y_pred = self.np(support_set_xs[i], support_set_ys[i], query_set_xs[i], query_set_ys[i])
            loss_q = F.mse_loss(query_set_y_pred, query_set_ys[i].view(-1, 1))
            losses_q.append(loss_q)
        losses_q = torch.stack(losses_q).mean(0)
        output_list, recommendation_list = query_set_y_pred.view(-1).sort(descending=True)
        return losses_q.item(), recommendation_list
| 11,208 | 49.040179 | 168 | py |
TaNP | TaNP-main/TaNP/utils/torch_utils.py | """
Utility functions for torch.
"""
import torch
from torch import nn, optim
from torch.optim.optimizer import Optimizer
### class
class MyAdagrad(Optimizer):
    """My modification of the Adagrad optimizer that allows to specify an initial
    accumulater value. This mimics the behavior of the default Adagrad implementation
    in Tensorflow. The default PyTorch Adagrad uses 0 for initial acculmulator value.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        lr_decay (float, optional): learning rate decay (default: 0)
        init_accu_value (float, optional): initial accumulater value.
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
    """
    def __init__(self, params, lr=1e-2, lr_decay=0, init_accu_value=0.1, weight_decay=0):
        defaults = dict(lr=lr, lr_decay=lr_decay, init_accu_value=init_accu_value, \
            weight_decay=weight_decay)
        super(MyAdagrad, self).__init__(params, defaults)
        # Seed every parameter's squared-gradient accumulator with
        # init_accu_value instead of zero (the TensorFlow default).
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['step'] = 0
                state['sum'] = torch.ones(p.data.size()).type_as(p.data) *\
                    init_accu_value
    def share_memory(self):
        # Move the accumulators into shared memory for multi-process training.
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['sum'].share_memory_()
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                state = self.state[p]
                state['step'] += 1
                if group['weight_decay'] != 0:
                    if p.grad.data.is_sparse:
                        raise RuntimeError("weight_decay option is not compatible with sparse gradients ")
                    # NOTE(review): old-style Tensor.add(alpha, other) overload;
                    # deprecated in modern PyTorch — confirm the pinned torch version.
                    grad = grad.add(group['weight_decay'], p.data)
                # Adagrad learning-rate decay schedule.
                clr = group['lr'] / (1 + (state['step'] - 1) * group['lr_decay'])
                if p.grad.data.is_sparse:
                    grad = grad.coalesce()  # the update is non-linear so indices must be unique
                    grad_indices = grad._indices()
                    grad_values = grad._values()
                    size = torch.Size([x for x in grad.size()])
                    def make_sparse(values):
                        # Rebuild a sparse tensor with the gradient's sparsity pattern.
                        constructor = type(p.grad.data)
                        if grad_indices.dim() == 0 or values.dim() == 0:
                            return constructor()
                        return constructor(grad_indices, values, size)
                    state['sum'].add_(make_sparse(grad_values.pow(2)))
                    # NOTE(review): _sparse_mask is a private API removed in newer
                    # torch releases (renamed sparse_mask) — verify torch version.
                    std = state['sum']._sparse_mask(grad)
                    std_values = std._values().sqrt_().add_(1e-10)
                    p.data.add_(-clr, make_sparse(grad_values / std_values))
                else:
                    # Dense path: sum += grad^2; p -= clr * grad / (sqrt(sum) + eps).
                    state['sum'].addcmul_(1, grad, grad)
                    std = state['sum'].sqrt().add_(1e-10)
                    p.data.addcdiv_(-clr, grad, std)
        return loss
### torch specific functions
def get_optimizer(name, parameters, lr, l2=0):
    """Build a torch optimizer by name.

    Note: 'adam' and 'adamax' deliberately ignore *lr* and use the
    optimizer's default learning rate (original behavior).
    """
    if name == 'sgd':
        return torch.optim.SGD(parameters, lr=lr, weight_decay=l2)
    if name in ('adagrad', 'myadagrad'):
        # use my own adagrad to allow for init accumulator value
        return MyAdagrad(parameters, lr=lr, init_accu_value=0.1, weight_decay=l2)
    if name == 'adam':
        return torch.optim.Adam(parameters, weight_decay=l2)  # use default lr
    if name == 'adamax':
        return torch.optim.Adamax(parameters, weight_decay=l2)  # use default lr
    if name == 'adadelta':
        return torch.optim.Adadelta(parameters, lr=lr, weight_decay=l2)
    raise Exception("Unsupported optimizer: {}".format(name))
def change_lr(optimizer, new_lr):
    """Set the learning rate of every parameter group to *new_lr* in place."""
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def flatten_indices(seq_lens, width):
    """Return flat indices into a row-major (len(seq_lens) x width) grid.

    For each row i, the first seq_lens[i] column positions are kept, so the
    result is [i * width + j for each row i and each j < seq_lens[i]].

    Args:
        seq_lens: per-row valid lengths.
        width: number of columns in the (conceptual) padded grid.
    Returns:
        A flat list of int indices.
    """
    # Comprehension replaces the manual append loop; behavior is identical.
    return [i * width + j for i, length in enumerate(seq_lens) for j in range(length)]
def set_cuda(var, cuda):
    """Move *var* to the GPU when *cuda* is truthy; otherwise return it unchanged."""
    return var.cuda() if cuda else var
def keep_partial_grad(grad, topk):
    """
    Keep only the topk rows of grads.

    Zeroes every row past the first *topk* in place and returns the same tensor.
    """
    num_rows = grad.size(0)
    assert topk < num_rows
    grad.data[topk:].zero_()
    return grad
### model IO
def save(model, optimizer, opt, filename):
    """Serialize model/optimizer state dicts and the config dict to *filename*.

    Failures are reported with a warning rather than raised (best-effort save).
    """
    checkpoint = {
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'config': opt,
    }
    try:
        torch.save(checkpoint, filename)
    except BaseException:
        print("[ Warning: model saving failed. ]")
def load(model, optimizer, filename):
    """Restore model/optimizer state from a checkpoint written by ``save``.

    Args:
        model: module to restore, or None to skip.
        optimizer: optimizer to restore, or None to skip.
        filename: checkpoint path.
    Returns:
        (model, optimizer, config) — config is the dict stored under 'config'.
    Raises:
        Whatever torch.load raises when the file cannot be read.
    """
    try:
        dump = torch.load(filename)
    except BaseException:
        print("[ Fail: model loading failed. ]")
        # BUGFIX: the original swallowed the error and fell through,
        # crashing with a confusing NameError on `dump` below. Re-raise
        # the real cause instead.
        raise
    if model is not None:
        model.load_state_dict(dump['model'])
    if optimizer is not None:
        optimizer.load_state_dict(dump['optimizer'])
    opt = dump['config']
    return model, optimizer, opt
def load_config(filename):
    """Load only the 'config' dict from a checkpoint written by ``save``.

    Raises whatever torch.load raises when the file cannot be read.
    """
    try:
        dump = torch.load(filename)
    except BaseException:
        print("[ Fail: model loading failed. ]")
        # BUGFIX: the original fell through to `dump['config']` and raised
        # a NameError that masked the real error. Re-raise instead.
        raise
    return dump['config']
| 5,696 | 32.710059 | 106 | py |
TaNP | TaNP-main/TaNP/utils/loader.py | import json
import random
import torch
import numpy as np
import pickle
import codecs
import re
import os
import datetime
import tqdm
import pandas as pd
#convert userids to userdict key-id(int), val:onehot_vector(tensor)
#element in list is str type.
def to_onehot_dict(list):
    """Map each id in *list* to a (1, len(list)) one-hot LongTensor.

    The dict key is int(id), and the hot position is that same int value —
    NOT the element's position in *list* — so ids are expected to lie in
    [0, len(list)).

    NOTE(review): the parameter name shadows the builtin `list`; it is kept
    for backward compatibility with existing positional/keyword callers.
    """
    length = len(list)
    onehot = {}  # renamed from `dict`, which shadowed the builtin
    for element in list:  # the enumerate index in the original was unused
        idx = int(element)
        vector = torch.zeros(1, length).long()
        vector[:, idx] = 1.0
        onehot[idx] = vector
    return onehot
def load_list(fname):
    """Read *fname* as UTF-8 text and return its lines, stripped, as a list."""
    with open(fname, encoding="utf-8") as f:
        return [line.strip() for line in f]
# used for merge dictionaries.
def merge_key(dict1, dict2):
    """Return a new dict with the items of both; dict2 wins on key clashes."""
    merged = dict(dict1)
    merged.update(dict2)
    return merged
def merge_value(dict1, dict2):  # merge and item_cold
    """Append dict2's list values onto dict1's matching keys, in place.

    Keys present only in dict2 are reported and skipped (original behavior).
    """
    for key, value in dict2.items():
        if key not in dict1:
            print('Unexpected key.')
            continue
        # Rebind to a fresh concatenated list rather than mutating in place.
        dict1[key] = dict1[key] + value
def count_values(dict):
    """Return the total number of elements across all of the dict's values.

    NOTE(review): the parameter name shadows the builtin `dict`; kept for
    backward compatibility with existing callers.
    """
    # sum() over a generator replaces the manual accumulator loop.
    return sum(len(value) for value in dict.values())
def construct_dictionary(user_list, total_dict):
    """Project *total_dict* down to the users in *user_list*.

    Keys are the string form of each user id (matching total_dict's keys).
    Raises KeyError if a user is missing from total_dict (same as before).
    """
    # Dict comprehension replaces the index-based loop; behavior identical.
    return {str(user): total_dict[str(user)] for user in user_list}
class Preprocess(object):
    """
    Preprocess the training, validation and test data.
    Generate the episode-style data.

    Splits users into warm (train) / valid / cold (test) groups by the
    configured ratios, one-hot encodes users and items, removes test items
    never seen in training, and writes per-user support/query episode files
    as pickles under the dataset directory.
    """
    def __init__(self, opt):
        self.batch_size = opt["batch_size"]
        self.opt = opt
        # warm data ratio
        self.train_ratio = opt['train_ratio']
        self.valid_ratio = opt['valid_ratio']
        self.test_ratio = 1 - self.train_ratio - self.valid_ratio
        self.dataset_path = opt["data_dir"]
        # Episode sizes: number of support interactions and query interactions per user.
        self.support_size = opt['support_size']
        self.query_size = opt['query_size']
        self.max_len = opt['max_len']
        # save one-hot dimension length
        uf_dim, if_dim = self.preprocess(self.dataset_path)
        self.uf_dim = uf_dim
        self.if_dim = if_dim
    def preprocess(self, dataset_path):
        """ Preprocess the data and convert to ids.

        Returns (number of users, number of items) — the one-hot dimensions.
        """
        #Create training-validation-test datasets
        print('Create training, validation and test data from scratch!')
        # interaction_dict_x: user id -> list of item ids; _y: the matching ratings.
        with open('./{}/interaction_dict_x.json'.format(dataset_path), 'r', encoding='utf-8') as f:
            inter_dict_x = json.loads(f.read())
        with open('./{}/interaction_dict_y.json'.format(dataset_path), 'r', encoding='utf-8') as f:
            inter_dict_y = json.loads(f.read())
        print('The size of total interactions is %d.' % (count_values(inter_dict_x))) # 42346
        assert count_values(inter_dict_x) == count_values(inter_dict_y)
        with open('./{}/user_list.json'.format(dataset_path), 'r', encoding='utf-8') as f:
            userids = json.loads(f.read())
        with open('./{}/item_list.json'.format(dataset_path), 'r', encoding='utf-8') as f:
            itemids = json.loads(f.read())
        #userids = list(inter_dict_x.keys())
        # Random user-level split into warm / valid / cold groups.
        random.shuffle(userids)
        warm_user_size = int(len(userids) * self.train_ratio)
        valid_user_size = int(len(userids) * self.valid_ratio)
        warm_users = userids[:warm_user_size]
        valid_users = userids[warm_user_size:warm_user_size+valid_user_size]
        cold_users = userids[warm_user_size+valid_user_size:]
        assert len(userids) == len(warm_users)+len(valid_users)+len(cold_users)
        # Construct the training data dict
        training_dict_x = construct_dictionary(warm_users, inter_dict_x)
        training_dict_y = construct_dictionary(warm_users, inter_dict_y)
        #Avoid the new items shown in test data in the case of cold user.
        item_set = set()
        for i in training_dict_x.values():
            i = set(i)
            item_set = item_set.union(i)
        # Construct one-hot dictionary
        user_dict = to_onehot_dict(userids)
        # only items contained in all data are encoded.
        item_dict = to_onehot_dict(itemids)
        # This part of data is not used, so we do not process it temporally.
        valid_dict_x = construct_dictionary(valid_users, inter_dict_x)
        valid_dict_y = construct_dictionary(valid_users, inter_dict_y)
        assert count_values(valid_dict_x) == count_values(valid_dict_y)
        test_dict_x = construct_dictionary(cold_users, inter_dict_x)
        test_dict_y = construct_dictionary(cold_users, inter_dict_y)
        assert count_values(test_dict_x) == count_values(test_dict_y)
        print('Before delete new items in test data, test data has %d interactions.' % (count_values(test_dict_x)))
        #Delete the new items in test data.
        unseen_count = 0
        for key, value in test_dict_x.items():
            assert len(value) == len(test_dict_y[key])
            unseen_item_index = [index for index, i in enumerate(value) if i not in item_set]
            unseen_count+=len(unseen_item_index)
            if len(unseen_item_index) == 0:
                continue
            else:
                # Drop the unseen items and their ratings in lockstep.
                new_value_x = [element for index, element in enumerate(value) if index not in unseen_item_index]
                new_value_y = [test_dict_y[key][index] for index, element in enumerate(value) if index not in unseen_item_index]
                test_dict_x[key] = new_value_x
                test_dict_y[key] = new_value_y
        print('After delete new items in test data, test data has %d interactions.' % (count_values(test_dict_x)))
        assert count_values(test_dict_x) == count_values(test_dict_y)
        print('The number of total unseen interactions is %d.' % (unseen_count))
        # Persist the splits (file names embed the split ratio).
        pickle.dump(training_dict_x, open("{}/training_dict_x_{:2f}.pkl".format(dataset_path, self.train_ratio), "wb"))
        pickle.dump(training_dict_y, open("{}/training_dict_y_{:2f}.pkl".format(dataset_path, self.train_ratio), "wb"))
        pickle.dump(valid_dict_x, open("{}/valid_dict_x_{:2f}.pkl".format(dataset_path, self.valid_ratio), "wb"))
        pickle.dump(valid_dict_y, open("{}/valid_dict_y_{:2f}.pkl".format(dataset_path, self.valid_ratio), "wb"))
        pickle.dump(test_dict_x, open("{}/test_dict_x_{:2f}.pkl".format(dataset_path, self.test_ratio), "wb"))
        pickle.dump(test_dict_y, open("{}/test_dict_y_{:2f}.pkl".format(dataset_path, self.test_ratio), "wb"))
        def generate_episodes(dict_x, dict_y, category, support_size, query_size, max_len, dir="log"):
            # Write one (support_x, support_y, query_x, query_y) pickle
            # quadruple per kept user, plus the raw (user, item) id pairs
            # under "evidence" for later evidence-candidate selection.
            idx = 0
            if not os.path.exists("{}/{}/{}".format(dataset_path, category, dir)):
                os.makedirs("{}/{}/{}".format(dataset_path, category, dir))
                os.makedirs("{}/{}/{}".format(dataset_path, category, "evidence"))
            for _, user_id in enumerate(dict_x.keys()):
                u_id = int(user_id)
                seen_music_len = len(dict_x[str(u_id)])
                indices = list(range(seen_music_len))
                # filter some users with their interactions, i.e., tasks
                if seen_music_len < (support_size + query_size) or seen_music_len > max_len:
                    continue
                random.shuffle(indices)
                tmp_x = np.array(dict_x[str(u_id)])
                tmp_y = np.array(dict_y[str(u_id)])
                # Build the support features: one (item one-hot ++ user one-hot) row per interaction.
                support_x_app = None
                for m_id in tmp_x[indices[:support_size]]:
                    m_id = int(m_id)
                    tmp_x_converted = torch.cat((item_dict[m_id], user_dict[u_id]), 1)
                    try:
                        support_x_app = torch.cat((support_x_app, tmp_x_converted), 0)
                    except:
                        # First row: support_x_app is still None.
                        support_x_app = tmp_x_converted
                query_x_app = None
                for m_id in tmp_x[indices[support_size:]]:
                    m_id = int(m_id)
                    u_id = int(user_id)
                    tmp_x_converted = torch.cat((item_dict[m_id], user_dict[u_id]), 1)
                    try:
                        query_x_app = torch.cat((query_x_app, tmp_x_converted), 0)
                    except:
                        query_x_app = tmp_x_converted
                support_y_app = torch.FloatTensor(tmp_y[indices[:support_size]])
                query_y_app = torch.FloatTensor(tmp_y[indices[support_size:]])
                pickle.dump(support_x_app, open("{}/{}/{}/supp_x_{}.pkl".format(dataset_path, category, dir, idx), "wb"))
                pickle.dump(support_y_app, open("{}/{}/{}/supp_y_{}.pkl".format(dataset_path, category, dir, idx), "wb"))
                pickle.dump(query_x_app, open("{}/{}/{}/query_x_{}.pkl".format(dataset_path, category, dir, idx), "wb"))
                pickle.dump(query_y_app, open("{}/{}/{}/query_y_{}.pkl".format(dataset_path, category, dir, idx), "wb"))
                # used for evidence candidate selection
                with open("{}/{}/{}/supp_x_{}_u_m_ids.txt".format(dataset_path, category, "evidence", idx), "w") as f:
                    for m_id in tmp_x[indices[:support_size]]:
                        f.write("{}\t{}\n".format(u_id, m_id))
                with open("{}/{}/{}/query_x_{}_u_m_ids.txt".format(dataset_path, category, "evidence", idx), "w") as f:
                    for m_id in tmp_x[indices[support_size:]]:
                        f.write("{}\t{}\n".format(u_id, m_id))
                idx+=1
        print("Generate eposide data for training.")
        generate_episodes(training_dict_x, training_dict_y, "training", self.support_size, self.query_size, self.max_len)
        print("Generate eposide data for validation.")
        generate_episodes(valid_dict_x, valid_dict_y, "validation", self.support_size, self.query_size, self.max_len)
        print("Generate eposide data for testing.")
        generate_episodes(test_dict_x, test_dict_y, "testing", self.support_size, self.query_size, self.max_len)
        return len(userids), len(itemids)
| 10,229 | 45.712329 | 128 | py |
crfasrnn | crfasrnn-master/python-scripts/crfasrnn_demo.py | # -*- coding: utf-8 -*-
"""
This package contains code for the "CRF-RNN" semantic image segmentation method, published in the
ICCV 2015 paper Conditional Random Fields as Recurrent Neural Networks. Our software is built on
top of the Caffe deep learning library.
Contact:
Shuai Zheng (szheng@robots.ox.ac.uk), Sadeep Jayasumana (sadeep@robots.ox.ac.uk), Bernardino Romera-Paredes (bernard@robots.ox.ac.uk)
Supervisor:
Philip Torr (philip.torr@eng.ox.ac.uk)
For more information about CRF-RNN, please vist the project website http://crfasrnn.torr.vision.
"""
import sys
import time
import getopt
import os
import numpy as np
from PIL import Image as PILImage
# Path of the Caffe installation.
_CAFFE_ROOT = "../caffe/"
# Model definition and model file paths
_MODEL_DEF_FILE = "TVG_CRFRNN_new_deploy.prototxt" # Contains the network definition
_MODEL_FILE = "TVG_CRFRNN_COCO_VOC.caffemodel" # Contains the trained weights. Download from http://goo.gl/j7PrPZ
sys.path.insert(0, _CAFFE_ROOT + "python")
import caffe
_MAX_DIM = 500
def get_palette(num_cls):
    """ Returns the color map for visualizing the segmentation mask.
    Args:
        num_cls: Number of classes
    Returns:
        The color map as a flat [R0, G0, B0, R1, G1, B1, ...] list of length
        3 * num_cls (PASCAL VOC palette scheme).
    """
    n = num_cls
    palette = [0] * (n * 3)
    # BUGFIX: `xrange` is Python-2-only; `range` is equivalent here and
    # works on both Python 2 and 3.
    for j in range(0, n):
        lab = j
        palette[j * 3 + 0] = 0
        palette[j * 3 + 1] = 0
        palette[j * 3 + 2] = 0
        i = 0
        # Spread the bits of the label index across the three channels,
        # from the most significant bit of each byte downwards.
        while lab:
            palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
            palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
            palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
            i += 1
            lab >>= 3
    return palette
def crfrnn_segmenter(model_def_file, model_file, gpu_device, inputs):
    """ Returns the segmentation of the given image.
    Args:
        model_def_file: File path of the Caffe model definition prototxt file
        model_file: File path of the trained model file (contains trained weights)
        gpu_device: ID of the GPU device. If using the CPU, set this to -1
        inputs: List of images to be segmented
    Returns:
        The segmented image
    """
    assert os.path.isfile(model_def_file), "File {} is missing".format(model_def_file)
    assert os.path.isfile(model_file), ("File {} is missing. Please download it using "
                                        "./download_trained_model.sh").format(model_file)
    # Select the compute device before building the network.
    if gpu_device >= 0:
        caffe.set_device(gpu_device)
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()
    net = caffe.Net(model_def_file, model_file, caffe.TEST)
    num_images = len(inputs)
    num_channels = inputs[0].shape[2]
    assert num_channels == 3, "Unexpected channel count. A 3-channel RGB image is exptected."
    # Caffe expects NCHW input padded to the fixed _MAX_DIM x _MAX_DIM size.
    caffe_in = np.zeros((num_images, num_channels, _MAX_DIM, _MAX_DIM), dtype=np.float32)
    for ix, in_ in enumerate(inputs):
        # HWC -> CHW for Caffe.
        caffe_in[ix] = in_.transpose((2, 0, 1))
    start_time = time.time()
    out = net.forward_all(**{net.inputs[0]: caffe_in})
    end_time = time.time()
    print("Time taken to run the network: {:.4f} seconds".format(end_time - start_time))
    predictions = out[net.outputs[0]]
    # Per-pixel argmax over class scores; only the first image's mask is returned.
    return predictions[0].argmax(axis=0).astype(np.uint8)
def run_crfrnn(input_file, output_file, gpu_device):
    """ Runs the CRF-RNN segmentation on the given RGB image and saves the segmentation mask.
    Args:
        input_file: Input RGB image file (e.g. in JPEG format)
        output_file: Path to save the resulting segmentation in PNG format
        gpu_device: ID of the GPU device. If using the CPU, set this to -1
    """
    # caffe.io.load_image returns floats in [0, 1]; scale to [0, 255].
    input_image = 255 * caffe.io.load_image(input_file)
    input_image = resize_image(input_image)
    image = PILImage.fromarray(np.uint8(input_image))
    image = np.array(image)
    palette = get_palette(256)
    #PIL reads image in the form of RGB, while cv2 reads image in the form of BGR, mean_vec = [R,G,B]
    mean_vec = np.array([123.68, 116.779, 103.939], dtype=np.float32)
    mean_vec = mean_vec.reshape(1, 1, 3)
    # Rearrange channels to form BGR
    im = image[:, :, ::-1]
    # Subtract mean
    im = im - mean_vec
    # Pad as necessary
    # Zero-pad bottom/right up to the fixed _MAX_DIM x _MAX_DIM network input size.
    cur_h, cur_w, cur_c = im.shape
    pad_h = _MAX_DIM - cur_h
    pad_w = _MAX_DIM - cur_w
    im = np.pad(im, pad_width=((0, pad_h), (0, pad_w), (0, 0)), mode='constant', constant_values=0)
    # Get predictions
    segmentation = crfrnn_segmenter(_MODEL_DEF_FILE, _MODEL_FILE, gpu_device, [im])
    # Crop the padding back off before saving.
    segmentation = segmentation[0:cur_h, 0:cur_w]
    output_im = PILImage.fromarray(segmentation)
    # Save as an indexed PNG using the VOC palette for class colors.
    output_im.putpalette(palette)
    output_im.save(output_file)
def resize_image(image):
    """ Resizes the image so that the largest dimension is not larger than 500 pixels.
    If the image's largest dimension is already less than 500, no changes are made.
    Args:
        image: Input image as an H x W x C numpy array.
    Returns:
        Resized image where the largest dimension is at most _MAX_DIM pixels.
    """
    # BUGFIX (clarity): the original assigned `width = shape[0]` and
    # `height = shape[1]` — swapped relative to numpy's (rows, cols) layout.
    # It produced the right result only because the ratio is MAX/max-dim
    # either way. Name the axes correctly and compute the ratio once.
    height, width = image.shape[0], image.shape[1]
    max_dim = max(height, width)
    if max_dim > _MAX_DIM:
        ratio = float(_MAX_DIM) / max_dim
        pil_img = PILImage.fromarray(np.uint8(image))
        # PIL's resize takes (width, height), i.e. (cols, rows).
        pil_img = pil_img.resize((int(width * ratio), int(height * ratio)), resample=PILImage.BILINEAR)
        image = np.array(pil_img)
    return image
def main(argv):
    """ Main entry point to the program.

    Parses -i/--ifile, -o/--ofile and -g/--gpu command-line options and runs
    the CRF-RNN segmentation.
    """
    input_file = "input.jpg"
    output_file = "output.png"
    gpu_device = -1  # Use -1 to run only on the CPU, use 0-3[7] to run on the GPU
    try:
        opts, args = getopt.getopt(argv, 'hi:o:g:', ["ifile=", "ofile=", "gpu="])
    except getopt.GetoptError:
        print("crfasrnn_demo.py -i <input_file> -o <output_file> -g <gpu_device>")
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print("crfasrnn_demo.py -i <inputfile> -o <outputfile> -g <gpu_device>")
            sys.exit()
        elif opt in ("-i", "--ifile"):
            # BUGFIX: getopt yields long options with a leading "--"
            # ("--ifile"), but the old code compared against "ifile", so the
            # long forms never matched. Same fix for --ofile and --gpu below
            # (the old code even compared against "gpudevice", which isn't a
            # registered option at all).
            input_file = arg
        elif opt in ("-o", "--ofile"):
            output_file = arg
        elif opt in ("-g", "--gpu"):
            gpu_device = int(arg)
    print("Input file: {}".format(input_file))
    print("Output file: {}".format(output_file))
    if gpu_device >= 0:
        print("GPU device ID: {}".format(gpu_device))
    else:
        print("Using the CPU (set parameters appropriately to use the GPU)")
    run_crfrnn(input_file, output_file, gpu_device)
if __name__ == "__main__":
    main(sys.argv[1:])
| 6,612 | 31.101942 | 133 | py |
Nested-UNet | Nested-UNet-master/model_logic.py | '''
'''
import keras
import tensorflow as tf
from keras.models import Model
from keras import backend as K
from keras.layers import Input, merge, Conv2D, ZeroPadding2D, UpSampling2D, Dense, concatenate, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D, GlobalAveragePooling2D, MaxPooling2D
from keras.layers.core import Dense, Dropout, Activation
from keras.layers import BatchNormalization, Dropout, Flatten, Lambda
from keras.layers.advanced_activations import ELU, LeakyReLU
from keras.optimizers import Adam, RMSprop, SGD
from keras.regularizers import l2
from keras.layers.noise import GaussianDropout
import numpy as np
# Smoothing term added to the Dice numerator and denominator (see dice_coef).
smooth = 1.
# Dropout probability applied after each Conv2D inside standard_unit.
dropout_rate = 0.5
# Activation name passed to every Conv2D layer.
act = "relu"
def mean_iou(y_true, y_pred):
    """Mean IoU metric averaged over binarization thresholds 0.50..0.95.

    Uses the TF1 streaming metric (`tf.metrics.mean_iou`), so it relies on
    local-variable initialization and a control dependency on the update op.
    `tf.to_int32` is TF1-only; this function does not run on TF2.
    """
    prec = []
    for t in np.arange(0.5, 1.0, 0.05):
        # Binarize predictions at threshold t.
        y_pred_ = tf.to_int32(y_pred > t)
        score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)
        # Streaming metrics keep their accumulators in local variables.
        K.get_session().run(tf.local_variables_initializer())
        # Force the update op to run before reading the score.
        with tf.control_dependencies([up_opt]):
            score = tf.identity(score)
        prec.append(score)
    return K.mean(K.stack(prec), axis=0)
# Custom loss function
def dice_coef(y_true, y_pred):
    """Smoothed Sorensen-Dice coefficient between flattened masks (symbolic)."""
    eps = 1.  # local smoothing term (same value as the module-level `smooth`)
    truth = K.flatten(y_true)
    pred = K.flatten(y_pred)
    overlap = K.sum(truth * pred)
    return (2. * overlap + eps) / (K.sum(truth) + K.sum(pred) + eps)
def bce_dice_loss(y_true, y_pred):
    """Combined loss: half the binary cross-entropy minus the Dice coefficient."""
    bce = keras.losses.binary_crossentropy(y_true, y_pred)
    return 0.5 * bce - dice_coef(y_true, y_pred)
########################################
# 2D Standard
########################################
def standard_unit(input_tensor, stage, nb_filter, kernel_size=3):
    """Two stacked (Conv2D -> Dropout) layers — the basic U-Net building block.

    Layer names embed *stage* ('conv<stage>_1', 'dp<stage>_1', ...) so every
    unit in the graph is uniquely named.
    """
    out = input_tensor
    for part in ('1', '2'):
        out = Conv2D(nb_filter, (kernel_size, kernel_size), activation=act,
                     name='conv' + stage + '_' + part, kernel_initializer='he_normal',
                     padding='same', kernel_regularizer=l2(1e-4))(out)
        out = Dropout(dropout_rate, name='dp' + stage + '_' + part)(out)
    return out
########################################
"""
Standard U-Net [Ronneberger et.al, 2015]
Total params: 7,759,521
"""
def U_Net(img_rows, img_cols, color_type=1, num_class=1):
    """Standard U-Net: 4-level encoder, bottleneck, 4-level decoder with skip
    connections, sigmoid 1x1 output. Returns an uncompiled Keras Model."""
    nb_filter = [32,64,128,256,512]
    # Handle Dimension Ordering for different backends
    global bn_axis
    if K.image_dim_ordering() == 'tf':
        # channels-last: concatenate along the last axis
        bn_axis = 3
        img_input = Input(shape=(img_rows, img_cols, color_type), name='main_input')
    else:
        # channels-first
        bn_axis = 1
        img_input = Input(shape=(color_type, img_rows, img_cols), name='main_input')
    # Encoder: conv block then 2x2 max-pool at each level.
    conv1_1 = standard_unit(img_input, stage='11', nb_filter=nb_filter[0])
    pool1 = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(conv1_1)
    conv2_1 = standard_unit(pool1, stage='21', nb_filter=nb_filter[1])
    pool2 = MaxPooling2D((2, 2), strides=(2, 2), name='pool2')(conv2_1)
    conv3_1 = standard_unit(pool2, stage='31', nb_filter=nb_filter[2])
    pool3 = MaxPooling2D((2, 2), strides=(2, 2), name='pool3')(conv3_1)
    conv4_1 = standard_unit(pool3, stage='41', nb_filter=nb_filter[3])
    pool4 = MaxPooling2D((2, 2), strides=(2, 2), name='pool4')(conv4_1)
    # Bottleneck.
    conv5_1 = standard_unit(pool4, stage='51', nb_filter=nb_filter[4])
    # Decoder: transpose-conv upsample, concatenate the encoder skip, conv block.
    up4_2 = Conv2DTranspose(nb_filter[3], (2, 2), strides=(2, 2), name='up42', padding='same')(conv5_1)
    conv4_2 = concatenate([up4_2, conv4_1], name='merge42', axis=bn_axis)
    conv4_2 = standard_unit(conv4_2, stage='42', nb_filter=nb_filter[3])
    up3_3 = Conv2DTranspose(nb_filter[2], (2, 2), strides=(2, 2), name='up33', padding='same')(conv4_2)
    conv3_3 = concatenate([up3_3, conv3_1], name='merge33', axis=bn_axis)
    conv3_3 = standard_unit(conv3_3, stage='33', nb_filter=nb_filter[2])
    up2_4 = Conv2DTranspose(nb_filter[1], (2, 2), strides=(2, 2), name='up24', padding='same')(conv3_3)
    conv2_4 = concatenate([up2_4, conv2_1], name='merge24', axis=bn_axis)
    conv2_4 = standard_unit(conv2_4, stage='24', nb_filter=nb_filter[1])
    up1_5 = Conv2DTranspose(nb_filter[0], (2, 2), strides=(2, 2), name='up15', padding='same')(conv2_4)
    conv1_5 = concatenate([up1_5, conv1_1], name='merge15', axis=bn_axis)
    conv1_5 = standard_unit(conv1_5, stage='15', nb_filter=nb_filter[0])
    # 1x1 sigmoid head producing a per-pixel probability map.
    unet_output = Conv2D(num_class, (1, 1), activation='sigmoid', name='output', kernel_initializer = 'he_normal', padding='same', kernel_regularizer=l2(1e-4))(conv1_5)
    model = Model(input=img_input, output=unet_output)
    return model
"""
wU-Net for comparison
Total params: 9,282,246
"""
def wU_Net(img_rows, img_cols, color_type=1, num_class=1):
    """Wide U-Net: identical topology to U_Net but with ~10% wider filter
    counts per level, used as a parameter-matched baseline for UNet++."""
    # nb_filter = [32,64,128,256,512]
    nb_filter = [35,70,140,280,560]
    # Handle Dimension Ordering for different backends
    global bn_axis
    if K.image_dim_ordering() == 'tf':
        # channels-last: concatenate along the last axis
        bn_axis = 3
        img_input = Input(shape=(img_rows, img_cols, color_type), name='main_input')
    else:
        # channels-first
        bn_axis = 1
        img_input = Input(shape=(color_type, img_rows, img_cols), name='main_input')
    # Encoder path.
    conv1_1 = standard_unit(img_input, stage='11', nb_filter=nb_filter[0])
    pool1 = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(conv1_1)
    conv2_1 = standard_unit(pool1, stage='21', nb_filter=nb_filter[1])
    pool2 = MaxPooling2D((2, 2), strides=(2, 2), name='pool2')(conv2_1)
    conv3_1 = standard_unit(pool2, stage='31', nb_filter=nb_filter[2])
    pool3 = MaxPooling2D((2, 2), strides=(2, 2), name='pool3')(conv3_1)
    conv4_1 = standard_unit(pool3, stage='41', nb_filter=nb_filter[3])
    pool4 = MaxPooling2D((2, 2), strides=(2, 2), name='pool4')(conv4_1)
    # Bottleneck.
    conv5_1 = standard_unit(pool4, stage='51', nb_filter=nb_filter[4])
    # Decoder path with skip connections.
    up4_2 = Conv2DTranspose(nb_filter[3], (2, 2), strides=(2, 2), name='up42', padding='same')(conv5_1)
    conv4_2 = concatenate([up4_2, conv4_1], name='merge42', axis=bn_axis)
    conv4_2 = standard_unit(conv4_2, stage='42', nb_filter=nb_filter[3])
    up3_3 = Conv2DTranspose(nb_filter[2], (2, 2), strides=(2, 2), name='up33', padding='same')(conv4_2)
    conv3_3 = concatenate([up3_3, conv3_1], name='merge33', axis=bn_axis)
    conv3_3 = standard_unit(conv3_3, stage='33', nb_filter=nb_filter[2])
    up2_4 = Conv2DTranspose(nb_filter[1], (2, 2), strides=(2, 2), name='up24', padding='same')(conv3_3)
    conv2_4 = concatenate([up2_4, conv2_1], name='merge24', axis=bn_axis)
    conv2_4 = standard_unit(conv2_4, stage='24', nb_filter=nb_filter[1])
    up1_5 = Conv2DTranspose(nb_filter[0], (2, 2), strides=(2, 2), name='up15', padding='same')(conv2_4)
    conv1_5 = concatenate([up1_5, conv1_1], name='merge15', axis=bn_axis)
    conv1_5 = standard_unit(conv1_5, stage='15', nb_filter=nb_filter[0])
    # 1x1 sigmoid head producing a per-pixel probability map.
    unet_output = Conv2D(num_class, (1, 1), activation='sigmoid', name='output', kernel_initializer = 'he_normal', padding='same', kernel_regularizer=l2(1e-4))(conv1_5)
    model = Model(input=img_input, output=unet_output)
    return model
"""
Standard UNet++ [Zhou et.al, 2018]
Total params: 9,041,601
"""
def Nest_Net(img_rows, img_cols, color_type=1, num_class=1, deep_supervision=False):
    """Build the standard UNet++ (Nested U-Net) model.

    Args:
        img_rows, img_cols: input spatial dimensions.
        color_type: number of input channels.
        num_class: number of output channels (each sigmoid-activated).
        deep_supervision: if True the model exposes all four nested decoder
            outputs; otherwise only the deepest (final) output.

    Returns:
        keras Model instance.
    """
    nb_filter = [32,64,128,256,512]

    # Handle Dimension Ordering for different backends
    global bn_axis
    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
        img_input = Input(shape=(img_rows, img_cols, color_type), name='main_input')
    else:
        bn_axis = 1
        img_input = Input(shape=(color_type, img_rows, img_cols), name='main_input')

    # Encoder column: conv unit followed by 2x2 max-pooling at each level.
    conv1_1 = standard_unit(img_input, stage='11', nb_filter=nb_filter[0])
    pool1 = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(conv1_1)

    conv2_1 = standard_unit(pool1, stage='21', nb_filter=nb_filter[1])
    pool2 = MaxPooling2D((2, 2), strides=(2, 2), name='pool2')(conv2_1)

    # Nested decoder node at depth 1: upsample the level below and merge
    # with all earlier nodes of the same resolution row.
    up1_2 = Conv2DTranspose(nb_filter[0], (2, 2), strides=(2, 2), name='up12', padding='same')(conv2_1)
    conv1_2 = concatenate([up1_2, conv1_1], name='merge12', axis=bn_axis)
    conv1_2 = standard_unit(conv1_2, stage='12', nb_filter=nb_filter[0])

    conv3_1 = standard_unit(pool2, stage='31', nb_filter=nb_filter[2])
    pool3 = MaxPooling2D((2, 2), strides=(2, 2), name='pool3')(conv3_1)

    up2_2 = Conv2DTranspose(nb_filter[1], (2, 2), strides=(2, 2), name='up22', padding='same')(conv3_1)
    conv2_2 = concatenate([up2_2, conv2_1], name='merge22', axis=bn_axis)
    conv2_2 = standard_unit(conv2_2, stage='22', nb_filter=nb_filter[1])

    up1_3 = Conv2DTranspose(nb_filter[0], (2, 2), strides=(2, 2), name='up13', padding='same')(conv2_2)
    conv1_3 = concatenate([up1_3, conv1_1, conv1_2], name='merge13', axis=bn_axis)
    conv1_3 = standard_unit(conv1_3, stage='13', nb_filter=nb_filter[0])

    conv4_1 = standard_unit(pool3, stage='41', nb_filter=nb_filter[3])
    pool4 = MaxPooling2D((2, 2), strides=(2, 2), name='pool4')(conv4_1)

    up3_2 = Conv2DTranspose(nb_filter[2], (2, 2), strides=(2, 2), name='up32', padding='same')(conv4_1)
    conv3_2 = concatenate([up3_2, conv3_1], name='merge32', axis=bn_axis)
    conv3_2 = standard_unit(conv3_2, stage='32', nb_filter=nb_filter[2])

    up2_3 = Conv2DTranspose(nb_filter[1], (2, 2), strides=(2, 2), name='up23', padding='same')(conv3_2)
    conv2_3 = concatenate([up2_3, conv2_1, conv2_2], name='merge23', axis=bn_axis)
    conv2_3 = standard_unit(conv2_3, stage='23', nb_filter=nb_filter[1])

    up1_4 = Conv2DTranspose(nb_filter[0], (2, 2), strides=(2, 2), name='up14', padding='same')(conv2_3)
    conv1_4 = concatenate([up1_4, conv1_1, conv1_2, conv1_3], name='merge14', axis=bn_axis)
    conv1_4 = standard_unit(conv1_4, stage='14', nb_filter=nb_filter[0])

    # Bottleneck (deepest encoder level).
    conv5_1 = standard_unit(pool4, stage='51', nb_filter=nb_filter[4])

    up4_2 = Conv2DTranspose(nb_filter[3], (2, 2), strides=(2, 2), name='up42', padding='same')(conv5_1)
    conv4_2 = concatenate([up4_2, conv4_1], name='merge42', axis=bn_axis)
    conv4_2 = standard_unit(conv4_2, stage='42', nb_filter=nb_filter[3])

    up3_3 = Conv2DTranspose(nb_filter[2], (2, 2), strides=(2, 2), name='up33', padding='same')(conv4_2)
    conv3_3 = concatenate([up3_3, conv3_1, conv3_2], name='merge33', axis=bn_axis)
    conv3_3 = standard_unit(conv3_3, stage='33', nb_filter=nb_filter[2])

    up2_4 = Conv2DTranspose(nb_filter[1], (2, 2), strides=(2, 2), name='up24', padding='same')(conv3_3)
    conv2_4 = concatenate([up2_4, conv2_1, conv2_2, conv2_3], name='merge24', axis=bn_axis)
    conv2_4 = standard_unit(conv2_4, stage='24', nb_filter=nb_filter[1])

    up1_5 = Conv2DTranspose(nb_filter[0], (2, 2), strides=(2, 2), name='up15', padding='same')(conv2_4)
    conv1_5 = concatenate([up1_5, conv1_1, conv1_2, conv1_3, conv1_4], name='merge15', axis=bn_axis)
    conv1_5 = standard_unit(conv1_5, stage='15', nb_filter=nb_filter[0])

    # One 1x1 sigmoid head per nested decoder depth (deep supervision heads).
    nestnet_output_1 = Conv2D(num_class, (1, 1), activation='sigmoid', name='output_1', kernel_initializer = 'he_normal', padding='same', kernel_regularizer=l2(1e-4))(conv1_2)
    nestnet_output_2 = Conv2D(num_class, (1, 1), activation='sigmoid', name='output_2', kernel_initializer = 'he_normal', padding='same', kernel_regularizer=l2(1e-4))(conv1_3)
    nestnet_output_3 = Conv2D(num_class, (1, 1), activation='sigmoid', name='output_3', kernel_initializer = 'he_normal', padding='same', kernel_regularizer=l2(1e-4))(conv1_4)
    nestnet_output_4 = Conv2D(num_class, (1, 1), activation='sigmoid', name='output_4', kernel_initializer = 'he_normal', padding='same', kernel_regularizer=l2(1e-4))(conv1_5)

    if deep_supervision:
        model = Model(input=img_input, output=[nestnet_output_1,
                                               nestnet_output_2,
                                               nestnet_output_3,
                                               nestnet_output_4])
    else:
        model = Model(input=img_input, output=[nestnet_output_4])

    return model
if __name__ == '__main__':
    # Smoke test: build each architecture variant on a 96x96 single-channel
    # input and print its layer summary.
    for build_fn in (U_Net, wU_Net, Nest_Net):
        net = build_fn(96, 96, 1)
        net.summary()
| 12,054 | 44.149813 | 186 | py |
Nested-UNet | Nested-UNet-master/segmentation_models/xnet/model.py | from .builder import build_xnet
from ..utils import freeze_model
from ..backbones import get_backbone
# Default skip-connection sources per backbone for the Xnet (nested) decoder.
# VGG/ResNet/ResNeXt backbones are addressed by layer *name*; when a tuple is
# longer than the number of upsample blocks, build_xnet splits it in half:
# the first half feeds the decoder skip connections and the second half is
# used as downsampling anchors. Inception/DenseNet backbones are addressed
# by layer *index*.
# NOTE(review): the resnext entries repeat 'stage4_unit1_relu1' where the
# resnet entries have 'relu1' — possibly a typo; confirm against the
# backbone layer names.
DEFAULT_SKIP_CONNECTIONS = {
    'vgg16': ('block5_conv3', 'block4_conv3', 'block3_conv3', 'block2_conv2', 'block1_conv2',
              'block5_pool', 'block4_pool', 'block3_pool', 'block2_pool', 'block1_pool',
              ),
    'vgg19': ('block5_conv4', 'block4_conv4', 'block3_conv4', 'block2_conv2', 'block1_conv2',
              'block5_pool', 'block4_pool', 'block3_pool', 'block2_pool', 'block1_pool',
              ),
    'resnet18': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0',
                 'relu1', 'stage3_unit2_relu1', 'stage2_unit2_relu1', 'stage1_unit2_relu1',
                 ),
    'resnet34': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0',
                 'relu1', 'stage3_unit2_relu1', 'stage2_unit2_relu1', 'stage1_unit2_relu1',
                 ),
    'resnet50': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0',
                 'relu1', 'stage3_unit2_relu1', 'stage2_unit2_relu1', 'stage1_unit2_relu1',
                 ),
    'resnet101': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0',
                  'relu1', 'stage3_unit2_relu1', 'stage2_unit2_relu1', 'stage1_unit2_relu1',
                  ),
    'resnet152': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0',
                  'relu1', 'stage3_unit2_relu1', 'stage2_unit2_relu1', 'stage1_unit2_relu1',
                  ),
    'resnext50': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0',
                  'stage4_unit1_relu1', 'stage3_unit2_relu1', 'stage2_unit2_relu1', 'stage1_unit2_relu1',
                  ),
    'resnext101': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0',
                   'stage4_unit1_relu1', 'stage3_unit2_relu1', 'stage2_unit2_relu1', 'stage1_unit2_relu1',
                   ),
    'inceptionv3': (228, 86, 16, 9),
    'inceptionresnetv2': (594, 260, 16, 9),
    'densenet121': (311, 139, 51, 4),
    'densenet169': (367, 139, 51, 4),
    'densenet201': (479, 139, 51, 4),
}
def Xnet(backbone_name='vgg16',
         input_shape=(None, None, 3),
         input_tensor=None,
         encoder_weights='imagenet',
         freeze_encoder=False,
         skip_connections='default',
         decoder_block_type='upsampling',
         decoder_filters=(256,128,64,32,16),
         decoder_use_batchnorm=True,
         n_upsample_blocks=5,
         upsample_rates=(2,2,2,2,2),
         classes=1,
         activation='sigmoid'):
    """Build an Xnet (nested, UNet++-style) segmentation model.

    Args:
        backbone_name: (str) name of a supported classification backbone.
        input_shape: (tuple) input dimensions (H, W, C).
        input_tensor: optional keras tensor to use as the model input.
        encoder_weights: one of `None` (random initialization),
            'imagenet' (pre-training on ImageNet),
            'dof' (pre-training on DoF).
        freeze_encoder: (bool) set encoder layer weights as non-trainable;
            useful for fine-tuning.
        skip_connections: 'default' to look the skip sources up in
            DEFAULT_SKIP_CONNECTIONS, otherwise a list of layer
            numbers or names starting from the top of the model.
        decoder_block_type: (str) 'upsampling' or 'transpose' (see blocks.py).
        decoder_filters: (tuple of int) convolution filters of the decoder blocks.
        decoder_use_batchnorm: (bool) add BatchNormalization between `Conv2D`
            and `Activation` layers in decoder blocks.
        n_upsample_blocks: (int) number of upsampling blocks.
        upsample_rates: (tuple of int) upsampling rate of each decoder block.
        classes: (int) number of output classes.
        activation: (str) keras activation of the last model layer.

    Returns:
        keras.models.Model instance
    """
    encoder = get_backbone(backbone_name,
                           input_shape=input_shape,
                           input_tensor=input_tensor,
                           weights=encoder_weights,
                           include_top=False)

    skips = skip_connections
    if skips == 'default':
        skips = DEFAULT_SKIP_CONNECTIONS[backbone_name]

    model = build_xnet(encoder,
                       classes,
                       skips,
                       decoder_filters=decoder_filters,
                       block_type=decoder_block_type,
                       activation=activation,
                       n_upsample_blocks=n_upsample_blocks,
                       upsample_rates=upsample_rates,
                       use_batchnorm=decoder_use_batchnorm)

    # Optionally lock encoder weights for fine-tuning.
    if freeze_encoder:
        freeze_model(encoder)

    model.name = 'x-{}'.format(backbone_name)

    return model
Nested-UNet | Nested-UNet-master/segmentation_models/xnet/builder.py | from keras.layers import Conv2D
from keras.layers import Activation
from keras.models import Model
from .blocks import Transpose2D_block
from .blocks import Upsample2D_block
from ..utils import get_layer_number, to_tuple
import copy
def build_xnet(backbone, classes, skip_connection_layers,
               decoder_filters=(256,128,64,32,16),
               upsample_rates=(2,2,2,2,2),
               n_upsample_blocks=5,
               block_type='upsampling',
               activation='sigmoid',
               use_batchnorm=True):
    """Build a nested (UNet++-style) decoder on top of `backbone`.

    Decoder nodes are kept in `interm`, a flattened
    (n_upsample_blocks+1) x (n_upsample_blocks+1) grid stored row-major:
    the node at (level i, depth j) lives at index
    ``(n_upsample_blocks+1)*i + j`` where i=0 is the highest resolution.

    Args:
        backbone: keras model used as the encoder.
        classes: number of output channels of the final conv.
        skip_connection_layers: layer names or indices used as skip sources.
            If more than `n_upsample_blocks` entries are given, the first
            half are skip sources and the second half downsampling anchors.
        decoder_filters: conv filters per decoder level, deepest first.
        upsample_rates: upsampling rate per decoder level.
        n_upsample_blocks: number of decoder levels.
        block_type: 'transpose' for Conv2DTranspose-based blocks, anything
            else for UpSampling2D-based blocks.
        activation: final activation (also used as the layer name).
        use_batchnorm: add BatchNormalization inside decoder blocks.

    Returns:
        keras.models.Model mapping the backbone input to the segmentation map.
    """
    input = backbone.input

    # Decoder block flavour.
    if block_type == 'transpose':
        up_block = Transpose2D_block
    else:
        up_block = Upsample2D_block

    # When twice as many connections are supplied, the second half are the
    # encoder tensors used to seed the first decoder column.
    if len(skip_connection_layers) > n_upsample_blocks:
        downsampling_layers = skip_connection_layers[int(len(skip_connection_layers)/2):]
        skip_connection_layers = skip_connection_layers[:int(len(skip_connection_layers)/2)]
    else:
        downsampling_layers = skip_connection_layers

    # Convert layer names to indices and fetch the corresponding tensors.
    skip_connection_idx = ([get_layer_number(backbone, l) if isinstance(l, str) else l
                            for l in skip_connection_layers])
    skip_layers_list = [backbone.layers[skip_connection_idx[i]].output for i in range(len(skip_connection_idx))]
    downsampling_idx = ([get_layer_number(backbone, l) if isinstance(l, str) else l
                         for l in downsampling_layers])
    downsampling_list = [backbone.layers[downsampling_idx[i]].output for i in range(len(downsampling_idx))]

    # downterm[i] holds the encoder output feeding level i; the deepest slot
    # is always the backbone output.
    downterm = [None] * (n_upsample_blocks+1)
    for i in range(len(downsampling_idx)):
        if downsampling_list[0] == backbone.output:
            downterm[n_upsample_blocks-i] = downsampling_list[i]
        else:
            downterm[n_upsample_blocks-i-1] = downsampling_list[i]
    downterm[-1] = backbone.output

    # Seed column j=0 of the node grid with the encoder skip tensors.
    interm = [None] * (n_upsample_blocks+1) * (n_upsample_blocks+1)
    for i in range(len(skip_connection_idx)):
        interm[-i*(n_upsample_blocks+1)+(n_upsample_blocks+1)*(n_upsample_blocks-1)] = skip_layers_list[i]
    interm[(n_upsample_blocks+1)*n_upsample_blocks] = backbone.output

    # Fill the grid column by column: node (i, j+1) upsamples node (i+1, j)
    # and concatenates it with the earlier nodes of row i.
    for j in range(n_upsample_blocks):
        for i in range(n_upsample_blocks-j):
            upsample_rate = to_tuple(upsample_rates[i])

            if i == 0 and j < n_upsample_blocks-1 and len(skip_connection_layers) < n_upsample_blocks:
                # Not enough skip sources to populate the top row yet.
                interm[(n_upsample_blocks+1)*i+j+1] = None
            elif j == 0:
                # First decoder column is driven by the encoder tensors.
                if downterm[i+1] is not None:
                    interm[(n_upsample_blocks+1)*i+j+1] = up_block(decoder_filters[n_upsample_blocks-i-2],
                                                                   i+1, j+1, upsample_rate=upsample_rate,
                                                                   skip=interm[(n_upsample_blocks+1)*i+j],
                                                                   use_batchnorm=use_batchnorm)(downterm[i+1])
                else:
                    interm[(n_upsample_blocks+1)*i+j+1] = None
            else:
                interm[(n_upsample_blocks+1)*i+j+1] = up_block(decoder_filters[n_upsample_blocks-i-2],
                                                               i+1, j+1, upsample_rate=upsample_rate,
                                                               skip=interm[(n_upsample_blocks+1)*i : (n_upsample_blocks+1)*i+j+1],
                                                               use_batchnorm=use_batchnorm)(interm[(n_upsample_blocks+1)*(i+1)+j])

    # Head: 3x3 conv on the last top-row node, then the requested activation.
    x = Conv2D(classes, (3,3), padding='same', name='final_conv')(interm[n_upsample_blocks])
    x = Activation(activation, name=activation)(x)

    model = Model(input, x)

    return model
| 7,554 | 41.926136 | 112 | py |
Nested-UNet | Nested-UNet-master/segmentation_models/xnet/blocks.py | from keras.layers import Conv2DTranspose
from keras.layers import UpSampling2D
from keras.layers import Conv2D
from keras.layers import BatchNormalization
from keras.layers import Activation
from keras.layers import Concatenate
def handle_block_names(stage, cols):
    """Return the standard layer names (conv, bn, relu, upsample, merge)
    for the decoder node at position (stage, cols)."""
    prefix = 'decoder_stage{}-{}'.format(stage, cols)
    merge_name = 'merge_{}-{}'.format(stage, cols)
    return (prefix + '_conv',
            prefix + '_bn',
            prefix + '_relu',
            prefix + '_upsample',
            merge_name)
def ConvRelu(filters, kernel_size, use_batchnorm=False, conv_name='conv', bn_name='bn', relu_name='relu'):
    """Return a callable applying Conv2D (optionally BatchNorm) then ReLU."""
    def layer(input_tensor):
        # Bias is redundant when BatchNormalization follows the conv.
        tensor = Conv2D(filters, kernel_size, padding="same", name=conv_name,
                        use_bias=not use_batchnorm)(input_tensor)
        if use_batchnorm:
            tensor = BatchNormalization(name=bn_name)(tensor)
        return Activation('relu', name=relu_name)(tensor)
    return layer
def Upsample2D_block(filters, stage, cols, kernel_size=(3,3), upsample_rate=(2,2),
                     use_batchnorm=False, skip=None):
    """Decoder block: UpSampling2D, optional skip merge, two ConvRelu units."""
    def layer(input_tensor):
        conv_name, bn_name, relu_name, up_name, merge_name = handle_block_names(stage, cols)

        x = UpSampling2D(size=upsample_rate, name=up_name)(input_tensor)

        if skip is not None:
            x = Concatenate(name=merge_name)([x, skip])

        # Two identical conv units, distinguished only by a name suffix.
        for suffix in ('1', '2'):
            x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                         conv_name=conv_name + suffix,
                         bn_name=bn_name + suffix,
                         relu_name=relu_name + suffix)(x)
        return x
    return layer
def Transpose2D_block(filters, stage, cols, kernel_size=(3,3), upsample_rate=(2,2),
                      transpose_kernel_size=(4,4), use_batchnorm=False, skip=None):
    """Decoder block: Conv2DTranspose upsampling, optional (multi-)skip merge,
    then one ConvRelu unit. `skip` may be a single tensor or a list; a list
    is merged only when it contains no None placeholders."""
    def layer(input_tensor):
        conv_name, bn_name, relu_name, up_name, merge_name = handle_block_names(stage, cols)

        x = Conv2DTranspose(filters, transpose_kernel_size, strides=upsample_rate,
                            padding='same', name=up_name,
                            use_bias=not use_batchnorm)(input_tensor)
        if use_batchnorm:
            x = BatchNormalization(name=bn_name+'1')(x)
        x = Activation('relu', name=relu_name+'1')(x)

        # A list skip is usable only when fully populated; a plain tensor
        # skip only when it is not None.
        if type(skip) is list:
            skip_is_usable = None not in skip
        else:
            skip_is_usable = skip is not None

        if skip_is_usable:
            if type(skip) is list:
                merge_inputs = [x] + skip
            else:
                merge_inputs = [x, skip]
            x = Concatenate(name=merge_name)(merge_inputs)

        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '2', bn_name=bn_name + '2',
                     relu_name=relu_name + '2')(x)
        return x
    return layer
Nested-UNet | Nested-UNet-master/segmentation_models/common/functions.py | import numpy as np
import tensorflow as tf
def transpose_shape(shape, target_format, spatial_axes):
    """Convert a `channels_last`-ordered shape to `target_format`.

    # Arguments
        shape: Tuple or list (often a shape) given in `channels_last` order.
        target_format: A string, either `'channels_first'` or `'channels_last'`.
        spatial_axes: Tuple of ints, indexes of the spatial axes in `shape`,
            e.g. `(2, 3)` for `(batch, timesteps, rows, cols, channels)`.

    # Returns
        A tuple or list (matching the input type) permuted for `target_format`.

    # Raises
        ValueError: if `target_format` is invalid.
    """
    if target_format == 'channels_last':
        # Input is already channels-last; nothing to do.
        return shape
    if target_format == 'channels_first':
        # Move the channel axis (last) in front of the spatial axes.
        permuted = (tuple(shape[:spatial_axes[0]])
                    + (shape[-1],)
                    + tuple(shape[axis] for axis in spatial_axes))
        return list(permuted) if isinstance(shape, list) else permuted
    raise ValueError('The `data_format` argument must be one of '
                     '"channels_first", "channels_last". Received: ' +
                     str(target_format))
def permute_dimensions(x, pattern):
    """Permutes axes in a tensor.

    Thin wrapper around `tf.transpose`.

    # Arguments
        x: Tensor or variable.
        pattern: A tuple of
            dimension indices, e.g. `(0, 2, 1)`.

    # Returns
        A tensor.
    """
    return tf.transpose(x, perm=pattern)
def int_shape(x):
    """Return the static shape of a tensor or variable.

    # Arguments
        x: Tensor or variable.

    # Returns
        A tuple of integers (or None entries), or None if the shape
        cannot be determined.
    """
    try:
        # Keras tensors carry their symbolic shape on `_keras_shape`.
        return x._keras_shape
    except AttributeError:
        pass
    try:
        return tuple(x.get_shape().as_list())
    except ValueError:
        return None
def resize_images(x,
                  height_factor,
                  width_factor,
                  data_format,
                  interpolation='nearest'):
    """Resizes the images contained in a 4D tensor.

    # Arguments
        x: Tensor or variable to resize.
        height_factor: Positive integer.
        width_factor: Positive integer.
        data_format: string, `"channels_last"` or `"channels_first"`.
        interpolation: A string, one of `nearest` or `bilinear`.

    # Returns
        A tensor.

    # Raises
        ValueError: if `data_format` is neither `"channels_last"` or `"channels_first"`.
    """
    # Positions of the spatial axes depend on the data format.
    if data_format == 'channels_first':
        rows, cols = 2, 3
    else:
        rows, cols = 1, 2

    original_shape = int_shape(x)
    # Dynamic target size = current spatial size * scale factors.
    new_shape = tf.shape(x)[rows:cols + 1]
    new_shape *= tf.constant(np.array([height_factor, width_factor], dtype='int32'))

    # tf.image resize ops operate on channels-last tensors, so temporarily
    # permute channels-first inputs around the resize.
    if data_format == 'channels_first':
        x = permute_dimensions(x, [0, 2, 3, 1])
    if interpolation == 'nearest':
        x = tf.image.resize_nearest_neighbor(x, new_shape)
    elif interpolation == 'bilinear':
        x = tf.image.resize_bilinear(x, new_shape)
    else:
        raise ValueError('interpolation should be one '
                         'of "nearest" or "bilinear".')
    if data_format == 'channels_first':
        x = permute_dimensions(x, [0, 3, 1, 2])

    # Recompute the static spatial sizes (None stays None when unknown).
    if original_shape[rows] is None:
        new_height = None
    else:
        new_height = original_shape[rows] * height_factor
    if original_shape[cols] is None:
        new_width = None
    else:
        new_width = original_shape[cols] * width_factor

    # Restore static shape information lost by the dynamic resize op.
    output_shape = (None, new_height, new_width, None)
    x.set_shape(transpose_shape(output_shape, data_format, spatial_axes=(1, 2)))
    return x
Nested-UNet | Nested-UNet-master/segmentation_models/common/layers.py | from keras.engine import Layer
from keras.engine import InputSpec
from keras.utils import conv_utils
from keras.legacy import interfaces
from keras.utils.generic_utils import get_custom_objects
from .functions import resize_images
class ResizeImage(Layer):
    """ResizeImage layer for 2D inputs.
    Repeats the rows and columns of the data
    by factor[0] and factor[1] respectively.

    # Arguments
        factor: int, or tuple of 2 integers.
            The upsampling factors for rows and columns.
        data_format: A string,
            one of `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs.
            `"channels_last"` corresponds to inputs with shape
            `(batch, height, width, channels)` while `"channels_first"`
            corresponds to inputs with shape
            `(batch, channels, height, width)`.
            It defaults to the `image_data_format` value found in your
            Keras config file at `~/.keras/keras.json`.
            If you never set it, then it will be "channels_last".
        interpolation: A string, one of `nearest` or `bilinear`.
            Note that CNTK does not support yet the `bilinear` upscaling
            and that with Theano, only `factor=(2, 2)` is possible.

    # Input shape
        4D tensor with shape:
        - If `data_format` is `"channels_last"`:
            `(batch, rows, cols, channels)`
        - If `data_format` is `"channels_first"`:
            `(batch, channels, rows, cols)`

    # Output shape
        4D tensor with shape:
        - If `data_format` is `"channels_last"`:
            `(batch, upsampled_rows, upsampled_cols, channels)`
        - If `data_format` is `"channels_first"`:
            `(batch, channels, upsampled_rows, upsampled_cols)`
    """

    @interfaces.legacy_upsampling2d_support
    def __init__(self, factor=(2, 2), data_format='channels_last', interpolation='nearest', **kwargs):
        super(ResizeImage, self).__init__(**kwargs)
        self.data_format = data_format
        # Accept a single int or a 2-tuple; normalize to a 2-tuple.
        self.factor = conv_utils.normalize_tuple(factor, 2, 'factor')
        self.input_spec = InputSpec(ndim=4)
        # Validate early so a bad value fails at construction time.
        if interpolation not in ['nearest', 'bilinear']:
            raise ValueError('interpolation should be one '
                             'of "nearest" or "bilinear".')
        self.interpolation = interpolation

    def compute_output_shape(self, input_shape):
        # Unknown (None) spatial dimensions stay unknown after scaling.
        if self.data_format == 'channels_first':
            height = self.factor[0] * input_shape[2] if input_shape[2] is not None else None
            width = self.factor[1] * input_shape[3] if input_shape[3] is not None else None
            return (input_shape[0],
                    input_shape[1],
                    height,
                    width)
        elif self.data_format == 'channels_last':
            height = self.factor[0] * input_shape[1] if input_shape[1] is not None else None
            width = self.factor[1] * input_shape[2] if input_shape[2] is not None else None
            return (input_shape[0],
                    height,
                    width,
                    input_shape[3])

    def call(self, inputs):
        return resize_images(inputs, self.factor[0], self.factor[1],
                             self.data_format, self.interpolation)

    def get_config(self):
        # Serialize all constructor arguments. Fix: `interpolation` was
        # previously dropped here, so a layer reloaded from its config
        # silently fell back to the 'nearest' default.
        config = {'factor': self.factor,
                  'data_format': self.data_format,
                  'interpolation': self.interpolation}
        base_config = super(ResizeImage, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
# Register the custom layer so keras can deserialize models that use it.
get_custom_objects().update({'ResizeImage': ResizeImage})
| 3,623 | 42.662651 | 102 | py |
Nested-UNet | Nested-UNet-master/segmentation_models/common/blocks.py | from keras.layers import Conv2D
from keras.layers import Activation
from keras.layers import BatchNormalization
def Conv2DBlock(n_filters, kernel_size,
                activation='relu',
                use_batchnorm=True,
                name='conv_block',
                **kwargs):
    """Conv2D followed by optional BatchNormalization and an activation.

    Extra keyword arguments are forwarded to `Conv2D`.
    """
    def layer(input_tensor):
        # Bias is redundant when BatchNormalization follows the conv.
        tensor = Conv2D(n_filters, kernel_size,
                        use_bias=not use_batchnorm,
                        name='{}_conv'.format(name), **kwargs)(input_tensor)
        if use_batchnorm:
            tensor = BatchNormalization(name='{}_bn'.format(name))(tensor)
        return Activation(activation, name='{}_{}'.format(name, activation))(tensor)
    return layer
Nested-UNet | Nested-UNet-master/segmentation_models/pspnet/model.py | from .builder import build_psp
from ..utils import freeze_model
from ..backbones import get_backbone
# Candidate backbone layers on which the PSP module can be attached, ordered
# deepest to shallowest. `_get_layer_by_factor` indexes each tuple from the
# end to match the requested downsample factor (4 -> last entry, 8 -> second
# to last, 16 -> third to last). VGG/ResNet/ResNeXt backbones are addressed
# by layer name, Inception/DenseNet backbones by layer index.
PSP_BASE_LAYERS = {
    'vgg16': ('block5_conv3', 'block4_conv3', 'block3_conv3'),
    'vgg19': ('block5_conv4', 'block4_conv4', 'block3_conv4'),
    'resnet18': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1'),
    'resnet34': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1'),
    'resnet50': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1'),
    'resnet101': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1'),
    'resnet152': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1'),
    'resnext50': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1'),
    'resnext101': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1'),
    'inceptionv3': (228, 86, 16),
    'inceptionresnetv2': (594, 260, 16),
    'densenet121': (311, 139, 51),
    'densenet169': (367, 139, 51),
    'densenet201': (479, 139, 51),
}
def _get_layer_by_factor(backbone_name, factor):
if factor == 4:
return PSP_BASE_LAYERS[backbone_name][-1]
elif factor == 8:
return PSP_BASE_LAYERS[backbone_name][-2]
elif factor == 16:
return PSP_BASE_LAYERS[backbone_name][-3]
else:
raise ValueError('Unsupported factor - `{}`, Use 4, 8 or 16.'.format(factor))
def _shape_guard(factor, shape):
h, w = shape[:2]
min_size = factor * 6
res = (h % min_size != 0 or w % min_size != 0 or
h < min_size or w < min_size)
if res:
raise ValueError('Wrong shape {}, input H and W should '.format(shape) +
'be divisible by `{}`'.format(min_size))
def PSPNet(backbone_name='vgg16',
           input_shape=(384, 384, 3),
           input_tensor=None,
           encoder_weights='imagenet',
           freeze_encoder=False,
           downsample_factor=8,
           psp_conv_filters=512,
           psp_pooling_type='avg',
           use_batchnorm=True,
           dropout=None,
           final_interpolation='bilinear',
           classes=21,
           activation='softmax'):
    """Build a Pyramid Scene Parsing Network (PSPNet).

    PSPNet aggregates different-region-based global context through a
    pyramid pooling module (https://arxiv.org/pdf/1612.01105.pdf).

    Args:
        backbone_name: (str) name of a supported classification backbone.
        input_shape: (tuple) input dimensions (H, W, C); H and W must be
            divisible by (6 * `downsample_factor`) and must **not** be None.
        input_tensor: optional keras tensor to use as the model input.
        encoder_weights: `None` (random initialization) or 'imagenet'
            (pre-training on ImageNet).
        freeze_encoder: (bool) set encoder layer weights as non-trainable;
            useful for fine-tuning.
        downsample_factor: int, one of 4, 8 and 16; selects the backbone
            depth on which the PSP module is constructed.
        psp_conv_filters: (int) number of `Conv2D` filters in each psp block.
        psp_pooling_type: 'avg' or 'max' pooling inside the psp blocks.
        use_batchnorm: (bool) add BatchNormalization between `Conv2D` and
            `Activation` layers.
        dropout: None, or a float in [0, 1) to add SpatialDropout after the
            PSP module.
        final_interpolation: 'duc' or 'bilinear' final upsampling type.
        classes: (int) number of output classes.
        activation: (str) keras activation of the last layer.

    Returns:
        keras Model instance
    """
    # H and W must be compatible with the pyramid pooling grid.
    _shape_guard(downsample_factor, input_shape)

    encoder = get_backbone(backbone_name,
                           input_shape=input_shape,
                           input_tensor=input_tensor,
                           weights=encoder_weights,
                           include_top=False)

    # Backbone layer whose output feeds the pyramid pooling module.
    psp_base_layer = _get_layer_by_factor(backbone_name, downsample_factor)

    model = build_psp(encoder,
                      psp_base_layer,
                      last_upsampling_factor=downsample_factor,
                      classes=classes,
                      conv_filters=psp_conv_filters,
                      pooling_type=psp_pooling_type,
                      activation=activation,
                      use_batchnorm=use_batchnorm,
                      dropout=dropout,
                      final_interpolation=final_interpolation)

    # Optionally lock encoder weights for fine-tuning.
    if freeze_encoder:
        freeze_model(encoder)

    model.name = 'psp-{}'.format(backbone_name)

    return model
Nested-UNet | Nested-UNet-master/segmentation_models/pspnet/builder.py | """
Code is constructed based on the following repositories:
https://github.com/ykamikawa/PSPNet/
https://github.com/hujh14/PSPNet-Keras/
https://github.com/Vladkryvoruchko/PSPNet-Keras-tensorflow/
And original paper of PSPNet:
https://arxiv.org/pdf/1612.01105.pdf
"""
from keras.layers import Conv2D
from keras.layers import Activation
from keras.layers import SpatialDropout2D
from keras.models import Model
from .blocks import PyramidPoolingModule, DUC
from ..common import Conv2DBlock
from ..common import ResizeImage
from ..utils import extract_outputs
from ..utils import to_tuple
def build_psp(backbone,
              psp_layer,
              last_upsampling_factor,
              classes=21,
              activation='softmax',
              conv_filters=512,
              pooling_type='avg',
              dropout=None,
              final_interpolation='bilinear',
              use_batchnorm=True):
    """Assemble the PSPNet head on top of `backbone` at `psp_layer`.

    Args:
        backbone: keras model used as the encoder.
        psp_layer: layer name/index whose output feeds the pyramid module.
        last_upsampling_factor: scale of the final upsampling layer.
        classes: number of output channels.
        activation: final activation (also used as the layer name).
        conv_filters: filters of the conv inside each pyramid branch.
        pooling_type: 'avg' or 'max' pooling inside the pyramid branches.
        dropout: None, or SpatialDropout2D rate applied after the 1x1 conv.
        final_interpolation: 'bilinear' (ResizeImage) or 'duc'.
        use_batchnorm: add BatchNormalization inside conv blocks.

    Returns:
        keras.models.Model instance.

    Raises:
        ValueError: for an unsupported `final_interpolation`.
    """
    model_input = backbone.input

    # Feature map on which the pyramid pooling module is built.
    head = extract_outputs(backbone, [psp_layer])[0]

    head = PyramidPoolingModule(
        conv_filters=conv_filters,
        pooling_type=pooling_type,
        use_batchnorm=use_batchnorm)(head)

    head = Conv2DBlock(512, (1, 1), activation='relu', padding='same',
                       use_batchnorm=use_batchnorm)(head)

    if dropout is not None:
        head = SpatialDropout2D(dropout)(head)

    head = Conv2D(classes, (3,3), padding='same', name='final_conv')(head)

    # Upsample the prediction back to the input resolution.
    if final_interpolation == 'bilinear':
        head = ResizeImage(to_tuple(last_upsampling_factor))(head)
    elif final_interpolation == 'duc':
        head = DUC(to_tuple(last_upsampling_factor))(head)
    else:
        raise ValueError('Unsupported interpolation type {}. '.format(final_interpolation) +
                         'Use `duc` or `bilinear`.')

    head = Activation(activation, name=activation)(head)

    return Model(model_input, head)
| 1,860 | 28.078125 | 92 | py |
Nested-UNet | Nested-UNet-master/segmentation_models/pspnet/blocks.py | import numpy as np
from keras.layers import MaxPool2D
from keras.layers import AveragePooling2D
from keras.layers import Concatenate
from keras.layers import Permute
from keras.layers import Reshape
from keras.backend import int_shape
from ..common import Conv2DBlock
from ..common import ResizeImage
def InterpBlock(level, feature_map_shape,
                conv_filters=512,
                conv_kernel_size=(1,1),
                conv_padding='same',
                pooling_type='avg',
                pool_padding='same',
                use_batchnorm=True,
                activation='relu',
                interpolation='bilinear'):
    """One pyramid branch: pool down to roughly `level` x `level`, apply a
    conv block, then resize back to the original feature-map size.

    Raises:
        ValueError: if `pooling_type` is not 'avg' or 'max'.
    """
    if pooling_type == 'max':
        pool_cls = MaxPool2D
    elif pooling_type == 'avg':
        pool_cls = AveragePooling2D
    else:
        raise ValueError('Unsupported pooling type - `{}`.'.format(pooling_type) +
                         'Use `avg` or `max`.')

    def layer(input_tensor):
        # Kernel size equals stride, so the pooled map ends up roughly
        # level x level (rounded to the closest integer).
        pool_size = [int(np.round(dim / level)) for dim in feature_map_shape[:2]]

        x = pool_cls(pool_size, strides=pool_size, padding=pool_padding)(input_tensor)
        x = Conv2DBlock(conv_filters,
                        kernel_size=conv_kernel_size,
                        padding=conv_padding,
                        use_batchnorm=use_batchnorm,
                        activation=activation,
                        name='level{}'.format(level))(x)
        # Resize back so all pyramid branches can be concatenated.
        x = ResizeImage(pool_size, interpolation=interpolation)(x)
        return x
    return layer
def DUC(factor=(8, 8)):
    """Dense Upsampling Convolution: a 1x1 conv producing factor**2 times the
    channels, followed by a pixel-shuffle-style Permute/Reshape sequence that
    trades channels for spatial resolution. Only equal H/W factors are
    supported.

    Raises:
        ValueError: if the two factors differ.
    """
    if factor[0] != factor[1]:
        raise ValueError('DUC upconvolution support only equal factors, '
                         'got {}'.format(factor))
    scale = factor[0]

    def layer(input_tensor):
        h, w, c = int_shape(input_tensor)[1:]

        x = Conv2DBlock(c*scale**2, (1,1),
                        padding='same',
                        name='duc_{}'.format(scale))(input_tensor)
        # Rearrange (h, w, c*scale^2) -> (h*scale, w*scale, c) via a
        # channels-first shuffle and back.
        x = Permute((3, 1, 2))(x)
        x = Reshape((c, scale, scale, h, w))(x)
        x = Permute((1, 4, 2, 5, 3))(x)
        x = Reshape((c, h * scale, w * scale))(x)
        x = Permute((2, 3, 1))(x)
        return x
    return layer
def PyramidPoolingModule(**params):
    """
    Build the Pyramid Pooling Module.

    Creates four parallel InterpBlock branches pooling the input to 1x1,
    2x2, 3x3 and 6x6 grids, convolving them, resizing them back to the
    input resolution, and concatenating them with the input tensor.

    Args:
        **params: overrides for the InterpBlock defaults below.

    Returns:
        A function tensor -> tensor applying the module.
    """
    _params = {
        'conv_filters': 512,
        'conv_kernel_size': (1, 1),
        'conv_padding': 'same',
        'pooling_type': 'avg',
        'pool_padding': 'same',
        'use_batchnorm': True,
        'activation': 'relu',
        'interpolation': 'bilinear',
    }
    _params.update(params)
    def module(input_tensor):
        # Spatial (H, W) of the feature map; InterpBlock needs it to size
        # the pooling windows.
        feature_map_shape = int_shape(input_tensor)[1:3]
        x1 = InterpBlock(1, feature_map_shape, **_params)(input_tensor)
        x2 = InterpBlock(2, feature_map_shape, **_params)(input_tensor)
        x3 = InterpBlock(3, feature_map_shape, **_params)(input_tensor)
        x6 = InterpBlock(6, feature_map_shape, **_params)(input_tensor)
        x = Concatenate()([input_tensor, x1, x2, x3, x6])
        return x
    return module
Nested-UNet | Nested-UNet-master/segmentation_models/nestnet/model.py | from .builder import build_nestnet
from ..utils import freeze_model
from ..backbones import get_backbone
# Per-backbone encoder feature layers fed to the UNet++ decoder. Entries are
# layer names (str) or layer indices (int). When a backbone lists more than
# n_upsample_blocks entries, build_nestnet treats the first half as skip
# tensors and the second half as per-stage downsampling outputs.
DEFAULT_SKIP_CONNECTIONS = {
    'vgg16': ('block5_conv3', 'block4_conv3', 'block3_conv3', 'block2_conv2', 'block1_conv2',
              'block5_pool', 'block4_pool', 'block3_pool', 'block2_pool', 'block1_pool',
              ),
    'vgg19': ('block5_conv4', 'block4_conv4', 'block3_conv4', 'block2_conv2', 'block1_conv2',
              'block5_pool', 'block4_pool', 'block3_pool', 'block2_pool', 'block1_pool',
              ),
    'resnet18': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0',
                 'relu1', 'stage3_unit2_relu1', 'stage2_unit2_relu1', 'stage1_unit2_relu1',
                 ),
    'resnet34': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0',
                 'relu1', 'stage3_unit2_relu1', 'stage2_unit2_relu1', 'stage1_unit2_relu1',
                 ),
    'resnet50': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0',
                 'relu1', 'stage3_unit2_relu1', 'stage2_unit2_relu1', 'stage1_unit2_relu1',
                 ),
    'resnet101': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0',
                  'relu1', 'stage3_unit2_relu1', 'stage2_unit2_relu1', 'stage1_unit2_relu1',
                  ),
    'resnet152': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0',
                  'relu1', 'stage3_unit2_relu1', 'stage2_unit2_relu1', 'stage1_unit2_relu1',
                  ),
    'resnext50': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0',
                  'stage4_unit1_relu1', 'stage3_unit2_relu1', 'stage2_unit2_relu1', 'stage1_unit2_relu1',
                  ),
    'resnext101': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0',
                   'stage4_unit1_relu1', 'stage3_unit2_relu1', 'stage2_unit2_relu1', 'stage1_unit2_relu1',
                   ),
    'inceptionv3': (228, 86, 16, 9),
    'inceptionresnetv2': (594, 260, 16, 9),
    'densenet121': (311, 139, 51, 4),
    'densenet169': (367, 139, 51, 4),
    'densenet201': (479, 139, 51, 4),
}
def Nestnet(backbone_name='vgg16',
            input_shape=(None, None, 3),
            input_tensor=None,
            encoder_weights='imagenet',
            freeze_encoder=False,
            skip_connections='default',
            decoder_block_type='upsampling',
            decoder_filters=(256,128,64,32,16),
            decoder_use_batchnorm=True,
            n_upsample_blocks=5,
            upsample_rates=(2,2,2,2,2),
            classes=1,
            activation='sigmoid'):
    """Build a UNet++ ("nested U-Net") model on a pre-trained encoder.

    Args:
        backbone_name: (str) look at list of available backbones.
        input_shape: (tuple) dimensions of input data (H, W, C)
        input_tensor: keras tensor
        encoder_weights: one of `None` (random initialization),
            'imagenet' (pre-training on ImageNet),
            'dof' (pre-training on DoF)
        freeze_encoder: (bool) Set encoder layers weights as non-trainable. Useful for fine-tuning
        skip_connections: if 'default' is used take default skip connections,
            else provide a list of layer numbers or names starting from top of model
        decoder_block_type: (str) one of 'upsampling' and 'transpose' (look at blocks.py)
        decoder_filters: (int) number of convolution layer filters in decoder blocks
        decoder_use_batchnorm: (bool) if True add batch normalisation layer between `Conv2D` ad `Activation` layers
        n_upsample_blocks: (int) a number of upsampling blocks
        upsample_rates: (tuple of int) upsampling rates decoder blocks
        classes: (int) a number of classes for output
        activation: (str) one of keras activations for last model layer

    Returns:
        keras.models.Model instance
    """
    backbone = get_backbone(backbone_name,
                            input_shape=input_shape,
                            input_tensor=input_tensor,
                            weights=encoder_weights,
                            include_top=False)
    if skip_connections == 'default':
        skip_connections = DEFAULT_SKIP_CONNECTIONS[backbone_name]
    model = build_nestnet(backbone,
                          classes,
                          skip_connections,
                          decoder_filters=decoder_filters,
                          block_type=decoder_block_type,
                          activation=activation,
                          n_upsample_blocks=n_upsample_blocks,
                          upsample_rates=upsample_rates,
                          use_batchnorm=decoder_use_batchnorm)
    # lock encoder weights for fine-tuning
    if freeze_encoder:
        freeze_model(backbone)
    model.name = 'nest-{}'.format(backbone_name)
    return model
Nested-UNet | Nested-UNet-master/segmentation_models/nestnet/builder.py | from keras.layers import Conv2D
from keras.layers import Activation
from keras.models import Model
from .blocks import Transpose2D_block
from .blocks import Upsample2D_block
from ..utils import get_layer_number, to_tuple
import copy
def build_nestnet(backbone, classes, skip_connection_layers,
                  decoder_filters=(256,128,64,32,16),
                  upsample_rates=(2,2,2,2,2),
                  n_upsample_blocks=5,
                  block_type='upsampling',
                  activation='sigmoid',
                  use_batchnorm=True):
    """Assemble the UNet++ (nested U-Net) decoder on top of `backbone`.

    `downterm` holds the encoder (column-0) tensors per depth and `interm`
    is the (n+1) x (n+1) grid of UNet++ node tensors, flattened row-major:
    interm[(n+1)*i + j] is the node at depth i, column j.

    Args:
        backbone: encoder keras.Model.
        classes: number of output channels of the final 3x3 conv.
        skip_connection_layers: encoder layer names/indices; if more than
            `n_upsample_blocks` entries are given, the second half is used
            as the per-stage downsampling outputs (e.g. VGG pool layers).
        decoder_filters: conv filter counts per decoder depth.
        upsample_rates: per-depth upsampling factors.
        n_upsample_blocks: number of decoder depths.
        block_type: 'upsampling' or 'transpose' decoder block.
        activation: final activation layer name.
        use_batchnorm: add BatchNorm inside decoder blocks.

    Returns:
        keras.models.Model mapping backbone.input to the segmentation map.
    """
    input = backbone.input
    if block_type == 'transpose':
        up_block = Transpose2D_block
    else:
        up_block = Upsample2D_block
    # With 2N connection names, the first half are skip tensors and the
    # second half the per-stage downsampling outputs.
    if len(skip_connection_layers) > n_upsample_blocks:
        downsampling_layers = skip_connection_layers[int(len(skip_connection_layers)/2):]
        skip_connection_layers = skip_connection_layers[:int(len(skip_connection_layers)/2)]
    else:
        downsampling_layers = skip_connection_layers
    # convert layer names to indices
    skip_connection_idx = ([get_layer_number(backbone, l) if isinstance(l, str) else l
                            for l in skip_connection_layers])
    skip_layers_list = [backbone.layers[skip_connection_idx[i]].output for i in range(len(skip_connection_idx))]
    downsampling_idx = ([get_layer_number(backbone, l) if isinstance(l, str) else l
                         for l in downsampling_layers])
    downsampling_list = [backbone.layers[downsampling_idx[i]].output for i in range(len(downsampling_idx))]
    # downterm[i]: encoder output feeding decoder depth i. The offset
    # differs by one when the deepest downsampling output IS the backbone
    # output (VGG-style backbones).
    downterm = [None] * (n_upsample_blocks+1)
    for i in range(len(downsampling_idx)):
        if downsampling_list[0] == backbone.output:
            downterm[n_upsample_blocks-i] = downsampling_list[i]
        else:
            downterm[n_upsample_blocks-i-1] = downsampling_list[i]
    downterm[-1] = backbone.output
    # Seed column 0 of the node grid with the encoder skip tensors and the
    # backbone output at the deepest position.
    interm = [None] * (n_upsample_blocks+1) * (n_upsample_blocks+1)
    for i in range(len(skip_connection_idx)):
        interm[-i*(n_upsample_blocks+1)+(n_upsample_blocks+1)*(n_upsample_blocks-1)] = skip_layers_list[i]
    interm[(n_upsample_blocks+1)*n_upsample_blocks] = backbone.output
    # Fill the grid column by column: node (i, j+1) merges node (i, j) with
    # the upsampled node one depth deeper (or the raw encoder tensor for
    # the first column).
    for j in range(n_upsample_blocks):
        for i in range(n_upsample_blocks-j):
            upsample_rate = to_tuple(upsample_rates[i])
            if i == 0 and j < n_upsample_blocks-1 and len(skip_connection_layers) < n_upsample_blocks:
                interm[(n_upsample_blocks+1)*i+j+1] = None
            elif j == 0:
                if downterm[i+1] is not None:
                    interm[(n_upsample_blocks+1)*i+j+1] = up_block(decoder_filters[n_upsample_blocks-i-2],
                                                                   i+1, j+1, upsample_rate=upsample_rate,
                                                                   skip=interm[(n_upsample_blocks+1)*i+j],
                                                                   use_batchnorm=use_batchnorm)(downterm[i+1])
                else:
                    interm[(n_upsample_blocks+1)*i+j+1] = None
            else:
                interm[(n_upsample_blocks+1)*i+j+1] = up_block(decoder_filters[n_upsample_blocks-i-2],
                                                               i+1, j+1, upsample_rate=upsample_rate,
                                                               skip=interm[(n_upsample_blocks+1)*i+j],
                                                               use_batchnorm=use_batchnorm)(interm[(n_upsample_blocks+1)*(i+1)+j])
    # Head: 3x3 conv on the top-right node of the grid, then activation.
    x = Conv2D(classes, (3,3), padding='same', name='final_conv')(interm[n_upsample_blocks])
    x = Activation(activation, name=activation)(x)
    model = Model(input, x)
    return model
| 7,484 | 41.771429 | 112 | py |
Nested-UNet | Nested-UNet-master/segmentation_models/nestnet/blocks.py | from keras.layers import Conv2DTranspose
from keras.layers import UpSampling2D
from keras.layers import Conv2D
from keras.layers import BatchNormalization
from keras.layers import Activation
from keras.layers import Concatenate
def handle_block_names(stage, cols):
    """Return (conv, bn, relu, upsample, merge) layer names for node (stage, cols)."""
    block = '{}-{}'.format(stage, cols)
    conv_name, bn_name, relu_name, up_name = (
        'decoder_stage{}_{}'.format(block, kind)
        for kind in ('conv', 'bn', 'relu', 'upsample'))
    return conv_name, bn_name, relu_name, up_name, 'merge_{}'.format(block)
def ConvRelu(filters, kernel_size, use_batchnorm=False, conv_name='conv', bn_name='bn', relu_name='relu'):
    """Conv2D (+ optional BatchNorm) followed by ReLU, as a composable layer function."""
    def layer(x):
        out = Conv2D(filters, kernel_size, padding="same", name=conv_name,
                     use_bias=not use_batchnorm)(x)
        if use_batchnorm:
            out = BatchNormalization(name=bn_name)(out)
        return Activation('relu', name=relu_name)(out)
    return layer
def Upsample2D_block(filters, stage, cols, kernel_size=(3,3), upsample_rate=(2,2),
                     use_batchnorm=False, skip=None):
    """Decoder block: upsample, optionally merge a skip tensor, then two ConvRelu layers."""
    def layer(input_tensor):
        conv_name, bn_name, relu_name, up_name, merge_name = handle_block_names(stage, cols)
        x = UpSampling2D(size=upsample_rate, name=up_name)(input_tensor)
        if skip is not None:
            x = Concatenate(name=merge_name)([x, skip])
        # Two identically-shaped conv blocks, suffixed '1' and '2'.
        for tag in ('1', '2'):
            x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                         conv_name=conv_name + tag, bn_name=bn_name + tag,
                         relu_name=relu_name + tag)(x)
        return x
    return layer
def Transpose2D_block(filters, stage, cols, kernel_size=(3,3), upsample_rate=(2,2),
                      transpose_kernel_size=(4,4), use_batchnorm=False, skip=None):
    """Decoder block that upsamples with a transposed convolution.

    Args:
        filters: conv filter count for the block.
        stage, cols: grid position, used only for unique layer names.
        kernel_size: kernel of the trailing ConvRelu.
        upsample_rate: stride of the transposed convolution.
        transpose_kernel_size: kernel of the transposed convolution.
        use_batchnorm: add BatchNorm after conv layers.
        skip: optional tensor concatenated after upsampling.

    Returns:
        A function tensor -> tensor applying the block.
    """
    def layer(input_tensor):
        conv_name, bn_name, relu_name, up_name, merge_name = handle_block_names(stage, cols)
        x = Conv2DTranspose(filters, transpose_kernel_size, strides=upsample_rate,
                            padding='same', name=up_name, use_bias=not(use_batchnorm))(input_tensor)
        if use_batchnorm:
            x = BatchNormalization(name=bn_name+'1')(x)
        x = Activation('relu', name=relu_name+'1')(x)
        if skip is not None:
            x = Concatenate(name=merge_name)([x, skip])
        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '2', bn_name=bn_name + '2', relu_name=relu_name + '2')(x)
        return x
    return layer
Nested-UNet | Nested-UNet-master/segmentation_models/linknet/model.py | from .builder import build_linknet
from ..utils import freeze_model
from ..backbones import get_backbone
# Per-backbone encoder layers whose outputs feed the LinkNet decoder's skip
# connections (ordered deepest first). Entries are layer names (str) or
# layer indices (int).
DEFAULT_SKIP_CONNECTIONS = {
    'vgg16': ('block5_conv3', 'block4_conv3', 'block3_conv3', 'block2_conv2'),
    'vgg19': ('block5_conv4', 'block4_conv4', 'block3_conv4', 'block2_conv2'),
    'resnet18': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
    'resnet34': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
    'resnet50': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
    'resnet101': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
    'resnet152': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
    'resnext50': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
    'resnext101': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
    'inceptionv3': (228, 86, 16, 9),
    'inceptionresnetv2': (594, 260, 16, 9),
    'densenet121': (311, 139, 51, 4),
    'densenet169': (367, 139, 51, 4),
    'densenet201': (479, 139, 51, 4),
}
def Linknet(backbone_name='vgg16',
            input_shape=(None, None, 3),
            input_tensor=None,
            encoder_weights='imagenet',
            freeze_encoder=False,
            skip_connections='default',
            n_upsample_blocks=5,
            decoder_filters=(None, None, None, None, 16),
            decoder_use_batchnorm=True,
            upsample_layer='upsampling',
            upsample_kernel_size=(3, 3),
            classes=1,
            activation='sigmoid'):
    """Build a LinkNet model (https://arxiv.org/pdf/1707.03718.pdf).

    This implementation by default has 4 skip connection links (original - 3).

    Args:
        backbone_name: (str) look at list of available backbones.
        input_shape: (tuple) dimensions of input data (H, W, C).
        input_tensor: keras tensor used as model input.
        encoder_weights: one of `None` (random initialization), 'imagenet' (pre-training on ImageNet).
        freeze_encoder: (bool) Set encoder layers weights as non-trainable. Useful for fine-tuning.
        skip_connections: if 'default' is used take default skip connections,
            else provide a list of layer numbers or names.
        n_upsample_blocks: (int) a number of upsampling blocks in decoder.
        decoder_filters: (tuple of int) a number of convolution filters in decoder blocks;
            for a block with skip connection the filter count is taken from the
            corresponding encoder block (pass `None` to auto-detect).
        decoder_use_batchnorm: (bool) if True add batch normalisation layer between `Conv2D` ad `Activation` layers.
        upsample_layer: (str) one of 'upsampling' and 'transpose'.
        upsample_kernel_size: (tuple of int) convolution kernel size in upsampling block.
        classes: (int) a number of classes for output.
        activation: (str) one of keras activations.

    Returns:
        model: instance of Keras Model
    """
    encoder = get_backbone(backbone_name,
                           input_shape=input_shape,
                           input_tensor=input_tensor,
                           weights=encoder_weights,
                           include_top=False)
    skips = (DEFAULT_SKIP_CONNECTIONS[backbone_name]
             if skip_connections == 'default' else skip_connections)
    net = build_linknet(encoder,
                        classes,
                        skips,
                        decoder_filters=decoder_filters,
                        upsample_layer=upsample_layer,
                        activation=activation,
                        n_upsample_blocks=n_upsample_blocks,
                        upsample_rates=(2, 2, 2, 2, 2),
                        upsample_kernel_size=upsample_kernel_size,
                        use_batchnorm=decoder_use_batchnorm)
    # lock encoder weights for fine-tuning
    if freeze_encoder:
        freeze_model(encoder)
    net.name = 'link-{}'.format(backbone_name)
    return net
| 4,257 | 46.311111 | 115 | py |
Nested-UNet | Nested-UNet-master/segmentation_models/linknet/builder.py | from keras.layers import Conv2D
from keras.layers import Activation
from keras.models import Model
from .blocks import DecoderBlock
from ..utils import get_layer_number, to_tuple
def build_linknet(backbone,
                  classes,
                  skip_connection_layers,
                  decoder_filters=(None, None, None, None, 16),
                  upsample_rates=(2, 2, 2, 2, 2),
                  n_upsample_blocks=5,
                  upsample_kernel_size=(3, 3),
                  upsample_layer='upsampling',
                  activation='sigmoid',
                  use_batchnorm=True):
    """Assemble the LinkNet decoder on top of `backbone` and return the model."""
    # Resolve skip-connection layer names to indices.
    skip_idx = [get_layer_number(backbone, l) if isinstance(l, str) else l
                for l in skip_connection_layers]
    x = backbone.output
    for stage in range(n_upsample_blocks):
        # Blocks beyond the available skip connections upsample without one.
        skip = backbone.layers[skip_idx[stage]].output if stage < len(skip_idx) else None
        x = DecoderBlock(stage=stage,
                         filters=decoder_filters[stage],
                         kernel_size=upsample_kernel_size,
                         upsample_rate=to_tuple(upsample_rates[stage]),
                         use_batchnorm=use_batchnorm,
                         upsample_layer=upsample_layer,
                         skip=skip)(x)
    x = Conv2D(classes, (3, 3), padding='same', name='final_conv')(x)
    x = Activation(activation, name=activation)(x)
    return Model(backbone.input, x)
| 1,663 | 32.28 | 86 | py |
Nested-UNet | Nested-UNet-master/segmentation_models/linknet/blocks.py | import keras.backend as K
from keras.layers import Conv2DTranspose as Transpose
from keras.layers import UpSampling2D
from keras.layers import Conv2D
from keras.layers import BatchNormalization
from keras.layers import Activation
from keras.layers import Add
def handle_block_names(stage):
    """Return (conv, bn, relu, upsample) layer names for decoder `stage`."""
    prefix = 'decoder_stage{}'.format(stage)
    return prefix + '_conv', prefix + '_bn', prefix + '_relu', prefix + '_upsample'
def ConvRelu(filters,
             kernel_size,
             use_batchnorm=False,
             conv_name='conv',
             bn_name='bn',
             relu_name='relu'):
    """Conv2D (+ optional BatchNorm) followed by ReLU, as a composable layer function."""
    def layer(x):
        out = Conv2D(filters,
                     kernel_size,
                     padding="same",
                     name=conv_name,
                     use_bias=not use_batchnorm)(x)
        if use_batchnorm:
            out = BatchNormalization(name=bn_name)(out)
        return Activation('relu', name=relu_name)(out)
    return layer
def Conv2DUpsample(filters,
                   upsample_rate,
                   kernel_size=(3,3),
                   up_name='up',
                   conv_name='conv',
                   **kwargs):
    """UpSampling2D followed by a same-padded Conv2D, as a composable layer function."""
    def layer(input_tensor):
        upsampled = UpSampling2D(upsample_rate, name=up_name)(input_tensor)
        return Conv2D(filters,
                      kernel_size,
                      padding='same',
                      name=conv_name,
                      **kwargs)(upsampled)
    return layer
def Conv2DTranspose(filters,
                    upsample_rate,
                    kernel_size=(4,4),
                    up_name='up',
                    **kwargs):
    """Learned upsampling via transposed convolution; only rate (2, 2) is supported."""
    if tuple(upsample_rate) != (2, 2):
        raise NotImplementedError(
            f'Conv2DTranspose support only upsample_rate=(2, 2), got {upsample_rate}')

    def layer(input_tensor):
        return Transpose(filters,
                         kernel_size=kernel_size,
                         strides=upsample_rate,
                         padding='same',
                         name=up_name)(input_tensor)
    return layer
def UpsampleBlock(filters,
                  upsample_rate,
                  kernel_size,
                  use_batchnorm=False,
                  upsample_layer='upsampling',
                  conv_name='conv',
                  bn_name='bn',
                  relu_name='relu',
                  up_name='up',
                  **kwargs):
    """Upsample (via 'upsampling' or 'transpose' layer), then optional BN and ReLU."""
    if upsample_layer == 'upsampling':
        UpBlock = Conv2DUpsample
    elif upsample_layer == 'transpose':
        UpBlock = Conv2DTranspose
    else:
        raise ValueError(f'Not supported up layer type {upsample_layer}')

    def layer(input_tensor):
        out = UpBlock(filters,
                      upsample_rate=upsample_rate,
                      kernel_size=kernel_size,
                      use_bias=not use_batchnorm,
                      conv_name=conv_name,
                      up_name=up_name,
                      **kwargs)(input_tensor)
        if use_batchnorm:
            out = BatchNormalization(name=bn_name)(out)
        return Activation('relu', name=relu_name)(out)
    return layer
def DecoderBlock(stage,
                 filters=None,
                 kernel_size=(3,3),
                 upsample_rate=(2,2),
                 use_batchnorm=False,
                 skip=None,
                 upsample_layer='upsampling'):
    """LinkNet decoder block: 1x1 reduce, upsample, 1x1 expand, residual add of skip."""
    def layer(input_tensor):
        conv_name, bn_name, relu_name, up_name = handle_block_names(stage)
        in_channels = K.int_shape(input_tensor)[-1]
        # Output channel count follows the skip tensor when one is given,
        # otherwise the explicit `filters` argument.
        out_channels = K.int_shape(skip)[-1] if skip is not None else filters
        x = ConvRelu(in_channels // 4,
                     kernel_size=(1, 1),
                     use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '1',
                     bn_name=bn_name + '1',
                     relu_name=relu_name + '1')(input_tensor)
        x = UpsampleBlock(filters=in_channels // 4,
                          kernel_size=kernel_size,
                          upsample_layer=upsample_layer,
                          upsample_rate=upsample_rate,
                          use_batchnorm=use_batchnorm,
                          conv_name=conv_name + '2',
                          bn_name=bn_name + '2',
                          up_name=up_name + '2',
                          relu_name=relu_name + '2')(x)
        x = ConvRelu(out_channels,
                     kernel_size=(1, 1),
                     use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '3',
                     bn_name=bn_name + '3',
                     relu_name=relu_name + '3')(x)
        if skip is not None:
            x = Add()([x, skip])
        return x
    return layer
| 4,938 | 28.753012 | 86 | py |
Nested-UNet | Nested-UNet-master/segmentation_models/unet/model.py | from .builder import build_unet
from ..utils import freeze_model
from ..backbones import get_backbone
# Per-backbone encoder layers whose outputs feed the U-Net decoder's skip
# connections (ordered deepest first). Entries are layer names (str) or
# layer indices (int).
DEFAULT_SKIP_CONNECTIONS = {
    'vgg16': ('block5_conv3', 'block4_conv3', 'block3_conv3', 'block2_conv2', 'block1_conv2'),
    'vgg19': ('block5_conv4', 'block4_conv4', 'block3_conv4', 'block2_conv2', 'block1_conv2'),
    'resnet18': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'), # check 'bn_data'
    'resnet34': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
    'resnet50': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
    'resnet101': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
    'resnet152': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
    'resnext50': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
    'resnext101': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1', 'relu0'),
    'inceptionv3': (228, 86, 16, 9),
    'inceptionresnetv2': (594, 260, 16, 9),
    'densenet121': (311, 139, 51, 4),
    'densenet169': (367, 139, 51, 4),
    'densenet201': (479, 139, 51, 4),
}
def Unet(backbone_name='vgg16',
         input_shape=(None, None, 3),
         input_tensor=None,
         encoder_weights='imagenet',
         freeze_encoder=False,
         skip_connections='default',
         decoder_block_type='upsampling',
         decoder_filters=(256,128,64,32,16),
         decoder_use_batchnorm=True,
         n_upsample_blocks=5,
         upsample_rates=(2,2,2,2,2),
         classes=1,
         activation='sigmoid'):
    """Build a U-Net segmentation model on a pre-trained encoder.

    Args:
        backbone_name: (str) look at list of available backbones.
        input_shape: (tuple) dimensions of input data (H, W, C).
        input_tensor: keras tensor used as model input.
        encoder_weights: one of `None` (random initialization),
            'imagenet' (pre-training on ImageNet),
            'dof' (pre-training on DoF).
        freeze_encoder: (bool) Set encoder layers weights as non-trainable. Useful for fine-tuning.
        skip_connections: if 'default' is used take default skip connections,
            else provide a list of layer numbers or names starting from top of model.
        decoder_block_type: (str) one of 'upsampling' and 'transpose' (look at blocks.py).
        decoder_filters: (int) number of convolution layer filters in decoder blocks.
        decoder_use_batchnorm: (bool) if True add batch normalisation layer between `Conv2D` ad `Activation` layers.
        n_upsample_blocks: (int) a number of upsampling blocks.
        upsample_rates: (tuple of int) upsampling rates decoder blocks.
        classes: (int) a number of classes for output.
        activation: (str) one of keras activations for last model layer.

    Returns:
        keras.models.Model instance
    """
    encoder = get_backbone(backbone_name,
                           input_shape=input_shape,
                           input_tensor=input_tensor,
                           weights=encoder_weights,
                           include_top=False)
    skips = (DEFAULT_SKIP_CONNECTIONS[backbone_name]
             if skip_connections == 'default' else skip_connections)
    net = build_unet(encoder,
                     classes,
                     skips,
                     decoder_filters=decoder_filters,
                     block_type=decoder_block_type,
                     activation=activation,
                     n_upsample_blocks=n_upsample_blocks,
                     upsample_rates=upsample_rates,
                     use_batchnorm=decoder_use_batchnorm)
    # lock encoder weights for fine-tuning
    if freeze_encoder:
        freeze_model(encoder)
    net.name = 'u-{}'.format(backbone_name)
    return net
| 3,928 | 42.655556 | 118 | py |
Nested-UNet | Nested-UNet-master/segmentation_models/unet/builder.py | from keras.layers import Conv2D
from keras.layers import Activation
from keras.models import Model
from .blocks import Transpose2D_block
from .blocks import Upsample2D_block
from ..utils import get_layer_number, to_tuple
def build_unet(backbone, classes, skip_connection_layers,
               decoder_filters=(256,128,64,32,16),
               upsample_rates=(2,2,2,2,2),
               n_upsample_blocks=5,
               block_type='upsampling',
               activation='sigmoid',
               use_batchnorm=True):
    """Assemble the U-Net decoder on top of `backbone` and return the model."""
    up_block = Transpose2D_block if block_type == 'transpose' else Upsample2D_block
    # Resolve skip-connection layer names to indices.
    skip_idx = [get_layer_number(backbone, l) if isinstance(l, str) else l
                for l in skip_connection_layers]
    x = backbone.output
    for i in range(n_upsample_blocks):
        # Blocks beyond the available skip connections upsample without one.
        skip = backbone.layers[skip_idx[i]].output if i < len(skip_idx) else None
        x = up_block(decoder_filters[i], i, upsample_rate=to_tuple(upsample_rates[i]),
                     skip=skip, use_batchnorm=use_batchnorm)(x)
    x = Conv2D(classes, (3,3), padding='same', name='final_conv')(x)
    x = Activation(activation, name=activation)(x)
    return Model(backbone.input, x)
| 1,491 | 30.083333 | 86 | py |
Nested-UNet | Nested-UNet-master/segmentation_models/unet/blocks.py | from keras.layers import Conv2DTranspose
from keras.layers import UpSampling2D
from keras.layers import Conv2D
from keras.layers import BatchNormalization
from keras.layers import Activation
from keras.layers import Concatenate
def handle_block_names(stage):
    """Return the (conv, bn, relu, upsample) layer names for decoder `stage`."""
    return tuple('decoder_stage{}_{}'.format(stage, part)
                 for part in ('conv', 'bn', 'relu', 'upsample'))
def ConvRelu(filters, kernel_size, use_batchnorm=False, conv_name='conv', bn_name='bn', relu_name='relu'):
    """Conv2D (+ optional BatchNorm) then ReLU, packaged as a layer function."""
    def layer(x):
        out = Conv2D(filters, kernel_size, padding="same",
                     name=conv_name, use_bias=not use_batchnorm)(x)
        if use_batchnorm:
            out = BatchNormalization(name=bn_name)(out)
        return Activation('relu', name=relu_name)(out)
    return layer
def Upsample2D_block(filters, stage, kernel_size=(3,3), upsample_rate=(2,2),
                     use_batchnorm=False, skip=None):
    """U-Net decoder block: upsample, optional skip concat, then two ConvRelu layers."""
    def layer(input_tensor):
        conv_name, bn_name, relu_name, up_name = handle_block_names(stage)
        x = UpSampling2D(size=upsample_rate, name=up_name)(input_tensor)
        if skip is not None:
            x = Concatenate()([x, skip])
        # Two identically-shaped conv blocks, suffixed '1' and '2'.
        for tag in ('1', '2'):
            x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                         conv_name=conv_name + tag, bn_name=bn_name + tag,
                         relu_name=relu_name + tag)(x)
        return x
    return layer
def Transpose2D_block(filters, stage, kernel_size=(3,3), upsample_rate=(2,2),
                      transpose_kernel_size=(4,4), use_batchnorm=False, skip=None):
    """U-Net decoder block that upsamples with a transposed convolution.

    Args:
        filters: conv filter count for the block.
        stage: decoder stage index, used only for unique layer names.
        kernel_size: kernel of the trailing ConvRelu.
        upsample_rate: stride of the transposed convolution.
        transpose_kernel_size: kernel of the transposed convolution.
        use_batchnorm: add BatchNorm after conv layers.
        skip: optional tensor concatenated after upsampling.

    Returns:
        A function tensor -> tensor applying the block.
    """
    def layer(input_tensor):
        conv_name, bn_name, relu_name, up_name = handle_block_names(stage)
        x = Conv2DTranspose(filters, transpose_kernel_size, strides=upsample_rate,
                            padding='same', name=up_name, use_bias=not(use_batchnorm))(input_tensor)
        if use_batchnorm:
            x = BatchNormalization(name=bn_name+'1')(x)
        x = Activation('relu', name=relu_name+'1')(x)
        if skip is not None:
            x = Concatenate()([x, skip])
        x = ConvRelu(filters, kernel_size, use_batchnorm=use_batchnorm,
                     conv_name=conv_name + '2', bn_name=bn_name + '2', relu_name=relu_name + '2')(x)
        return x
    return layer
Nested-UNet | Nested-UNet-master/segmentation_models/backbones/preprocessing.py | """
Image pre-processing functions.
Images are assumed to be read in uint8 format (range 0-255).
"""
from keras.applications import vgg16
from keras.applications import vgg19
from keras.applications import densenet
from keras.applications import inception_v3
from keras.applications import inception_resnet_v2
# Identity pre-processing: pass images through unchanged.
identical = lambda x: x
# Reverse the last axis (RGB <-> BGR for channels-last images).
bgr_transpose = lambda x: x[..., ::-1]
# Maps backbone name -> pre-processing callable to apply to input images.
models_preprocessing = {
    'vgg16': vgg16.preprocess_input,
    'vgg19': vgg19.preprocess_input,
    'resnet18': bgr_transpose,
    'resnet34': bgr_transpose,
    'resnet50': bgr_transpose,
    'resnet101': bgr_transpose,
    'resnet152': bgr_transpose,
    'resnext50': identical,
    'resnext101': identical,
    'densenet121': densenet.preprocess_input,
    'densenet169': densenet.preprocess_input,
    'densenet201': densenet.preprocess_input,
    'inceptionv3': inception_v3.preprocess_input,
    'inceptionresnetv2': inception_resnet_v2.preprocess_input,
}
def get_preprocessing(backbone):
    # Return the pre-processing function for `backbone`; raises KeyError
    # for unknown backbone names.
    return models_preprocessing[backbone]
| 1,019 | 28.142857 | 62 | py |
Nested-UNet | Nested-UNet-master/segmentation_models/backbones/inception_v3.py | # -*- coding: utf-8 -*-
"""Inception V3 model for Keras.
Note that the input image format for this model is different than for
the VGG16 and ResNet models (299x299 instead of 224x224),
and that the input preprocessing function is also different (same as Xception).
# Reference
- [Rethinking the Inception Architecture for Computer Vision](http://arxiv.org/abs/1512.00567)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import warnings
from keras.models import Model
from keras import layers
from keras.layers import Activation
from keras.layers import Dense
from keras.layers import Input
from keras.layers import BatchNormalization
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import AveragePooling2D
from keras.layers import GlobalAveragePooling2D
from keras.layers import GlobalMaxPooling2D
from keras.engine.topology import get_source_inputs
from keras.utils.data_utils import get_file
from keras import backend as K
from keras.applications import imagenet_utils
import keras
from distutils.version import StrictVersion
# Keras >= 2.2.0 moved `_obtain_input_shape` into the separate
# `keras_applications` package; import it from the right place per version.
if StrictVersion(keras.__version__) < StrictVersion('2.2.0'):
    from keras.applications.imagenet_utils import _obtain_input_shape
else:
    from keras_applications.imagenet_utils import _obtain_input_shape
# Release URLs for the pretrained ImageNet weights, with and without the
# fully-connected classification head ("top").
WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.5/inception_v3_weights_tf_dim_ordering_tf_kernels.h5'
WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.5/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
def conv2d_bn(x,
              filters,
              num_row,
              num_col,
              padding='same',
              strides=(1, 1),
              name=None):
    """Apply a Conv2D -> BatchNormalization -> ReLU stack.

    # Arguments
        x: input tensor.
        filters: filters in `Conv2D`.
        num_row: height of the convolution kernel.
        num_col: width of the convolution kernel.
        padding: padding mode in `Conv2D`.
        strides: strides in `Conv2D`.
        name: base name; the conv layer becomes `name + '_conv'` and the
            batch norm layer `name + '_bn'`.

    # Returns
        Output tensor after applying `Conv2D` and `BatchNormalization`.
    """
    conv_name = name + '_conv' if name is not None else None
    bn_name = name + '_bn' if name is not None else None
    # BatchNorm normalizes over the channel axis, which depends on the
    # configured image data format.
    bn_axis = 1 if K.image_data_format() == 'channels_first' else 3
    x = Conv2D(
        filters, (num_row, num_col),
        strides=strides,
        padding=padding,
        use_bias=False,
        name=conv_name)(x)
    x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
    return Activation('relu', name=name)(x)
def InceptionV3(include_top=True,
                weights='imagenet',
                input_tensor=None,
                input_shape=None,
                pooling=None,
                classes=1000):
    """Instantiates the Inception v3 architecture.
    Optionally loads weights pre-trained
    on ImageNet. Note that when using TensorFlow,
    for best performance you should set
    `image_data_format='channels_last'` in your Keras config
    at ~/.keras/keras.json.
    The model and the weights are compatible with both
    TensorFlow and Theano. The data format
    convention used by the model is the one
    specified in your Keras config file.
    Note that the default input image size for this model is 299x299.
    # Arguments
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
              'imagenet' (pre-training on ImageNet),
              or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(299, 299, 3)` (with `channels_last` data format)
            or `(3, 299, 299)` (with `channels_first` data format).
            It should have exactly 3 inputs channels,
            and width and height should be no smaller than 139.
            E.g. `(150, 150, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.
    # Returns
        A Keras model instance.
    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
    """
    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')
    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')
    # Determine proper input shape
    input_shape = _obtain_input_shape(
        input_shape,
        default_size=299,
        min_size=139,
        data_format=K.image_data_format(),
        require_flatten=False,
        weights=weights)
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # Channel axis for concatenation/BN depends on the backend data format.
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = 3
    # Stem. NOTE(review): every conv/pool below uses padding='same', whereas
    # the reference Keras InceptionV3 stem presumably uses 'valid' padding —
    # likely changed so each stride-2 op exactly halves the feature map when
    # the network is used as a segmentation encoder. Confirm against the
    # upstream implementation before reusing elsewhere.
    x = conv2d_bn(img_input, 32, 3, 3, strides=(2, 2), padding='same')
    x = conv2d_bn(x, 32, 3, 3, padding='same')
    x = conv2d_bn(x, 64, 3, 3)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = conv2d_bn(x, 80, 1, 1, padding='same')
    x = conv2d_bn(x, 192, 3, 3, padding='same')
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    # mixed 0, 1, 2: 35 x 35 x 256
    branch1x1 = conv2d_bn(x, 64, 1, 1)
    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch5x5, branch3x3dbl, branch_pool],
        axis=channel_axis,
        name='mixed0')
    # mixed 1: 35 x 35 x 256
    branch1x1 = conv2d_bn(x, 64, 1, 1)
    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch5x5, branch3x3dbl, branch_pool],
        axis=channel_axis,
        name='mixed1')
    # mixed 2: 35 x 35 x 256
    branch1x1 = conv2d_bn(x, 64, 1, 1)
    branch5x5 = conv2d_bn(x, 48, 1, 1)
    branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)
    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch5x5, branch3x3dbl, branch_pool],
        axis=channel_axis,
        name='mixed2')
    # mixed 3: 17 x 17 x 768
    branch3x3 = conv2d_bn(x, 384, 3, 3, strides=(2, 2), padding='same')
    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(
        branch3x3dbl, 96, 3, 3, strides=(2, 2), padding='same')
    branch_pool = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.concatenate(
        [branch3x3, branch3x3dbl, branch_pool], axis=channel_axis, name='mixed3')
    # mixed 4: 17 x 17 x 768
    branch1x1 = conv2d_bn(x, 192, 1, 1)
    branch7x7 = conv2d_bn(x, 128, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
    branch7x7dbl = conv2d_bn(x, 128, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch7x7, branch7x7dbl, branch_pool],
        axis=channel_axis,
        name='mixed4')
    # mixed 5, 6: 17 x 17 x 768
    for i in range(2):
        branch1x1 = conv2d_bn(x, 192, 1, 1)
        branch7x7 = conv2d_bn(x, 160, 1, 1)
        branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
        branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
        branch7x7dbl = conv2d_bn(x, 160, 1, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
        branch_pool = AveragePooling2D(
            (3, 3), strides=(1, 1), padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = layers.concatenate(
            [branch1x1, branch7x7, branch7x7dbl, branch_pool],
            axis=channel_axis,
            name='mixed' + str(5 + i))
    # mixed 7: 17 x 17 x 768
    branch1x1 = conv2d_bn(x, 192, 1, 1)
    branch7x7 = conv2d_bn(x, 192, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)
    branch7x7dbl = conv2d_bn(x, 192, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = layers.concatenate(
        [branch1x1, branch7x7, branch7x7dbl, branch_pool],
        axis=channel_axis,
        name='mixed7')
    # mixed 8: 8 x 8 x 1280
    branch3x3 = conv2d_bn(x, 192, 1, 1)
    branch3x3 = conv2d_bn(branch3x3, 320, 3, 3,
                          strides=(2, 2), padding='same')
    branch7x7x3 = conv2d_bn(x, 192, 1, 1)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
    branch7x7x3 = conv2d_bn(
        branch7x7x3, 192, 3, 3, strides=(2, 2), padding='same')
    branch_pool = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.concatenate(
        [branch3x3, branch7x7x3, branch_pool], axis=channel_axis, name='mixed8')
    # mixed 9: 8 x 8 x 2048
    for i in range(2):
        branch1x1 = conv2d_bn(x, 320, 1, 1)
        branch3x3 = conv2d_bn(x, 384, 1, 1)
        branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
        branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
        branch3x3 = layers.concatenate(
            [branch3x3_1, branch3x3_2], axis=channel_axis, name='mixed9_' + str(i))
        branch3x3dbl = conv2d_bn(x, 448, 1, 1)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
        branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
        branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
        branch3x3dbl = layers.concatenate(
            [branch3x3dbl_1, branch3x3dbl_2], axis=channel_axis)
        branch_pool = AveragePooling2D(
            (3, 3), strides=(1, 1), padding='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = layers.concatenate(
            [branch1x1, branch3x3, branch3x3dbl, branch_pool],
            axis=channel_axis,
            name='mixed' + str(9 + i))
    if include_top:
        # Classification block
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)
    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='inception_v3')
    # load weights
    if weights == 'imagenet':
        if K.image_data_format() == 'channels_first':
            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
        if include_top:
            weights_path = get_file(
                'inception_v3_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models',
                file_hash='9a0d58056eeedaa3f26cb7ebd46da564')
        else:
            weights_path = get_file(
                'inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                file_hash='bcbd6486424b2319ff4ef7d526e38f63')
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)
    return model
def preprocess_input(x):
    """Preprocesses a numpy array encoding a batch of images.

    Delegates to `imagenet_utils.preprocess_input` with `mode='tf'`
    (presumably scaling pixel values to the [-1, 1] range — confirm
    against the Keras version in use).

    # Arguments
        x: a 4D numpy array consists of RGB values within [0, 255].
    # Returns
        Preprocessed array.
    """
    return imagenet_utils.preprocess_input(x, mode='tf')
| 15,272 | 36.898263 | 152 | py |
Nested-UNet | Nested-UNet-master/segmentation_models/backbones/inception_resnet_v2.py | # -*- coding: utf-8 -*-
"""Inception-ResNet V2 model for Keras.
Model naming and structure follows TF-slim implementation (which has some additional
layers and different number of filters from the original arXiv paper):
https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_resnet_v2.py
Pre-trained ImageNet weights are also converted from TF-slim, which can be found in:
https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models
# Reference
- [Inception-v4, Inception-ResNet and the Impact of
Residual Connections on Learning](https://arxiv.org/abs/1602.07261)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import warnings
from keras.models import Model
from keras.layers import Activation
from keras.layers import AveragePooling2D
from keras.layers import BatchNormalization
from keras.layers import Conv2D
from keras.layers import Concatenate
from keras.layers import Dense
from keras.layers import GlobalAveragePooling2D
from keras.layers import GlobalMaxPooling2D
from keras.layers import Input
from keras.layers import Lambda
from keras.layers import MaxPooling2D
from keras.utils.data_utils import get_file
from keras.engine.topology import get_source_inputs
from keras.applications import imagenet_utils
from keras import backend as K
import keras
from distutils.version import StrictVersion
if StrictVersion(keras.__version__) < StrictVersion('2.2.0'):
from keras.applications.imagenet_utils import _obtain_input_shape
else:
from keras_applications.imagenet_utils import _obtain_input_shape
BASE_WEIGHT_URL = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.7/'
def preprocess_input(x):
    """Preprocesses a numpy array encoding a batch of images.

    Delegates to `imagenet_utils.preprocess_input` with `mode='tf'`
    (presumably scaling pixel values to the [-1, 1] range — confirm
    against the Keras version in use).

    # Arguments
        x: a 4D numpy array consists of RGB values within [0, 255].
    # Returns
        Preprocessed array.
    """
    return imagenet_utils.preprocess_input(x, mode='tf')
def conv2d_bn(x,
              filters,
              kernel_size,
              strides=1,
              padding='same',
              activation='relu',
              use_bias=False,
              name=None):
    """`Conv2D`, optionally followed by batch normalization and an activation.

    # Arguments
        x: input tensor.
        filters: filters in `Conv2D`.
        kernel_size: kernel size as in `Conv2D`.
        strides: strides in `Conv2D`.
        padding: padding mode in `Conv2D`.
        activation: activation applied after the (optional) batch norm;
            `None` skips the activation layer.
        use_bias: whether the convolution uses a bias; when False a
            batch-norm layer named `name + '_bn'` is appended.
        name: base name for the ops; the activation (if any) is named
            `name + '_ac'`.

    # Returns
        Output tensor after applying `Conv2D` and `BatchNormalization`.
    """
    out = Conv2D(filters,
                 kernel_size,
                 strides=strides,
                 padding=padding,
                 use_bias=use_bias,
                 name=name)(x)
    if not use_bias:
        # A conv bias is redundant when batch norm immediately follows.
        channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
        out = BatchNormalization(axis=channel_axis,
                                 scale=False,
                                 name=None if name is None else name + '_bn')(out)
    if activation is not None:
        out = Activation(activation,
                         name=None if name is None else name + '_ac')(out)
    return out
def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'):
    """Adds one Inception-ResNet residual block.

    Three block variants are supported, selected by `block_type` (names
    follow the TF-slim implementation):
        - Inception-ResNet-A: `block_type='block35'`
        - Inception-ResNet-B: `block_type='block17'`
        - Inception-ResNet-C: `block_type='block8'`

    # Arguments
        x: input tensor.
        scale: factor applied to the residual branch before it is added
            to the shortcut: the output is `x + scale * residual`.
        block_type: one of `'block35'`, `'block17'`, `'block8'`.
        block_idx: integer used to build unique layer names; block
            `block35`/index 0 yields the prefix `'block35_0'`.
        activation: activation applied to the summed output; `None`
            leaves it linear.

    # Returns
        Output tensor for the block.

    # Raises
        ValueError: if `block_type` is not a known block name.
    """
    # Validate up front so an invalid type fails before any layers are built.
    if block_type not in ('block35', 'block17', 'block8'):
        raise ValueError('Unknown Inception-ResNet block type. '
                         'Expects "block35", "block17" or "block8", '
                         'but got: ' + str(block_type))
    if block_type == 'block35':
        branches = [
            conv2d_bn(x, 32, 1),
            conv2d_bn(conv2d_bn(x, 32, 1), 32, 3),
            conv2d_bn(conv2d_bn(conv2d_bn(x, 32, 1), 48, 3), 64, 3),
        ]
    elif block_type == 'block17':
        branches = [
            conv2d_bn(x, 192, 1),
            conv2d_bn(conv2d_bn(conv2d_bn(x, 128, 1), 160, [1, 7]), 192, [7, 1]),
        ]
    else:  # 'block8'
        branches = [
            conv2d_bn(x, 192, 1),
            conv2d_bn(conv2d_bn(conv2d_bn(x, 192, 1), 224, [1, 3]), 256, [3, 1]),
        ]
    block_name = block_type + '_' + str(block_idx)
    channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
    mixed = Concatenate(axis=channel_axis, name=block_name + '_mixed')(branches)
    # 1x1 conv (with bias, no BN/activation) maps the residual back to the
    # input's channel count so it can be added to the shortcut.
    up = conv2d_bn(mixed,
                   K.int_shape(x)[channel_axis],
                   1,
                   activation=None,
                   use_bias=True,
                   name=block_name + '_conv')
    out = Lambda(lambda inputs, scale: inputs[0] + inputs[1] * scale,
                 output_shape=K.int_shape(x)[1:],
                 arguments={'scale': scale},
                 name=block_name)([x, up])
    if activation is not None:
        out = Activation(activation, name=block_name + '_ac')(out)
    return out
def InceptionResNetV2(include_top=True,
                      weights='imagenet',
                      input_tensor=None,
                      input_shape=None,
                      pooling=None,
                      classes=1000):
    """Instantiates the Inception-ResNet v2 architecture.
    Optionally loads weights pre-trained on ImageNet.
    Note that when using TensorFlow, for best performance you should
    set `"image_data_format": "channels_last"` in your Keras config
    at `~/.keras/keras.json`.
    The model and the weights are compatible with TensorFlow, Theano and
    CNTK backends. The data format convention used by the model is
    the one specified in your Keras config file.
    Note that the default input image size for this model is 299x299, instead
    of 224x224 as in the VGG16 and ResNet models. Also, the input preprocessing
    function is different (i.e., do not use `imagenet_utils.preprocess_input()`
    with this model. Use `preprocess_input()` defined in this module instead).
    # Arguments
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
              'imagenet' (pre-training on ImageNet),
              or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is `False` (otherwise the input shape
            has to be `(299, 299, 3)` (with `'channels_last'` data format)
            or `(3, 299, 299)` (with `'channels_first'` data format).
            It should have exactly 3 inputs channels,
            and width and height should be no smaller than 139.
            E.g. `(150, 150, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the last convolutional layer.
            - `'avg'` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `'max'` means that global max pooling will be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is `True`, and
            if no `weights` argument is specified.
    # Returns
        A Keras `Model` instance.
    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
    """
    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')
    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')
    # Determine proper input shape
    input_shape = _obtain_input_shape(
        input_shape,
        default_size=299,
        min_size=139,
        data_format=K.image_data_format(),
        require_flatten=False,
        weights=weights)
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # Stem block: 35 x 35 x 192
    # NOTE(review): the stem uses padding='same' throughout, while the
    # reference Keras Inception-ResNet-v2 stem presumably uses 'valid'
    # padding — likely changed so stride-2 ops exactly halve the feature
    # maps for encoder/skip-connection use. Confirm against upstream.
    x = conv2d_bn(img_input, 32, 3, strides=2, padding='same')
    x = conv2d_bn(x, 32, 3, padding='same')
    x = conv2d_bn(x, 64, 3)
    x = MaxPooling2D(3, strides=2, padding='same')(x)
    x = conv2d_bn(x, 80, 1, padding='same')
    x = conv2d_bn(x, 192, 3, padding='same')
    x = MaxPooling2D(3, strides=2, padding='same')(x)
    # Mixed 5b (Inception-A block): 35 x 35 x 320
    branch_0 = conv2d_bn(x, 96, 1)
    branch_1 = conv2d_bn(x, 48, 1)
    branch_1 = conv2d_bn(branch_1, 64, 5)
    branch_2 = conv2d_bn(x, 64, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3)
    branch_pool = AveragePooling2D(3, strides=1, padding='same')(x)
    branch_pool = conv2d_bn(branch_pool, 64, 1)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
    x = Concatenate(axis=channel_axis, name='mixed_5b')(branches)
    # 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
    for block_idx in range(1, 11):
        x = inception_resnet_block(x,
                                   scale=0.17,
                                   block_type='block35',
                                   block_idx=block_idx)
    # Mixed 6a (Reduction-A block): 17 x 17 x 1088
    branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='same')
    branch_1 = conv2d_bn(x, 256, 1)
    branch_1 = conv2d_bn(branch_1, 256, 3)
    branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='same')
    branch_pool = MaxPooling2D(3, strides=2, padding='same')(x)
    branches = [branch_0, branch_1, branch_pool]
    x = Concatenate(axis=channel_axis, name='mixed_6a')(branches)
    # 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088
    for block_idx in range(1, 21):
        x = inception_resnet_block(x,
                                   scale=0.1,
                                   block_type='block17',
                                   block_idx=block_idx)
    # Mixed 7a (Reduction-B block): 8 x 8 x 2080
    branch_0 = conv2d_bn(x, 256, 1)
    branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='same')
    branch_1 = conv2d_bn(x, 256, 1)
    branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='same')
    branch_2 = conv2d_bn(x, 256, 1)
    branch_2 = conv2d_bn(branch_2, 288, 3)
    branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='same')
    branch_pool = MaxPooling2D(3, strides=2, padding='same')(x)
    branches = [branch_0, branch_1, branch_2, branch_pool]
    x = Concatenate(axis=channel_axis, name='mixed_7a')(branches)
    # 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
    for block_idx in range(1, 10):
        x = inception_resnet_block(x,
                                   scale=0.2,
                                   block_type='block8',
                                   block_idx=block_idx)
    # Final block8: linear activation and unscaled residual (scale=1).
    x = inception_resnet_block(x,
                               scale=1.,
                               activation=None,
                               block_type='block8',
                               block_idx=10)
    # Final convolution block: 8 x 8 x 1536
    x = conv2d_bn(x, 1536, 1, name='conv_7b')
    if include_top:
        # Classification block
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)
    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model
    model = Model(inputs, x, name='inception_resnet_v2')
    # Load weights
    if weights == 'imagenet':
        if K.image_data_format() == 'channels_first':
            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
        if include_top:
            fname = 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5'
            weights_path = get_file(fname,
                                    BASE_WEIGHT_URL + fname,
                                    cache_subdir='models',
                                    file_hash='e693bd0210a403b3192acc6073ad2e96')
        else:
            fname = 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5'
            weights_path = get_file(fname,
                                    BASE_WEIGHT_URL + fname,
                                    cache_subdir='models',
                                    file_hash='d19885ff4a710c122648d3b5c3b684e4')
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)
    return model
Nested-UNet | Nested-UNet-master/segmentation_models/backbones/backbones.py |
from .classification_models.classification_models import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152
from .classification_models.classification_models import ResNeXt50, ResNeXt101
from .inception_resnet_v2 import InceptionResNetV2
from .inception_v3 import InceptionV3
from keras.applications import DenseNet121, DenseNet169, DenseNet201
from keras.applications import VGG16
from keras.applications import VGG19
# Registry mapping backbone names to their Keras model constructors.
backbones = {
    "vgg16": VGG16,
    "vgg19": VGG19,
    "resnet18": ResNet18,
    "resnet34": ResNet34,
    "resnet50": ResNet50,
    "resnet101": ResNet101,
    "resnet152": ResNet152,
    "resnext50": ResNeXt50,
    "resnext101": ResNeXt101,
    "inceptionresnetv2": InceptionResNetV2,
    "inceptionv3": InceptionV3,
    "densenet121": DenseNet121,
    "densenet169": DenseNet169,
    "densenet201": DenseNet201,
}


def get_backbone(name, *args, **kwargs):
    """Instantiate a backbone classification model by name.

    # Arguments
        name: string key identifying the backbone (see `backbones`).
        *args, **kwargs: forwarded to the backbone constructor.

    # Returns
        A Keras model instance.

    # Raises
        KeyError: if `name` is not a registered backbone; the message
            lists the available names (the bare KeyError a plain dict
            lookup would raise gives no such guidance).
    """
    try:
        model_fn = backbones[name]
    except KeyError:
        raise KeyError('Unknown backbone name {!r}; available backbones: {}'
                       .format(name, ', '.join(sorted(backbones))))
    return model_fn(*args, **kwargs)
Nested-UNet | Nested-UNet-master/segmentation_models/backbones/classification_models/classification_models/utils.py | from keras.utils import get_file
def find_weights(weights_collection, model_name, dataset, include_top):
    """Select the weight records matching a model/dataset/include_top triple.

    # Arguments
        weights_collection: iterable of dicts describing available weights;
            each record must have 'model', 'dataset' and 'include_top' keys.
        model_name: required value of the record's 'model' field.
        dataset: required value of the record's 'dataset' field.
        include_top: required value of the record's 'include_top' field.

    # Returns
        List of matching weight-description dicts (possibly empty).
    """
    return [w for w in weights_collection
            if w['model'] == model_name
            and w['dataset'] == dataset
            and w['include_top'] == include_top]
def load_model_weights(weights_collection, model, dataset, classes, include_top):
    """Locate matching pretrained weights, download them and load into `model`.

    # Arguments
        weights_collection: iterable of weight-description dicts
            (see `find_weights`).
        model: Keras model whose `name` selects the weight record and
            into which the weights are loaded.
        dataset: dataset the weights were trained on.
        classes: number of classes; must match the record when
            `include_top` is true.
        include_top: whether the classifier head is included.

    # Raises
        ValueError: if no matching weights exist, or if `include_top`
            is true and `classes` differs from the weights' class count.
    """
    matches = find_weights(weights_collection, model.name, dataset, include_top)
    if not matches:
        raise ValueError('There is no weights for such configuration: ' +
                         'model = {}, dataset = {}, '.format(model.name, dataset) +
                         'classes = {}, include_top = {}.'.format(classes, include_top))
    weights = matches[0]
    if include_top and weights['classes'] != classes:
        raise ValueError('If using `weights` and `include_top`'
                         ' as true, `classes` should be {}'.format(weights['classes']))
    weights_path = get_file(weights['name'],
                            weights['url'],
                            cache_subdir='models',
                            md5_hash=weights['md5'])
    model.load_weights(weights_path)
| 1,263 | 38.5 | 91 | py |
Nested-UNet | Nested-UNet-master/segmentation_models/backbones/classification_models/classification_models/resnext/builder.py | import keras.backend as K
from keras.layers import Input
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import BatchNormalization
from keras.layers import Activation
from keras.layers import GlobalAveragePooling2D
from keras.layers import ZeroPadding2D
from keras.layers import Dense
from keras.models import Model
from keras.engine import get_source_inputs
import keras
from distutils.version import StrictVersion
if StrictVersion(keras.__version__) < StrictVersion('2.2.0'):
from keras.applications.imagenet_utils import _obtain_input_shape
else:
from keras_applications.imagenet_utils import _obtain_input_shape
from .params import get_conv_params
from .params import get_bn_params
from .blocks import conv_block
from .blocks import identity_block
def build_resnext(
     repetitions=(2, 2, 2, 2),
     include_top=True,
     input_tensor=None,
     input_shape=None,
     classes=1000,
     first_conv_filters=64,
     first_block_filters=64):
    """Construct a ResNeXt model graph.

    # Arguments
        repetitions: number of residual blocks in each of the four stages.
        include_top: whether to append the global-pool + dense classifier.
        input_tensor: optional Keras tensor to use as the model input.
        input_shape: optional input shape tuple (channels_last), used when
            `input_tensor` is not given.
        classes: number of output classes for the classifier head.
        first_conv_filters: filter count of the initial 7x7 convolution.
        first_block_filters: filter count of the first stage; doubled at
            each subsequent stage.

    # Returns
        A Keras `Model` instance.
    """
    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=197,
                                      data_format='channels_last',
                                      require_flatten=include_top)
    if input_tensor is None:
        img_input = Input(shape=input_shape, name='data')
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # get parameters for model layers
    no_scale_bn_params = get_bn_params(scale=False)
    bn_params = get_bn_params()
    conv_params = get_conv_params()
    init_filters = first_block_filters
    # resnext bottom
    # NOTE(review): BN applied directly to the raw input acts as learned
    # input normalization — presumably required by the converted pretrained
    # weights ('bn_data'); confirm before removing.
    x = BatchNormalization(name='bn_data', **no_scale_bn_params)(img_input)
    x = ZeroPadding2D(padding=(3, 3))(x)
    x = Conv2D(first_conv_filters, (7, 7), strides=(2, 2), name='conv0', **conv_params)(x)
    x = BatchNormalization(name='bn0', **bn_params)(x)
    x = Activation('relu', name='relu0')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='valid', name='pooling0')(x)
    # resnext body
    for stage, rep in enumerate(repetitions):
        for block in range(rep):
            # Filter count doubles at each stage: 64, 128, 256, 512 (defaults).
            filters = init_filters * (2**stage)
            # first block of first stage without strides because we have maxpooling before
            if stage == 0 and block == 0:
                x = conv_block(filters, stage, block, strides=(1, 1))(x)
            elif block == 0:
                # First block of later stages downsamples via its conv shortcut.
                x = conv_block(filters, stage, block, strides=(2, 2))(x)
            else:
                x = identity_block(filters, stage, block)(x)
    # resnext top
    if include_top:
        x = GlobalAveragePooling2D(name='pool1')(x)
        x = Dense(classes, name='fc1')(x)
        x = Activation('softmax', name='softmax')(x)
    # Ensure that the model takes into account any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model
    model = Model(inputs, x)
    return model
| 3,364 | 31.355769 | 92 | py |
Nested-UNet | Nested-UNet-master/segmentation_models/backbones/classification_models/classification_models/resnext/blocks.py | from keras.layers import Conv2D
from keras.layers import BatchNormalization
from keras.layers import Activation
from keras.layers import Add
from keras.layers import Lambda
from keras.layers import Concatenate
from keras.layers import ZeroPadding2D
from .params import get_conv_params
from .params import get_bn_params
def handle_block_names(stage, block):
    """Build the layer-name prefixes for one ResNeXt unit.

    # Arguments
        stage: zero-based stage index.
        block: zero-based block (unit) index within the stage.

    # Returns
        Tuple `(conv_name, bn_name, relu_name, sc_name)`, each starting
        with the prefix `'stage{stage+1}_unit{block+1}_'`.
    """
    prefix = 'stage{}_unit{}_'.format(stage + 1, block + 1)
    return (prefix + 'conv',
            prefix + 'bn',
            prefix + 'relu',
            prefix + 'sc')
def GroupConv2D(filters, kernel_size, conv_params, conv_name, strides=(1, 1), cardinality=32):
    """Grouped convolution built from `cardinality` parallel `Conv2D` ops.

    The input tensor's channels are split into `cardinality` equal slices
    (slicing uses the last axis, i.e. channels_last layout); each slice is
    convolved independently and the results are concatenated back along
    the channel axis.

    NOTE(review): the `filters` argument is not used to size the
    convolutions — each group's filter count is derived from the *input*
    channel count, so the output has as many channels as the input.
    Callers are expected to ensure the input already has `filters`
    channels; confirm before reusing this helper elsewhere.

    # Arguments
        filters: nominal filter count (see note above).
        kernel_size: kernel size passed to each per-group `Conv2D`.
        conv_params: extra keyword arguments for `Conv2D`.
        conv_name: base layer name; group `c` is named `conv_name + '_c'`.
        strides: strides passed to each per-group `Conv2D`.
        cardinality: number of groups; must divide the input channels.

    # Returns
        A closure mapping an input tensor to the grouped-convolution output.
    """
    def layer(input_tensor):
        grouped_channels = int(input_tensor.shape[-1]) // cardinality
        blocks = []
        for c in range(cardinality):
            # BUGFIX: bind the slice bounds eagerly via default arguments.
            # A plain closure over the loop variable `c` is late-bound, so
            # every Lambda would reference the same (final) `c` when its
            # function is re-evaluated — e.g. after model serialization /
            # deserialization. Default-argument binding freezes the bounds
            # per group while producing an identical graph at build time.
            x = Lambda(
                lambda z, lo=c * grouped_channels, hi=(c + 1) * grouped_channels:
                    z[:, :, :, lo:hi])(input_tensor)
            name = conv_name + '_' + str(c)
            x = Conv2D(grouped_channels, kernel_size, strides=strides,
                       name=name, **conv_params)(x)
            blocks.append(x)
        x = Concatenate(axis=-1)(blocks)
        return x
    return layer
def conv_block(filters, stage, block, strides=(2, 2)):
    """ResNeXt bottleneck block with a projection (conv) shortcut.

    # Arguments
        filters: filter count of the two inner conv layers; the final
            conv and the shortcut projection use `filters * 2`.
        stage: current stage index, used for generating layer names.
        block: current block index, used for generating layer names.
        strides: strides of the grouped (3x3) convolution and of the
            shortcut projection.

    # Returns
        A closure mapping an input tensor to the block output.
    """
    def layer(input_tensor):
        conv_params = get_conv_params()
        bn_params = get_bn_params()
        conv_name, bn_name, relu_name, sc_name = handle_block_names(stage, block)

        # 1x1 reduce
        y = Conv2D(filters, (1, 1), name=conv_name + '1', **conv_params)(input_tensor)
        y = BatchNormalization(name=bn_name + '1', **bn_params)(y)
        y = Activation('relu', name=relu_name + '1')(y)

        # grouped 3x3 (possibly strided)
        y = ZeroPadding2D(padding=(1, 1))(y)
        y = GroupConv2D(filters, (3, 3), conv_params, conv_name + '2', strides=strides)(y)
        y = BatchNormalization(name=bn_name + '2', **bn_params)(y)
        y = Activation('relu', name=relu_name + '2')(y)

        # 1x1 expand (doubles the channel count)
        y = Conv2D(filters * 2, (1, 1), name=conv_name + '3', **conv_params)(y)
        y = BatchNormalization(name=bn_name + '3', **bn_params)(y)

        # Projection shortcut matches the doubled channels and the stride.
        shortcut = Conv2D(filters * 2, (1, 1), name=sc_name, strides=strides, **conv_params)(input_tensor)
        shortcut = BatchNormalization(name=sc_name + '_bn', **bn_params)(shortcut)

        y = Add()([y, shortcut])
        return Activation('relu', name=relu_name)(y)
    return layer
def identity_block(filters, stage, block):
    """ResNeXt bottleneck block with an identity shortcut.

    # Arguments
        filters: filter count of the two inner conv layers; the final
            conv uses `filters * 2`.
        stage: current stage index, used for generating layer names.
        block: current block index, used for generating layer names.

    # Returns
        A closure mapping an input tensor to the block output.
    """
    def layer(input_tensor):
        conv_params = get_conv_params()
        bn_params = get_bn_params()
        conv_name, bn_name, relu_name, sc_name = handle_block_names(stage, block)

        # 1x1 reduce
        y = Conv2D(filters, (1, 1), name=conv_name + '1', **conv_params)(input_tensor)
        y = BatchNormalization(name=bn_name + '1', **bn_params)(y)
        y = Activation('relu', name=relu_name + '1')(y)

        # grouped 3x3
        y = ZeroPadding2D(padding=(1, 1))(y)
        y = GroupConv2D(filters, (3, 3), conv_params, conv_name + '2')(y)
        y = BatchNormalization(name=bn_name + '2', **bn_params)(y)
        y = Activation('relu', name=relu_name + '2')(y)

        # 1x1 expand
        y = Conv2D(filters * 2, (1, 1), name=conv_name + '3', **conv_params)(y)
        y = BatchNormalization(name=bn_name + '3', **bn_params)(y)

        # No projection: the input already carries `filters * 2` channels.
        y = Add()([y, input_tensor])
        return Activation('relu', name=relu_name)(y)
    return layer
| 4,292 | 36.657895 | 107 | py |
Nested-UNet | Nested-UNet-master/segmentation_models/backbones/classification_models/classification_models/resnet/builder.py | import keras.backend as K
from keras.layers import Input
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import BatchNormalization
from keras.layers import Activation
from keras.layers import GlobalAveragePooling2D
from keras.layers import ZeroPadding2D
from keras.layers import Dense
from keras.models import Model
from keras.engine import get_source_inputs
import keras
from distutils.version import StrictVersion
if StrictVersion(keras.__version__) < StrictVersion('2.2.0'):
from keras.applications.imagenet_utils import _obtain_input_shape
else:
from keras_applications.imagenet_utils import _obtain_input_shape
from .params import get_conv_params
from .params import get_bn_params
from .blocks import basic_conv_block
from .blocks import basic_identity_block
from .blocks import conv_block as usual_conv_block
from .blocks import identity_block as usual_identity_block
def build_resnet(
        repetitions=(2, 2, 2, 2),
        include_top=True,
        input_tensor=None,
        input_shape=None,
        classes=1000,
        block_type='usual'):
    """Assemble a pre-activation ResNet classifier.

    Args:
        repetitions: number of residual units per stage; stage ``i`` uses
            ``64 * 2**i`` filters.
        include_top: if True, append global-average-pool + Dense + softmax head.
        input_tensor: optional existing Keras tensor to build on.
        input_shape: optional (H, W, C) shape; validated by
            ``_obtain_input_shape`` (min size 197, channels_last).
        classes: number of output classes for the top Dense layer.
        block_type: 'basic' for two-conv blocks (ResNet-18/34 style), anything
            else selects the bottleneck blocks (ResNet-50+ style).

    Returns:
        A ``keras.models.Model``.
    """
    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=197,
                                      data_format='channels_last',
                                      require_flatten=include_top)
    if input_tensor is None:
        img_input = Input(shape=input_shape, name='data')
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # get parameters for model layers
    no_scale_bn_params = get_bn_params(scale=False)
    bn_params = get_bn_params()
    conv_params = get_conv_params()
    init_filters = 64
    # Select the residual-block flavour for the whole network.
    if block_type == 'basic':
        conv_block = basic_conv_block
        identity_block = basic_identity_block
    else:
        conv_block = usual_conv_block
        identity_block = usual_identity_block
    # resnet bottom: input BN (no scale) -> 7x7/2 conv -> BN -> ReLU -> 3x3/2 maxpool
    x = BatchNormalization(name='bn_data', **no_scale_bn_params)(img_input)
    x = ZeroPadding2D(padding=(3, 3))(x)
    x = Conv2D(init_filters, (7, 7), strides=(2, 2), name='conv0', **conv_params)(x)
    x = BatchNormalization(name='bn0', **bn_params)(x)
    x = Activation('relu', name='relu0')(x)
    x = ZeroPadding2D(padding=(1, 1))(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='valid', name='pooling0')(x)
    # resnet body
    for stage, rep in enumerate(repetitions):
        for block in range(rep):
            filters = init_filters * (2**stage)
            # first block of first stage without strides because we have maxpooling before
            if block == 0 and stage == 0:
                x = conv_block(filters, stage, block, strides=(1, 1))(x)
            elif block == 0:
                x = conv_block(filters, stage, block, strides=(2, 2))(x)
            else:
                x = identity_block(filters, stage, block)(x)
    # Final BN + ReLU (pre-activation networks end with an explicit activation).
    x = BatchNormalization(name='bn1', **bn_params)(x)
    x = Activation('relu', name='relu1')(x)
    # resnet top
    if include_top:
        x = GlobalAveragePooling2D(name='pool1')(x)
        x = Dense(classes, name='fc1')(x)
        x = Activation('softmax', name='softmax')(x)
    # Ensure that the model takes into account any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x)
    return model
| 3,750 | 32.491071 | 92 | py |
Nested-UNet | Nested-UNet-master/segmentation_models/backbones/classification_models/classification_models/resnet/blocks.py | from keras.layers import Conv2D
from keras.layers import BatchNormalization
from keras.layers import Activation
from keras.layers import Add
from keras.layers import ZeroPadding2D
from .params import get_conv_params
from .params import get_bn_params
def handle_block_names(stage, block):
    """Build the four layer-name prefixes for one residual unit.

    Args:
        stage: zero-based stage index (rendered 1-based in the name).
        block: zero-based unit index within the stage (rendered 1-based).

    Returns:
        Tuple ``(conv_name, bn_name, relu_name, sc_name)``, e.g.
        ``('stage1_unit1_conv', 'stage1_unit1_bn', ...)`` for (0, 0).
    """
    prefix = 'stage{}_unit{}_'.format(stage + 1, block + 1)
    return tuple(prefix + suffix for suffix in ('conv', 'bn', 'relu', 'sc'))
def basic_identity_block(filters, stage, block):
    """Pre-activation *basic* identity block: BN-ReLU-3x3conv twice, then an
    identity (no-conv) shortcut add.

    # Arguments
        filters: integer, number of filters in both 3x3 conv layers
        stage: integer, current stage label, used for generating layer names
        block: integer, current block label, used for generating layer names
    # Returns
        Output tensor for the block.
    """
    def layer(input_tensor):
        conv_params = get_conv_params()
        bn_params = get_bn_params()
        conv_name, bn_name, relu_name, sc_name = handle_block_names(stage, block)

        # First BN-ReLU-conv (pre-activation ordering).
        x = BatchNormalization(name=bn_name + '1', **bn_params)(input_tensor)
        x = Activation('relu', name=relu_name + '1')(x)
        x = ZeroPadding2D(padding=(1, 1))(x)
        x = Conv2D(filters, (3, 3), name=conv_name + '1', **conv_params)(x)

        # Second BN-ReLU-conv.
        x = BatchNormalization(name=bn_name + '2', **bn_params)(x)
        x = Activation('relu', name=relu_name + '2')(x)
        x = ZeroPadding2D(padding=(1, 1))(x)
        x = Conv2D(filters, (3, 3), name=conv_name + '2', **conv_params)(x)

        # Identity shortcut: input is added back unmodified.
        x = Add()([x, input_tensor])
        return x
    return layer
def basic_conv_block(filters, stage, block, strides=(2, 2)):
    """Pre-activation *basic* downsampling block: BN-ReLU then two 3x3 convs,
    with a strided 1x1 *projection* conv on the shortcut (unlike the identity
    block, the shortcut here does contain a conv so channel/stride shapes match).

    # Arguments
        filters: integer, number of filters in both 3x3 conv layers
        stage: integer, current stage label, used for generating layer names
        block: integer, current block label, used for generating layer names
        strides: stride of the first 3x3 conv and of the shortcut projection
    # Returns
        Output tensor for the block.
    """
    def layer(input_tensor):
        conv_params = get_conv_params()
        bn_params = get_bn_params()
        conv_name, bn_name, relu_name, sc_name = handle_block_names(stage, block)

        x = BatchNormalization(name=bn_name + '1', **bn_params)(input_tensor)
        x = Activation('relu', name=relu_name + '1')(x)
        # Shortcut branches off *after* the shared pre-activation.
        shortcut = x

        x = ZeroPadding2D(padding=(1, 1))(x)
        x = Conv2D(filters, (3, 3), strides=strides, name=conv_name + '1', **conv_params)(x)
        x = BatchNormalization(name=bn_name + '2', **bn_params)(x)
        x = Activation('relu', name=relu_name + '2')(x)
        x = ZeroPadding2D(padding=(1, 1))(x)
        x = Conv2D(filters, (3, 3), name=conv_name + '2', **conv_params)(x)

        # Projection shortcut: 1x1 strided conv aligns spatial size and channels.
        shortcut = Conv2D(filters, (1, 1), name=sc_name, strides=strides, **conv_params)(shortcut)
        x = Add()([x, shortcut])
        return x
    return layer
def conv_block(filters, stage, block, strides=(2, 2)):
    """Pre-activation *bottleneck* downsampling block: 1x1 -> 3x3 (strided) ->
    1x1 (``filters * 4`` out) with a strided 1x1 *projection* conv on the
    shortcut. (The previous docstring was copy-pasted from the identity block;
    this block DOES have a conv layer at the shortcut.)

    # Arguments
        filters: integer, filters of the first two convs; the last 1x1 conv
            emits ``filters * 4`` channels
        stage: integer, current stage label, used for generating layer names
        block: integer, current block label, used for generating layer names
        strides: stride of the middle 3x3 conv and of the shortcut projection
    # Returns
        Output tensor for the block.
    """
    def layer(input_tensor):
        conv_params = get_conv_params()
        bn_params = get_bn_params()
        conv_name, bn_name, relu_name, sc_name = handle_block_names(stage, block)

        x = BatchNormalization(name=bn_name + '1', **bn_params)(input_tensor)
        x = Activation('relu', name=relu_name + '1')(x)
        # Shortcut branches off after the shared pre-activation.
        shortcut = x

        x = Conv2D(filters, (1, 1), name=conv_name + '1', **conv_params)(x)
        x = BatchNormalization(name=bn_name + '2', **bn_params)(x)
        x = Activation('relu', name=relu_name + '2')(x)
        x = ZeroPadding2D(padding=(1, 1))(x)
        x = Conv2D(filters, (3, 3), strides=strides, name=conv_name + '2', **conv_params)(x)
        x = BatchNormalization(name=bn_name + '3', **bn_params)(x)
        x = Activation('relu', name=relu_name + '3')(x)
        x = Conv2D(filters*4, (1, 1), name=conv_name + '3', **conv_params)(x)

        # Projection shortcut: 1x1 strided conv aligns spatial size and channels.
        shortcut = Conv2D(filters*4, (1, 1), name=sc_name, strides=strides, **conv_params)(shortcut)
        x = Add()([x, shortcut])
        return x
    return layer
def identity_block(filters, stage, block):
    """Pre-activation bottleneck identity block: 1x1 -> 3x3 -> 1x1
    (``filters * 4`` output channels) plus an identity shortcut.

    Args:
        filters: width of the first two convolutions; the final 1x1 conv
            emits ``filters * 4`` channels.
        stage: stage index, used only to derive layer names.
        block: unit index within the stage, used only to derive layer names.

    Returns:
        A function mapping an input tensor to the block's output tensor.
    """
    def layer(input_tensor):
        conv_kwargs = get_conv_params()
        bn_kwargs = get_bn_params()
        conv_name, bn_name, relu_name, sc_name = handle_block_names(stage, block)

        # 1x1 reduce (pre-activation ordering: BN -> ReLU -> conv).
        out = BatchNormalization(name=bn_name + '1', **bn_kwargs)(input_tensor)
        out = Activation('relu', name=relu_name + '1')(out)
        out = Conv2D(filters, (1, 1), name=conv_name + '1', **conv_kwargs)(out)

        # 3x3 spatial conv with explicit padding.
        out = BatchNormalization(name=bn_name + '2', **bn_kwargs)(out)
        out = Activation('relu', name=relu_name + '2')(out)
        out = ZeroPadding2D(padding=(1, 1))(out)
        out = Conv2D(filters, (3, 3), name=conv_name + '2', **conv_kwargs)(out)

        # 1x1 expand to filters * 4.
        out = BatchNormalization(name=bn_name + '3', **bn_kwargs)(out)
        out = Activation('relu', name=relu_name + '3')(out)
        out = Conv2D(filters * 4, (1, 1), name=conv_name + '3', **conv_kwargs)(out)

        # Identity shortcut: input added back unmodified.
        return Add()([out, input_tensor])
    return layer
| 6,363 | 37.569697 | 100 | py |
Nested-UNet | Nested-UNet-master/segmentation_models/backbones/classification_models/tests/test_imagenet.py | import numpy as np
from skimage.io import imread
from keras.applications.imagenet_utils import decode_predictions
import sys
sys.path.insert(0, '..')
from classification_models import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152
from classification_models import ResNeXt50, ResNeXt101
from classification_models import resnet
from classification_models import resnext
models_zoo = {
'resnet18': {
'model': ResNet18,
'params': [
{
'input_shape': (224,224,3),
'dataset': 'imagenet',
'ground_truth': [(144, 0.5189058), (23, 0.17232688), (21, 0.098873824), (22, 0.03640686), (315, 0.023893135)],
'preprocessing_function': lambda x:resnet.preprocess_input(x, (224, 224), True),
}
]
},
'resnet34': {
'model': ResNet34,
'params': [
{
'input_shape': (224,224,3),
'dataset': 'imagenet',
'ground_truth': [(144, 0.88104683), (23, 0.031556014), (21, 0.024246644), (146, 0.022548646), (94, 0.0057696267)],
'preprocessing_function': lambda x:resnet.preprocess_input(x, (224, 224), True),
}
]
},
'resnet50': {
'model': ResNet50,
'params': [
{
'input_shape': (224, 224, 3),
'dataset': 'imagenet',
'ground_truth': [(21, 0.53156805), (144, 0.37913376), (23, 0.057184655), (146, 0.024926249), (22, 0.0015899206)],
'preprocessing_function': lambda x: resnet.preprocess_input(x, (224, 224), True),
},
]
},
'resnet101': {
'model': ResNet101,
'params': [
{
'input_shape': (224,224,3),
'dataset': 'imagenet',
'ground_truth': [(21, 0.96975815), (144, 0.016729029), (146, 0.00535842), (99, 0.0017561398), (22, 0.0010300555)],
'preprocessing_function': lambda x:resnet.preprocess_input(x, (224, 224), True),
}
]
},
'resnet152': {
'model': ResNet152,
'params': [
{
'input_shape': (224,224,3),
'dataset': 'imagenet',
'ground_truth': [(21, 0.59152377), (144, 0.2688002), (97, 0.0474935), (146, 0.035076432), (99, 0.014631907)],
'preprocessing_function': lambda x:resnet.preprocess_input(x, (224, 224), True),
}
]
},
'resnext50': {
'model': ResNeXt50,
'params': [
{
'input_shape': (224,224,3),
'dataset': 'imagenet',
'ground_truth': [(396, 0.97365075), (398, 0.0096320715), (409, 0.005558599), (438, 0.0028824762), (440, 0.0019731398)],
'preprocessing_function': lambda x:resnext.preprocess_input(x, (224, 224)),
}
]
},
'resnext101': {
'model': ResNeXt101,
'params': [
{
'input_shape': (224,224,3),
'dataset': 'imagenet',
'ground_truth': [(396, 0.95073587), (440, 0.016645206), (426, 0.004068849), (398, 0.0032844676), (392, 0.0022560472)],
'preprocessing_function': lambda x:resnext.preprocess_input(x, (224, 224)),
}
]
},
}
def get_top(y, top=5):
    """Return the ``top`` highest-scoring classes as ``[(index, score), ...]``.

    Args:
        y: prediction array; squeezed to 1-D before ranking.
        top: how many entries to keep (default 5).

    Returns:
        List of (class_index, score) pairs, best first.
    """
    scores = y.squeeze()
    best = scores.argsort()[::-1][:top]
    return list(zip(best, scores[best]))
def is_equal(gt, pr, eps=10e-5):
    """Compare two top-k prediction lists of (class_index, probability) pairs.

    Indices must match exactly; probabilities must agree within absolute
    tolerance ``eps``. Raises IndexError if ``pr`` is shorter than ``gt``
    (same as the index-based original).

    Returns:
        True when every pair matches, False otherwise.
    """
    for i, (idx_gt, prob_gt) in enumerate(gt):
        idx_pr, prob_pr = pr[i]
        if idx_gt != idx_pr:
            return False
        if not np.allclose(prob_gt, prob_pr, atol=eps):
            return False
    return True
def test_model(model, preprocessing_func, sample, ground_truth):
    """Run one image through `model` and check its top-5 predictions against
    `ground_truth`, printing a pass/fail report to stdout.

    Args:
        model: a Keras classification model exposing ``predict``.
        preprocessing_func: callable applied to `sample` before batching.
        sample: a single image array (H, W, C); batch dim is added here.
        ground_truth: expected list of (class_index, probability) pairs.
    """
    x = preprocessing_func(sample)
    # Add the batch dimension expected by model.predict.
    x = np.expand_dims(x, 0)
    y = model.predict(x)
    print('[INFO]', decode_predictions(y))
    pred = get_top(y)
    if is_equal(pred, ground_truth):
        print('[INFO] Test passed...\n')
    else:
        print('[WARN] TEST FAILED...')
        print('[WARN] PREDICTION', pred)
        print('[WARN] GROUND TRUTH', ground_truth)
        print()
def main():
    """Load the test image and validate every model in `models_zoo` against
    its stored ground-truth top-5 predictions."""
    path = ('../imgs/tests/seagull.jpg')
    img = imread(path)
    for model_type in models_zoo:
        for params in models_zoo[model_type]['params']:
            input_shape = params['input_shape']
            dataset = params['dataset']
            preprocessing_function = params['preprocessing_function']
            groud_truth = params['ground_truth']  # (sic) typo kept for minimal diff
            print('[INFO] Loading model {} with weights {}....'.format(model_type, dataset))
            # Entry stores the model *class*; instantiate with pretrained weights.
            model = models_zoo[model_type]['model']
            model = model(input_shape, weights=dataset, classes=1000)
            test_model(model, preprocessing_function, img, groud_truth)
if __name__ == '__main__':
main()
| 4,964 | 29.838509 | 135 | py |
Nested-UNet | Nested-UNet-master/segmentation_models/fpn/model.py | from .builder import build_fpn
from ..backbones import get_backbone
from ..utils import freeze_model
DEFAULT_FEATURE_PYRAMID_LAYERS = {
'vgg16': ('block5_conv3', 'block4_conv3', 'block3_conv3'),
'vgg19': ('block5_conv4', 'block4_conv4', 'block3_conv4'),
'resnet18': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1'),
'resnet34': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1'),
'resnet50': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1'),
'resnet101': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1'),
'resnet152': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1'),
'resnext50': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1'),
'resnext101': ('stage4_unit1_relu1', 'stage3_unit1_relu1', 'stage2_unit1_relu1'),
'inceptionv3': (228, 86, 16),
'inceptionresnetv2': (594, 260, 16),
'densenet121': (311, 139, 51),
'densenet169': (367, 139, 51),
'densenet201': (479, 139, 51),
}
def FPN(backbone_name='vgg16',
        input_shape=(None, None, 3),
        input_tensor=None,
        encoder_weights='imagenet',
        freeze_encoder=False,
        fpn_layers='default',
        pyramid_block_filters=256,
        segmentation_block_filters=128,
        upsample_rates=(2, 2, 2),
        last_upsample=4,
        interpolation='bilinear',
        use_batchnorm=True,
        classes=21,
        activation='softmax',
        dropout=None):
    """
    Implementation of FPN head for segmentation models according to:
    http://presentations.cocodataset.org/COCO17-Stuff-FAIR.pdf

    Args:
        backbone_name: (str) see available backbones
        classes: (int) a number of classes for output
        input_shape: (tuple) dimensions of input data (H, W, C)
        input_tensor: keras tensor
        encoder_weights: one of `None` (random initialization), 'imagenet' (pre-training on ImageNet)
        freeze_encoder: (bool) Set encoder layers weights as non-trainable. Useful for fine-tuning
        fpn_layers: (list) of layer names or indexes, used for pyramid; 'default'
            looks the backbone up in DEFAULT_FEATURE_PYRAMID_LAYERS
        pyramid_block_filters: (int) number of filters in `M` blocks of top-down FPN branch
        segmentation_block_filters: (int) number of filters in `P` blocks of FPN
        upsample_rates: (tuple) rates for upsampling pyramid blocks
        last_upsample: (int) rate for upsumpling concatenated pyramid predictions to
            match spatial resolution of input data
        interpolation: (str) interpolation type for upsampling layers - 'nearest' or 'bilinear'
        use_batchnorm: (bool) if True add batch normalisation layer between `Conv2D` ad `Activation` layers
        activation: (str) one of keras activations
        dropout: None or float [0, 1), dropout rate

    Returns:
        keras.models.Model
    """
    # Classification backbone without its dense head; serves as the FPN encoder.
    backbone = get_backbone(backbone_name,
                            input_shape=input_shape,
                            input_tensor=input_tensor,
                            weights=encoder_weights,
                            include_top=False)
    if fpn_layers == 'default':
        fpn_layers = DEFAULT_FEATURE_PYRAMID_LAYERS[backbone_name]
    model = build_fpn(backbone, fpn_layers,
                      classes=classes,
                      pyramid_filters=pyramid_block_filters,
                      segmentation_filters=segmentation_block_filters,
                      upsample_rates=upsample_rates,
                      use_batchnorm=use_batchnorm,
                      dropout=dropout,
                      last_upsample=last_upsample,
                      interpolation=interpolation,
                      activation=activation)
    if freeze_encoder:
        freeze_model(backbone)
    # NOTE(review): assigning to `model.name` works on older Keras versions;
    # newer releases make `name` read-only — verify against the pinned Keras.
    model.name = 'fpn-{}'.format(backbone.name)
    return model
| 3,960 | 42.054348 | 107 | py |
Nested-UNet | Nested-UNet-master/segmentation_models/fpn/builder.py | import numpy as np
from keras.layers import Conv2D
from keras.layers import Concatenate
from keras.layers import Activation
from keras.layers import SpatialDropout2D
from keras.models import Model
from .blocks import pyramid_block
from ..common import ResizeImage
from ..common import Conv2DBlock
from ..utils import extract_outputs, to_tuple
def build_fpn(backbone,
              fpn_layers,
              classes=21,
              activation='softmax',
              upsample_rates=(2,2,2),
              last_upsample=4,
              pyramid_filters=256,
              segmentation_filters=128,
              use_batchnorm=False,
              dropout=None,
              interpolation='bilinear'):
    """
    Implementation of FPN head for segmentation models according to:
    http://presentations.cocodataset.org/COCO17-Stuff-FAIR.pdf

    Args:
        backbone: Keras `Model`, some classification model without top
        fpn_layers: list of layer names or indexes, used for pyramid building
        classes: int, number of output feature maps
        activation: activation in last layer, e.g. 'sigmoid' or 'softmax'
        upsample_rates: tuple of integers, scaling rates between pyramid blocks
        pyramid_filters: int, number of filters in `M` blocks of top-down FPN branch
        segmentation_filters: int, number of filters in `P` blocks of FPN
        last_upsample: rate for upsumpling concatenated pyramid predictions to
            match spatial resolution of input data
        interpolation: 'nearest' or 'bilinear' upsampling
        dropout: float [0, 1), dropout rate
        use_batchnorm: bool, include batch normalization to FPN between `conv`
            and `relu` layers

    Returns:
        model: Keras `Model`

    Raises:
        ValueError: if `upsample_rates` and `fpn_layers` differ in length.
    """
    if len(upsample_rates) != len(fpn_layers):
        raise ValueError('Number of intermediate feature maps and upsample steps should match')
    # extract model layer outputs
    outputs = extract_outputs(backbone, fpn_layers, include_top=True)
    # add upsample rate `1` for first block
    upsample_rates = [1] + list(upsample_rates)
    # top - down path, build pyramid: each stage receives the encoder feature
    # map `c` and the previous (coarser) M block `m`.
    m = None
    pyramid = []
    for i, c in enumerate(outputs):
        m, p = pyramid_block(pyramid_filters=pyramid_filters,
                             segmentation_filters=segmentation_filters,
                             upsample_rate=upsample_rates[i],
                             use_batchnorm=use_batchnorm,
                             stage=i)(c, m)
        pyramid.append(p)
    # upsample and concatenate all pyramid layers to one spatial resolution;
    # cumulative product of rates brings each P block to the finest level.
    upsampled_pyramid = []
    for i, p in enumerate(pyramid[::-1]):
        if upsample_rates[i] > 1:
            upsample_rate = to_tuple(np.prod(upsample_rates[:i+1]))
            p = ResizeImage(upsample_rate, interpolation=interpolation)(p)
        upsampled_pyramid.append(p)
    x = Concatenate()(upsampled_pyramid)
    # final convolution
    n_filters = segmentation_filters * len(pyramid)
    x = Conv2DBlock(n_filters, (3, 3), use_batchnorm=use_batchnorm, padding='same')(x)
    if dropout is not None:
        x = SpatialDropout2D(dropout)(x)
    x = Conv2D(classes, (3, 3), padding='same')(x)
    # upsampling to original spatial resolution
    x = ResizeImage(to_tuple(last_upsample), interpolation=interpolation)(x)
    # activation
    x = Activation(activation)(x)
    model = Model(backbone.input, x)
    return model
| 3,418 | 34.614583 | 95 | py |
Nested-UNet | Nested-UNet-master/segmentation_models/fpn/blocks.py | from keras.layers import Add
from ..common import Conv2DBlock
from ..common import ResizeImage
from ..utils import to_tuple
def pyramid_block(pyramid_filters=256, segmentation_filters=128, upsample_rate=2,
                  use_batchnorm=False, stage=0):
    """One FPN pyramid stage producing the `M` (top-down) and `P` (head) blocks,
    per http://presentations.cocodataset.org/COCO17-Stuff-FAIR.pdf.

    Args:
        pyramid_filters: filters of the lateral 1x1 conv (the `M` block).
        segmentation_filters: filters of the two 3x3 head convs (the `P` block).
        upsample_rate: factor by which the incoming coarser `M` block is resized.
        use_batchnorm: include batchnorm inside the conv blocks.
        stage: stage index, used only for layer naming.

    Returns:
        ``layer(c, m=None) -> (m, p)`` where `c` is the encoder feature map and
        `m` the previous top-down block (None for the topmost stage).
    """
    def layer(c, m=None):
        # Lateral 1x1 conv on the bottom-up feature map.
        lateral = Conv2DBlock(pyramid_filters, (1, 1),
                              padding='same',
                              use_batchnorm=use_batchnorm,
                              name='pyramid_stage_{}'.format(stage))(c)
        # Merge with the upsampled coarser pyramid level, if any.
        if m is not None:
            upsampled = ResizeImage(to_tuple(upsample_rate))(m)
            lateral = Add()([lateral, upsampled])

        # Segmentation head: two 3x3 conv blocks produce the P output.
        head = Conv2DBlock(segmentation_filters, (3, 3),
                           padding='same',
                           use_batchnorm=use_batchnorm,
                           name='segm1_stage_{}'.format(stage))(lateral)
        head = Conv2DBlock(segmentation_filters, (3, 3),
                           padding='same',
                           use_batchnorm=use_batchnorm,
                           name='segm2_stage_{}'.format(stage))(head)
        return lateral, head
    return layer
| 1,750 | 33.333333 | 83 | py |
hackathon-ci-2020 | hackathon-ci-2020-master/solutions/unaccountable_penguis/pix2pix_cloudtop.py | import numpy as np
import tensorflow as tf
import os
import time
#_____________________
#Loading and preprocessing the data
#_____________________
my_path = '/home/harder/imagery'
CloudTop = np.load(my_path + '/X_train_CI20.npy')
TrueColor = np.load(my_path + '/Y_train_CI20.npy')
#sort out dark pictures (just quick naive method)
TrueColorNZ = np.delete(TrueColor,np.where(np.sum(TrueColor,axis=(1,2,3))<200000),0)
CloudTopNZ = np.delete(CloudTop,np.where(np.sum(TrueColor,axis=(1,2,3))<200000),0)
del TrueColor, CloudTop
IMG_WIDTH = 256
IMG_HEIGHT = 256
def resize(input_image, real_image, height, width):
    """Nearest-neighbour resize of both images to (height, width)."""
    target = [height, width]
    method = tf.image.ResizeMethod.NEAREST_NEIGHBOR
    return (tf.image.resize(input_image, target, method=method),
            tf.image.resize(real_image, target, method=method))
def normalize(input_image, real_image):
    """Rescale pixel values from [0, 255] into [-1, 1] for both images."""
    def to_unit_range(img):
        return (img / 127.5) - 1
    return to_unit_range(input_image), to_unit_range(real_image)
train_dataset = []
for i in range(CloudTopNZ.shape[0]):
ct = tf.cast(CloudTopNZ[i,:,:,:], tf.float32)
tc = tf.cast(TrueColorNZ[i,:,:,:], tf.float32)
ct, tc = resize(ct, tc,IMG_HEIGHT, IMG_WIDTH)
ct, tc = normalize(ct, tc)
ct = tf.expand_dims(ct, 0)
tc = tf.expand_dims(tc, 0)
train_dataset.append((ct, tc))
del TrueColorNZ, CloudTopNZ
#_______________________
#Defining the model (source: tensorflow.org/tutorials/generative/pix2pix)
#_______________________
BUFFER_SIZE = 400
BATCH_SIZE = 1
EPOCHS = 2
def downsample(filters, size, apply_batchnorm=True):
    """Encoder block: stride-2 Conv2D -> [BatchNorm] -> LeakyReLU, as a Sequential."""
    weight_init = tf.random_normal_initializer(0., 0.02)
    stack = [tf.keras.layers.Conv2D(filters, size, strides=2, padding='same',
                                    kernel_initializer=weight_init, use_bias=False)]
    if apply_batchnorm:
        stack.append(tf.keras.layers.BatchNormalization())
    stack.append(tf.keras.layers.LeakyReLU())

    block = tf.keras.Sequential()
    for item in stack:
        block.add(item)
    return block
def upsample(filters, size, apply_dropout=False):
    """Decoder block: stride-2 Conv2DTranspose -> BatchNorm -> [Dropout(0.5)] -> ReLU."""
    weight_init = tf.random_normal_initializer(0., 0.02)
    stack = [
        tf.keras.layers.Conv2DTranspose(filters, size, strides=2,
                                        padding='same',
                                        kernel_initializer=weight_init,
                                        use_bias=False),
        tf.keras.layers.BatchNormalization(),
    ]
    if apply_dropout:
        stack.append(tf.keras.layers.Dropout(0.5))
    stack.append(tf.keras.layers.ReLU())

    block = tf.keras.Sequential()
    for item in stack:
        block.add(item)
    return block
OUTPUT_CHANNELS = 3
def Generator():
    """Build the pix2pix U-Net generator: 8 downsample blocks to a 1x1
    bottleneck, then 7 upsample blocks with skip connections, closed by a
    tanh Conv2DTranspose producing a 256x256x`OUTPUT_CHANNELS` image in [-1, 1].
    """
    inputs = tf.keras.layers.Input(shape=[256,256,3])
    down_stack = [
        downsample(64, 4, apply_batchnorm=False),  # (bs, 128, 128, 64)
        downsample(128, 4),  # (bs, 64, 64, 128)
        downsample(256, 4),  # (bs, 32, 32, 256)
        downsample(512, 4),  # (bs, 16, 16, 512)
        downsample(512, 4),  # (bs, 8, 8, 512)
        downsample(512, 4),  # (bs, 4, 4, 512)
        downsample(512, 4),  # (bs, 2, 2, 512)
        downsample(512, 4),  # (bs, 1, 1, 512)
    ]
    up_stack = [
        upsample(512, 4, apply_dropout=True),  # (bs, 2, 2, 1024)
        upsample(512, 4, apply_dropout=True),  # (bs, 4, 4, 1024)
        upsample(512, 4, apply_dropout=True),  # (bs, 8, 8, 1024)
        upsample(512, 4),  # (bs, 16, 16, 1024)
        upsample(256, 4),  # (bs, 32, 32, 512)
        upsample(128, 4),  # (bs, 64, 64, 256)
        upsample(64, 4),  # (bs, 128, 128, 128)
    ]
    initializer = tf.random_normal_initializer(0., 0.02)
    # tanh keeps outputs in [-1, 1], matching the normalize() preprocessing.
    last = tf.keras.layers.Conv2DTranspose(OUTPUT_CHANNELS, 4,
                                           strides=2,
                                           padding='same',
                                           kernel_initializer=initializer,
                                           activation='tanh')  # (bs, 256, 256, 3)
    x = inputs
    # Downsampling through the model
    skips = []
    for down in down_stack:
        x = down(x)
        skips.append(x)
    # Drop the bottleneck output and reverse so skips pair with up_stack levels.
    skips = reversed(skips[:-1])
    # Upsampling and establishing the skip connections
    for up, skip in zip(up_stack, skips):
        x = up(x)
        x = tf.keras.layers.Concatenate()([x, skip])
    x = last(x)
    return tf.keras.Model(inputs=inputs, outputs=x)
generator = Generator()
LAMBDA = 100
def generator_loss(disc_generated_output, gen_output, target):
    """Total generator loss = adversarial BCE + LAMBDA * SSIM distance.

    Relies on module-level `loss_object` (BCE from logits) and `LAMBDA`.
    Returns (total_gen_loss, gan_loss, ssim_loss); note the caller in
    train_step unpacks the third value under the name `gen_l1_loss`.
    """
    # Adversarial term: the generator wants the discriminator to emit ones.
    gan_loss = loss_object(tf.ones_like(disc_generated_output), disc_generated_output)
    # ssim loss: +1 shifts the tanh range [-1, 1] to [0, 2], so max_val=2.0
    # matches the actual data range.
    ssim_loss = tf.reduce_mean(1 - tf.image.ssim(target+1, gen_output+1,2.0))
    total_gen_loss = gan_loss + (LAMBDA * ssim_loss)
    return total_gen_loss, gan_loss, ssim_loss
def Discriminator():
    """Build the pix2pix PatchGAN discriminator: the (input, target) pair is
    concatenated channel-wise and reduced to a (30, 30, 1) patch map of
    real/fake logits (see inline shape notes)."""
    initializer = tf.random_normal_initializer(0., 0.02)
    inp = tf.keras.layers.Input(shape=[256, 256, 3], name='input_image')
    tar = tf.keras.layers.Input(shape=[256, 256, 3], name='target_image')
    x = tf.keras.layers.concatenate([inp, tar])  # (bs, 256, 256, channels*2)
    down1 = downsample(64, 4, False)(x)  # (bs, 128, 128, 64)
    down2 = downsample(128, 4)(down1)  # (bs, 64, 64, 128)
    down3 = downsample(256, 4)(down2)  # (bs, 32, 32, 256)
    zero_pad1 = tf.keras.layers.ZeroPadding2D()(down3)  # (bs, 34, 34, 256)
    conv = tf.keras.layers.Conv2D(512, 4, strides=1,
                                  kernel_initializer=initializer,
                                  use_bias=False)(zero_pad1)  # (bs, 31, 31, 512)
    batchnorm1 = tf.keras.layers.BatchNormalization()(conv)
    leaky_relu = tf.keras.layers.LeakyReLU()(batchnorm1)
    zero_pad2 = tf.keras.layers.ZeroPadding2D()(leaky_relu)  # (bs, 33, 33, 512)
    # Final conv has no activation: outputs are raw logits for BCE(from_logits=True).
    last = tf.keras.layers.Conv2D(1, 4, strides=1,
                                  kernel_initializer=initializer)(zero_pad2)  # (bs, 30, 30, 1)
    return tf.keras.Model(inputs=[inp, tar], outputs=last)
discriminator = Discriminator()
loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def discriminator_loss(disc_real_output, disc_generated_output):
    """Discriminator BCE loss: real patches scored against ones plus
    generated patches scored against zeros (uses module-level `loss_object`)."""
    loss_on_real = loss_object(tf.ones_like(disc_real_output), disc_real_output)
    loss_on_fake = loss_object(tf.zeros_like(disc_generated_output), disc_generated_output)
    return loss_on_real + loss_on_fake
generator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
discriminator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
checkpoint_dir = my_path + '/training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
generator=generator,
discriminator=discriminator)
@tf.function
def train_step(input_image, target, epoch):
    """One GAN optimization step: forward both nets under gradient tapes,
    then apply generator and discriminator gradients.

    NOTE(review): `epoch` is accepted but unused in the body. Tape/loss
    scoping reconstructed as in the TF pix2pix tutorial — gradients taken
    after the `with` block; confirm original indentation.
    """
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        gen_output = generator(input_image, training=True)
        disc_real_output = discriminator([input_image, target], training=True)
        disc_generated_output = discriminator([input_image, gen_output], training=True)
        # Third return value is the SSIM term (see generator_loss), despite the name.
        gen_total_loss, gen_gan_loss, gen_l1_loss = generator_loss(disc_generated_output, gen_output, target)
        disc_loss = discriminator_loss(disc_real_output, disc_generated_output)
    generator_gradients = gen_tape.gradient(gen_total_loss,generator.trainable_variables)
    discriminator_gradients = disc_tape.gradient(disc_loss,discriminator.trainable_variables)
    generator_optimizer.apply_gradients(zip(generator_gradients,generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(discriminator_gradients,discriminator.trainable_variables))
def fit(train_ds, epochs):
    """Run `epochs` full passes over `train_ds` (a list of (input, target)
    tensor pairs), timing each epoch and saving a checkpoint per epoch.

    NOTE(review): checkpoint.save assumed inside the epoch loop (per-epoch
    save, as in the TF tutorial); confirm original indentation.
    """
    for epoch in range(epochs):
        start = time.time()
        print("Epoch: ", epoch)
        for n in range(len(train_ds)):
            (input_image, target) = train_ds[n]
            train_step(input_image, target, epoch)
        print ('Time taken for epoch {} is {} sec\n'.format(epoch + 1,
                                                            time.time()-start))
        checkpoint.save(file_prefix = checkpoint_prefix)
#____________
# Training the model
#____________
fit(train_dataset, EPOCHS)
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
X_test = np.load(my_path + '/X_test_CI20_phase2.npy')
test_dataset = []
predictions = np.zeros(X_test.shape)
for i in range(X_test.shape[0]):
ct = tf.cast(X_test[i,:,:,:], tf.float32)
ct, tc = resize(ct, ct,IMG_HEIGHT, IMG_WIDTH)
ct, tc = normalize(ct, tc)
ct = tf.expand_dims(ct, 0)
tc = tf.expand_dims(tc, 0)
test_dataset.append((ct, tc))
for i in range(X_test.shape[0]):
(test_input,_) = test_dataset[i]
pred = generator(test_input,training=False)
pred_crop = tf.squeeze(pred,0)
pred_un = (pred_crop+1)*127.5
pred_resize = tf.image.resize(pred_un, [127, 127],method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
predictions[i,:,:,:] = pred_resize.numpy()
np.save(my_path + '/Y_test_CI20_phase2.npy', predictions) | 8,978 | 39.813636 | 127 | py |
# FIX: the first line had a dataset-row header
# ("BayesianRelevance | BayesianRelevance-master/src/attack_networks.py | ")
# fused onto `import argparse`, which is not valid Python; removed.
# Imports regrouped PEP 8 style (stdlib / third-party / local); none deleted.
import argparse
import os

import numpy as np
import torch

from attacks.gradient_based import evaluate_attack
from attacks.run_attacks import *
from networks.advNN import *
from networks.baseNN import *
from networks.fullBNN import *
from utils import savedir
from utils.data import *
from utils.seeding import *
# Command-line interface for the attack/evaluation script.
parser = argparse.ArgumentParser()
parser.add_argument("--model", default="baseNN", type=str, help="baseNN, fullBNN, advNN")
parser.add_argument("--model_idx", default=0, type=int, help="Choose model idx from pre defined settings.")
parser.add_argument("--n_inputs", default=500, type=int, help="Number of test points to be attacked.")
parser.add_argument("--n_samples", default=100, type=int)
parser.add_argument("--attack_method", default="fgsm", type=str, help="fgsm, pgd")
parser.add_argument("--attack_iters", default=10, type=int, help="Number of iterations in iterative attacks.")
parser.add_argument("--attack_lrp_rule", default='epsilon', type=str, help="LRP rule used for the attacks.")
# BUG FIX: epsilon is a real-valued perturbation budget (default 0.2), but the
# original declared `type=int`, so any fractional value passed on the command
# line raised (int("0.2") is a ValueError). The default itself was silently
# left as a float since argparse does not apply `type` to defaults.
parser.add_argument("--epsilon", default=0.2, type=float, help="Strength of a perturbation.")
# SECURITY NOTE(review): `type=eval` executes arbitrary expressions from the
# command line. Kept for CLI compatibility ("True"/"False"), but a dedicated
# str-to-bool parser would be safer.
parser.add_argument("--load", default=False, type=eval, help="Load saved computations and evaluate them.")
parser.add_argument("--debug", default=False, type=eval, help="Run script in debugging mode.")
parser.add_argument("--device", default='cuda', type=str, help="cpu, cuda")
args = parser.parse_args()
# When True, additionally attack/evaluate the posterior mode (SVI only).
MODE_ATKS = False

# Debug runs shrink the workload: fewer test points and posterior samples.
if args.debug:
    n_inputs, n_samples = 100, 5
else:
    n_inputs, n_samples = args.n_inputs, args.n_samples

attack_hyperparams = {
    'epsilon': args.epsilon,
    'iters': args.attack_iters,
    'lrp_rule': args.attack_lrp_rule,
}

print("PyTorch Version: ", torch.__version__)

# DeepFool is forced onto CPU; on CUDA, newly created tensors default to GPU.
if args.attack_method == "deepfool":
    args.device = "cpu"
if args.device == "cuda":
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
# Dispatch on the requested model family: load the network, craft (or reload)
# adversarial examples, and evaluate the attack.
# FIX: the trailing dataset-row residue line ("| 6,374 | ... | py |") that
# followed this block was not valid Python; removed. Indentation (lost in the
# dump) reconstructed from the control-flow keywords.
if args.model == "baseNN":
    # --- Deterministic network ---
    model = baseNN_settings["model_" + str(args.model_idx)]
    x_test, y_test, inp_shape, out_size = load_dataset(dataset_name=model["dataset"],
                                                       n_inputs=n_inputs)[2:]
    savedir = get_model_savedir(model=args.model, dataset=model["dataset"],
                                architecture=model["architecture"],
                                debug=args.debug, model_idx=args.model_idx)

    net = baseNN(inp_shape, out_size, *list(model.values()))
    net.load(savedir=savedir, device=args.device)

    if args.load:
        x_attack = load_attack(method=args.attack_method, model_savedir=savedir)
    else:
        x_attack = attack(net=net, x_test=x_test, y_test=y_test,
                          hyperparams=attack_hyperparams,
                          device=args.device, method=args.attack_method)
        save_attack(x_test, x_attack, method=args.attack_method, model_savedir=savedir)

    evaluate_attack(net=net, x_test=x_test, x_attack=x_attack, y_test=y_test,
                    device=args.device)

elif args.model == "advNN":
    # --- Adversarially trained network (FGSM training) ---
    model = baseNN_settings["model_" + str(args.model_idx)]
    x_test, y_test, inp_shape, out_size = load_dataset(dataset_name=model["dataset"],
                                                       n_inputs=n_inputs)[2:]
    savedir = get_model_savedir(model=args.model, dataset=model["dataset"],
                                architecture=model["architecture"],
                                debug=args.debug, model_idx=args.model_idx,
                                attack_method='fgsm')

    net = advNN(inp_shape, out_size, *list(model.values()), attack_method='fgsm')
    net.load(savedir=savedir, device=args.device)

    if args.load:
        x_attack = load_attack(method=args.attack_method, model_savedir=savedir)
    else:
        x_attack = attack(net=net, x_test=x_test, y_test=y_test,
                          hyperparams=attack_hyperparams,
                          device=args.device, method=args.attack_method)
        save_attack(x_test, x_attack, method=args.attack_method, model_savedir=savedir)

    evaluate_attack(net=net, x_test=x_test, x_attack=x_attack, y_test=y_test,
                    device=args.device)

else:
    # --- Bayesian network ---
    if args.model == "fullBNN":
        m = fullBNN_settings["model_" + str(args.model_idx)]
        x_test, y_test, inp_shape, out_size = load_dataset(dataset_name=m["dataset"],
                                                           n_inputs=n_inputs)[2:]
        savedir = get_model_savedir(model=args.model, dataset=m["dataset"],
                                    architecture=m["architecture"],
                                    debug=args.debug, model_idx=args.model_idx)
        net = BNN(m["dataset"], *list(m.values())[1:], inp_shape, out_size)
    else:
        raise NotImplementedError

    net.load(savedir=savedir, device=args.device)

    if args.load:
        x_attack = load_attack(method=args.attack_method, model_savedir=savedir,
                               n_samples=n_samples)
        evaluate_attack(net=net, x_test=x_test, x_attack=x_attack, y_test=y_test,
                        device=args.device, n_samples=n_samples)
        if MODE_ATKS:
            if m["inference"] == "svi":
                mode_attack = load_attack(method=args.attack_method, model_savedir=savedir,
                                          n_samples=n_samples, atk_mode=True)
                evaluate_attack(net=net, x_test=x_test, x_attack=mode_attack, y_test=y_test,
                                device=args.device, n_samples=n_samples, avg_posterior=True)
    else:
        # NOTE(review): batch_size / num_workers are computed but never used in
        # this block — confirm whether attack() should receive them.
        batch_size = 4000 if m["inference"] == "hmc" else 128
        num_workers = 0 if args.device == "cuda" else 4

        x_attack = attack(net=net, x_test=x_test, y_test=y_test, device=args.device,
                          method=args.attack_method, n_samples=n_samples,
                          hyperparams=attack_hyperparams)
        save_attack(x_test, x_attack, method=args.attack_method,
                    model_savedir=savedir, n_samples=n_samples)
        evaluate_attack(net=net, x_test=x_test, x_attack=x_attack, y_test=y_test,
                        device=args.device, n_samples=n_samples)

        if MODE_ATKS:
            if m["inference"] == "svi":
                mode_attack = attack(net=net, x_test=x_test, y_test=y_test,
                                     device=args.device, hyperparams=attack_hyperparams,
                                     method=args.attack_method, n_samples=n_samples,
                                     avg_posterior=True)
                save_attack(x_test, mode_attack, method=args.attack_method,
                            model_savedir=savedir, n_samples=n_samples, atk_mode=True)
                evaluate_attack(net=net, x_test=x_test, x_attack=mode_attack, y_test=y_test,
                                device=args.device, n_samples=n_samples, avg_posterior=True)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.