code stringlengths 17 6.64M |
|---|
def block(x, c):
    """One ResNet block (bottleneck or basic variant) with a projection shortcut.

    NOTE(review): mutates the shared config dict ``c`` in place ('ksize',
    'stride', 'conv_filters_out') as it walks the scopes below, so the order
    of the variable scopes is load-bearing.
    """
    filters_in = x.get_shape()[(- 1)]
    # Bottleneck blocks expand the internal filter count 4x on the last conv.
    m = (4 if c['bottleneck'] else 1)
    filters_out = (m * c['block_filters_internal'])
    shortcut = x
    c['conv_filters_out'] = c['block_filters_internal']
    if c['bottleneck']:
        # 1x1 (reduce) -> 3x3 -> 1x1 (expand), stride applied in the first conv.
        with tf.variable_scope('a'):
            c['ksize'] = 1
            c['stride'] = c['block_stride']
            x = conv(x, c)
            x = bn(x, c)
            x = activation(x)
        with tf.variable_scope('b'):
            x = conv(x, c)
            x = bn(x, c)
            x = activation(x)
        with tf.variable_scope('c'):
            c['conv_filters_out'] = filters_out
            c['ksize'] = 1
            assert (c['stride'] == 1)
            # No activation here: applied after the shortcut add below.
            x = conv(x, c)
            x = bn(x, c)
    else:
        # Basic variant: two 3x3 convs, stride applied in the first.
        with tf.variable_scope('A'):
            c['stride'] = c['block_stride']
            assert (c['ksize'] == 3)
            x = conv(x, c)
            x = bn(x, c)
            x = activation(x)
        with tf.variable_scope('B'):
            c['conv_filters_out'] = filters_out
            assert (c['ksize'] == 3)
            assert (c['stride'] == 1)
            x = conv(x, c)
            x = bn(x, c)
    with tf.variable_scope('shortcut'):
        # Project the shortcut only when the shape changes (filters or stride).
        if ((filters_out != filters_in) or (c['block_stride'] != 1)):
            c['ksize'] = 1
            c['stride'] = c['block_stride']
            c['conv_filters_out'] = filters_out
            shortcut = conv(shortcut, c)
            shortcut = bn(shortcut, c)
    return activation((x + shortcut))
|
def bn(x, c):
    """Batch normalization over all axes but the last (channels).

    When c['use_bias'] is set, skips normalization entirely and just adds a
    learned bias. Otherwise uses batch statistics at training time and
    exponential moving averages at inference, selected via c['is_training'].
    """
    x_shape = x.get_shape()
    params_shape = x_shape[(- 1):]
    if c['use_bias']:
        # Bias-only mode: no normalization.
        bias = _get_variable('bias', params_shape, initializer=tf.zeros_initializer)
        return (x + bias)
    # Reduce over every axis except channels.
    axis = list(range((len(x_shape) - 1)))
    beta = _get_variable('beta', params_shape, initializer=tf.zeros_initializer)
    gamma = _get_variable('gamma', params_shape, initializer=tf.ones_initializer)
    moving_mean = _get_variable('moving_mean', params_shape, initializer=tf.zeros_initializer, trainable=False)
    moving_variance = _get_variable('moving_variance', params_shape, initializer=tf.ones_initializer, trainable=False)
    (mean, variance) = tf.nn.moments(x, axis)
    # The moving-average update ops are only collected here; the training loop
    # is responsible for actually running UPDATE_OPS_COLLECTION.
    update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, BN_DECAY)
    update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, BN_DECAY)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
    # Batch stats when training, running averages when evaluating.
    (mean, variance) = control_flow_ops.cond(c['is_training'], (lambda : (mean, variance)), (lambda : (moving_mean, moving_variance)))
    x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)
    return x
|
def fc(x, c):
    """Fully connected layer: returns x @ W + b.

    Output width comes from c['fc_units_out']; the weight matrix gets L2
    weight decay via _get_variable.
    """
    in_units = x.get_shape()[1]
    out_units = c['fc_units_out']
    w_init = tf.truncated_normal_initializer(stddev=FC_WEIGHT_STDDEV)
    w = _get_variable('weights', shape=[in_units, out_units], initializer=w_init, weight_decay=FC_WEIGHT_STDDEV)
    b = _get_variable('biases', shape=[out_units], initializer=tf.zeros_initializer)
    return tf.nn.xw_plus_b(x, w, b)
|
def _get_variable(name, shape, initializer, weight_decay=0.0, dtype='float', trainable=True):
    """A little wrapper around tf.get_variable to do weight decay and add to
    resnet collection.

    Fix: the original "docstring" was two separate string literals; the second
    one was a dead no-op expression statement, so only the first line showed
    up in help(). They are merged into a single real docstring here.
    """
    if (weight_decay > 0):
        regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    else:
        regularizer = None
    # Register in both the default variables collection and the resnet one so
    # the model's variables can be saved/restored as a group.
    collections = [tf.GraphKeys.VARIABLES, RESNET_VARIABLES]
    return tf.get_variable(name, shape=shape, initializer=initializer, dtype=dtype, regularizer=regularizer, collections=collections, trainable=trainable)
|
def conv(x, c):
    """SAME-padded 2-D convolution configured by c ('ksize', 'stride',
    'conv_filters_out'); the kernel gets weight decay via _get_variable."""
    k = c['ksize']
    s = c['stride']
    out_ch = c['conv_filters_out']
    in_ch = x.get_shape()[-1]
    kernel_shape = [k, k, in_ch, out_ch]
    init = tf.truncated_normal_initializer(stddev=CONV_WEIGHT_STDDEV)
    kernel = _get_variable('weights', shape=kernel_shape, dtype='float', initializer=init, weight_decay=CONV_WEIGHT_DECAY)
    return tf.nn.conv2d(x, kernel, [1, s, s, 1], padding='SAME')
|
def _max_pool(x, ksize=3, stride=2):
    """Square max pooling (default 3x3, stride 2) with SAME padding."""
    window = [1, ksize, ksize, 1]
    strides = [1, stride, stride, 1]
    return tf.nn.max_pool(x, ksize=window, strides=strides, padding='SAME')
|
def read_cifar10(filename_queue):
    'Reads and parses examples from CIFAR10 data files.\n\n Recommendation: if you want N-way read parallelism, call this function\n N times. This will give you N independent Readers reading different\n files & positions within those files, which will give better mixing of\n examples.\n\n Args:\n filename_queue: A queue of strings with the filenames to read from.\n\n Returns:\n An object representing a single example, with the following fields:\n height: number of rows in the result (32)\n width: number of columns in the result (32)\n depth: number of color channels in the result (3)\n key: a scalar string Tensor describing the filename & record number\n for this example.\n label: an int32 Tensor with the label in the range 0..9.\n uint8image: a [height, width, depth] uint8 Tensor with the image data\n '
    # Simple record holder; fields are attached dynamically below.
    class CIFAR10Record(object):
        pass
    result = CIFAR10Record()
    # CIFAR-10 binary layout: 1 label byte followed by a 32x32x3 image.
    label_bytes = 1
    result.height = 32
    result.width = 32
    result.depth = 3
    image_bytes = ((result.height * result.width) * result.depth)
    record_bytes = (label_bytes + image_bytes)
    # Records are fixed length, so no framing bytes are needed in the files.
    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
    (result.key, value) = reader.read(filename_queue)
    record_bytes = tf.decode_raw(value, tf.uint8)
    # First byte of the record is the label.
    result.label = tf.cast(tf.slice(record_bytes, [0], [label_bytes]), tf.int32)
    # Remaining bytes are the image stored depth-major (CHW); convert to HWC.
    depth_major = tf.reshape(tf.slice(record_bytes, [label_bytes], [image_bytes]), [result.depth, result.height, result.width])
    result.uint8image = tf.transpose(depth_major, [1, 2, 0])
    return result
|
def _generate_image_and_label_batch(image, label, min_queue_examples, batch_size, shuffle):
    'Construct a queued batch of images and labels.\n\n Args:\n image: 3-D Tensor of [height, width, 3] of type.float32.\n label: 1-D Tensor of type.int32\n min_queue_examples: int32, minimum number of samples to retain\n in the queue that provides of batches of examples.\n batch_size: Number of images per batch.\n shuffle: boolean indicating whether to use a shuffling queue.\n\n Returns:\n images: Images. 4D tensor of [batch_size, height, width, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n '
    num_preprocess_threads = 16
    if shuffle:
        # Shuffling queue for training; capacity leaves headroom of 3 batches
        # above the minimum needed for decent example mixing.
        (images, label_batch) = tf.train.shuffle_batch([image, label], batch_size=batch_size, num_threads=num_preprocess_threads, capacity=(min_queue_examples + (3 * batch_size)), min_after_dequeue=min_queue_examples)
    else:
        # Deterministic FIFO batching for evaluation.
        (images, label_batch) = tf.train.batch([image, label], batch_size=batch_size, num_threads=num_preprocess_threads, capacity=(min_queue_examples + (3 * batch_size)))
    # Flatten labels from [batch_size, 1] to [batch_size].
    return (images, tf.reshape(label_batch, [batch_size]))
|
def distorted_inputs(data_dir, batch_size):
    'Construct distorted input for CIFAR training using the Reader ops.\n\n Args:\n data_dir: Path to the CIFAR-10 data directory.\n batch_size: Number of images per batch.\n\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n '
    # Training split: data_batch_1.bin .. data_batch_5.bin.
    filenames = [os.path.join(data_dir, 'cifar-10-batches-bin', ('data_batch_%d.bin' % i)) for i in xrange(1, 6)]
    for f in filenames:
        if (not tf.gfile.Exists(f)):
            raise ValueError(('Failed to find file: ' + f))
    filename_queue = tf.train.string_input_producer(filenames)
    read_input = read_cifar10(filename_queue)
    reshaped_image = tf.cast(read_input.uint8image, tf.float32)
    height = IMAGE_SIZE
    width = IMAGE_SIZE
    # Data augmentation: random crop, horizontal flip, brightness, contrast.
    distorted_image = tf.random_crop(reshaped_image, [height, width, 3])
    distorted_image = tf.image.random_flip_left_right(distorted_image)
    distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)
    distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8)
    # Per-image standardization (zero mean, unit variance).
    float_image = tf.image.per_image_whitening(distorted_image)
    # Keep at least 40% of the training epoch queued to get good shuffling.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int((NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * min_fraction_of_examples_in_queue))
    print(('Filling queue with %d CIFAR images before starting to train. This will take a few minutes.' % min_queue_examples))
    return _generate_image_and_label_batch(float_image, read_input.label, min_queue_examples, batch_size, shuffle=True)
|
def inputs(eval_data, data_dir, batch_size):
    'Construct input for CIFAR evaluation using the Reader ops.\n\n Args:\n eval_data: bool, indicating if one should use the train or eval data set.\n data_dir: Path to the CIFAR-10 data directory.\n batch_size: Number of images per batch.\n\n Returns:\n images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.\n labels: Labels. 1D tensor of [batch_size] size.\n '
    if (not eval_data):
        # Deliberately disabled by the author; only the eval path is used.
        # NOTE(review): this branch also omits the 'cifar-10-batches-bin'
        # subdirectory used elsewhere, but it is unreachable past the assert.
        assert False, "hack. shouldn't go here"
        filenames = [os.path.join(data_dir, ('data_batch_%d.bin' % i)) for i in xrange(1, 6)]
        num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
    else:
        filenames = [os.path.join(data_dir, 'cifar-10-batches-bin', 'test_batch.bin')]
        num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
    for f in filenames:
        if (not tf.gfile.Exists(f)):
            raise ValueError(('Failed to find file: ' + f))
    filename_queue = tf.train.string_input_producer(filenames)
    read_input = read_cifar10(filename_queue)
    reshaped_image = tf.cast(read_input.uint8image, tf.float32)
    height = IMAGE_SIZE
    width = IMAGE_SIZE
    # Evaluation uses a deterministic central crop/pad instead of augmentation.
    resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image, width, height)
    float_image = tf.image.per_image_whitening(resized_image)
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int((num_examples_per_epoch * min_fraction_of_examples_in_queue))
    # No shuffling for evaluation batches.
    return _generate_image_and_label_batch(float_image, read_input.label, min_queue_examples, batch_size, shuffle=False)
|
def maybe_download_and_extract():
    """Download and extract the tarball from Alex's website.

    Downloads DATA_URL into FLAGS.data_dir (skipping the download when the
    tarball already exists) and extracts it there.

    Fix: the original leaked the tarfile handle (``tarfile.open(...)`` was
    never closed); a ``with`` block now closes it deterministically.
    """
    dest_directory = FLAGS.data_dir
    if (not os.path.exists(dest_directory)):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[(- 1)]
    filepath = os.path.join(dest_directory, filename)
    if (not os.path.exists(filepath)):
        def _progress(count, block_size, total_size):
            # '\r' keeps the progress report on a single console line.
            sys.stdout.write(('\r>> Downloading %s %.1f%%' % (filename, ((float((count * block_size)) / float(total_size)) * 100.0))))
            sys.stdout.flush()
        (filepath, _) = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    # NOTE: extraction runs on every call, even if already extracted.
    with tarfile.open(filepath, 'r:gz') as tar:
        tar.extractall(dest_directory)
|
def main(argv=None):
    """Build CIFAR-10 input pipelines and a small ResNet, then train it."""
    maybe_download_and_extract()
    (images_train, labels_train) = distorted_inputs(FLAGS.data_dir, FLAGS.batch_size)
    # eval_data=True -> test split (the train branch of inputs() is disabled).
    (images_val, labels_val) = inputs(True, FLAGS.data_dir, FLAGS.batch_size)
    is_training = tf.placeholder('bool', [], name='is_training')
    # One placeholder switches both the input batches here and batch-norm
    # statistics inside the model graph.
    (images, labels) = tf.cond(is_training, (lambda : (images_train, labels_train)), (lambda : (images_val, labels_val)))
    logits = inference_small(images, num_classes=10, is_training=is_training, use_bias=(not FLAGS.use_bn), num_blocks=3)
    train(is_training, logits, images, labels)
|
def generate_prompt(data_point):
    """Format a record with 'instruction'/'input'/'output' keys into a single
    training prompt string.

    NOTE(review): the input branch omits the usual preamble and the
    '### Instruction:' header present in the no-input branch — confirm this
    asymmetry is intentional.
    """
    if data_point['input']:
        return f''' # noqa: E501
{data_point['instruction']}
### input:
{data_point['input']}
### Response:
{data_point['output']}'''
    else:
        return f'''Below is an instruction that describes a task. Write a response that appropriately completes the request. # noqa: E501
### Instruction:
{data_point['instruction']}
### Response:
{data_point['output']}'''
|
def generate_and_tokenize_prompt(data_point):
    """Tokenize the full prompt and mask the user-prompt portion of the labels
    with -100 so the loss is computed only on the response tokens."""
    tokenized = tokenize(generate_prompt(data_point))
    # Tokenize the same record with an empty output to measure the prefix.
    prompt_only = generate_prompt({**data_point, 'output': ''})
    prefix_len = len(tokenize(prompt_only, add_eos_token=False)['input_ids'])
    masked_prefix = [-100] * prefix_len
    tokenized['labels'] = masked_prefix + tokenized['labels'][prefix_len:]
    return tokenized
|
def tokenize(prompt, add_eos_token=True):
    """Tokenize prompt truncated to 256 tokens; optionally append EOS when
    there is room, and copy input_ids into labels."""
    cutoff_len = 256
    result = tokenizer(prompt, truncation=True, max_length=cutoff_len, padding=False, return_tensors=None)
    ids = result['input_ids']
    ends_with_eos = (ids[-1] == tokenizer.eos_token_id)
    if not ends_with_eos and len(ids) < cutoff_len and add_eos_token:
        ids.append(tokenizer.eos_token_id)
        result['attention_mask'].append(1)
    result['labels'] = ids.copy()
    return result
|
def main(load_8bit: bool=False, base_model: str='', lora_weights: str='tloen/alpaca-lora-7b', share_gradio: bool=True):
    """Load a LLaMA base model plus LoRA weights and serve a Gradio demo.

    Model placement depends on the module-level ``device`` ('cuda', 'mps',
    or CPU fallback).
    """
    assert base_model, "Please specify a --base_model, e.g. --base_model='decapoda-research/llama-7b-hf'"
    tokenizer = LlamaTokenizer.from_pretrained(base_model)
    if (device == 'cuda'):
        model = LlamaForCausalLM.from_pretrained(base_model, load_in_8bit=load_8bit, torch_dtype=torch.float16, device_map='auto')
        model = PeftModel.from_pretrained(model, lora_weights, torch_dtype=torch.float16)
    elif (device == 'mps'):
        model = LlamaForCausalLM.from_pretrained(base_model, device_map={'': device}, torch_dtype=torch.float16)
        model = PeftModel.from_pretrained(model, lora_weights, device_map={'': device}, torch_dtype=torch.float16)
    else:
        model = LlamaForCausalLM.from_pretrained(base_model, device_map={'': device}, low_cpu_mem_usage=True)
        model = PeftModel.from_pretrained(model, lora_weights, device_map={'': device})
    # Pin special-token ids to LLaMA's conventional values (pad=0, bos=1, eos=2).
    model.config.pad_token_id = tokenizer.pad_token_id = 0
    model.config.bos_token_id = 1
    model.config.eos_token_id = 2
    if (not load_8bit):
        model.half()
    model.eval()
    # NOTE(review): lexicographic string compare of torch.__version__ — works
    # for '1.x' vs '2.x' but is fragile in general.
    if ((torch.__version__ >= '2') and (sys.platform != 'win32')):
        model = torch.compile(model)
    def evaluate(instruction, input=None, temperature=0.1, top_p=0.75, top_k=40, num_beams=4, max_new_tokens=128, **kwargs):
        # Uses the two-argument generate_prompt(instruction, input) defined
        # elsewhere in this module.
        prompt = generate_prompt(instruction, input)
        inputs = tokenizer(prompt, return_tensors='pt')
        input_ids = inputs['input_ids'].to(device)
        # Returns one sequence per beam (num_return_sequences=num_beams).
        generation_config = GenerationConfig(temperature=temperature, top_p=top_p, top_k=top_k, num_beams=num_beams, num_return_sequences=num_beams, **kwargs)
        with torch.no_grad():
            generation_output = model.generate(input_ids=input_ids, generation_config=generation_config, return_dict_in_generate=True, output_scores=True, max_new_tokens=max_new_tokens)
        s = []
        for i in range(num_beams):
            temp = generation_output.sequences[i]
            s.append(tokenizer.decode(temp, skip_special_tokens=True))
        output = ''
        for cur in s:
            # Keep only the text after the response marker of each beam.
            output += (cur.split('### Response:')[1].strip() + '\n')
        return output
    gr.Interface(fn=evaluate, inputs=[gr.components.Textbox(lines=2, label='Instruction', placeholder='Tell me about alpacas.'), gr.components.Textbox(lines=2, label='Input', placeholder='none'), gr.components.Slider(minimum=0, maximum=1, value=0.1, label='Temperature'), gr.components.Slider(minimum=0, maximum=1, value=0.75, label='Top p'), gr.components.Slider(minimum=0, maximum=100, step=1, value=40, label='Top k'), gr.components.Slider(minimum=1, maximum=4, step=1, value=4, label='Beams'), gr.components.Slider(minimum=1, maximum=2000, step=1, value=128, label='Max tokens')], outputs=[gr.inputs.Textbox(lines=10, label='Output')], title='Wiselab-LoRA', description='Wiselab-LoRA is a 7B-parameter LLaMA-LoRA based model.').launch(share=share_gradio)
    # The string below is commented-out README test code kept as a no-op literal.
    '\n # testing code for readme\n for instruction in [\n "Tell me about alpacas.",\n "Tell me about the president of Mexico in 2019.",\n "Tell me about the king of France in 2019.",\n "List all Canadian provinces in alphabetical order.",\n "Write a Python program that prints the first 10 Fibonacci numbers.",\n "Write a program that prints the numbers from 1 to 100. But for multiples of three print \'Fizz\' instead of the number and for the multiples of five print \'Buzz\'. For numbers which are multiples of both three and five print \'FizzBuzz\'.", # noqa: E501\n "Tell me five words that rhyme with \'shock\'.",\n "Translate the sentence \'I have no mouth but I must scream\' into Spanish.",\n "Count up from 1 to 500.",\n ]:\n print("Instruction:", instruction)\n print("Response:", evaluate(instruction))\n print()\n '
|
def generate_prompt(instruction, input=None):
    """Build an Alpaca-style inference prompt, with or without an input
    context block; the response section is left empty for generation."""
    if input:
        return f'''Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. # noqa: E501
### Instruction:
{instruction}
### Input:
{input}
### Response:
'''
    else:
        return f'''Below is an instruction that describes a task. Write a response that appropriately completes the request. # noqa: E501
### Instruction:
{instruction}
### Response:
'''
|
def train(base_model: str='', data_path: str='yahma/alpaca-cleaned', output_dir: str='/common/users/jj635/llama/mycheckpoint/', batch_size: int=128, micro_batch_size: int=4, num_epochs: int=3, learning_rate: float=0.0003, cutoff_len: int=256, val_set_size: int=0, lora_r: int=8, lora_alpha: int=16, lora_dropout: float=0.05, lora_target_modules: List[str]=['q_proj', 'v_proj'], train_on_inputs: bool=True, group_by_length: bool=False, wandb_project: str='', wandb_run_name: str='', wandb_watch: str='', wandb_log_model: str='', resume_from_checkpoint: str=None):
    # Fine-tune a LLaMA base model with LoRA adapters (8-bit base weights).
    # NOTE(review): lora_target_modules uses a mutable default argument —
    # harmless as long as callers never mutate it, but worth confirming.
    print(f'''Training Alpaca-LoRA model with params:
base_model: {base_model}
data_path: {data_path}
output_dir: {output_dir}
batch_size: {batch_size}
micro_batch_size: {micro_batch_size}
num_epochs: {num_epochs}
learning_rate: {learning_rate}
cutoff_len: {cutoff_len}
val_set_size: {val_set_size}
lora_r: {lora_r}
lora_alpha: {lora_alpha}
lora_dropout: {lora_dropout}
lora_target_modules: {lora_target_modules}
train_on_inputs: {train_on_inputs}
group_by_length: {group_by_length}
wandb_project: {wandb_project}
wandb_run_name: {wandb_run_name}
wandb_watch: {wandb_watch}
wandb_log_model: {wandb_log_model}
resume_from_checkpoint: {resume_from_checkpoint}
''')
    assert base_model, "Please specify a --base_model, e.g. --base_model='decapoda-research/llama-7b-hf'"
    # Effective batch = micro_batch_size * gradient_accumulation_steps (per world).
    gradient_accumulation_steps = (batch_size // micro_batch_size)
    device_map = 'auto'
    world_size = int(os.environ.get('WORLD_SIZE', 1))
    ddp = (world_size != 1)
    if ddp:
        # Under DDP each rank owns one device and a share of the accumulation.
        device_map = {'': int((os.environ.get('LOCAL_RANK') or 0))}
        gradient_accumulation_steps = (gradient_accumulation_steps // world_size)
    # W&B is enabled either by argument or by a pre-set environment variable.
    use_wandb = ((len(wandb_project) > 0) or (('WANDB_PROJECT' in os.environ) and (len(os.environ['WANDB_PROJECT']) > 0)))
    if (len(wandb_project) > 0):
        os.environ['WANDB_PROJECT'] = wandb_project
    if (len(wandb_watch) > 0):
        os.environ['WANDB_WATCH'] = wandb_watch
    if (len(wandb_log_model) > 0):
        os.environ['WANDB_LOG_MODEL'] = wandb_log_model
    model = LlamaForCausalLM.from_pretrained(base_model, load_in_8bit=True, torch_dtype=torch.float16, device_map=device_map)
    tokenizer = LlamaTokenizer.from_pretrained(base_model)
    # pad with id 0 (unk) on the left so generation-style batching works.
    tokenizer.pad_token_id = 0
    tokenizer.padding_side = 'left'
    def tokenize(prompt, add_eos_token=True):
        # Truncate to cutoff_len; append EOS only when there is room.
        result = tokenizer(prompt, truncation=True, max_length=cutoff_len, padding=False, return_tensors=None)
        if ((result['input_ids'][(- 1)] != tokenizer.eos_token_id) and (len(result['input_ids']) < cutoff_len) and add_eos_token):
            result['input_ids'].append(tokenizer.eos_token_id)
            result['attention_mask'].append(1)
        result['labels'] = result['input_ids'].copy()
        return result
    def generate_and_tokenize_prompt(data_point):
        full_prompt = generate_prompt(data_point)
        tokenized_full_prompt = tokenize(full_prompt)
        if (not train_on_inputs):
            # Mask the prompt prefix with -100 so loss covers only the response.
            user_prompt = generate_prompt({**data_point, 'output': ''})
            tokenized_user_prompt = tokenize(user_prompt, add_eos_token=False)
            user_prompt_len = len(tokenized_user_prompt['input_ids'])
            tokenized_full_prompt['labels'] = (([(- 100)] * user_prompt_len) + tokenized_full_prompt['labels'][user_prompt_len:])
        return tokenized_full_prompt
    model = prepare_model_for_int8_training(model)
    config = LoraConfig(r=lora_r, lora_alpha=lora_alpha, target_modules=lora_target_modules, lora_dropout=lora_dropout, bias='none', task_type='CAUSAL_LM')
    model = get_peft_model(model, config)
    if (data_path.endswith('.json') or data_path.endswith('.jsonl')):
        data = load_dataset('json', data_files=data_path)
    else:
        data = load_dataset(data_path)
    if resume_from_checkpoint:
        # Prefer a full trainer checkpoint; fall back to adapter-only weights
        # (in which case the Trainer itself must not resume — hence False).
        checkpoint_name = os.path.join(resume_from_checkpoint, 'pytorch_model.bin')
        if (not os.path.exists(checkpoint_name)):
            checkpoint_name = os.path.join(resume_from_checkpoint, 'adapter_model.bin')
            resume_from_checkpoint = False
        if os.path.exists(checkpoint_name):
            print(f'Restarting from {checkpoint_name}')
            adapters_weights = torch.load(checkpoint_name)
            model = set_peft_model_state_dict(model, adapters_weights)
        else:
            print(f'Checkpoint {checkpoint_name} not found')
    model.print_trainable_parameters()
    if (val_set_size > 0):
        train_val = data['train'].train_test_split(test_size=val_set_size, shuffle=True, seed=42)
        train_data = train_val['train'].shuffle().map(generate_and_tokenize_prompt)
        val_data = train_val['test'].shuffle().map(generate_and_tokenize_prompt)
    else:
        train_data = data['train'].shuffle().map(generate_and_tokenize_prompt)
        val_data = None
    if ((not ddp) and (torch.cuda.device_count() > 1)):
        # Naive model parallelism across local GPUs when not using DDP.
        model.is_parallelizable = True
        model.model_parallel = True
    trainer = transformers.Trainer(model=model, train_dataset=train_data, eval_dataset=val_data, args=transformers.TrainingArguments(per_device_train_batch_size=micro_batch_size, gradient_accumulation_steps=gradient_accumulation_steps, warmup_steps=100, num_train_epochs=num_epochs, learning_rate=learning_rate, fp16=True, logging_steps=10, optim='adamw_torch', evaluation_strategy=('steps' if (val_set_size > 0) else 'no'), save_strategy='steps', eval_steps=(200 if (val_set_size > 0) else None), save_steps=200, output_dir=output_dir, save_total_limit=3, load_best_model_at_end=(True if (val_set_size > 0) else False), ddp_find_unused_parameters=(False if ddp else None), group_by_length=group_by_length, report_to=('wandb' if use_wandb else None), run_name=(wandb_run_name if use_wandb else None)), data_collator=transformers.DataCollatorForSeq2Seq(tokenizer, pad_to_multiple_of=8, return_tensors='pt', padding=True))
    model.config.use_cache = False
    # Monkeypatch state_dict so checkpoints contain only the LoRA weights.
    old_state_dict = model.state_dict
    model.state_dict = (lambda self, *_, **__: get_peft_model_state_dict(self, old_state_dict())).__get__(model, type(model))
    if ((torch.__version__ >= '2') and (sys.platform != 'win32')):
        model = torch.compile(model)
    trainer.train(resume_from_checkpoint=resume_from_checkpoint)
    model.save_pretrained(output_dir)
    print("\n If there's a warning about missing keys above, please disregard :)")
|
def generate_prompt(data_point):
    """Format a record with 'instruction'/'input'/'output' keys into a single
    training prompt string (duplicate of the earlier definition; later defs
    shadow earlier ones at import time)."""
    if data_point['input']:
        return f''' # noqa: E501
{data_point['instruction']}
### input:
{data_point['input']}
### Response:
{data_point['output']}'''
    else:
        return f'''Below is an instruction that describes a task. Write a response that appropriately completes the request. # noqa: E501
### Instruction:
{data_point['instruction']}
### Response:
{data_point['output']}'''
|
def generate_prompt(data_point):
    """Format a record with 'instruction'/'input'/'output' keys into a single
    training prompt string (another duplicate definition; the last one defined
    wins at module scope)."""
    if data_point['input']:
        return f''' # noqa: E501
{data_point['instruction']}
### input:
{data_point['input']}
### Response:
{data_point['output']}'''
    else:
        return f'''Below is an instruction that describes a task. Write a response that appropriately completes the request. # noqa: E501
### Instruction:
{data_point['instruction']}
### Response:
{data_point['output']}'''
|
def generate_and_tokenize_prompt(data_point):
    """Tokenize the full prompt and mask the user-prompt prefix of the labels
    with -100 so loss is computed only on the response tokens."""
    full_prompt = generate_prompt(data_point)
    tokenized_full_prompt = tokenize(full_prompt)
    # Re-tokenize with an empty output to measure the prompt-only prefix length.
    user_prompt = generate_prompt({**data_point, 'output': ''})
    tokenized_user_prompt = tokenize(user_prompt, add_eos_token=False)
    user_prompt_len = len(tokenized_user_prompt['input_ids'])
    tokenized_full_prompt['labels'] = (([(- 100)] * user_prompt_len) + tokenized_full_prompt['labels'][user_prompt_len:])
    return tokenized_full_prompt
|
def tokenize(prompt, add_eos_token=True):
    """Tokenize prompt truncated to 256 tokens using the module-level
    tokenizer; append EOS when requested and there is room, and copy
    input_ids into labels for causal-LM training."""
    cutoff_len = 256
    result = tokenizer(prompt, truncation=True, max_length=cutoff_len, padding=False, return_tensors=None)
    # Append EOS only if the last token isn't already EOS and truncation
    # didn't fill the window.
    if ((result['input_ids'][(- 1)] != tokenizer.eos_token_id) and (len(result['input_ids']) < cutoff_len) and add_eos_token):
        result['input_ids'].append(tokenizer.eos_token_id)
        result['attention_mask'].append(1)
    result['labels'] = result['input_ids'].copy()
    return result
|
def permute(w):
    # Reorder a (dim, dim) attention weight matrix between the interleaved and
    # the half-split rotary-embedding layouts by swapping the pair axis per
    # head. Relies on module-level n_heads and dim — presumably the model's
    # head count and hidden size (TODO confirm at the definition site).
    return w.view(n_heads, ((dim // n_heads) // 2), 2, dim).transpose(1, 2).reshape(dim, dim)
|
def unpermute(w):
    # Inverse of permute(): the intermediate view lists the swapped axes in the
    # opposite order, so transpose(1, 2) undoes the reordering. Uses the same
    # module-level n_heads and dim globals.
    return w.view(n_heads, 2, ((dim // n_heads) // 2), dim).transpose(1, 2).reshape(dim, dim)
|
def translate_state_dict_key(k):
    """Map a Hugging Face (PEFT-wrapped) LLaMA state-dict key to the original
    Meta checkpoint key.

    Returns None for keys that should be dropped (rotary ``inv_freq`` buffers
    and LoRA adapter weights inside transformer layers). Raises
    NotImplementedError for unrecognized keys, now with the offending key in
    the message instead of a bare ``print`` followed by a message-less raise.
    """
    k = k.replace('base_model.model.', '')
    # Top-level (non-layer) weights.
    if (k == 'model.embed_tokens.weight'):
        return 'tok_embeddings.weight'
    if (k == 'model.norm.weight'):
        return 'norm.weight'
    if (k == 'lm_head.weight'):
        return 'output.weight'
    if k.startswith('model.layers.'):
        layer = k.split('.')[2]
        # HF suffix -> original per-layer name; replaces the long elif chain.
        suffix_map = {
            '.self_attn.q_proj.weight': 'attention.wq.weight',
            '.self_attn.k_proj.weight': 'attention.wk.weight',
            '.self_attn.v_proj.weight': 'attention.wv.weight',
            '.self_attn.o_proj.weight': 'attention.wo.weight',
            '.mlp.gate_proj.weight': 'feed_forward.w1.weight',
            '.mlp.down_proj.weight': 'feed_forward.w2.weight',
            '.mlp.up_proj.weight': 'feed_forward.w3.weight',
            '.input_layernorm.weight': 'attention_norm.weight',
            '.post_attention_layernorm.weight': 'ffn_norm.weight',
        }
        for suffix, target in suffix_map.items():
            if k.endswith(suffix):
                return f'layers.{layer}.{target}'
        # Rotary frequency buffers and LoRA adapter weights are dropped.
        if (k.endswith('rotary_emb.inv_freq') or ('lora' in k)):
            return None
        raise NotImplementedError(f'unrecognized layer key: {k}')
    raise NotImplementedError(f'unrecognized key: {k}')
|
def main(load_8bit: bool=False, base_model: str='', lora_weights: str='tloen/alpaca-lora-7b', share_gradio: bool=True):
    """Load a LLaMA base model plus LoRA weights and serve a Gradio demo
    (duplicate of the earlier gradio main; later definition shadows it)."""
    assert base_model, "Please specify a --base_model, e.g. --base_model='decapoda-research/llama-7b-hf'"
    tokenizer = LlamaTokenizer.from_pretrained(base_model)
    # Placement strategy depends on the module-level ``device``.
    if (device == 'cuda'):
        model = LlamaForCausalLM.from_pretrained(base_model, load_in_8bit=load_8bit, torch_dtype=torch.float16, device_map='auto')
        model = PeftModel.from_pretrained(model, lora_weights, torch_dtype=torch.float16)
    elif (device == 'mps'):
        model = LlamaForCausalLM.from_pretrained(base_model, device_map={'': device}, torch_dtype=torch.float16)
        model = PeftModel.from_pretrained(model, lora_weights, device_map={'': device}, torch_dtype=torch.float16)
    else:
        model = LlamaForCausalLM.from_pretrained(base_model, device_map={'': device}, low_cpu_mem_usage=True)
        model = PeftModel.from_pretrained(model, lora_weights, device_map={'': device})
    # Pin special-token ids to LLaMA's conventional values (pad=0, bos=1, eos=2).
    model.config.pad_token_id = tokenizer.pad_token_id = 0
    model.config.bos_token_id = 1
    model.config.eos_token_id = 2
    if (not load_8bit):
        model.half()
    model.eval()
    # NOTE(review): lexicographic version compare — fine for '1.x' vs '2.x'.
    if ((torch.__version__ >= '2') and (sys.platform != 'win32')):
        model = torch.compile(model)
    def evaluate(instruction, input=None, temperature=0.1, top_p=0.75, top_k=40, num_beams=4, max_new_tokens=128, **kwargs):
        # Uses the two-argument generate_prompt(instruction, input).
        prompt = generate_prompt(instruction, input)
        inputs = tokenizer(prompt, return_tensors='pt')
        input_ids = inputs['input_ids'].to(device)
        # One returned sequence per beam.
        generation_config = GenerationConfig(temperature=temperature, top_p=top_p, top_k=top_k, num_beams=num_beams, num_return_sequences=num_beams, **kwargs)
        with torch.no_grad():
            generation_output = model.generate(input_ids=input_ids, generation_config=generation_config, return_dict_in_generate=True, output_scores=True, max_new_tokens=max_new_tokens)
        s = []
        for i in range(num_beams):
            temp = generation_output.sequences[i]
            s.append(tokenizer.decode(temp, skip_special_tokens=True))
        output = ''
        for cur in s:
            # Keep only the text after the response marker of each beam.
            output += (cur.split('### Response:')[1].strip() + '\n')
        return output
    gr.Interface(fn=evaluate, inputs=[gr.components.Textbox(lines=2, label='Instruction', placeholder='Tell me about alpacas.'), gr.components.Textbox(lines=2, label='Input', placeholder='none'), gr.components.Slider(minimum=0, maximum=1, value=0.1, label='Temperature'), gr.components.Slider(minimum=0, maximum=1, value=0.75, label='Top p'), gr.components.Slider(minimum=0, maximum=100, step=1, value=40, label='Top k'), gr.components.Slider(minimum=1, maximum=4, step=1, value=4, label='Beams'), gr.components.Slider(minimum=1, maximum=2000, step=1, value=128, label='Max tokens')], outputs=[gr.inputs.Textbox(lines=10, label='Output')], title='Wiselab-LoRA', description='Wiselab-LoRA is a 7B-parameter LLaMA-LoRA based model.').launch(share=share_gradio)
    # The string below is commented-out README test code kept as a no-op literal.
    '\n # testing code for readme\n for instruction in [\n "Tell me about alpacas.",\n "Tell me about the president of Mexico in 2019.",\n "Tell me about the king of France in 2019.",\n "List all Canadian provinces in alphabetical order.",\n "Write a Python program that prints the first 10 Fibonacci numbers.",\n "Write a program that prints the numbers from 1 to 100. But for multiples of three print \'Fizz\' instead of the number and for the multiples of five print \'Buzz\'. For numbers which are multiples of both three and five print \'FizzBuzz\'.", # noqa: E501\n "Tell me five words that rhyme with \'shock\'.",\n "Translate the sentence \'I have no mouth but I must scream\' into Spanish.",\n "Count up from 1 to 500.",\n ]:\n print("Instruction:", instruction)\n print("Response:", evaluate(instruction))\n print()\n '
|
def generate_prompt(instruction, input=None):
    """Build an Alpaca-style inference prompt (duplicate of the earlier
    two-argument definition); the response section is left empty for the
    model to complete."""
    if input:
        return f'''Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. # noqa: E501
### Instruction:
{instruction}
### Input:
{input}
### Response:
'''
    else:
        return f'''Below is an instruction that describes a task. Write a response that appropriately completes the request. # noqa: E501
### Instruction:
{instruction}
### Response:
'''
|
def train(base_model: str='', data_path: str='yahma/alpaca-cleaned', output_dir: str='/common/users/jj635/llama/mycheckpoint/', batch_size: int=128, micro_batch_size: int=4, num_epochs: int=3, learning_rate: float=0.0003, cutoff_len: int=256, val_set_size: int=0, lora_r: int=8, lora_alpha: int=16, lora_dropout: float=0.05, lora_target_modules: List[str]=['q_proj', 'v_proj'], train_on_inputs: bool=True, group_by_length: bool=False, wandb_project: str='', wandb_run_name: str='', wandb_watch: str='', wandb_log_model: str='', resume_from_checkpoint: str=None):
print(f'''Training Alpaca-LoRA model with params:
base_model: {base_model}
data_path: {data_path}
output_dir: {output_dir}
batch_size: {batch_size}
micro_batch_size: {micro_batch_size}
num_epochs: {num_epochs}
learning_rate: {learning_rate}
cutoff_len: {cutoff_len}
val_set_size: {val_set_size}
lora_r: {lora_r}
lora_alpha: {lora_alpha}
lora_dropout: {lora_dropout}
lora_target_modules: {lora_target_modules}
train_on_inputs: {train_on_inputs}
group_by_length: {group_by_length}
wandb_project: {wandb_project}
wandb_run_name: {wandb_run_name}
wandb_watch: {wandb_watch}
wandb_log_model: {wandb_log_model}
resume_from_checkpoint: {resume_from_checkpoint}
''')
assert base_model, "Please specify a --base_model, e.g. --base_model='decapoda-research/llama-7b-hf'"
gradient_accumulation_steps = (batch_size // micro_batch_size)
device_map = 'auto'
world_size = int(os.environ.get('WORLD_SIZE', 1))
ddp = (world_size != 1)
if ddp:
device_map = {'': int((os.environ.get('LOCAL_RANK') or 0))}
gradient_accumulation_steps = (gradient_accumulation_steps // world_size)
use_wandb = ((len(wandb_project) > 0) or (('WANDB_PROJECT' in os.environ) and (len(os.environ['WANDB_PROJECT']) > 0)))
if (len(wandb_project) > 0):
os.environ['WANDB_PROJECT'] = wandb_project
if (len(wandb_watch) > 0):
os.environ['WANDB_WATCH'] = wandb_watch
if (len(wandb_log_model) > 0):
os.environ['WANDB_LOG_MODEL'] = wandb_log_model
model = LlamaForCausalLM.from_pretrained(base_model, load_in_8bit=True, torch_dtype=torch.float16, device_map=device_map)
tokenizer = LlamaTokenizer.from_pretrained(base_model)
tokenizer.pad_token_id = 0
tokenizer.padding_side = 'left'
def tokenize(prompt, add_eos_token=True):
result = tokenizer(prompt, truncation=True, max_length=cutoff_len, padding=False, return_tensors=None)
if ((result['input_ids'][(- 1)] != tokenizer.eos_token_id) and (len(result['input_ids']) < cutoff_len) and add_eos_token):
result['input_ids'].append(tokenizer.eos_token_id)
result['attention_mask'].append(1)
result['labels'] = result['input_ids'].copy()
return result
def generate_and_tokenize_prompt(data_point):
full_prompt = generate_prompt(data_point)
tokenized_full_prompt = tokenize(full_prompt)
if (not train_on_inputs):
user_prompt = generate_prompt({**data_point, 'output': ''})
tokenized_user_prompt = tokenize(user_prompt, add_eos_token=False)
user_prompt_len = len(tokenized_user_prompt['input_ids'])
tokenized_full_prompt['labels'] = (([(- 100)] * user_prompt_len) + tokenized_full_prompt['labels'][user_prompt_len:])
return tokenized_full_prompt
model = prepare_model_for_int8_training(model)
config = LoraConfig(r=lora_r, lora_alpha=lora_alpha, target_modules=lora_target_modules, lora_dropout=lora_dropout, bias='none', task_type='CAUSAL_LM')
model = get_peft_model(model, config)
if (data_path.endswith('.json') or data_path.endswith('.jsonl')):
data = load_dataset('json', data_files=data_path)
else:
data = load_dataset(data_path)
if resume_from_checkpoint:
checkpoint_name = os.path.join(resume_from_checkpoint, 'pytorch_model.bin')
if (not os.path.exists(checkpoint_name)):
checkpoint_name = os.path.join(resume_from_checkpoint, 'adapter_model.bin')
resume_from_checkpoint = False
if os.path.exists(checkpoint_name):
print(f'Restarting from {checkpoint_name}')
adapters_weights = torch.load(checkpoint_name)
model = set_peft_model_state_dict(model, adapters_weights)
else:
print(f'Checkpoint {checkpoint_name} not found')
model.print_trainable_parameters()
if (val_set_size > 0):
train_val = data['train'].train_test_split(test_size=val_set_size, shuffle=True, seed=42)
train_data = train_val['train'].shuffle().map(generate_and_tokenize_prompt)
val_data = train_val['test'].shuffle().map(generate_and_tokenize_prompt)
else:
train_data = data['train'].shuffle().map(generate_and_tokenize_prompt)
val_data = None
if ((not ddp) and (torch.cuda.device_count() > 1)):
model.is_parallelizable = True
model.model_parallel = True
trainer = transformers.Trainer(model=model, train_dataset=train_data, eval_dataset=val_data, args=transformers.TrainingArguments(per_device_train_batch_size=micro_batch_size, gradient_accumulation_steps=gradient_accumulation_steps, warmup_steps=100, num_train_epochs=num_epochs, learning_rate=learning_rate, fp16=True, logging_steps=10, optim='adamw_torch', evaluation_strategy=('steps' if (val_set_size > 0) else 'no'), save_strategy='steps', eval_steps=(200 if (val_set_size > 0) else None), save_steps=200, output_dir=output_dir, save_total_limit=3, load_best_model_at_end=(True if (val_set_size > 0) else False), ddp_find_unused_parameters=(False if ddp else None), group_by_length=group_by_length, report_to=('wandb' if use_wandb else None), run_name=(wandb_run_name if use_wandb else None)), data_collator=transformers.DataCollatorForSeq2Seq(tokenizer, pad_to_multiple_of=8, return_tensors='pt', padding=True))
model.config.use_cache = False
old_state_dict = model.state_dict
model.state_dict = (lambda self, *_, **__: get_peft_model_state_dict(self, old_state_dict())).__get__(model, type(model))
if ((torch.__version__ >= '2') and (sys.platform != 'win32')):
model = torch.compile(model)
trainer.train(resume_from_checkpoint=resume_from_checkpoint)
model.save_pretrained(output_dir)
print("\n If there's a warning about missing keys above, please disregard :)")
|
def generate_prompt(data_point):
    """Build the Alpaca-style training prompt for one example.

    Args:
        data_point (dict): Must contain 'instruction' and 'output'; 'input'
            may be empty/falsy, which selects the shorter no-context template.

    Returns:
        str: The full prompt text, ending with the expected response.
    """
    if data_point['input']:
        # Context-bearing template. The original branch was missing the
        # preamble and the '### Instruction:' header (it had a lowercase
        # '### input:' only) and leaked a '# noqa: E501' comment into the
        # prompt text; both are fixed here to match the standard template.
        return f'''Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{data_point['instruction']}

### Input:
{data_point['input']}

### Response:
{data_point['output']}'''
    else:
        return f'''Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:
{data_point['instruction']}

### Response:
{data_point['output']}'''
|
class CelebA(data.Dataset):
    """Dataset class for the CelebA dataset."""

    def __init__(self, image_dir, attr_path, selected_attrs, transform, mode):
        """Initialize and preprocess the CelebA dataset.

        Args:
            image_dir (str): Directory containing the CelebA images.
            attr_path (str): Path to the attribute annotation file.
            selected_attrs (list[str]): Attribute names used as binary labels.
            transform: Callable applied to each loaded PIL image.
            mode (str): Stored as-is; NOTE(review): this implementation does
                not actually split train/test by mode — confirm intended.
        """
        self.image_dir = image_dir
        self.attr_path = attr_path
        self.selected_attrs = selected_attrs
        self.transform = transform
        self.mode = mode
        self.dataset = []
        self.attr2idx = {}
        self.idx2attr = {}
        self.preprocess()
        self.num_images = len(self.dataset)

    def preprocess(self):
        """Preprocess the CelebA attribute file."""
        # Use a context manager so the annotation file handle is closed
        # promptly (the original left it for the GC to reclaim).
        with open(self.attr_path, 'r') as f:
            lines = [line.rstrip() for line in f]
        # Line 0 is the image count; line 1 lists all attribute names.
        all_attr_names = lines[1].split()
        for (i, attr_name) in enumerate(all_attr_names):
            self.attr2idx[attr_name] = i
            self.idx2attr[i] = attr_name
        lines = lines[2:]
        # Fixed seed so the shuffled ordering is reproducible across runs.
        random.seed(1234)
        random.shuffle(lines)
        for (i, line) in enumerate(lines):
            split = line.split()
            filename = split[0]
            values = split[1:]
            label = []
            for attr_name in self.selected_attrs:
                idx = self.attr2idx[attr_name]
                # Annotation uses '1' / '-1'; store as booleans.
                label.append((values[idx] == '1'))
            self.dataset.append([filename, label])
        print('Finished preprocessing the CelebA dataset...')

    def __getitem__(self, index):
        """Return one image and its corresponding attribute label."""
        dataset = self.dataset
        (filename, label) = dataset[index]
        image = Image.open(os.path.join(self.image_dir, filename))
        return (self.transform(image), torch.FloatTensor(label))

    def __len__(self):
        """Return the number of images."""
        return self.num_images
|
def get_loader(batch_size, image_dir, attr_path, selected_attrs, crop_size=178, image_size=128, dataset='CelebA', mode='train', num_workers=16):
    """Build and return a data loader over the CelebA dataset.

    Horizontal flipping is applied only in 'train' mode; all modes get
    center-crop, resize, tensor conversion and [-1, 1] normalization.
    """
    ops = []
    if (mode == 'train'):
        ops.append(T.RandomHorizontalFlip())
    ops.extend([
        T.CenterCrop(crop_size),
        T.Resize(image_size),
        T.ToTensor(),
        T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    pipeline = T.Compose(ops)
    celeba = CelebA(image_dir, attr_path, selected_attrs, pipeline, mode)
    # Shuffle only during training so evaluation order is stable.
    return data.DataLoader(dataset=celeba, batch_size=batch_size, shuffle=(mode == 'train'), num_workers=num_workers)
|
class EncoderBlock(nn.Module):
    """Downsampling block: Conv2d(5x5, stride 2) -> BatchNorm -> ReLU."""

    def __init__(self, channel_in, channel_out):
        super(EncoderBlock, self).__init__()
        self.conv = nn.Conv2d(in_channels=channel_in, out_channels=channel_out,
                              kernel_size=5, padding=2, stride=2, bias=False)
        self.bn = nn.BatchNorm2d(num_features=channel_out, momentum=0.9)

    def forward(self, ten, out=False, t=False):
        conv_out = self.conv(ten)
        if out:
            # Also expose the pre-normalization activations; ReLU must not be
            # in-place here because conv_out is returned untouched.
            activated = F.relu(self.bn(conv_out), False)
            return (activated, conv_out)
        # In-place ReLU is safe on the intermediate tensor.
        return F.relu(self.bn(conv_out), True)
|
class DecoderBlock(nn.Module):
    """Upsampling block: ConvTranspose2d(5x5, stride 2) -> BatchNorm -> ReLU."""

    def __init__(self, channel_in, channel_out):
        super(DecoderBlock, self).__init__()
        self.conv = nn.ConvTranspose2d(channel_in, channel_out, kernel_size=5,
                                       padding=2, stride=2, output_padding=1,
                                       bias=False)
        self.bn = nn.BatchNorm2d(channel_out, momentum=0.9)

    def forward(self, ten):
        # conv -> bn -> in-place relu, doubling the spatial resolution.
        return F.relu(self.bn(self.conv(ten)), True)
|
class Encoder(nn.Module):
    """VAE encoder: three stride-2 conv blocks, an FC trunk, and two linear
    heads producing the Gaussian parameters (mu, logvar) of the latent code."""

    def __init__(self, channel_in=3, z_size=128):
        super(Encoder, self).__init__()
        self.size = channel_in
        blocks = []
        # First block maps the input channels to 64; each later block doubles
        # the channel count (64 -> 128 -> 256).
        for step in range(3):
            out_ch = 64 if step == 0 else (self.size * 2)
            blocks.append(EncoderBlock(channel_in=self.size, channel_out=out_ch))
            self.size = out_ch
        self.conv = nn.Sequential(*blocks)
        # After three stride-2 blocks the feature map is flattened from an
        # assumed 8x8 spatial size.
        self.fc = nn.Sequential(
            nn.Linear(in_features=((8 * 8) * self.size), out_features=1024, bias=False),
            nn.BatchNorm1d(num_features=1024, momentum=0.9),
            nn.ReLU(True))
        self.l_mu = nn.Linear(in_features=1024, out_features=z_size)
        self.l_var = nn.Linear(in_features=1024, out_features=z_size)

    def forward(self, ten):
        feats = self.conv(ten)
        flat = feats.view(len(feats), -1)
        hidden = self.fc(flat)
        return (self.l_mu(hidden), self.l_var(hidden))

    def __call__(self, *args, **kwargs):
        return super(Encoder, self).__call__(*args, **kwargs)
|
class Decoder(nn.Module):
    """VAE decoder: FC projection to an 8x8 feature map followed by three
    upsampling blocks and a Tanh-bounded RGB output layer."""

    def __init__(self, z_size, size):
        super(Decoder, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(in_features=z_size, out_features=((8 * 8) * size), bias=False),
            nn.BatchNorm1d(num_features=((8 * 8) * size), momentum=0.9),
            nn.ReLU(True))
        self.size = size
        # Channel schedule: size -> size -> size/2 -> size/8.
        stages = [
            DecoderBlock(channel_in=self.size, channel_out=self.size),
            DecoderBlock(channel_in=self.size, channel_out=(self.size // 2)),
        ]
        self.size = self.size // 2
        stages.append(DecoderBlock(channel_in=self.size, channel_out=(self.size // 4)))
        self.size = self.size // 4
        # Final 5x5 conv maps to 3 channels bounded to [-1, 1].
        stages.append(nn.Sequential(
            nn.Conv2d(in_channels=self.size, out_channels=3, kernel_size=5, stride=1, padding=2),
            nn.Tanh()))
        self.conv = nn.Sequential(*stages)

    def forward(self, ten):
        ten = self.fc(ten)
        # Reshape the flat FC output into an (N, C, 8, 8) feature map.
        ten = ten.view(len(ten), -1, 8, 8)
        return self.conv(ten)

    def __call__(self, *args, **kwargs):
        return super(Decoder, self).__call__(*args, **kwargs)
|
class Discriminator(nn.Module):
    """VAE-GAN discriminator.

    In 'REC' mode, forward returns the flattened intermediate activations at
    layer ``recon_level`` (used for the feature-space reconstruction loss);
    in any other mode ('GAN') it returns per-sample real/fake probabilities.
    """

    def __init__(self, channel_in=3, recon_level=3):
        super(Discriminator, self).__init__()
        self.size = channel_in
        # NOTE: attribute keeps the original (misspelled) name so external
        # code reading `recon_levl` keeps working.
        self.recon_levl = recon_level
        self.conv = nn.ModuleList()
        self.conv.append(nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, stride=1, padding=2),
            nn.ReLU(inplace=True)))
        self.size = 32
        self.conv.append(EncoderBlock(channel_in=self.size, channel_out=128))
        self.size = 128
        self.conv.append(EncoderBlock(channel_in=self.size, channel_out=256))
        self.size = 256
        self.conv.append(EncoderBlock(channel_in=self.size, channel_out=256))
        self.fc = nn.Sequential(
            nn.Linear(in_features=((8 * 8) * self.size), out_features=512, bias=False),
            nn.BatchNorm1d(num_features=512, momentum=0.9),
            nn.ReLU(inplace=True),
            nn.Linear(in_features=512, out_features=1))

    def forward(self, ten_orig, ten_predicted, ten_sampled, mode='REC'):
        """Score a batch made of original, reconstructed and sampled images.

        The three batches are concatenated so they share batch-norm
        statistics (both branches did this identically; hoisted here).
        """
        ten = torch.cat((ten_orig, ten_predicted, ten_sampled), 0)
        if (mode == 'REC'):
            for (i, lay) in enumerate(self.conv):
                if (i == self.recon_levl):
                    (ten, layer_ten) = lay(ten, True)
                    # Return the flattened pre-activation features.
                    layer_ten = layer_ten.view(len(layer_ten), (- 1))
                    return layer_ten
                else:
                    ten = lay(ten)
        else:
            for (i, lay) in enumerate(self.conv):
                ten = lay(ten)
            ten = ten.view(len(ten), (- 1))
            ten = self.fc(ten)
            # torch.sigmoid replaces the deprecated F.sigmoid.
            return torch.sigmoid(ten)

    def __call__(self, *args, **kwargs):
        return super(Discriminator, self).__call__(*args, **kwargs)
|
class VaeGan(nn.Module):
    """VAE/GAN: a VAE whose reconstruction quality is measured in the feature
    space of an adversarially trained discriminator."""

    def __init__(self, z_size=128, recon_level=3):
        super(VaeGan, self).__init__()
        self.z_size = z_size
        self.encoder = Encoder(z_size=self.z_size)
        self.decoder = Decoder(z_size=self.z_size, size=self.encoder.size)
        self.discriminator = Discriminator(channel_in=3, recon_level=recon_level)
        self.init_parameters()

    def init_parameters(self):
        """Re-initialize conv/linear weights with a fan-in scaled uniform
        distribution and zero the biases."""
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):
                if (hasattr(m, 'weight') and (m.weight is not None) and m.weight.requires_grad):
                    # fan-in based scale, shrunk by sqrt(3)
                    scale = (1.0 / numpy.sqrt(numpy.prod(m.weight.shape[1:])))
                    scale /= numpy.sqrt(3)
                    # In-place initializers replace the deprecated
                    # nn.init.uniform / nn.init.constant aliases.
                    nn.init.uniform_(m.weight, (- scale), scale)
                if (hasattr(m, 'bias') and (m.bias is not None) and m.bias.requires_grad):
                    nn.init.constant_(m.bias, 0.0)

    def reparameterize(self, mu, logvar):
        """Sample z ~ N(mu, exp(logvar)) via the reparameterization trick."""
        logvar = logvar.mul(0.5).exp_()
        eps = Variable(logvar.data.new(logvar.size()).normal_())
        return eps.mul(logvar).add_(mu)

    def forward(self, x, gen_size=10):
        """Training: return reconstruction plus discriminator outputs.
        Eval with x=None: decode gen_size prior samples; otherwise
        reconstruct x."""
        if self.training:
            (mus, log_variances) = self.encoder(x)
            z = self.reparameterize(mus, log_variances)
            x_tilde = self.decoder(z)
            # Also decode a fresh prior sample for the GAN terms.
            z_p = Variable(torch.randn(len(x), self.z_size).cuda(), requires_grad=True)
            x_p = self.decoder(z_p)
            disc_layer = self.discriminator(x, x_tilde, x_p, 'REC')
            disc_class = self.discriminator(x, x_tilde, x_p, 'GAN')
            return (x_tilde, disc_class, disc_layer, mus, log_variances)
        elif (x is None):
            z_p = Variable(torch.randn(gen_size, self.z_size).cuda(), requires_grad=False)
            x_p = self.decoder(z_p)
            return x_p
        else:
            (mus, log_variances) = self.encoder(x)
            z = self.reparameterize(mus, log_variances)
            x_tilde = self.decoder(z)
            return x_tilde

    def __call__(self, *args, **kwargs):
        return super(VaeGan, self).__call__(*args, **kwargs)

    @staticmethod
    def loss(x, x_tilde, disc_layer_original, disc_layer_predicted, disc_layer_sampled, disc_class_original, disc_class_predicted, disc_class_sampled, mus, variances):
        """Return the per-element VAE/GAN loss terms.

        :return: (pixel MSE, KL divergence, feature-space MSE, and three
            discriminator BCE terms; the 0.001 offsets guard against log(0))
        """
        nle = (0.5 * ((x.view(len(x), (- 1)) - x_tilde.view(len(x_tilde), (- 1))) ** 2))
        kl = ((- 0.5) * torch.sum(((((- variances.exp()) - torch.pow(mus, 2)) + variances) + 1), 1))
        mse = torch.sum((0.5 * ((disc_layer_original - disc_layer_predicted) ** 2)), 1)
        bce_dis_original = (- torch.log((disc_class_original + 0.001)))
        bce_dis_predicted = (- torch.log(((1 - disc_class_predicted) + 0.001)))
        bce_dis_sampled = (- torch.log(((1 - disc_class_sampled) + 0.001)))
        return (nle, kl, mse, bce_dis_original, bce_dis_predicted, bce_dis_sampled)
|
def adult_benchmark():
    """Load the Adult dataset and build its benchmark workload of 3-way
    marginal queries: Identity on categorical attributes, Prefix queries on
    the numeric ones."""
    data = Dataset.load('../data/adult.csv', '../data/adult-domain.json')
    projections = [('occupation', 'race', 'capital-loss'), ('occupation', 'sex', 'native-country'), ('marital-status', 'relationship', 'income>50K'), ('age', 'education-num', 'sex'), ('workclass', 'education-num', 'occupation'), ('marital-status', 'occupation', 'income>50K'), ('race', 'native-country', 'income>50K'), ('occupation', 'capital-gain', 'income>50K'), ('marital-status', 'hours-per-week', 'income>50K'), ('workclass', 'race', 'capital-gain'), ('marital-status', 'relationship', 'capital-gain'), ('workclass', 'education-num', 'capital-gain'), ('education-num', 'relationship', 'race'), ('fnlwgt', 'hours-per-week', 'income>50K'), ('workclass', 'sex', 'native-country')]
    # Default every attribute to an Identity workload ...
    lookup = {attr: workload.Identity(data.domain.size(attr)) for attr in data.domain}
    # ... then override the numeric attributes with Prefix queries.
    prefix_sizes = {'age': 85, 'fnlwgt': 100, 'capital-gain': 100,
                    'capital-loss': 100, 'hours-per-week': 99}
    for attr, size in prefix_sizes.items():
        lookup[attr] = workload.Prefix(size)
    workloads = [(proj, workload.Kronecker([lookup[a] for a in proj]))
                 for proj in projections]
    return (data, workloads)
|
def default_params():
    """Return default parameters to run this program.

    :returns: a dictionary of default parameter settings for each command
        line argument
    """
    return {
        'dataset': 'adult',
        'engines': ['MD', 'RDA'],
        'iters': 10000,
        'epsilon': 1.0,
        'delta': 0.0,
        'bounded': True,
        'frequency': 1,
        'seed': 0,
        'save': None,
        'load': None,
        'plot': None,
    }
|
class Negated(matrix.EkteloMatrix):
    """Implicit complement of a 0/1 query matrix: represents (1 - Q) without
    materializing the dense complement."""

    def __init__(self, Q):
        # Q: the EkteloMatrix being complemented; shape is preserved.
        self.Q = Q
        self.shape = Q.shape
        self.dtype = np.float64

    @property
    def matrix(self):
        # Dense realization: elementwise complement of Q.
        return (1 - self.Q.dense_matrix())

    def _matvec(self, x):
        # (1 - Q) @ x == sum(x) - Q @ x, computed without forming 1 - Q.
        return (x.sum() - self.Q.dot(x))

    def _transpose(self):
        # (1 - Q)^T == 1 - Q^T
        return Negated(self.Q.T)
|
def max_sum_ve(factors, domain=None, elim=None):
    """Run max-sum variable elimination on ``factors`` and return the most
    likely single assignment.

    :param factors: list of Factor objects defining the objective
    :param domain: merged Domain of all factors (inferred when None)
    :param elim: elimination order over attributes (greedy order when None)
    :return: a one-row Dataset holding the argmax assignment
    """
    if (domain is None):
        domain = reduce(Domain.merge, [F.domain for F in factors])
    if (elim is None):
        cliques = [F.domain.attrs for F in factors]
        elim = graphical_model.greedy_order(domain, cliques, domain.attrs)
    k = len(factors)
    # phi: bucket of active factors keyed by integer id; factors produced
    # during elimination get fresh ids k, k+1, ...
    phi = dict(zip(range(k), factors))
    psi = {}
    for z in elim:
        # Remove every active factor whose scope contains z. (Safe: the
        # membership test runs before phi.pop, so only matches are removed.)
        phi2 = [phi.pop(i) for i in list(phi.keys()) if (z in phi[i].domain)]
        # Combine them and max-out z; keep psi[z] for the backward pass.
        psi[z] = sum(phi2, Factor.ones(domain.project(z)))
        phi[k] = psi[z].max([z])
        k += 1
    value = phi[(k - 1)]  # NOTE: final scalar factor; currently unused
    # Backward pass: decode the argmax one attribute at a time, conditioning
    # each psi[z] on the assignments already fixed.
    x = {}
    for z in reversed(elim):
        x[z] = psi[z].condition(x).values.argmax()
    df = pd.DataFrame(x, index=[0])
    return Dataset(df, domain)
|
def answer_workload(workload, data):
    """Evaluate every workload query against the data.

    :param workload: list of (clique, matrix) pairs
    :param data: object exposing project(clique).datavector()
    :return: all query answers stacked into one flat numpy vector, in
        workload order
    """
    pieces = []
    for (cl, W) in workload:
        marginal = data.project(cl).datavector()
        pieces.append(W.dot(marginal))
    return np.concatenate(pieces)
|
def DualQuery(data, workload, eps=1.0, delta=0.001, seed=0):
    """Run the DualQuery mechanism: build synthetic data by repeatedly
    sampling hard queries from a multiplicative-weights distribution and
    appending the record that best answers them.

    :param data: the sensitive Dataset
    :param workload: list of (clique, matrix) query pairs
    :param eps: target privacy budget
    :param delta: privacy parameter
    :param seed: seed for the sampling PRNG
    :return: (synthetic Dataset, cache of (sampled indices, answers) per round)
    """
    prng = np.random.RandomState(seed)
    total = data.df.shape[0]
    domain = data.domain
    answers = (answer_workload(workload, data) / total)
    nu = 2.0  # multiplicative-weights step size
    s = 50  # queries sampled per round
    # Grow T until the privacy-cost formula would exceed eps, then step back.
    T = 2
    while (((((2 * nu) * (T - 1)) / total) * np.sqrt((((((2 * s) * (T - 1)) * np.log((1.0 / delta))) + ((s * (T - 1)) * np.exp((((2 * nu) * (T - 1)) / total)))) - 1))) < eps):
        T = (T + 1)
    T = (T - 1)
    Qsize = sum((W.shape[0] for (_, W) in workload))
    Q = (np.ones(Qsize) / Qsize)  # distribution over individual query rows
    cache = []
    # Flat index -> (clique, matrix, row index) for every workload query row.
    lookup = [(cl, W, j) for (cl, W) in workload for j in range(W.shape[0])]
    results = []
    for t in range(T):
        idx = prng.choice(Qsize, s, True, Q)
        queries = []
        # Inner variable renamed from `i`, which shadowed the outer loop
        # counter in the original; unused local Xsize also removed.
        for qi in idx:
            (cl, W, e) = lookup[qi]
            dom = domain.project(cl)
            n = W.shape[0]
            z = np.zeros(n)
            z[e] = 1.0
            q = W.T.dot(z)  # row e of W as a vector over the clique domain
            queries.append(Factor(dom, (- q)))
        # The single record maximizing the sampled (negated) queries.
        best = max_sum_ve(queries, data.domain)
        curr = answer_workload(workload, best)
        # Reweight toward queries the synthetic record answers badly.
        Q *= np.exp(((- nu) * (answers - curr)))
        Q /= Q.sum()
        cache.append((idx, curr))
        results.append(best.df)
    synthetic = Dataset(pd.concat(results), data.domain)
    print('Iterations', T)
    print('Privacy level', ((((nu * T) * (T - 1)) * s) / total))
    delta = 0.001
    eps = ((((2 * nu) * (T - 1)) / total) * np.sqrt((((((2 * s) * (T - 1)) * np.log((1.0 / delta))) + ((s * (T - 1)) * np.exp((((2 * nu) * (T - 1)) / total)))) - 1)))
    print('Approx privacy level', eps, delta)
    return (synthetic, cache)
|
def log_likelihood(answers, cache):
    """Log-likelihood of a cached DualQuery sampling trace given workload
    answers.

    Replays the multiplicative-weights updates over the query distribution
    in log space (for numerical stability) and accumulates the log
    probability of the indices actually sampled at each round.

    :param answers: flat vector of normalized workload answers; its length
        equals the total number of workload query rows by construction
    :param cache: list of (sampled index array, answer vector) per round
    :return: negative log-likelihood (scalar)
    """
    nu = 2.0
    # The number of queries equals the length of the stacked answer vector.
    # (The original computed this from a global `workload`, which is
    # undefined in this scope and raised NameError.)
    Qsize = len(answers)
    logQ = (np.zeros(Qsize) - np.log(Qsize))  # log of the uniform start
    ans = 0
    for (idx, a) in cache:
        probas = logQ[idx]
        # Multiplicative-weights update, then renormalize in log space.
        logQ = (logQ - (nu * (answers - a)))
        logQ = (logQ - logsumexp(logQ))
        ans += np.sum(probas)
    return (- ans)
|
def marginal_loss(marginals, workload, cache):
    """Compute the DualQuery log-likelihood loss and its gradient with
    respect to a set of clique marginals.

    :param marginals: dict mapping cliques to Factor marginals
    :param workload: list of (projection, matrix) workload queries
    :param cache: DualQuery sampling trace (see log_likelihood)
    :return: (loss, graphical_model.CliqueVector of per-clique gradients)

    NOTE(review): relies on a module-level ``grad`` (presumably autograd's)
    being in scope, and assumes every workload projection is contained in
    some clique of ``marginals`` — otherwise ``x`` is unbound below.
    """
    answers = []
    for (proj, W) in workload:
        for cl in marginals:
            if (set(proj) <= set(cl)):
                # First clique containing proj answers this workload query.
                mu = marginals[cl].project(proj)
                x = mu.values.flatten()
                answers.append(W.dot(x))
                break
    # total is taken from the *last* projected marginal; all marginals are
    # assumed to share the same total mass.
    total = x.sum()
    answers = (np.concatenate(answers) / total)
    gradient = grad(log_likelihood, argnum=0)
    loss = log_likelihood(answers, cache)
    danswers = gradient(answers, cache)
    # Scatter the gradient of each answer block back onto the clique
    # marginal that produced it, mirroring the loop above exactly.
    i = 0
    gradients = {cl: Factor.zeros(marginals[cl].domain) for cl in marginals}
    for (proj, W) in workload:
        for cl in marginals:
            if (set(proj) <= set(cl)):
                m = W.shape[0]
                dmu = (W.T.dot(danswers[i:(i + m)]) / total)
                dom = gradients[cl].domain.project(proj)
                gradients[cl] += Factor(dom, dmu)
                i += m
                break
    print(loss)
    return (loss, graphical_model.CliqueVector(gradients))
|
def default_params():
    """Return default parameters to run this program.

    :returns: a dictionary of default parameter settings for each command
        line argument
    """
    return {'dataset': 'adult', 'iters': 10000, 'epsilon': 1.0, 'seed': 0}
|
def get_measurements(domain, workload):
    """Construct a sparse measurement matrix per workload projection:
    Identity on every attribute, except precomputed prefix matrices (loaded
    from .npy files) for the numeric attributes."""
    lookup = {}
    for attr in domain:
        lookup[attr] = Identity(domain.size(attr))
    # Numeric attributes use saved prefix-query matrices instead.
    prefix_files = {
        'age': 'prefix-85.npy',
        'fnlwgt': 'prefix-100.npy',
        'capital-gain': 'prefix-100.npy',
        'capital-loss': 'prefix-100.npy',
        'hours-per-week': 'prefix-99.npy',
    }
    for attr, fname in prefix_files.items():
        lookup[attr] = EkteloMatrix(np.load(fname))
    measurements = []
    for (proj, _) in workload:
        kron = Kronecker([lookup[a] for a in proj])
        measurements.append((proj, kron.sparse_matrix()))
    return measurements
|
def default_params():
    """Return default parameters to run this program.

    :returns: a dictionary of default parameter settings for each command
        line argument
    """
    return {'dataset': 'adult', 'iters': 10000, 'epsilon': 1.0, 'seed': 0}
|
class ProductDist():
    """Factored representation of data from the MWEM paper: the joint
    distribution is a product of independent factors over disjoint
    attribute subsets."""

    def __init__(self, factors, domain, total):
        """
        :param factors: a list of contingency tables, defined over disjoint
            subsets of attributes
        :param domain: the domain object
        :param total: known or estimated total
        """
        # Copy so that padding with uniform factors below does not mutate
        # the caller's list (the original appended to the argument in place).
        factors = list(factors)
        self.factors = factors
        self.domain = domain
        self.total = total
        for a in domain:
            if (not any(((a in f.domain) for f in factors))):
                # Attributes not covered by any factor get an independent
                # uniform marginal so datavector() spans the full domain.
                sub = domain.project([a])
                x = (np.ones(domain[a]) / domain[a])
                factors.append(Factor(sub, x))

    def project(self, cols):
        """Marginalize the product distribution onto ``cols`` and return a
        new ProductDist over the projected domain."""
        domain = self.domain.project(cols)
        factors = []
        for factor in self.factors:
            pcol = [c for c in cols if (c in factor.domain)]
            if (pcol != []):
                factors.append(factor.project(pcol))
        return ProductDist(factors, domain, self.total)

    def datavector(self):
        """Return the dense joint counts vector: the product of all factors
        (broadcast over the full domain) scaled by the total."""
        ans = reduce((lambda x, y: (x * y)), self.factors, 1.0)
        ans = ans.transpose(self.domain.attrs)
        return (ans.values.flatten() * self.total)
|
class FactoredMultiplicativeWeights():
    """Multiplicative-weights inference engine that maintains one independent
    factor per connected component of the measurement projections."""

    def __init__(self, domain, iters=100):
        # domain: full data Domain; iters: MW sweeps per factor group
        self.domain = domain
        self.iters = iters

    def infer(self, measurements, total):
        """Fit a ProductDist to the measurements and return it.

        :param measurements: list of (Q, y, noise_scale, projection) tuples
        :param total: total record count used to scale the distribution
        """
        self.multWeightsFast(measurements, total)
        return self.model

    def multWeightsFast(self, measurements, total):
        """Run the multiplicative-weights update independently on each group
        of measurements whose projections overlap (see _cluster)."""
        domain = self.domain
        (groups, projections) = _cluster(measurements)
        factors = []
        for (group, proj) in zip(groups, projections):
            dom = self.domain.project(proj)
            fact = Factor.uniform(dom)
            for i in range(self.iters):
                # Accumulate the residual-driven update from every
                # measurement in this group before applying it once.
                update = Factor.zeros(dom)
                for (Q, y, noise_scale, p) in group:
                    dom2 = dom.project(p)
                    # Current estimate of the measured marginal, in counts.
                    hatx = (fact.project(p).values.flatten() * total)
                    error = (y - Q.dot(hatx))
                    update += Factor(dom2, Q.T.dot(error).reshape(dom2.shape))
                # Exponentiated-gradient step, then renormalize.
                fact *= np.exp((update / (2 * total)))
                fact /= fact.sum()
            factors.append(fact)
        self.model = ProductDist(factors, self.domain, total)
|
def _cluster(measurement_cache):
'\n Cluster the measurements into disjoint subsets by finding the connected \n components of the graph implied by the measurement projections\n '
k = len(measurement_cache)
G = sparse.dok_matrix((k, k))
for (i, (_, _, _, p)) in enumerate(measurement_cache):
for (j, (_, _, _, q)) in enumerate(measurement_cache):
if (len((set(p) & set(q))) >= 1):
G[(i, j)] = 1
(ncomps, labels) = sparse.csgraph.connected_components(G)
groups = [[] for _ in range(ncomps)]
projections = [set() for _ in range(ncomps)]
for (i, group) in enumerate(labels):
groups[group].append(measurement_cache[i])
projections[group] |= set(measurement_cache[i][3])
projections = [tuple(p) for p in projections]
return (groups, projections)
|
def average_error(workload, data, est):
    """Mean relative L1 error of ``est`` against ``data`` over the workload.

    For each (projection, W) pair the error is
    ||W(x - xhat)||_1 / ||W x||_1, where x and xhat are the true and
    estimated marginal vectors for that projection.

    :return: the mean relative error across workload entries
    """
    errors = []
    for (ax, W) in workload:
        x = data.project(ax).datavector()
        xest = est.project(ax).datavector()
        # Compute the answer difference once (the original computed
        # W.dot(x - xest) twice and discarded the first result).
        diff = W.dot((x - xest))
        err = (np.linalg.norm(diff, 1) / np.linalg.norm(W.dot(x), 1))
        errors.append(err)
    return np.mean(errors)
|
def worst_approximated(data, est, workload, eps, prng=None):
    """Privately select a poorly-approximated workload query via the
    exponential mechanism over per-cell absolute errors.

    :param data: the true Dataset
    :param est: current estimate (supports project(...).datavector())
    :param workload: list of (attributes, matrix) pairs
    :param eps: privacy budget for this selection
    :param prng: numpy random generator (module-level np.random when None)
    :return: (attributes, selected query row) from the original workload
    """
    if (prng is None):  # identity check; `== None` is unidiomatic
        prng = np.random
    errors = np.array([])
    for (ax, W) in workload:
        x = data.project(ax).datavector()
        xest = est.project(ax).datavector()
        # NOTE(review): the workload matrix is replaced with Identity when
        # scoring, so errors are per-marginal-cell — confirm intended, since
        # the mapping loop below still walks the *original* W row counts.
        W = matrix.Identity(x.size)
        errors = np.append(errors, np.abs(W.dot((x - xest))))
    merr = np.max(errors)
    # Exponential mechanism: probability proportional to exp(eps * err / 2),
    # shifted by the max error for numerical stability.
    prob = np.exp(((eps * (errors - merr)) / 2.0))
    key = prng.choice(len(errors), p=(prob / prob.sum()))
    print(key)
    # Map the flat sampled index back to (workload entry, row).
    for (ax, W) in workload:
        if (key < W.shape[0]):
            return (ax, W[key])
        key = (key - W.shape[0])
|
def mwem(workload, data, eps, engine, iters=10, prng=None, out=None):
    """Run MWEM: iteratively measure the worst-approximated query with the
    Laplace mechanism and re-fit the estimate with ``engine``.

    :param workload: list of (attributes, matrix) pairs
    :param data: the sensitive Dataset
    :param eps: total privacy budget (split evenly across iterations, half
        for selection and half for measurement)
    :param engine: inference engine exposing infer(measurements, total)
    :param iters: number of MWEM rounds
    :param prng: numpy random generator; defaults to the global np.random
    :param out: unused (kept for interface compatibility)
    :return: the final fitted estimate
    """
    eps1 = (eps / (2.0 * iters))
    total = data.df.shape[0]
    # Use the caller-supplied generator for the Laplace noise so seeded runs
    # are reproducible (the original always drew from the global np.random).
    rng = (prng if (prng is not None) else np.random)
    measurements = []
    est = engine.infer(measurements, total)
    for i in range(iters):
        (ax, Q) = worst_approximated(data, est, workload, eps1, prng)
        proj = data.project(ax)
        ans = Q.dot(proj.datavector())
        noise = rng.laplace(loc=0, scale=(2.0 / eps1), size=Q.shape[0])
        y = (ans + noise)
        measurements.append((Q, y, (np.sqrt(2) / eps1), ax))
        print(i, proj.domain)
        est = engine.infer(measurements, total)
    # Report model size: GraphicalModel exposes `potentials`, ProductDist
    # exposes `factors`. Narrowed from the original bare `except:`.
    try:
        size = est.potentials.size()
    except AttributeError:
        size = sum((f.domain.size() for f in est.factors))
    print(('%.3f MB' % ((8 * size) / (2 ** 20))))
    return est
|
def default_params():
    """Return default parameters to run this program.

    :returns: a dictionary of default parameter settings for each command
        line argument
    """
    return {
        'dataset': 'adult',
        'engine': 'PGM',
        'iters': 1000,
        'rounds': None,
        'epsilon': 1.0,
        'seed': 0,
    }
|
class Identity(sparse.linalg.LinearOperator):
    """Identity linear operator of order n: products, transpose and adjoint
    all return their input (or the operator itself) unchanged."""

    def __init__(self, n):
        self.shape = (n, n)
        self.dtype = np.float64

    def _matmat(self, X):
        # I @ X == X
        return X

    def __matmul__(self, X):
        return self._matmat(X)

    def _transpose(self):
        # I^T == I
        return self

    def _adjoint(self):
        # I* == I (real-valued)
        return self
|
def powerset(iterable):
    """Return the powerset (excluding the empty set) of ``iterable``.

    Args:
        iterable (iterable): Set that powerset will be taken over

    Returns:
        iterator: the powerset

    Example:
        >>> list(powerset(['a', 'b']))
        [('a',), ('b',), ('a', 'b')]
    """
    elems = list(iterable)
    # All subsets of size 1..n, in increasing size order.
    by_size = (itertools.combinations(elems, size) for size in range(1, (len(elems) + 1)))
    return itertools.chain.from_iterable(by_size)
|
def downward_closure(cliques):
    """Return the 'downward closure' of a list of cliques: the union of the
    (nonempty) powersets of each clique, ordered by subset size.

    Args:
        cliques ([iterable]): List of cliques

    Returns:
        list: the downward closure of the set of variables in cliques
    """
    closure = set()
    for clique in cliques:
        closure.update(powerset(clique))
    # Stable ordering only by length; order within a length is unspecified.
    return sorted(closure, key=len)
|
def get_permutation_matrix(cl1, cl2, domain):
    """Build the sparse permutation matrix P that maps the vector-of-counts
    representation of a marginal with attributes ordered as ``cl1`` to the
    same marginal ordered as ``cl2``, i.e. P @ vec_cl1 == vec_cl2.

    ``cl1`` and ``cl2`` must contain exactly the same attributes, possibly
    in a different order.

    :param cl1: source attribute ordering
    :param cl2: target attribute ordering
    :param domain: Domain object giving each attribute's cardinality
    :return: scipy.sparse.csr_matrix permutation matrix of order n
    """
    assert (set(cl1) == set(cl2))
    size = domain.size(cl1)
    # Tag each cell with its flat index under the cl1 ordering, then read
    # the tags back under the cl2 ordering to recover the permutation.
    tags = Factor(domain.project(cl1), np.arange(size))
    permuted = tags.transpose(cl2)
    rows = tags.datavector()
    cols = permuted.datavector()
    return sparse.csr_matrix((np.ones(size), (rows, cols)), shape=(size, size))
|
def get_aggregate(cl, matrices, domain):
    """Derive additional measurement rows for clique ``cl`` from its
    children: each child's matrix is lifted with a Kronecker product against
    the missing attribute's all-ones vector, then permuted into ``cl``'s
    attribute order.

    :param cl: target clique
    :param matrices: dict mapping cliques to measurement matrices
    :param domain: Domain object with attribute cardinalities
    :return: vertically stacked sparse matrix (possibly with 0 rows)
    """
    subcliques = [r for r in matrices if ((set(r) < set(cl)) and ((len(r) + 1) == len(cl)))]
    # Seed with an empty matrix so vstack works when there are no children.
    stacked = [sparse.csr_matrix((0, domain.size(cl)))]
    # Equal weight per child so the stacked rows keep unit total scale.
    weight = (1.0 / np.sqrt(len(subcliques))) if subcliques else 0.0
    for child in subcliques:
        extra = tuple((set(cl) - set(child)))
        reordered = (extra + child)
        lifted = sparse.kron(np.ones(domain.size(extra)), matrices[child])
        perm = get_permutation_matrix(cl, reordered, domain)
        stacked.append(weight * (lifted @ perm))
    return sparse.vstack(stacked)
|
def get_identity(cl, post_plausibility, domain):
    """Build a pruned identity measurement for clique ``cl``: a cell keeps a
    1 on the diagonal only if every child clique's plausibility factor marks
    it as possibly above threshold; all other rows/columns are zero.

    :param cl: target clique
    :param post_plausibility: dict mapping cliques to plausibility Factors
    :param domain: Domain object with attribute cardinalities
    :return: sparse (n, n) CSR matrix with 1s on plausible diagonal cells
    """
    subcliques = [r for r in post_plausibility if ((set(r) < set(cl)) and ((len(r) + 1) == len(cl)))]
    mask = Factor.ones(domain.project(cl))
    for child in subcliques:
        # A cell stays plausible only if all of its children deem it so.
        mask *= post_plausibility[child]
    keep = np.nonzero(mask.datavector())[0]
    n = domain.size(cl)
    return sparse.csr_matrix((np.ones_like(keep), (keep, keep)), (n, n))
|
def exponential_mechanism(q, eps, sensitivity, prng=np.random, monotonic=False):
    """Sample an index with probability proportional to
    exp(eps * q / (2 * sensitivity)); satisfies eps-DP.

    :param q: ndarray of scores, one per candidate
    :param eps: privacy parameter (np.inf is clamped to the float64 max)
    :param sensitivity: sensitivity of the score function
    :param prng: random generator used for sampling
    :param monotonic: if True, drop the factor of 1/2 in the exponent
    :return: the sampled candidate index
    """
    if (eps == np.inf):
        eps = np.finfo(np.float64).max
    half = 0.5 if not monotonic else 1.0
    # Shift by the max score before exponentiating for numerical stability.
    logits = (((half * eps) / sensitivity) * (q - q.max()))
    weights = np.exp((logits - logsumexp(logits)))
    return prng.choice(q.size, p=weights)
|
def select(data, model, rho, targets=[]):
    """Select additional measurement cliques with a Minimum-Spanning-Tree
    style method, using the exponential mechanism to privately pick edges.

    Edge weights are the L1 distances between data and model marginals over
    each attribute pair (plus any target columns).

    :param data: the sensitive Dataset
    :param model: DP graphical model fit from first-round measurements
    :param rho: remaining privacy budget (zCDP)
    :param targets: target columns appended to every selected clique

    :return: list of selected cliques (attribute-pair + targets tuples)

    NOTE(review): the mutable default ``targets=[]`` is never mutated here,
    so it is safe, but callers should still prefer passing their own list.
    """
    # Score every candidate pair by how badly the model fits its marginal.
    weights = {}
    candidates = list(itertools.combinations(data.domain.invert(targets), 2))
    for (a, b) in candidates:
        xhat = model.project(([a, b] + targets)).datavector()
        x = data.project(([a, b] + targets)).datavector()
        weights[(a, b)] = np.linalg.norm((x - xhat), 1)
    T = nx.Graph()
    T.add_nodes_from(data.domain.attrs)
    ds = DisjointSet()
    r = (len(data.domain) - len(targets))
    # Split the remaining zCDP budget evenly across the r-1 edge picks.
    epsilon = np.sqrt(((8 * rho) / (r - 1)))
    for i in range((r - 1)):
        # Drop candidates that would close a cycle, then pick one edge
        # privately with probability increasing in its weight.
        candidates = [e for e in candidates if (not ds.connected(*e))]
        wgts = np.array([weights[e] for e in candidates])
        idx = exponential_mechanism(wgts, epsilon, sensitivity=1.0)
        e = candidates[idx]
        T.add_edge(*e)
        ds.union(*e)
    return [(e + tuple(targets)) for e in T.edges]
|
def adagrid(data, epsilon, delta, threshold, targets=[], split_strategy=None, **mbi_args):
    """Implements the Adagrid mechanism used in Sprint 3 of the NIST 2021
    Competition by Team Minutemen.

    Args:
        data (mbi.Dataset): The sensitive dataset.
        epsilon (float): Privacy parameter.
        delta (float): Delta parameter in approximate DP. Set to ``0`` if pure
            DP is required.
        threshold (float): Threshold for deciding which cells are likely to
            have non-zero counts.
        targets (iterable, optional): List of target columns. Default is
            ``[]``.  NOTE(review): mutable default argument — only safe
            because it is never mutated here.
        split_strategy ([floats]): Fractions of the zCDP budget allocated to
            each of the three steps of the algorithm (normalized internally).
        mbi_args (kwargs): Args passed through to mbi.FactoredInference.

    Returns:
        mbi.Dataset: Dataset object holding a synthetic dataset satisfying
        (epsilon, delta)-DP.
    """
    # Convert the (epsilon, delta) budget into a zCDP budget rho.
    rho = cdp_rho(epsilon, delta)
    if (not split_strategy):
        # Default: split the budget evenly across the three steps.
        rho_step_1 = rho_step_2 = rho_step_3 = (rho / 3)
    else:
        assert (len(split_strategy) == 3)
        # Normalize the user-supplied fractions so they sum to 1.
        (frac_1, frac_2, frac_3) = (np.array(split_strategy) / sum(split_strategy))
        rho_step_1 = (rho * frac_1)
        rho_step_2 = (rho * frac_2)
        rho_step_3 = (rho * frac_3)
    domain = data.domain
    measurements = []
    post_plausibility = {}
    matrices = {}
    # Step 1: measure every (attribute + targets) marginal and all its subsets.
    step1_outer = [((a,) + tuple(targets)) for a in domain if (a not in targets)]
    step1_all = downward_closure(step1_outer)
    # Noise scale from splitting rho_step_1 across len(step1_all) queries.
    step1_sigma = (np.sqrt((0.5 / rho_step_1)) * np.sqrt(len(step1_all)))
    for k in range(1, (len(targets) + 2)):
        # Process cliques in increasing size so smaller marginals can inform
        # the plausibility estimates of larger ones.
        split = [cl for cl in step1_all if (len(cl) == k)]
        print()
        for cl in split:
            I = sparse.eye(domain.size(cl))
            # Q1 keeps cells judged plausible; Q2 aggregates the rest.
            Q1 = get_identity(cl, post_plausibility, domain)
            Q2 = (get_aggregate(cl, matrices, domain) @ (I - Q1))
            Q1 = Q1[(Q1.getnnz(1) > 0)]
            Q = sparse.vstack([Q1, Q2])
            # NOTE(review): assigning to Q.T looks suspicious — presumably
            # meant to cache a CSR-format transpose; confirm against
            # scipy.sparse semantics.
            Q.T = sparse.csr_matrix(Q.T)
            print(('Measuring %s, L2 sensitivity %.6f' % (cl, np.sqrt(Q.power(2).sum(axis=0).max()))))
            mu = data.project(cl).datavector()
            y = ((Q @ mu) + np.random.normal(loc=0, scale=step1_sigma, size=Q.shape[0]))
            # Estimate the identity-measured cells to decide which cells are
            # plausibly non-zero (threshold is in units of sigma).
            est = (Q1.T @ y[:Q1.shape[0]])
            post_plausibility[cl] = Factor(domain.project(cl), (est >= (step1_sigma * threshold)))
            matrices[cl] = Q
            measurements.append((Q, y, 1.0, cl))
    engine = FactoredInference(domain, log=False, **mbi_args)
    engine.estimate(measurements)
    # Step 2: privately select pairwise cliques via the MST-based method.
    step2_queries = select(data, engine.model, rho_step_2, targets)
    print()
    # Step 3: measure the selected cliques with fresh Gaussian noise.
    step3_sigma = (np.sqrt(len(step2_queries)) * np.sqrt((0.5 / rho_step_3)))
    for cl in step2_queries:
        I = sparse.eye(domain.size(cl))
        Q1 = get_identity(cl, post_plausibility, domain)
        Q2 = (get_aggregate(cl, matrices, domain) @ (I - Q1))
        Q1 = Q1[(Q1.getnnz(1) > 0)]
        Q = sparse.vstack([Q1, Q2])
        Q.T = sparse.csr_matrix(Q.T)
        print(('Measuring %s, L2 sensitivity %.6f' % (cl, np.sqrt(Q.power(2).sum(axis=0).max()))))
        mu = data.project(cl).datavector()
        y = ((Q @ mu) + np.random.normal(loc=0, scale=step3_sigma, size=Q.shape[0]))
        measurements.append((Q, y, 1.0, cl))
    print()
    print('Post-processing with Private-PGM, will take some time...')
    model = engine.estimate(measurements)
    return model.synthetic_data()
|
def default_params():
    """Construct the default command-line parameter settings for this program.

    :returns: a dictionary mapping each argument name to its default value
    """
    return {
        'dataset': 'datasets/adult.zip',
        'domain': 'datasets/adult-domain.json',
        'epsilon': 1.0,
        'delta': 1e-10,
        'targets': [],
        'pgm_iters': 2500,
        'warm_start': True,
        'metric': 'L2',
        'threshold': 5.0,
        'split_strategy': [0.1, 0.1, 0.8],
        'save': 'out.csv',
    }
|
def powerset(iterable):
    """Yield every non-empty subset of the input, smallest first.

    powerset([1,2,3]) --> (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)
    """
    items = list(iterable)
    by_size = (itertools.combinations(items, r) for r in range(1, len(items) + 1))
    return itertools.chain.from_iterable(by_size)
|
def downward_closure(Ws):
    """Return every non-empty subset of the given cliques, sorted by size."""
    closure = set()
    for clique in Ws:
        closure.update(powerset(clique))
    return sorted(closure, key=len)
|
def hypothetical_model_size(domain, cliques):
    """Size in megabytes of a GraphicalModel built over the given cliques
    (8 bytes per parameter)."""
    hypothetical = GraphicalModel(domain, cliques)
    return hypothetical.size * 8 / (2 ** 20)
|
def compile_workload(workload):
    """Score every clique in the downward closure of the workload.

    The score of a clique is its total attribute overlap with all
    workload cliques.
    """
    def overlap(cl):
        return sum(len(set(cl) & set(ax)) for ax in workload)
    return {cl: overlap(cl) for cl in downward_closure(workload)}
|
def filter_candidates(candidates, model, size_limit):
    """Keep only the candidates that fit within the model-size budget.

    A candidate survives when adding it keeps the hypothetical model at or
    below size_limit (in MB), or when it is already implied by the model's
    existing cliques (and is therefore free to measure).
    """
    free_cliques = downward_closure(model.cliques)
    kept = {}
    for cl, score in candidates.items():
        fits_budget = hypothetical_model_size(model.domain, model.cliques + [cl]) <= size_limit
        already_free = cl in free_cliques
        if fits_budget or already_free:
            kept[cl] = score
    return kept
|
class AIM(Mechanism):
    """The AIM mechanism: adaptively and iteratively selects the
    worst-approximated workload marginal (exponential mechanism), measures
    it with Gaussian noise, and refits a Private-PGM model, rescaling the
    noise as the privacy budget is consumed.
    """
    def __init__(self, epsilon, delta, prng=None, rounds=None, max_model_size=80, structural_zeros=None):
        """
        :param epsilon: privacy parameter
        :param delta: approximate-DP parameter
        :param prng: pseudo-random number generator
        :param rounds: number of rounds (default: 16 * number of attributes)
        :param max_model_size: limit on the graphical model size, in MB
        :param structural_zeros: dict of impossible-value lists per clique
        """
        super(AIM, self).__init__(epsilon, delta, prng)
        self.rounds = rounds
        self.max_model_size = max_model_size
        # Fixed: was a mutable default argument ({}); give each instance its own dict.
        self.structural_zeros = {} if structural_zeros is None else structural_zeros
    def worst_approximated(self, candidates, answers, model, eps, sigma):
        """Privately select the candidate clique with the largest weighted,
        bias-corrected L1 error via the exponential mechanism with budget eps."""
        errors = {}
        sensitivity = {}
        for cl in candidates:
            wgt = candidates[cl]
            x = answers[cl]
            # Expected L1 mass contributed by the noise alone, used to debias.
            bias = np.sqrt(2 / np.pi) * sigma * model.domain.size(cl)
            xest = model.project(cl).datavector()
            errors[cl] = wgt * (np.linalg.norm(x - xest, 1) - bias)
            sensitivity[cl] = abs(wgt)
        max_sensitivity = max(sensitivity.values())
        return self.exponential_mechanism(errors, eps, max_sensitivity)
    def run(self, data, W):
        """Run AIM on the dataset with workload W (list of (clique, weight)
        pairs) and return a synthetic mbi.Dataset."""
        rounds = self.rounds or 16 * len(data.domain)
        workload = [cl for cl, _ in W]
        candidates = compile_workload(workload)
        answers = {cl: data.project(cl).datavector() for cl in candidates}
        oneway = [cl for cl in candidates if len(cl) == 1]
        # Per round: 90% of the zCDP budget for measurement, 10% for selection.
        sigma = np.sqrt(rounds / (2 * 0.9 * self.rho))
        epsilon = np.sqrt(8 * 0.1 * self.rho / rounds)
        measurements = []
        print('Initial Sigma', sigma)
        rho_used = len(oneway) * 0.5 / sigma ** 2
        # Bootstrap: measure every one-way marginal first.
        for cl in oneway:
            x = data.project(cl).datavector()
            y = x + self.gaussian_noise(sigma, x.size)
            I = Identity(y.size)
            measurements.append((I, y, sigma, cl))
        zeros = self.structural_zeros
        engine = FactoredInference(data.domain, iters=1000, warm_start=True, structural_zeros=zeros)
        model = engine.estimate(measurements)
        t = 0
        terminate = False
        while not terminate:
            t += 1
            # If less than two rounds of budget remain, spend it all and stop.
            if self.rho - rho_used < 2 * (0.5 / sigma ** 2 + 1.0 / 8 * epsilon ** 2):
                remaining = self.rho - rho_used
                sigma = np.sqrt(1 / (2 * 0.9 * remaining))
                epsilon = np.sqrt(8 * 0.1 * remaining)
                terminate = True
            rho_used += 1.0 / 8 * epsilon ** 2 + 0.5 / sigma ** 2
            # The allowed model size grows with the fraction of budget spent.
            size_limit = self.max_model_size * rho_used / self.rho
            small_candidates = filter_candidates(candidates, model, size_limit)
            cl = self.worst_approximated(small_candidates, answers, model, epsilon, sigma)
            n = data.domain.size(cl)
            Q = Identity(n)
            x = data.project(cl).datavector()
            y = x + self.gaussian_noise(sigma, n)
            measurements.append((Q, y, sigma, cl))
            z = model.project(cl).datavector()
            model = engine.estimate(measurements)
            w = model.project(cl).datavector()
            print('Selected', cl, 'Size', n, 'Budget Used', rho_used / self.rho)
            # If the model barely moved, noise dominates the signal: anneal.
            if np.linalg.norm(w - z, 1) <= sigma * np.sqrt(2 / np.pi) * n:
                print('(!!!!!!!!!!!!!!!!!!!!!!) Reducing sigma', sigma / 2)
                sigma /= 2
                epsilon *= 2
        print('Generating Data...')
        engine.iters = 2500
        model = engine.estimate(measurements)
        synth = model.synthetic_data()
        return synth
|
def default_params():
    """Construct the default command-line parameter settings for this program.

    :returns: a dictionary mapping each argument name to its default value
    """
    return {
        'dataset': '../data/adult.csv',
        'domain': '../data/adult-domain.json',
        'epsilon': 1.0,
        'delta': 1e-09,
        'noise': 'laplace',
        'max_model_size': 80,
        'degree': 2,
        'num_marginals': None,
        'max_cells': 10000,
    }
|
def cdp_delta_standard(rho, eps):
    """Loose conversion from rho-zCDP to the delta achieved at a given eps,
    using the basic Gaussian-tail bound exp(-(eps - rho)^2 / (4 rho))."""
    assert rho >= 0
    assert eps >= 0
    if rho == 0:
        # No zCDP budget spent: pure DP with no failure probability.
        return 0
    return math.exp(-((eps - rho) ** 2) / (4 * rho))
|
def cdp_delta(rho, eps):
    """Tight conversion from rho-zCDP to the optimal delta at a given eps.

    Bisects over the Renyi order alpha to minimize the conversion bound.
    """
    assert rho >= 0
    assert eps >= 0
    if rho == 0:
        return 0
    # Bisection bounds on the Renyi order alpha.
    lo = 1.01
    hi = (eps + 1) / (2 * rho) + 2
    for _ in range(1000):
        alpha = (lo + hi) / 2
        # Sign of the derivative of the log-delta objective w.r.t. alpha.
        derivative = (2 * alpha - 1) * rho - eps + math.log1p(-1.0 / alpha)
        if derivative < 0:
            lo = alpha
        else:
            hi = alpha
    delta = math.exp((alpha - 1) * (alpha * rho - eps) + alpha * math.log1p(-1 / alpha)) / (alpha - 1.0)
    # delta is a probability, so never report more than 1.
    return min(delta, 1.0)
|
def cdp_eps(rho, delta):
    """Smallest eps such that rho-zCDP implies (eps, delta)-DP, found by
    bisection on eps using the tight cdp_delta conversion."""
    assert rho >= 0
    assert delta > 0
    if delta >= 1 or rho == 0:
        # Trivial cases: delta >= 1 is vacuous; rho = 0 is perfectly private.
        return 0.0
    lo = 0.0
    hi = rho + 2 * math.sqrt(rho * math.log(1 / delta))
    for _ in range(1000):
        mid = (lo + hi) / 2
        if cdp_delta(rho, mid) <= delta:
            hi = mid
        else:
            lo = mid
    # Return the upper end so the bound is guaranteed to hold.
    return hi
|
def cdp_rho(eps, delta):
    """Largest rho such that rho-zCDP implies (eps, delta)-DP, found by
    bisection on rho using the tight cdp_delta conversion."""
    assert eps >= 0
    assert delta > 0
    if delta >= 1:
        # (eps, 1)-DP is vacuous, so any rho works; report 0.
        return 0.0
    lo = 0.0
    hi = eps + 1
    for _ in range(1000):
        mid = (lo + hi) / 2
        if cdp_delta(mid, eps) <= delta:
            lo = mid
        else:
            hi = mid
    # Return the lower end so the guarantee is conservative.
    return lo
|
def default_params():
    """Construct the default command-line parameter settings for this program.

    :returns: a dictionary mapping each argument name to its default value
    """
    return {
        'dataset': '../data/adult.csv',
        'domain': '../data/adult-domain.json',
        'epsilon': 1.0,
        'delta': 1e-09,
        'degree': 2,
        'num_marginals': None,
        'max_cells': 10000,
        'noise': 'gaussian',
        'pgm_iters': 2500,
        'restarts': 1,
        'seed': 0,
        'save': None,
    }
|
def convert_matrix(domain, cliques):
    """Build a hdmm workload.Marginals object with weight 1.0 on each clique,
    with attribute names translated to attribute indices."""
    weights = {
        tuple(domain.attrs.index(attr) for attr in proj): 1.0
        for proj in cliques
    }
    return workload.Marginals.fromtuples(domain.shape, weights)
|
def convert_back(domain, Q):
    """Convert a hdmm Marginals strategy back to (cliques, weights), with
    attribute indices translated back to attribute names."""
    cliques = []
    weights = []
    for sub in Q.matrices:
        key = tuple(domain.attrs[i] for i in sub.base.tuple())
        cliques.append(key)
        weights.append(sub.weight)
    return (cliques, weights)
|
def default_params():
    """Construct the default command-line parameter settings for this program.

    :returns: a dictionary mapping each argument name to its default value
    """
    return {
        'dataset': '../data/adult.csv',
        'domain': '../data/adult-domain.json',
        'epsilon': 1.0,
        'delta': 1e-09,
        'degree': 2,
        'num_marginals': None,
        'max_cells': 10000,
        'noise': 'gaussian',
        'parameterization': 'OPT+',
        'pgm_iters': 2500,
        'restarts': 1,
        'seed': 0,
        'save': None,
    }
|
def optm(queries, approx=False):
    """Optimize (or load from cache) a hdmm Marginals strategy for the queries.

    NOTE(review): relies on the enclosing script's globals (``data``,
    ``args``, ``strategy_path``, ``templates``, ``pickle``, ``os``) rather
    than parameters — confirm these exist at every call site.
    NOTE(review): the ``approx`` parameter is never used; approximate-DP
    behavior is instead derived from ``args.noise``.
    NOTE(review): if ``args.restarts`` is 0 and no cached strategy exists,
    ``A`` is never bound and the final line raises NameError.
    NOTE(review): the file handle passed to pickle.load is never closed.
    """
    W = convert_matrix(data.domain, queries)
    if os.path.exists(strategy_path):
        print('loading strategy from file')
        A = pickle.load(open(strategy_path, 'rb'))
    else:
        print('optimizing strategy, could take a while')
        best_obj = np.inf
        # Random restarts: keep the strategy with the best objective value.
        for _ in range(args.restarts):
            if (args.parameterization == 'OPTM'):
                temp = templates.Marginals(data.domain.shape, approx=(args.noise == 'gaussian'), seed=args.seed)
            else:
                temp = templates.MarginalUnionKron(data.domain.shape, len(queries), approx=(args.noise == 'gaussian'))
            obj = temp.optimize(W)
            if (obj < best_obj):
                best_obj = obj
                A = temp.strategy()
    return convert_back(data.domain, A)
|
def opt_plus(queries, approx=False):
    """Identity strategy: measure the workload queries directly, each with
    uniform weight 1.0.  (The ``approx`` flag is accepted for interface
    compatibility with the other strategy builders but is unused.)"""
    uniform = np.ones(len(queries))
    return (queries, uniform)
|
def MST(data, epsilon, delta):
    """The MST mechanism: measure one-way marginals, compress each attribute's
    domain, privately select a spanning set of two-way marginals, measure
    them, and fit the result with Private-PGM."""
    rho = cdp_rho(epsilon, delta)
    # Budget split evenly across two measurement rounds plus selection.
    sigma = np.sqrt(3 / (2 * rho))
    singletons = [(col,) for col in data.domain]
    log1 = measure(data, singletons, sigma)
    data, log1, undo_compress_fn = compress_domain(data, log1)
    pairs = select(data, rho / 3.0, log1)
    log2 = measure(data, pairs, sigma)
    engine = FactoredInference(data.domain, iters=5000)
    est = engine.estimate(log1 + log2)
    synth = est.synthetic_data()
    # Map the synthetic records back onto the original (uncompressed) domain.
    return undo_compress_fn(synth)
|
def measure(data, cliques, sigma, weights=None):
    """Measure each clique's marginal with Gaussian noise of scale
    sigma / weight, after normalizing the weights to unit L2 norm."""
    if weights is None:
        weights = np.ones(len(cliques))
    weights = np.array(weights) / np.linalg.norm(weights)
    measurements = []
    for proj, wgt in zip(cliques, weights):
        x = data.project(proj).datavector()
        noise = np.random.normal(loc=0, scale=sigma / wgt, size=x.size)
        Q = sparse.eye(x.size)
        measurements.append((Q, x + noise, sigma / wgt, proj))
    return measurements
|
def compress_domain(data, measurements):
    """Shrink each attribute's domain to the values whose noisy counts pass a
    3-sigma significance test, folding all other values into one catch-all
    bucket.  Returns the transformed data, the adjusted measurements, and a
    function that reverses the compression."""
    supports = {}
    new_measurements = []
    for Q, y, sigma, proj in measurements:
        col = proj[0]
        sup = y >= 3 * sigma
        supports[col] = sup
        if sup.sum() == y.size:
            # Every value is significant: keep the measurement unchanged.
            new_measurements.append((Q, y, sigma, proj))
        else:
            # Fold insignificant cells into a single bucket; rescale the
            # folded row so its effective noise level matches the others.
            y2 = np.append(y[sup], y[~sup].sum())
            scale = np.sqrt(y.size - y2.size + 1.0)
            I2 = np.ones(y2.size)
            I2[-1] = 1.0 / scale
            y2[-1] /= scale
            new_measurements.append((sparse.diags(I2), y2, sigma, proj))
    undo_compress_fn = lambda d: reverse_data(d, supports)
    return (transform_data(data, supports), new_measurements, undo_compress_fn)
|
def exponential_mechanism(q, eps, sensitivity, prng=np.random, monotonic=False):
    """Performs the exponential mechanism; the returned index satisfies eps-DP.

    Args:
        q (ndarray): Score for each item (higher means more likely chosen).
        eps (float): Privacy parameter.
        sensitivity (float): Sensitivity of the scoring function.
        prng (np.random): Pseudo-random number generator to be used.
        monotonic (boolean): True if adding a record can only move scores in
            one direction, which permits coefficient 1.0 instead of 0.5.

    Returns:
        int: Index of the chosen item.
    """
    coef = 1.0 if monotonic else 0.5
    # Shift by q.max() before scaling for numerical robustness with extreme
    # scores (consistent with the other implementation of this function in
    # this file); softmax is invariant to a constant shift, so the sampling
    # distribution is unchanged.
    scores = coef * eps / sensitivity * (q - q.max())
    probas = np.exp(scores - logsumexp(scores))
    return prng.choice(q.size, p=probas)
|
def select(data, rho, measurement_log, cliques=None):
    """Privately select two-way marginals that connect the attributes (the
    MST selection step), using the exponential mechanism on model-error
    edge weights.

    Args:
        data (mbi.Dataset): The sensitive dataset.
        rho (float): zCDP budget for this selection step.
        measurement_log: Noisy measurements used to fit the initial model.
        cliques (list, optional): Edges that must be included (cost-free).
            Default is ``None``, treated as an empty list.

    Returns:
        list: The selected attribute pairs (edges of the resulting tree).
    """
    # Fixed: was a mutable default argument (cliques=[]); fresh list per call.
    cliques = [] if cliques is None else list(cliques)
    engine = FactoredInference(data.domain, iters=1000)
    est = engine.estimate(measurement_log)
    weights = {}
    candidates = list(itertools.combinations(data.domain.attrs, 2))
    # Weight each candidate edge by how poorly the current model explains it.
    for a, b in candidates:
        xhat = est.project([a, b]).datavector()
        x = data.project([a, b]).datavector()
        weights[a, b] = np.linalg.norm(x - xhat, 1)
    T = nx.Graph()
    T.add_nodes_from(data.domain.attrs)
    ds = DisjointSet()
    for e in cliques:
        T.add_edge(*e)
        ds.union(*e)
    r = len(list(nx.connected_components(T)))
    if r <= 1:
        # Fixed: the mandatory cliques already connect every attribute, so
        # there is nothing to select; the original computed 8*rho/(r-1)
        # here unconditionally and raised ZeroDivisionError.
        return list(T.edges)
    # Budget split evenly across the r-1 private edge selections.
    epsilon = np.sqrt(8 * rho / (r - 1))
    for i in range(r - 1):
        candidates = [e for e in candidates if not ds.connected(*e)]
        wgts = np.array([weights[e] for e in candidates])
        idx = exponential_mechanism(wgts, epsilon, sensitivity=1.0)
        e = candidates[idx]
        T.add_edge(*e)
        ds.union(*e)
    return list(T.edges)
|
def transform_data(data, supports):
    """Re-encode each column so supported values receive compact codes
    0..size-1 and every unsupported value maps to one extra bucket."""
    df = data.df.copy()
    newdom = {}
    for col in data.domain:
        support = supports[col]
        size = support.sum()
        newdom[col] = int(size)
        if size < support.size:
            # One extra bucket collects all unsupported values.
            newdom[col] += 1
        mapping = {}
        idx = 0
        for i in range(support.size):
            if support[i]:
                mapping[i] = idx
                idx += 1
            else:
                mapping[i] = size
        assert idx == size
        df[col] = df[col].map(mapping)
    return Dataset(df, Domain.fromdict(newdom))
|
def reverse_data(data, supports):
    """Invert transform_data: supported codes map back to their original
    values, and rows in the catch-all bucket are resampled uniformly from
    the unsupported values."""
    df = data.df.copy()
    newdom = {}
    for col in data.domain:
        support = supports[col]
        mx = support.sum()
        newdom[col] = int(support.size)
        idx, extra = np.where(support)[0], np.where(~support)[0]
        mask = df[col] == mx
        if extra.size > 0:
            # Rows folded into the extra bucket get a random unsupported value.
            df.loc[mask, col] = np.random.choice(extra, mask.sum())
        df.loc[~mask, col] = idx[df.loc[~mask, col]]
    return Dataset(df, Domain.fromdict(newdom))
|
def default_params():
    """Construct the default command-line parameter settings for this program.

    :returns: a dictionary mapping each argument name to its default value
    """
    return {
        'dataset': '../data/adult.csv',
        'domain': '../data/adult-domain.json',
        'epsilon': 1.0,
        'delta': 1e-09,
        'degree': 2,
        'num_marginals': None,
        'max_cells': 10000,
    }
|
def worst_approximated(workload_answers, est, workload, eps, penalty=True, bounded=False):
    """Privately select a (noisy) worst-approximated marginal for measurement.

    :param workload_answers: dict mapping cliques to their true marginal counts
    :param est: a GraphicalModel object approximating the data distribution
    :param workload: candidate cliques for the exponential mechanism
    :param eps: privacy budget for this selection step
    :param penalty: if True, penalize each clique's error by its domain size
    :param bounded: if True, use sensitivity 2.0 (bounded-DP neighboring)
    """
    scores = []
    for cl in workload:
        bias = est.domain.size(cl) if penalty else 0
        truth = workload_answers[cl]
        approx = est.project(cl).datavector()
        scores.append(np.abs(truth - approx).sum() - bias)
    errors = np.array(scores)
    sensitivity = 2.0 if bounded else 1.0
    # Exponential mechanism via a max-shifted softmax over scaled errors.
    prob = softmax(0.5 * eps / sensitivity * (errors - errors.max()))
    key = np.random.choice(len(errors), p=prob)
    return workload[key]
|
def mwem_pgm(data, epsilon, delta=0.0, workload=None, rounds=None, maxsize_mb=25, pgm_iters=1000, noise='gaussian', bounded=False, alpha=0.9):
    """Implementation of MWEM + PGM.

    :param data: an mbi.Dataset object
    :param epsilon: privacy budget
    :param delta: approximate-DP parameter (used only for Gaussian noise)
    :param workload: list of cliques (attribute tuples) to include in the
        workload (default: all pairs of attributes)
    :param rounds: number of rounds of MWEM to run (default: number of attributes)
    :param maxsize_mb: limit on the model size (in megabytes), used to filter
        candidate cliques from selection and avoid intractable model sizes.
        Set to np.inf to run MWEM as originally described (may exceed
        resource limits if run for too many rounds).
    :param noise: 'laplace' (pure DP) or 'gaussian' (zCDP accounting)
    :param bounded: if True, use bounded-DP sensitivities and fix the total
    :param alpha: fraction of the per-round budget spent on measurement
        (the rest is spent on selection)

    Implementation note: during each round, one clique is selected for
    measurement, but only if measuring it does not grow the graphical
    model beyond the (linearly increasing) size budget.
    """
    if (workload is None):
        workload = list(itertools.combinations(data.domain, 2))
    if (rounds is None):
        rounds = len(data.domain)
    if (noise == 'laplace'):
        # Pure-DP accounting: split epsilon across rounds, then across
        # measurement (alpha) and selection (1 - alpha).
        eps_per_round = (epsilon / rounds)
        sigma = (1.0 / (alpha * eps_per_round))
        exp_eps = ((1 - alpha) * eps_per_round)
        marginal_sensitivity = (2 if bounded else 1.0)
    else:
        # zCDP accounting for Gaussian noise.
        rho = cdp_rho(epsilon, delta)
        rho_per_round = (rho / rounds)
        sigma = np.sqrt((0.5 / (alpha * rho_per_round)))
        exp_eps = np.sqrt(((8 * (1 - alpha)) * rho_per_round))
        marginal_sensitivity = (np.sqrt(2) if bounded else 1.0)
    domain = data.domain
    total = (data.records if bounded else None)
    def size(cliques):
        # Hypothetical model size in MB if these cliques were measured.
        return ((GraphicalModel(domain, cliques).size * 8) / (2 ** 20))
    workload_answers = {cl: data.project(cl).datavector() for cl in workload}
    engine = FactoredInference(data.domain, log=False, iters=pgm_iters, warm_start=True)
    measurements = []
    est = engine.estimate(measurements, total)
    cliques = []
    for i in range(1, (rounds + 1)):
        # Only consider cliques that keep the model within the (growing)
        # size budget: maxsize_mb * i / rounds at round i.
        candidates = [cl for cl in workload if (size((cliques + [cl])) <= ((maxsize_mb * i) / rounds))]
        ax = worst_approximated(workload_answers, est, candidates, exp_eps)
        print('Round', i, 'Selected', ax, 'Model Size (MB)', ((est.size * 8) / (2 ** 20)))
        n = domain.size(ax)
        x = data.project(ax).datavector()
        if (noise == 'laplace'):
            y = (x + np.random.laplace(loc=0, scale=(marginal_sensitivity * sigma), size=n))
        else:
            y = (x + np.random.normal(loc=0, scale=(marginal_sensitivity * sigma), size=n))
        Q = sparse.eye(n)
        measurements.append((Q, y, 1.0, ax))
        est = engine.estimate(measurements, total)
        cliques.append(ax)
    print('Generating Data...')
    return est.synthetic_data()
|
def default_params():
    """Construct the default command-line parameter settings for this program.

    :returns: a dictionary mapping each argument name to its default value
    """
    return {
        'dataset': '../data/adult.csv',
        'domain': '../data/adult-domain.json',
        'epsilon': 1.0,
        'delta': 1e-09,
        'rounds': None,
        'noise': 'gaussian',
        'max_model_size': 25,
        'pgm_iters': 1000,
        'degree': 2,
        'num_marginals': None,
        'max_cells': 10000,
    }
|
class CallBack():
    """A function object invoked after every iteration of an iterative
    optimization procedure; useful for tracking loss and other metrics
    over time.
    """
    def __init__(self, engine, frequency=50):
        """Initialize the callback object.

        :param engine: the FactoredInference object performing the optimization
        :param frequency: number of iterations between invocations of run()
        """
        self.engine = engine
        self.frequency = frequency
        self.calls = 0
    def run(self, marginals):
        """Hook for subclasses; invoked every `frequency` iterations."""
        pass
    def __call__(self, marginals):
        if not self.calls:
            # First invocation: start the wall-clock timer.
            self.start = time.time()
        if self.calls % self.frequency == 0:
            self.run(marginals)
        self.calls += 1
|
class Logger(CallBack):
    """The default callback: tracks time, L1 loss, L2 loss, primal
    feasibility, and optionally the total variation distance to the true
    query answers (when available).  The last is for debugging purposes
    only — in practice the true answers cannot be observed.
    """
    def __init__(self, engine, true_answers=None, frequency=50):
        """Initialize the callback object.

        :param engine: the FactoredInference object performing the optimization
        :param true_answers: dict of true answers to the measurement queries
        :param frequency: number of iterations between logging rows
        """
        CallBack.__init__(self, engine, frequency)
        self.true_answers = true_answers
        self.idx = 0
    def setup(self):
        """Print the header row and initialize the results DataFrame."""
        model = self.engine.model
        total = sum(model.domain.size(cl) for cl in model.cliques)
        print('Total clique size:', total, flush=True)
        cols = ['iteration', 'time', 'l1_loss', 'l2_loss', 'feasibility']
        if self.true_answers is not None:
            cols.append('variation')
        self.results = pd.DataFrame(columns=cols)
        print('\t\t'.join(cols), flush=True)
    def variational_distances(self, marginals):
        """Total variation distance for each answered query under the current
        marginals (requires true_answers; debugging only)."""
        errors = []
        for Q, y, proj in self.true_answers:
            # Use the first clique marginal that covers this projection.
            for cl in marginals:
                if set(proj) <= set(cl):
                    mu = marginals[cl].project(proj)
                    x = mu.values.flatten()
                    diff = Q.dot(x) - y
                    err = 0.5 * np.abs(diff).sum() / y.sum()
                    errors.append(err)
                    break
        return errors
    def primal_feasibility(self, mu):
        """Average L1 disagreement between overlapping clique marginals
        (0 when the marginals are perfectly consistent)."""
        ans = 0
        count = 0
        for r in mu:
            for s in mu:
                if r == s:
                    break
                d = tuple(set(r) & set(s))
                if len(d) > 0:
                    x = mu[r].project(d).datavector()
                    y = mu[s].project(d).datavector()
                    ans += np.linalg.norm(x - y, 1)
                    count += 1
        try:
            return ans / count
        except ZeroDivisionError:
            # Fixed: was a bare `except:` that silently masked unrelated
            # errors; only the no-overlapping-cliques case should return 0.
            return 0
    def run(self, marginals):
        """Compute one logging row, print it, and append it to self.results."""
        if self.idx == 0:
            self.setup()
        t = time.time() - self.start
        l1_loss = self.engine._marginal_loss(marginals, metric='L1')[0]
        l2_loss = self.engine._marginal_loss(marginals, metric='L2')[0]
        feasibility = self.primal_feasibility(marginals)
        row = [self.calls, t, l1_loss, l2_loss, feasibility]
        if self.true_answers is not None:
            variational = np.mean(self.variational_distances(marginals))
            row.append(100 * variational)
        self.results.loc[self.idx] = row
        self.idx += 1
        print('\t\t'.join([('%.2f' % v) for v in row]), flush=True)
|
class CliqueVector(dict):
    """Convenience wrapper for arithmetic over the concatenated vector of
    marginals and potentials.

    Represented as a dictionary mapping cliques (attribute tuples) to
    marginals/potentials (Factor objects); supports +, -, scalar *,
    elementwise exp/log, dot products, and size queries.
    """
    def __init__(self, dictionary):
        self.dictionary = dictionary
        dict.__init__(self, dictionary)
    @staticmethod
    def zeros(domain, cliques):
        from mbi import Factor
        return CliqueVector({cl: Factor.zeros(domain.project(cl)) for cl in cliques})
    @staticmethod
    def ones(domain, cliques):
        from mbi import Factor
        return CliqueVector({cl: Factor.ones(domain.project(cl)) for cl in cliques})
    @staticmethod
    def uniform(domain, cliques):
        from mbi import Factor
        return CliqueVector({cl: Factor.uniform(domain.project(cl)) for cl in cliques})
    @staticmethod
    def random(domain, cliques, prng=np.random):
        from mbi import Factor
        return CliqueVector({cl: Factor.random(domain.project(cl), prng) for cl in cliques})
    @staticmethod
    def normal(domain, cliques, prng=np.random):
        from mbi import Factor
        return CliqueVector({cl: Factor.normal(domain.project(cl), prng) for cl in cliques})
    @staticmethod
    def from_data(data, cliques):
        from mbi import Factor
        ans = {}
        for cl in cliques:
            mu = data.project(cl)
            ans[cl] = Factor(mu.domain, mu.datavector())
        return CliqueVector(ans)
    def combine(self, other):
        """Fold each clique of `other` into the first clique of self that contains it."""
        for small in other:
            for big in self:
                if set(small) <= set(big):
                    self[big] += other[small]
                    break
    def __mul__(self, const):
        return CliqueVector({cl: const * value for cl, value in self.items()})
    def __rmul__(self, const):
        return self.__mul__(const)
    def __add__(self, other):
        if np.isscalar(other):
            return CliqueVector({cl: value + other for cl, value in self.items()})
        return CliqueVector({cl: self[cl] + other[cl] for cl in self})
    def __sub__(self, other):
        return self + (-1) * other
    def exp(self):
        return CliqueVector({cl: value.exp() for cl, value in self.items()})
    def log(self):
        return CliqueVector({cl: value.log() for cl, value in self.items()})
    def dot(self, other):
        return sum((self[cl] * other[cl]).sum() for cl in self)
    def size(self):
        return sum(self[cl].domain.size() for cl in self)
|
class Domain():
    """An ordered collection of attribute names and their domain sizes."""
    def __init__(self, attrs, shape):
        """Construct a Domain object.

        :param attrs: a list or tuple of attribute names
        :param shape: a list or tuple of domain sizes for each attribute
        """
        assert len(attrs) == len(shape), 'dimensions must be equal'
        self.attrs = tuple(attrs)
        self.shape = tuple(shape)
        self.config = dict(zip(attrs, shape))
    @staticmethod
    def fromdict(config):
        """Construct a Domain object from a dictionary of { attr : size } values."""
        return Domain(config.keys(), config.values())
    def project(self, attrs):
        """Project the domain onto a subset of attributes.

        :param attrs: the attributes to project onto
        :return: the projected Domain object
        """
        if type(attrs) is str:
            attrs = [attrs]
        shape = tuple(self.config[a] for a in attrs)
        return Domain(attrs, shape)
    def marginalize(self, attrs):
        """Marginalize out some attributes from the domain (opposite of project).

        :param attrs: the attributes to marginalize out
        :return: the marginalized Domain object
        """
        proj = [a for a in self.attrs if a not in attrs]
        return self.project(proj)
    def axes(self, attrs):
        """Return the axes tuple for the given attributes."""
        return tuple(self.attrs.index(a) for a in attrs)
    def transpose(self, attrs):
        """Reorder the attributes in the domain object."""
        return self.project(attrs)
    def invert(self, attrs):
        """Return the attributes of the domain not in the given list."""
        return [a for a in self.attrs if a not in attrs]
    def merge(self, other):
        """Merge this domain object with another.

        :param other: another Domain object
        :return: a new domain object covering the full domain

        Example:
        >>> D1 = Domain(['a','b'], [10,20])
        >>> D2 = Domain(['b','c'], [20,30])
        >>> D1.merge(D2)
        Domain(a: 10, b: 20, c: 30)
        """
        extra = other.marginalize(self.attrs)
        return Domain(self.attrs + extra.attrs, self.shape + extra.shape)
    def contains(self, other):
        """Determine if this domain contains another."""
        return set(other.attrs) <= set(self.attrs)
    def size(self, attrs=None):
        """Return the total size of the domain (or of a subset of attributes)."""
        # Fixed: was `attrs == None`; identity comparison is the correct idiom
        # and avoids surprises with objects overriding __eq__.
        if attrs is None:
            return reduce(lambda x, y: x * y, self.shape, 1)
        return self.project(attrs).size()
    def sort(self, how='size'):
        """Return a new domain object, sorted by attribute size or attribute name."""
        if how == 'size':
            attrs = sorted(self.attrs, key=self.size)
        elif how == 'name':
            attrs = sorted(self.attrs)
        return self.project(attrs)
    def canonical(self, attrs):
        """Return the canonical (domain-order) ordering of the attributes."""
        return tuple(a for a in self.attrs if a in attrs)
    def __contains__(self, attr):
        return attr in self.attrs
    def __getitem__(self, a):
        """Return the size of an individual attribute.

        :param a: the attribute
        """
        return self.config[a]
    def __iter__(self):
        """Iterate over the attributes in the domain."""
        return self.attrs.__iter__()
    def __len__(self):
        return len(self.attrs)
    def __eq__(self, other):
        return (self.attrs == other.attrs) and (self.shape == other.shape)
    def __repr__(self):
        inner = ', '.join('%s: %d' % x for x in zip(self.attrs, self.shape))
        return 'Domain(%s)' % inner
    def __str__(self):
        return self.__repr__()
|
class Factor():
def __init__(self, domain, values):
' Initialize a factor over the given domain\n\n :param domain: the domain of the factor\n :param values: the ndarray of factor values (for each element of the domain)\n\n Note: values may be a flattened 1d array or a ndarray with same shape as domain\n '
assert (domain.size() == values.size), 'domain size does not match values size'
assert ((values.ndim == 1) or (values.shape == domain.shape)), 'invalid shape for values array'
self.domain = domain
self.values = values.reshape(domain.shape)
@staticmethod
def zeros(domain):
return Factor(domain, np.zeros(domain.shape))
@staticmethod
def ones(domain):
return Factor(domain, np.ones(domain.shape))
@staticmethod
def random(domain):
return Factor(domain, np.random.rand(*domain.shape))
@staticmethod
def uniform(domain):
return (Factor.ones(domain) / domain.size())
@staticmethod
def active(domain, structural_zeros):
" create a factor that is 0 everywhere except in positions present in \n 'structural_zeros', where it is -infinity\n\n :param: domain: the domain of this factor\n :param: structural_zeros: a list of values that are not possible\n "
idx = tuple(np.array(structural_zeros).T)
vals = np.zeros(domain.shape)
vals[idx] = (- np.inf)
return Factor(domain, vals)
def expand(self, domain):
assert domain.contains(self.domain), 'expanded domain must contain current domain'
dims = (len(domain) - len(self.domain))
values = self.values.reshape((self.domain.shape + tuple(([1] * dims))))
ax = domain.axes(self.domain.attrs)
values = np.moveaxis(values, range(len(ax)), ax)
values = np.broadcast_to(values, domain.shape)
return Factor(domain, values)
def transpose(self, attrs):
assert (set(attrs) == set(self.domain.attrs)), 'attrs must be same as domain attributes'
newdom = self.domain.project(attrs)
ax = newdom.axes(self.domain.attrs)
values = np.moveaxis(self.values, range(len(ax)), ax)
return Factor(newdom, values)
def project(self, attrs, agg='sum'):
' \n project the factor onto a list of attributes (in order)\n using either sum or logsumexp to aggregate along other attributes\n '
assert (agg in ['sum', 'logsumexp']), 'agg must be sum or logsumexp'
marginalized = self.domain.marginalize(attrs)
if (agg == 'sum'):
ans = self.sum(marginalized.attrs)
elif (agg == 'logsumexp'):
ans = self.logsumexp(marginalized.attrs)
return ans.transpose(attrs)
def sum(self, attrs=None):
if (attrs is None):
return np.sum(self.values)
axes = self.domain.axes(attrs)
values = np.sum(self.values, axis=axes)
newdom = self.domain.marginalize(attrs)
return Factor(newdom, values)
def logsumexp(self, attrs=None):
if (attrs is None):
return logsumexp(self.values)
axes = self.domain.axes(attrs)
values = logsumexp(self.values, axis=axes)
newdom = self.domain.marginalize(attrs)
return Factor(newdom, values)
def logaddexp(self, other):
newdom = self.domain.merge(other.domain)
factor1 = self.expand(newdom)
factor2 = self.expand(newdom)
return Factor(newdom, np.logaddexp(factor1.values, factor2.values))
def max(self, attrs=None):
if (attrs is None):
return self.values.max()
axes = self.domain.axes(attrs)
values = np.max(self.values, axis=axes)
newdom = self.domain.marginalize(attrs)
return Factor(newdom, values)
def condition(self, evidence):
' evidence is a dictionary where \n keys are attributes, and \n values are elements of the domain for that attribute '
slices = [(evidence[a] if (a in evidence) else slice(None)) for a in self.domain]
newdom = self.domain.marginalize(evidence.keys())
values = self.values[tuple(slices)]
return Factor(newdom, values)
def copy(self, out=None):
if (out is None):
return Factor(self.domain, self.values.copy())
np.copyto(out.values, self.values)
return out
def __mul__(self, other):
if np.isscalar(other):
new_values = np.nan_to_num((other * self.values))
return Factor(self.domain, new_values)
newdom = self.domain.merge(other.domain)
factor1 = self.expand(newdom)
factor2 = other.expand(newdom)
return Factor(newdom, (factor1.values * factor2.values))
def __add__(self, other):
if np.isscalar(other):
return Factor(self.domain, (other + self.values))
newdom = self.domain.merge(other.domain)
factor1 = self.expand(newdom)
factor2 = other.expand(newdom)
return Factor(newdom, (factor1.values + factor2.values))
def __iadd__(self, other):
if np.isscalar(other):
self.values += other
return self
factor2 = other.expand(self.domain)
self.values += factor2.values
return self
def __imul__(self, other):
    """In-place multiply; `other` is a scalar or a Factor over a sub-domain."""
    if np.isscalar(other):
        self.values *= other
    else:
        self.values *= other.expand(self.domain).values
    return self
def __radd__(self, other):
    """Right-hand addition; addition is commutative, so defer to __add__."""
    return self + other
def __rmul__(self, other):
    """Right-hand multiplication; commutative, so defer to __mul__."""
    return self * other
def __sub__(self, other):
    """Subtract a scalar, or another Factor (negated, then added)."""
    if np.isscalar(other):
        return Factor(self.domain, self.values - other)
    # Negate other's values, mapping -inf to 0 so a log-zero entry
    # contributes nothing instead of turning into +inf.
    negated = np.where(other.values == (-np.inf), 0, -other.values)
    return self + Factor(other.domain, negated)
def __truediv__(self, other):
    """Divide by a scalar or a Factor, mapping division by <= 0 to 0."""
    if np.isscalar(other):
        return Factor(self.domain, np.nan_to_num(self.values / other))
    denom = other.expand(self.domain)
    # Only divide where the denominator is positive; all other entries
    # are explicitly zeroed on the next line.
    quotient = np.divide(self.values, denom.values, where=(denom.values > 0))
    quotient[denom.values <= 0] = 0.0
    return Factor(self.domain, quotient)
def exp(self, out=None):
    """Element-wise exponential.

    :param out: optional destination Factor written in place.
    :return: `out` when provided, otherwise a new Factor.
    """
    if out is not None:
        np.exp(self.values, out=out.values)
        return out
    return Factor(self.domain, np.exp(self.values))
def log(self, out=None):
    """Element-wise natural logarithm.

    :param out: optional destination Factor written in place.
    :return: `out` when provided, otherwise a new Factor.

    NOTE(review): only the allocating path adds 1e-100 to avoid log(0);
    the in-place path does not -- confirm this asymmetry is intentional.
    """
    if out is not None:
        np.log(self.values, out=out.values)
        return out
    return Factor(self.domain, np.log(self.values + 1e-100))
def datavector(self, flatten=True):
    """Materialize the data vector: the raw values, 1-D when flatten=True."""
    return self.values.flatten() if flatten else self.values
|
class FactorGraph():
    """Factor-graph representation of a graphical model over `domain`.

    Inference runs by message passing in log space: standard loopy belief
    propagation by default, or a convergent variant (with counting numbers
    obtained from a QP) when ``convex=True``.
    """
    def __init__(self, domain, cliques, total=1.0, convex=False, iters=25):
        """
        :param domain: a Domain object describing all attributes
        :param cliques: list of attribute tuples, one per factor
        :param total: total mass the distribution should sum to
        :param convex: use convergent BP with optimized counting numbers
        :param iters: number of message-passing iterations
        """
        self.domain = domain
        self.cliques = cliques
        self.total = total
        self.convex = convex
        self.iters = iters
        if convex:
            # Counting numbers come from a QP (see get_counting_numbers);
            # message passing uses the convergent update equations.
            self.counting_numbers = self.get_counting_numbers()
            self.belief_propagation = self.convergent_belief_propagation
        else:
            # Bethe counting numbers: 1 per clique, and
            # 1 - (#cliques containing a) per attribute a.
            counting_numbers = {}
            for cl in cliques:
                counting_numbers[cl] = 1.0
            for a in domain:
                counting_numbers[a] = (1.0 - len([cl for cl in cliques if (a in cl)]))
            # Tuple layout mirrors the convex case (v, vhat, k); only the
            # third slot is consumed by bethe_entropy here.
            self.counting_numbers = (None, None, counting_numbers)
            self.belief_propagation = self.loopy_belief_propagation
        self.potentials = None
        self.marginals = None
        self.messages = self.init_messages()
        # One zero (log-space) belief per attribute.
        self.beliefs = {i: Factor.zeros(domain.project(i)) for i in domain}
    def datavector(self, flatten=True):
        """Materialize the explicit representation of the distribution as a data vector.

        Sums the log-potentials, normalizes, then rescales so the full
        vector sums to self.total.
        """
        logp = sum((self.potentials[cl] for cl in self.cliques))
        ans = np.exp((logp - logp.logsumexp()))
        # Correct for the mass spread introduced by expanding to the full domain.
        wgt = (ans.domain.size() / self.domain.size())
        return ((ans.expand(self.domain).datavector(flatten) * wgt) * self.total)
    def init_messages(self):
        """Initialize all variable->factor (mu_n) and factor->variable (mu_f)
        messages to zero in log space."""
        mu_n = defaultdict(dict)
        mu_f = defaultdict(dict)
        for cl in self.cliques:
            for v in cl:
                mu_f[cl][v] = Factor.zeros(self.domain.project(v))
                mu_n[v][cl] = Factor.zeros(self.domain.project(v))
        return (mu_n, mu_f)
    def primal_feasibility(self, mu):
        """Average L1 disagreement between overlapping clique marginals in `mu`.

        Returns 0 when every overlapping pair agrees or when no pairs overlap.
        """
        ans = 0
        count = 0
        for r in mu:
            for s in mu:
                # Only visit each unordered pair once.
                if (r == s):
                    break
                d = tuple((set(r) & set(s)))
                if (len(d) > 0):
                    x = mu[r].project(d).datavector()
                    y = mu[s].project(d).datavector()
                    err = np.linalg.norm((x - y), 1)
                    ans += err
                    count += 1
        try:
            return (ans / count)
        except:
            # NOTE(review): bare except guards count == 0; narrowing to
            # ZeroDivisionError would be safer.
            return 0
    def project(self, attrs):
        """Compute the marginal on `attrs`, scaled to sum to self.total.

        Answers from stored clique marginals when some clique contains
        `attrs`; otherwise falls back to the product of single-attribute
        beliefs.
        """
        if (type(attrs) is list):
            attrs = tuple(attrs)
        if (self.marginals is not None):
            # Sum the projections of every clique marginal that covers attrs.
            ans = Factor.zeros(self.domain.project(attrs))
            terminate = False
            for cl in self.cliques:
                if (set(attrs) <= set(cl)):
                    ans += self.marginals[cl].project(attrs)
                    terminate = True
            if terminate:
                # Renormalize to the requested total mass.
                return (ans * (self.total / ans.sum()))
        # Fallback: combine per-attribute beliefs (log space), normalize,
        # and exponentiate.
        belief = sum((self.beliefs[i] for i in attrs))
        belief += (np.log(self.total) - belief.logsumexp())
        return belief.transpose(attrs).exp()
    def loopy_belief_propagation(self, potentials, callback=None):
        """Run `iters` rounds of sum-product message passing in log space.

        :param potentials: CliqueVector of log-potentials, one per clique
        :param callback: optional; called with the clique marginals after
            each iteration
        :return: CliqueVector of clique marginals (probability scale),
            also stored in self.marginals
        """
        (mu_n, mu_f) = self.messages
        self.potentials = potentials
        for i in range(self.iters):
            # Factor-to-variable messages.
            for cl in self.cliques:
                pre = sum((mu_n[c][cl] for c in cl))
                for v in cl:
                    complement = [var for var in cl if (var is not v)]
                    # Subtract v's own incoming message to avoid double counting.
                    mu_f[cl][v] = ((potentials[cl] + pre) - mu_n[v][cl])
                    mu_f[cl][v] = mu_f[cl][v].logsumexp(complement)
                    # Normalize for numerical stability.
                    mu_f[cl][v] -= mu_f[cl][v].logsumexp()
            # Variable-to-factor messages.
            for v in self.domain:
                fac = [cl for cl in self.cliques if (v in cl)]
                pre = sum((mu_f[cl][v] for cl in fac))
                for f in fac:
                    complement = [var for var in fac if (var is not f)]
                    mu_n[v][f] = (pre - mu_f[f][v])
            if (callback is not None):
                mg = self.clique_marginals(mu_n, mu_f, potentials)
                callback(mg)
        self.beliefs = {v: sum((mu_f[cl][v] for cl in self.cliques if (v in cl))) for v in self.domain}
        self.messages = (mu_n, mu_f)
        self.marginals = self.clique_marginals(mu_n, mu_f, potentials)
        return self.marginals
    def convergent_belief_propagation(self, potentials, callback=None):
        """Convergent BP using the counting numbers from get_counting_numbers.

        :param potentials: CliqueVector of log-potentials, one per clique
        :param callback: optional; called with the clique marginals after
            each iteration
        :return: CliqueVector of clique marginals (probability scale)
        """
        (v, vhat, k) = self.counting_numbers
        # sigma: variable->factor messages; delta: factor->variable messages.
        (sigma, delta) = self.messages
        for it in range(self.iters):
            for i in self.domain:
                nbrs = [r for r in self.cliques if (i in r)]
                # Update factor-to-variable messages for every neighbor.
                for r in nbrs:
                    comp = [j for j in r if (i != j)]
                    delta[r][i] = (potentials[r] + sum((sigma[j][r] for j in comp)))
                    delta[r][i] /= vhat[(i, r)]
                    delta[r][i] = delta[r][i].logsumexp(comp)
                # Combine weighted messages into i's belief and normalize.
                belief = Factor.zeros(self.domain.project(i))
                belief += (sum(((delta[r][i] * vhat[(i, r)]) for r in nbrs)) / vhat[i])
                belief -= belief.logsumexp()
                self.beliefs[i] = belief
                # Update variable-to-factor messages using the new belief.
                for r in nbrs:
                    comp = [j for j in r if (i != j)]
                    A = ((- v[(i, r)]) / vhat[(i, r)])
                    B = v[r]
                    sigma[i][r] = (A * (potentials[r] + sum((sigma[j][r] for j in comp))))
                    sigma[i][r] += (B * (belief - delta[r][i]))
            if (callback is not None):
                mg = self.clique_marginals(sigma, delta, potentials)
                callback(mg)
        self.messages = (sigma, delta)
        return self.clique_marginals(sigma, delta, potentials)
    def clique_marginals(self, mu_n, mu_f, potentials):
        """Convert messages + potentials into normalized clique marginals
        (each scaled to sum to self.total)."""
        if self.convex:
            (v, _, _) = self.counting_numbers
        marginals = {}
        for cl in self.cliques:
            belief = (potentials[cl] + sum((mu_n[n][cl] for n in cl)))
            if self.convex:
                # Convergent BP beliefs are tempered by the clique's counting number.
                belief *= (1.0 / v[cl])
            belief += (np.log(self.total) - belief.logsumexp())
            marginals[cl] = belief.exp()
        return CliqueVector(marginals)
    def mle(self, marginals):
        """Maximum-likelihood potentials: the negated Bethe-entropy gradient."""
        return (- self.bethe_entropy(marginals)[1])
    def bethe_entropy(self, marginals):
        """
        Return the Bethe Entropy and the gradient with respect to the marginals

        """
        (_, _, weights) = self.counting_numbers
        entropy = 0
        dmarginals = {}
        attributes = set()
        for cl in self.cliques:
            mu = (marginals[cl] / self.total)
            # Clique term, weighted by its counting number.
            entropy += (weights[cl] * (mu * mu.log()).sum())
            dmarginals[cl] = ((weights[cl] * (1 + mu.log())) / self.total)
            # Single-attribute terms, each counted once across cliques.
            for a in (set(cl) - set(attributes)):
                p = mu.project(a)
                entropy += (weights[a] * (p * p.log()).sum())
                dmarginals[cl] += ((weights[a] * (1 + p.log())) / self.total)
                # NOTE(review): update(a) iterates `a`; for string attribute
                # names this inserts individual characters, not the name, so
                # multi-character attributes may be double counted.  Verify
                # whether add(a) was intended.
                attributes.update(a)
        return ((- entropy), ((- 1) * CliqueVector(dmarginals)))
    def get_counting_numbers(self):
        """Solve a QP (via cvxopt) for counting numbers used by convergent BP.

        :return: tuple (v, vhat, k) of dictionaries keyed by cliques,
            attributes, and (attribute, clique) pairs.
        """
        from cvxopt import solvers, matrix
        solvers.options['show_progress'] = False
        # Assign a flat index to every attribute, clique, and (clique, attr) pair.
        index = {}
        idx = 0
        for i in self.domain:
            index[i] = idx
            idx += 1
        for r in self.cliques:
            index[r] = idx
            idx += 1
        for r in self.cliques:
            for i in r:
                index[(r, i)] = idx
                idx += 1
        # Indicator vectors describing how each clique/attribute combines
        # the underlying variables.
        vectors = {}
        for r in self.cliques:
            v = np.zeros(idx)
            v[index[r]] = 1
            for i in r:
                v[index[(r, i)]] = 1
            vectors[r] = v
        for i in self.domain:
            v = np.zeros(idx)
            v[index[i]] = 1
            for r in self.cliques:
                if (i in r):
                    v[index[(r, i)]] = (- 1)
            vectors[i] = v
        # Equality constraints: each attribute's effective count sums to 1.
        constraints = []
        for i in self.domain:
            con = vectors[i].copy()
            for r in self.cliques:
                if (i in r):
                    con += vectors[r]
            constraints.append(con)
        A = np.array(constraints)
        b = np.ones(len(self.domain))
        # Objective: keep clique counting numbers close to 1 (least squares).
        X = np.vstack([vectors[r] for r in self.cliques])
        y = np.ones(len(self.cliques))
        P = (X.T @ X)
        q = ((- X.T) @ y)
        # Inequality constraints: all entries nonnegative, with a strictly
        # positive lower bound on clique counting numbers.
        G = (- np.eye(q.size))
        h = np.zeros(q.size)
        minBound = (1.0 / len(self.domain))
        for r in self.cliques:
            h[index[r]] = (- minBound)
        P = matrix(P)
        q = matrix(q)
        G = matrix(G)
        h = matrix(h)
        A = matrix(A)
        b = matrix(b)
        ans = solvers.qp(P, q, G, h, A, b)
        x = np.array(ans['x']).flatten()
        # Unpack the QP solution into the three dictionaries.
        counting_v = {}
        for r in self.cliques:
            counting_v[r] = x[index[r]]
            for i in r:
                counting_v[(i, r)] = x[index[(r, i)]]
        for i in self.domain:
            counting_v[i] = x[index[i]]
        counting_vhat = {}
        counting_k = {}
        for i in self.domain:
            nbrs = [r for r in self.cliques if (i in r)]
            counting_vhat[i] = (counting_v[i] + sum((counting_v[r] for r in nbrs)))
            counting_k[i] = (counting_v[i] - sum((counting_v[(i, r)] for r in nbrs)))
            for r in nbrs:
                counting_vhat[(i, r)] = (counting_v[r] + counting_v[(i, r)])
        for r in self.cliques:
            counting_k[r] = (counting_v[r] + sum((counting_v[(i, r)] for i in r)))
        return (counting_v, counting_vhat, counting_k)
|
class FactoredInference():
    """Learns a GraphicalModel from noisy marginal measurements by convex
    optimization over the marginal polytope (MD / RDA / IG engines)."""
    def __init__(self, domain, backend='numpy', structural_zeros={}, metric='L2', log=False, iters=1000, warm_start=False, elim_order=None):
        '\n Class for learning a GraphicalModel from noisy measurements on a data distribution\n \n :param domain: The domain information (A Domain object)\n :param backend: numpy or torch backend\n :param structural_zeros: An encoding of the known (structural) zeros in the distribution.\n Specified as a dictionary where \n - each key is a subset of attributes of size r\n - each value is a list of r-tuples corresponding to impossible attribute settings\n :param metric: The optimization metric. May be L1, L2 or a custom callable function\n - custom callable function must consume the marginals and produce the loss and gradient\n - see FactoredInference._marginal_loss for more information\n :param log: flag to log iterations of optimization\n :param iters: number of iterations to optimize for\n :param warm_start: initialize new model or reuse last model when calling infer multiple times\n :param elim_order: an elimination order for the JunctionTree algorithm\n - Elimination order will impact the efficiency by not correctness. \n By default, a greedy elimination order is used\n '
        self.domain = domain
        self.backend = backend
        self.metric = metric
        self.log = log
        self.iters = iters
        self.warm_start = warm_start
        self.history = []
        self.elim_order = elim_order
        # Select the Factor implementation that matches the backend.
        if (backend == 'torch'):
            from mbi.torch_factor import Factor
            self.Factor = Factor
        else:
            from mbi import Factor
            self.Factor = Factor
        # Encode the structural zeros as one Factor per clique via Factor.active.
        self.structural_zeros = CliqueVector({})
        for cl in structural_zeros:
            dom = self.domain.project(cl)
            fact = structural_zeros[cl]
            self.structural_zeros[cl] = self.Factor.active(dom, fact)
    def estimate(self, measurements, total=None, engine='MD', callback=None, options={}):
        ' \n Estimate a GraphicalModel from the given measurements\n\n :param measurements: a list of (Q, y, noise, proj) tuples, where\n Q is the measurement matrix (a numpy array or scipy sparse matrix or LinearOperator)\n y is the noisy answers to the measurement queries\n noise is the standard deviation of the noise added to y\n proj defines the marginal used for this measurement set (a subset of attributes)\n :param total: The total number of records (if known)\n :param engine: the optimization algorithm to use, options include:\n MD - Mirror Descent with armijo line search\n RDA - Regularized Dual Averaging\n IG - Interior Gradient\n :param callback: a function to be called after each iteration of optimization\n :param options: solver specific options passed as a dictionary\n { param_name : param_value }\n \n :return model: A GraphicalModel that best matches the measurements taken\n '
        measurements = self.fix_measurements(measurements)
        # NOTE(review): `options` has a mutable default dict and is mutated
        # here, so the callback leaks into the shared default across calls.
        options['callback'] = callback
        if ((callback is None) and self.log):
            options['callback'] = callbacks.Logger(self)
        if (engine == 'MD'):
            self.mirror_descent(measurements, total, **options)
        elif (engine == 'RDA'):
            self.dual_averaging(measurements, total, **options)
        elif (engine == 'IG'):
            self.interior_gradient(measurements, total, **options)
        # NOTE(review): an unrecognized engine silently runs no optimization
        # and returns whatever model _setup-free state exists.
        return self.model
    def fix_measurements(self, measurements):
        """Normalize measurements: coerce proj to a tuple, default Q to the
        identity, and validate shapes against the domain."""
        assert (type(measurements) is list), ('measurements must be a list, given ' + measurements)
        assert all(((len(m) == 4) for m in measurements)), 'each measurement must be a 4-tuple (Q, y, noise,proj)'
        ans = []
        for (Q, y, noise, proj) in measurements:
            assert ((Q is None) or (Q.shape[0] == y.size)), 'shapes of Q and y are not compatible'
            if (type(proj) is list):
                proj = tuple(proj)
            if (type(proj) is not tuple):
                proj = (proj,)
            # A missing Q means the identity query over the projection.
            if (Q is None):
                Q = sparse.eye(self.domain.size(proj))
            assert np.isscalar(noise), ('noise must be a real value, given ' + str(noise))
            assert all(((a in self.domain) for a in proj)), (str(proj) + ' not contained in domain')
            assert (Q.shape[1] == self.domain.size(proj)), 'shapes of Q and proj are not compatible'
            ans.append((Q, y, noise, proj))
        return ans
    def interior_gradient(self, measurements, total, lipschitz=None, c=1, sigma=1, callback=None):
        " Use the interior gradient algorithm to estimate the GraphicalModel\n See https://epubs.siam.org/doi/pdf/10.1137/S1052623403427823 for more information\n\n :param measurements: a list of (Q, y, noise, proj) tuples, where\n Q is the measurement matrix (a numpy array or scipy sparse matrix or LinearOperator)\n y is the noisy answers to the measurement queries\n noise is the standard deviation of the noise added to y\n proj defines the marginal used for this measurement set (a subset of attributes)\n :param total: The total number of records (if known)\n :param lipschitz: the Lipchitz constant of grad L(mu)\n - automatically calculated for metric=L2\n - doesn't exist for metric=L1\n - must be supplied for custom callable metrics\n :param c, sigma: parameters of the algorithm\n :param callback: a function to be called after each iteration of optimization\n "
        assert (self.metric != 'L1'), 'dual_averaging cannot be used with metric=L1'
        assert ((not callable(self.metric)) or (lipschitz is not None)), 'lipschitz constant must be supplied'
        self._setup(measurements, total)
        model = self.model
        (domain, cliques, total) = (model.domain, model.cliques, model.total)
        L = (self._lipschitz(measurements) if (lipschitz is None) else lipschitz)
        if self.log:
            print('Lipchitz constant:', L)
        theta = model.potentials
        # x, y, z are the algorithm's three iterate sequences; all start at
        # the marginals of the initial potentials.
        x = y = z = model.belief_propagation(theta)
        c0 = c
        l = (sigma / L)
        for k in range(1, (self.iters + 1)):
            # Step-size recurrence from the interior-gradient method.
            a = ((np.sqrt((((c * l) ** 2) + ((4 * c) * l))) - (l * c)) / 2)
            y = (((1 - a) * x) + (a * z))
            c *= (1 - a)
            (_, g) = self._marginal_loss(y)
            theta = (theta - (((a / c) / total) * g))
            z = model.belief_propagation(theta)
            x = (((1 - a) * x) + (a * z))
            if (callback is not None):
                callback(x)
        model.marginals = x
        # Recover potentials consistent with the final marginals.
        model.potentials = model.mle(x)
    def dual_averaging(self, measurements, total=None, lipschitz=None, callback=None):
        " Use the regularized dual averaging algorithm to estimate the GraphicalModel\n See https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/xiao10JMLR.pdf\n\n :param measurements: a list of (Q, y, noise, proj) tuples, where\n Q is the measurement matrix (a numpy array or scipy sparse matrix or LinearOperator)\n y is the noisy answers to the measurement queries\n noise is the standard deviation of the noise added to y\n proj defines the marginal used for this measurement set (a subset of attributes)\n :param total: The total number of records (if known)\n :param lipschitz: the Lipchitz constant of grad L(mu)\n - automatically calculated for metric=L2\n - doesn't exist for metric=L1\n - must be supplied for custom callable metrics\n :param callback: a function to be called after each iteration of optimization\n "
        assert (self.metric != 'L1'), 'dual_averaging cannot be used with metric=L1'
        assert ((not callable(self.metric)) or (lipschitz is not None)), 'lipschitz constant must be supplied'
        self._setup(measurements, total)
        model = self.model
        (domain, cliques, total) = (model.domain, model.cliques, model.total)
        L = (self._lipschitz(measurements) if (lipschitz is None) else lipschitz)
        # NOTE(review): this print is unconditional, unlike interior_gradient
        # which gates it on self.log.
        print('Lipchitz constant:', L)
        if (L == 0):
            # Zero Lipschitz constant means a constant objective; nothing to do.
            return
        theta = model.potentials
        # gbar accumulates the running weighted-average gradient.
        gbar = CliqueVector({cl: self.Factor.zeros(domain.project(cl)) for cl in cliques})
        w = v = model.belief_propagation(theta)
        beta = 0
        for t in range(1, (self.iters + 1)):
            c = (2.0 / (t + 1))
            u = (((1 - c) * w) + (c * v))
            (_, g) = self._marginal_loss(u)
            gbar = (((1 - c) * gbar) + (c * g))
            theta = (((((- t) * (t + 1)) / ((4 * L) + beta)) / self.model.total) * gbar)
            v = model.belief_propagation(theta)
            w = (((1 - c) * w) + (c * v))
            if (callback is not None):
                callback(w)
        model.marginals = w
        model.potentials = model.mle(w)
    def mirror_descent(self, measurements, total=None, stepsize=None, callback=None):
        ' Use the mirror descent algorithm to estimate the GraphicalModel\n See https://web.iem.technion.ac.il/images/user-files/becka/papers/3.pdf\n \n :param measurements: a list of (Q, y, noise, proj) tuples, where\n Q is the measurement matrix (a numpy array or scipy sparse matrix or LinearOperator)\n y is the noisy answers to the measurement queries\n noise is the standard deviation of the noise added to y\n proj defines the marginal used for this measurement set (a subset of attributes)\n :param stepsize: The step size function for the optimization (None or scalar or function)\n if None, will perform line search at each iteration (requires smooth objective)\n if scalar, will use constant step size\n if function, will be called with the iteration number\n :param total: The total number of records (if known)\n :param callback: a function to be called after each iteration of optimization\n '
        assert (not ((self.metric == 'L1') and (stepsize is None))), 'loss function not smooth, cannot use line search (specify stepsize)'
        self._setup(measurements, total)
        model = self.model
        (cliques, theta) = (model.cliques, model.potentials)
        mu = model.belief_propagation(theta)
        ans = self._marginal_loss(mu)
        if (ans[0] == 0):
            # Already at zero loss; nothing to optimize.
            return ans[0]
        # nols: "no line search" -- a step size was supplied by the caller.
        nols = (stepsize is not None)
        if np.isscalar(stepsize):
            alpha = float(stepsize)
            stepsize = (lambda t: alpha)
        if (stepsize is None):
            # Default initial step for the Armijo line search.
            alpha = (1.0 / (self.model.total ** 2))
            stepsize = (lambda t: (2.0 * alpha))
        for t in range(1, (self.iters + 1)):
            if (callback is not None):
                callback(mu)
            (omega, nu) = (theta, mu)
            (curr_loss, dL) = ans
            alpha = stepsize(t)
            # Armijo backtracking: halve alpha at most 25 times until the
            # sufficient-decrease condition holds (or immediately if nols).
            for i in range(25):
                theta = (omega - (alpha * dL))
                mu = model.belief_propagation(theta)
                ans = self._marginal_loss(mu)
                if (nols or ((curr_loss - ans[0]) >= ((0.5 * alpha) * dL.dot((nu - mu))))):
                    break
                alpha *= 0.5
        model.potentials = theta
        model.marginals = mu
        return ans[0]
    def _marginal_loss(self, marginals, metric=None):
        ' Compute the loss and gradient for a given dictionary of marginals\n\n :param marginals: A dictionary with keys as projections and values as Factors\n :return loss: the loss value\n :return grad: A dictionary with gradient for each marginal \n '
        if (metric is None):
            metric = self.metric
        if callable(metric):
            # Custom metrics take the marginals and return (loss, gradient).
            return metric(marginals)
        loss = 0.0
        gradient = {}
        for cl in marginals:
            mu = marginals[cl]
            gradient[cl] = self.Factor.zeros(mu.domain)
            # Accumulate the contribution of every measurement on this clique.
            for (Q, y, noise, proj) in self.groups[cl]:
                # Weight each measurement by the inverse of its noise level.
                c = (1.0 / noise)
                mu2 = mu.project(proj)
                x = mu2.datavector()
                diff = (c * ((Q @ x) - y))
                if (metric == 'L1'):
                    loss += abs(diff).sum()
                    # torch tensors expose .sign(); numpy arrays use np.sign.
                    sign = (diff.sign() if hasattr(diff, 'sign') else np.sign(diff))
                    grad = (c * (Q.T @ sign))
                else:
                    loss += (0.5 * (diff @ diff))
                    grad = (c * (Q.T @ diff))
                gradient[cl] += self.Factor(mu2.domain, grad)
        return (float(loss), CliqueVector(gradient))
    def _setup(self, measurements, total):
        ' Perform necessary setup for running estimation algorithms\n \n 1. If total is None, find the minimum variance unbiased estimate for total and use that\n 2. Construct the GraphicalModel \n * If there are structural_zeros in the distribution, initialize factors appropriately\n 3. Pre-process measurements into groups so that _marginal_loss may be evaluated efficiently\n '
        if (total is None):
            # Estimate total: each measurement where Q^T v = 1 yields an
            # unbiased estimate v.y; combine them inverse-variance weighted.
            variances = np.array([])
            estimates = np.array([])
            for (Q, y, noise, proj) in measurements:
                o = np.ones(Q.shape[1])
                v = lsmr(Q.T, o, atol=0, btol=0)[0]
                if np.allclose(Q.T.dot(v), o):
                    variances = np.append(variances, ((noise ** 2) * np.dot(v, v)))
                    estimates = np.append(estimates, np.dot(v, y))
            if (estimates.size == 0):
                total = 1
            else:
                # Minimum-variance unbiased combination of the estimates.
                variance = (1.0 / np.sum((1.0 / variances)))
                estimate = (variance * np.sum((estimates / variances)))
                total = max(1, estimate)
        cliques = [m[3] for m in measurements]
        if (self.structural_zeros is not None):
            cliques += list(self.structural_zeros.keys())
        model = GraphicalModel(self.domain, cliques, total, elimination_order=self.elim_order)
        model.potentials = CliqueVector.zeros(self.domain, model.cliques)
        model.potentials.combine(self.structural_zeros)
        # Warm start: carry the previous model's potentials forward.
        if (self.warm_start and hasattr(self, 'model')):
            model.potentials.combine(self.model.potentials)
        self.model = model
        cliques = self.model.cliques
        # Group measurements by the smallest model clique containing proj,
        # so _marginal_loss can evaluate each group against one marginal.
        self.groups = defaultdict((lambda : []))
        for (Q, y, noise, proj) in measurements:
            if (self.backend == 'torch'):
                # Convert numpy / scipy-sparse inputs to torch tensors once.
                import torch
                device = self.Factor.device
                y = torch.tensor(y, dtype=torch.float32, device=device)
                if isinstance(Q, np.ndarray):
                    Q = torch.tensor(Q, dtype=torch.float32, device=device)
                elif sparse.issparse(Q):
                    Q = Q.tocoo()
                    idx = torch.LongTensor([Q.row, Q.col])
                    vals = torch.FloatTensor(Q.data)
                    Q = torch.sparse.FloatTensor(idx, vals).to(device)
            m = (Q, y, noise, proj)
            for cl in sorted(cliques, key=model.domain.size):
                if (set(proj) <= set(cl)):
                    self.groups[cl].append(m)
                    break
    def _lipschitz(self, measurements):
        ' compute lipschitz constant for L2 loss \n\n Note: must be called after _setup\n '
        eigs = {cl: 0.0 for cl in self.model.cliques}
        for (Q, _, noise, proj) in measurements:
            for cl in self.model.cliques:
                if (set(proj) <= set(cl)):
                    n = self.domain.size(cl)
                    p = self.domain.size(proj)
                    Q = aslinearoperator(Q)
                    Q.dtype = np.dtype(Q.dtype)
                    # Largest eigenvalue of Q^H Q, scaled by the clique/proj
                    # size ratio and the measurement's noise level.
                    eig = eigsh((Q.H * Q), 1)[0][0]
                    eigs[cl] += (((eig * n) / p) / (noise ** 2))
                    break
        return max(eigs.values())
    def infer(self, measurements, total=None, engine='MD', callback=None, options={}):
        # Deprecated alias for estimate(); kept for backward compatibility.
        import warnings
        message = 'Function infer is deprecated. Please use estimate instead.'
        warnings.warn(message, DeprecationWarning)
        return self.estimate(measurements, total, engine, callback, options)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.