| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
#!/usr/bin/env python
############################################################################
# Copyright (c) 2015-2016 Saint Petersburg State University
# Copyright (c) 2011-2014 Saint Petersburg Academic University
# All Rights Reserved
# See file LICENSE for details.
############################################################################
import os
import stat
import sys
import logging
import glob
import re
import gzip
import tempfile
import shutil
import options_storage
import itertools
from os.path import abspath, expanduser, join
# constants to print and detect warnings and errors in logs
SPADES_PY_ERROR_MESSAGE = "== Error == "
SPADES_PY_WARN_MESSAGE = "== Warning == "
SPADES_ERROR_MESSAGE = " ERROR "
SPADES_WARN_MESSAGE = " WARN "
# for correct warnings detection in case of continue_mode
continue_logfile_offset = None
# for removing tmp_dir even if error occurs
current_tmp_dir = None
def error(err_str, log=None, dipspades=False, prefix=SPADES_PY_ERROR_MESSAGE):
if not dipspades:
binary_name = "SPAdes"
else:
binary_name = "dipSPAdes"
if log:
log.info("\n\n" + prefix + " " + err_str)
log_warnings(log, with_error=True)
log.info("\nIn case you have troubles running " + binary_name + ", you can write to spades.support@cab.spbu.ru")
log.info("Please provide us with params.txt and " + binary_name.lower() + ".log files from the output directory.")
else:
sys.stderr.write("\n\n" + prefix + " " + err_str + "\n\n")
sys.stderr.write("\nIn case you have troubles running " + binary_name + ", you can write to spades.support@cab.spbu.ru\n")
sys.stderr.write("Please provide us with params.txt and " + binary_name.lower() + ".log files from the output directory.\n")
sys.stderr.flush()
if current_tmp_dir and os.path.isdir(current_tmp_dir):
shutil.rmtree(current_tmp_dir)
sys.exit(1)
def warning(warn_str, log=None, prefix="== Warning == "):
if log:
log.info("\n\n" + prefix + " " + warn_str + "\n\n")
else:
sys.stdout.write("\n\n" + prefix + " " + warn_str + "\n\n\n")
sys.stdout.flush()
def check_python_version():
if sys.version[0:3] not in options_storage.SUPPORTED_PYTHON_VERSIONS:
error("python version " + sys.version[0:3] + " is not supported!\n" + \
"Supported versions are " + ", ".join(options_storage.SUPPORTED_PYTHON_VERSIONS))
def get_spades_binaries_info_message():
return "You can obtain SPAdes binaries in one of two ways:" +\
"\n1. Download them from http://bioinf.spbau.ru/content/spades-download" +\
"\n2. Build source code with ./spades_compile.sh script"
def check_binaries(binary_dir, log):
for binary in ["hammer", "ionhammer", "spades", "bwa-spades", "dipspades"]:
binary_path = os.path.join(binary_dir, binary)
if not os.path.isfile(binary_path):
error("SPAdes binaries not found: " + binary_path + "\n" + get_spades_binaries_info_message(), log)
def check_file_existence(input_filename, message="", log=None, dipspades=False):
filename = abspath(expanduser(input_filename))
check_path_is_ascii(filename, message)
if not os.path.isfile(filename):
error("file not found: %s (%s)" % (filename, message), log=log, dipspades=dipspades)
options_storage.dict_of_rel2abs[input_filename] = filename
return filename
def check_dir_existence(input_dirname, message="", log=None, dipspades=False):
dirname = abspath(expanduser(input_dirname))
check_path_is_ascii(dirname, message)
if not os.path.isdir(dirname):
error("directory not found: %s (%s)" % (dirname, message), log=log, dipspades=dipspades)
options_storage.dict_of_rel2abs[input_dirname] = dirname
return dirname
def check_path_is_ascii(path, message=""):
if not is_ascii_string(path):
error("path contains non-ASCII characters: %s (%s)" % (path, message))
def ensure_dir_existence(dirname):
if os.path.isfile(dirname):
os.remove(dirname)
if not os.path.exists(dirname):
os.makedirs(dirname)
def recreate_dir(dirname):
if os.path.exists(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
def check_files_duplication(filenames, log):
for filename in filenames:
if filenames.count(filename) != 1:
error("file %s was specified at least twice" % filename, log)
def check_reads_file_format(filename, message, only_assembler, library_type, log):
if filename in options_storage.dict_of_prefixes:
ext = options_storage.dict_of_prefixes[filename]
else:
ext = os.path.splitext(filename)[1]
if ext.lower() == '.gz':
pre_ext = os.path.splitext(filename[:-len(ext)])[1]
if (pre_ext + ext).lower() in options_storage.ALLOWED_READS_EXTENSIONS:
ext = pre_ext + ext
else: # allows ".fastq.1.gz" like extensions
pre_pre_ext = os.path.splitext(filename[:-len(pre_ext + ext)])[1]
ext = pre_pre_ext + ext
if ext.lower() not in options_storage.ALLOWED_READS_EXTENSIONS:
error("file with reads has unsupported format (only " + ", ".join(options_storage.ALLOWED_READS_EXTENSIONS) +
" are supported): %s (%s)" % (filename, message), log)
if not only_assembler and ext.lower() not in options_storage.BH_ALLOWED_READS_EXTENSIONS and \
library_type not in options_storage.LONG_READS_TYPES:
error("to run read error correction, reads should be in FASTQ format (" +
", ".join(options_storage.BH_ALLOWED_READS_EXTENSIONS) +
" are supported): %s (%s)" % (filename, message), log)
if library_type.endswith("contigs") and ext.lower() not in options_storage.CONTIGS_ALLOWED_READS_EXTENSIONS:
error("file with " + library_type + " should be in FASTA format (" +
", ".join(options_storage.CONTIGS_ALLOWED_READS_EXTENSIONS) +
" are supported): %s (%s)" % (filename, message), log)
# http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
elif "PATH" in os.environ:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
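# A minimal usage sketch (the binary name and logger are just examples): which() returns
# the absolute path of an executable found on PATH, or None when nothing matches:
#     bwa_path = which("bwa-spades")
#     if bwa_path is None:
#         error("bwa-spades executable not found in PATH", log)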
def get_available_memory():
mem_info_filename = "/proc/meminfo"
avail_mem_header = "MemTotal:"
if os.path.isfile(mem_info_filename):
try:
for line in open(mem_info_filename):
if line.startswith(avail_mem_header):
avail_mem = int(line[len(avail_mem_header):].split()[0]) # in kB
avail_mem /= 1024 * 1024 # in GB
return avail_mem
except ValueError:
return None
except IOError:
return None
return None
# based on http://stackoverflow.com/questions/196345/how-to-check-if-a-string-in-python-is-in-ascii
def is_ascii_string(line):
try:
line.encode('ascii')
except UnicodeDecodeError: # python2
return False
except UnicodeEncodeError: # python3
return False
else:
return True
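# A minimal behaviour sketch (file names are made up):
#     is_ascii_string("reads_R1.fastq.gz")   # -> True
#     is_ascii_string("чтения_R1.fastq.gz")  # -> False; such a path is rejected by check_path_is_ascii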
def process_readline(line, is_python3=sys.version.startswith('3.')):
if is_python3:
return str(line, 'utf-8').rstrip()
return line.rstrip()
def process_spaces(str):
if str.find(" ") != -1:
str = '"' + str + '"'
return str
def sys_call(cmd, log=None, cwd=None):
import shlex
import subprocess
if isinstance(cmd, list):
cmd_list = cmd
else:
cmd_list = shlex.split(cmd)
proc = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd)
output = ''
while not proc.poll():
line = process_readline(proc.stdout.readline())
if line:
if log:
log.info(line)
else:
output += line + "\n"
if proc.returncode is not None:
break
for line in proc.stdout.readlines():
line = process_readline(line)
if line:
if log:
log.info(line)
else:
output += line + "\n"
if proc.returncode:
error('system call for: "%s" finished abnormally, err code: %d' % (cmd, proc.returncode), log)
return output
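# A minimal usage sketch (assumes a Unix-like system where `uname` exists; my_logger is
# hypothetical): sys_call() either streams the command's combined stdout/stderr into a
# logger, or returns it as one string when no logger is given:
#     output = sys_call("uname -a")             # capture output as a string
#     sys_call(["uname", "-a"], log=my_logger)  # or log it line by line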
def universal_sys_call(cmd, log, out_filename=None, err_filename=None, cwd=None):
'''
Runs cmd and redirects stdout to out_filename (if specified), stderr to err_filename (if specified), or to log otherwise
'''
import shlex
import subprocess
if isinstance(cmd, list):
cmd_list = cmd
else:
cmd_list = shlex.split(cmd)
if out_filename:
stdout = open(out_filename, 'w')
else:
stdout = subprocess.PIPE
if err_filename:
stderr = open(err_filename, 'w')
else:
stderr = subprocess.PIPE
proc = subprocess.Popen(cmd_list, stdout=stdout, stderr=stderr, cwd=cwd)
if log and (not out_filename or not err_filename):
while not proc.poll():
if not out_filename:
line = process_readline(proc.stdout.readline())
if line:
log.info(line)
if not err_filename:
line = process_readline(proc.stderr.readline())
if line:
log.info(line)
if proc.returncode is not None:
break
if not out_filename:
for line in proc.stdout.readlines():
if line != '':
log.info(process_readline(line))
if not err_filename:
for line in proc.stderr.readlines():
if line != '':
log.info(process_readline(line))
else:
proc.wait()
if out_filename:
stdout.close()
if err_filename:
stderr.close()
if proc.returncode:
error('system call for: "%s" finished abnormally, err code: %d' % (cmd, proc.returncode), log)
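# A minimal usage sketch (path and logger are hypothetical): redirect stdout to a file
# while stderr lines still go to the logger:
#     universal_sys_call(["uname", "-a"], my_logger, out_filename="/tmp/uname.txt")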
def save_data_to_file(data, file):
output = open(file, 'wb')
output.write(data.read())
output.close()
os.chmod(file, stat.S_IWRITE | stat.S_IREAD | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
def get_important_messages_from_log(log_filename, warnings=True):
def already_saved(list_to_check, suffix): # for excluding duplicates (--continue-from may cause them)
for item in list_to_check:
if item.endswith(suffix):
return True
return False
if warnings:
spades_py_message = SPADES_PY_WARN_MESSAGE
spades_message = SPADES_WARN_MESSAGE
else: # errors
spades_py_message = SPADES_PY_ERROR_MESSAGE
spades_message = SPADES_ERROR_MESSAGE
### for capturing correct warnings in case of continue_mode
if continue_logfile_offset:
continued_log = open(log_filename, 'r')
continued_log.seek(continue_logfile_offset)
continued_stage_phrase = continued_log.readline()
while not continued_stage_phrase.strip():
continued_stage_phrase = continued_log.readline()
lines_to_check = continued_log.readlines()
continued_log.close()
all_lines = open(log_filename, 'r').readlines()
failed_stage_index = all_lines.index(continued_stage_phrase)
lines_to_check = all_lines[:failed_stage_index] + lines_to_check
else:
lines_to_check = open(log_filename, 'r').readlines()
spades_py_msgs = []
spades_msgs = []
IMPORTANT_MESSAGE_SUMMARY_PREFIX = ' * '
for line in lines_to_check:
if line.startswith(IMPORTANT_MESSAGE_SUMMARY_PREFIX):
continue
if line.find(spades_py_message) != -1:
suffix = line[line.find(spades_py_message) + len(spades_py_message):].strip()
line = line.replace(spades_py_message, '').strip()
if not already_saved(spades_py_msgs, suffix):
spades_py_msgs.append(IMPORTANT_MESSAGE_SUMMARY_PREFIX + line)
elif line.find(spades_message) != -1:
suffix = line[line.find(spades_message) + len(spades_message):].strip()
line = line.strip()
if not already_saved(spades_msgs, suffix):
spades_msgs.append(IMPORTANT_MESSAGE_SUMMARY_PREFIX + line)
return spades_py_msgs, spades_msgs
def get_logger_filename(log):
log_file = None
for h in log.__dict__['handlers']:
if h.__class__.__name__ == 'FileHandler':
log_file = h.baseFilename
return log_file
def log_warnings(log, with_error=False):
log_file = get_logger_filename(log)
if not log_file:
return False
for h in log.__dict__['handlers']:
h.flush()
spades_py_warns, spades_warns = get_important_messages_from_log(log_file, warnings=True)
if spades_py_warns or spades_warns:
if with_error:
log.info("\n======= SPAdes pipeline finished abnormally and WITH WARNINGS!")
else:
log.info("\n======= SPAdes pipeline finished WITH WARNINGS!")
warnings_filename = os.path.join(os.path.dirname(log_file), "warnings.log")
warnings_handler = logging.FileHandler(warnings_filename, mode='w')
log.addHandler(warnings_handler)
#log.info("===== Warnings occurred during SPAdes run =====")
log.info("")
if spades_py_warns:
log.info("=== Pipeline warnings:")
for line in spades_py_warns:
log.info(line)
if spades_warns:
log.info("=== Error correction and assembling warnings:")
for line in spades_warns:
log.info(line)
log.info("======= Warnings saved to " + warnings_filename)
log.removeHandler(warnings_handler)
if with_error:
spades_py_errors, spades_errors = get_important_messages_from_log(log_file, warnings=False)
log.info("")
log.info("=== ERRORs:")
for line in (spades_errors + spades_py_errors):
log.info(line)
return True
return False
def continue_from_here(log):
if options_storage.continue_mode:
options_storage.continue_mode = False
log_filename = get_logger_filename(log)
if log_filename:
log_file = open(log_filename, 'r')
log_file.seek(0, 2) # seek to the end of file
global continue_logfile_offset
continue_logfile_offset = log_file.tell()
def finish_here(log):
log.info("\n======= Skipping the rest of SPAdes pipeline (--stop-after was set to '%s'). "
"You can continue later with --continue or --restart-from options\n" % options_storage.stop_after)
options_storage.run_completed = True
def get_latest_dir(pattern):
def atoi(text):
if text.isdigit():
return int(text)
return text
def natural_keys(text):
return [atoi(c) for c in re.split(r'(\d+)', text)]
latest_dir = None
for dir_to_test in sorted(glob.glob(pattern), key=natural_keys, reverse=True):
if os.path.isdir(dir_to_test):
latest_dir = dir_to_test
break
return latest_dir
def get_tmp_dir(prefix="", base_dir=None):
global current_tmp_dir
if not base_dir:
base_dir = options_storage.tmp_dir
if not os.path.isdir(base_dir):
os.makedirs(base_dir)
current_tmp_dir = tempfile.mkdtemp(dir=base_dir, prefix=prefix)
return current_tmp_dir
### START for processing YAML files
def get_short_reads_type(option):
for short_reads_type in options_storage.SHORT_READS_TYPES.keys():
if option.startswith('--' + short_reads_type):
# additional check to avoid collisions with LONG_READS_TYPES, e.g. --s<#> and --sanger
if option[len('--' + short_reads_type):len('--' + short_reads_type) + 1].isdigit():
return short_reads_type
return None
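# A minimal behaviour sketch (assuming options_storage.SHORT_READS_TYPES contains
# the 'pe' and 's' prefixes):
#     get_short_reads_type("--pe1-1")   # -> 'pe'  (a digit follows the prefix)
#     get_short_reads_type("--sanger")  # -> None  ('a' after '--s' is not a digit)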
def get_long_reads_type(option):
for long_reads_type in options_storage.LONG_READS_TYPES:
if option.startswith('--') and option in ("--" + long_reads_type):
return long_reads_type
return None
def is_single_read_type(option):
return option.startswith('--s') and option[3:].isdigit()
def get_lib_type_and_number(option):
# defaults for simple -1, -2, -s, --12 options
lib_type = 'pe'
lib_number = 1
if get_short_reads_type(option):
lib_type = get_short_reads_type(option)
lib_number = int(option[re.search(r"\d", option).start()])
elif get_long_reads_type(option):
lib_type = get_long_reads_type(option)
return lib_type, lib_number
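# A minimal behaviour sketch (assuming 'mp' is one of the short reads types):
#     get_lib_type_and_number("--mp2-1")  # -> ('mp', 2)
#     get_lib_type_and_number("-1")       # -> ('pe', 1), the default for plain -1/-2/-s/--12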
def get_data_type(option):
if option.endswith('-12'):
data_type = 'interlaced reads'
elif option.endswith('-1'):
data_type = 'left reads'
elif option.endswith('-2'):
data_type = 'right reads'
elif option.endswith('-s') or is_single_read_type(option) or get_long_reads_type(option):
data_type = 'single reads'
else: # -rf, -ff, -fr
data_type = 'orientation'
return data_type
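# A minimal behaviour sketch (option strings are examples):
#     get_data_type("--pe1-12")  # -> 'interlaced reads'
#     get_data_type("--pe1-1")   # -> 'left reads'
#     get_data_type("--mp1-rf")  # -> 'orientation'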
def get_option_prefix(data):
prefix = None
if data.find(':') != -1 and ('.' + data[:data.find(':')]) in options_storage.ALLOWED_READS_EXTENSIONS:
prefix = data[:data.find(':')]
data = data[data.find(':') + 1:]
return data, prefix
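# A minimal behaviour sketch (assuming '.fq' is listed in
# options_storage.ALLOWED_READS_EXTENSIONS): a "prefix:file" argument is split into
# the file name and its declared format:
#     get_option_prefix("fq:reads.dat")  # -> ('reads.dat', 'fq')
#     get_option_prefix("reads.fq")      # -> ('reads.fq', None)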
def add_to_dataset(option, data, dataset_data):
lib_type, lib_number = get_lib_type_and_number(option)
data_type = get_data_type(option)
if data_type == 'orientation':
data = option[-2:]
if lib_type in options_storage.SHORT_READS_TYPES:
record_id = options_storage.MAX_LIBS_NUMBER * sorted(options_storage.SHORT_READS_TYPES.keys()).index(lib_type) \
+ lib_number - 1
elif lib_type in options_storage.LONG_READS_TYPES:
record_id = options_storage.MAX_LIBS_NUMBER * len(options_storage.SHORT_READS_TYPES.keys()) \
+ options_storage.LONG_READS_TYPES.index(lib_type)
else:
error("can't detect library type from option %s!" % option)
if not dataset_data[record_id]: # setting default values for a new record
if lib_type in options_storage.SHORT_READS_TYPES:
dataset_data[record_id]['type'] = options_storage.SHORT_READS_TYPES[lib_type]
else:
dataset_data[record_id]['type'] = lib_type
if data_type.endswith('reads'):
data, prefix = get_option_prefix(data)
if prefix:
options_storage.dict_of_prefixes[data] = '.' + prefix
if data_type in dataset_data[record_id]:
dataset_data[record_id][data_type].append(data)
else:
dataset_data[record_id][data_type] = [data]
else: # other values are stored as plain strings
dataset_data[record_id][data_type] = data
def correct_dataset(dataset_data):
# removing empty reads libraries
corrected_dataset_data = []
for reads_library in dataset_data:
if not reads_library:
continue
has_reads = False
has_paired_reads = False
for key in reads_library.keys():
if key.endswith('reads'):
has_reads = True
if key in ['interlaced reads', 'left reads', 'right reads']:
has_paired_reads = True
break
if not has_reads:
continue
if not has_paired_reads and reads_library['type'] == 'paired-end':
reads_library['type'] = 'single'
if 'orientation' in reads_library:
del reads_library['orientation']
if 'orientation' not in reads_library:
if reads_library['type'] == 'paired-end' or reads_library['type'] == 'hq-mate-pairs':
reads_library['orientation'] = 'fr'
elif reads_library['type'] == 'mate-pairs':
reads_library['orientation'] = 'rf'
corrected_dataset_data.append(reads_library)
return corrected_dataset_data
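# A minimal behaviour sketch (file name is made up): a 'paired-end' library that ended
# up with only single reads is downgraded to 'single' and gets no default orientation:
#     correct_dataset([{'type': 'paired-end', 'single reads': ['s.fq']}])
#     # -> [{'type': 'single', 'single reads': ['s.fq']}]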
def relative2abs_paths(dataset_data, dirname):
dirname = abspath(expanduser(dirname))
abs_paths_dataset_data = []
for reads_library in dataset_data:
for key, value in reads_library.items():
if key.endswith('reads'):
abs_paths_reads = []
for reads_file in value:
abs_path = abspath(join(dirname, expanduser(reads_file)))
options_storage.dict_of_rel2abs[reads_file] = abs_path
if reads_file in options_storage.dict_of_prefixes and abs_path != reads_file:
options_storage.dict_of_prefixes[abs_path] = options_storage.dict_of_prefixes[reads_file]
del options_storage.dict_of_prefixes[reads_file]
abs_paths_reads.append(abs_path)
reads_library[key] = abs_paths_reads
abs_paths_dataset_data.append(reads_library)
return abs_paths_dataset_data
def check_dataset_reads(dataset_data, only_assembler, log):
all_files = []
for id, reads_library in enumerate(dataset_data):
left_number = 0
right_number = 0
for key, value in reads_library.items():
if key.endswith('reads'):
for reads_file in value:
check_file_existence(reads_file, key + ', library number: ' + str(id + 1) +
', library type: ' + reads_library['type'], log)
check_reads_file_format(reads_file, key + ', library number: ' + str(id + 1) +
', library type: ' + reads_library['type'], only_assembler, reads_library['type'], log)
all_files.append(reads_file)
if key == 'left reads':
left_number = len(value)
elif key == 'right reads':
right_number = len(value)
if left_number != right_number:
error('the number of files with left paired reads is not equal to the number of files '
'with right paired reads (library number: ' + str(id + 1) +
', library type: ' + reads_library['type'] + ')!', log)
if not len(all_files):
error("You should specify at least one file with reads!", log)
check_files_duplication(all_files, log)
def check_single_reads_in_options(options, log):
only_old_style_options = True
old_style_single_reads = False
for option in options:
if option not in options_storage.reads_options:
continue
if option in options_storage.OLD_STYLE_READS_OPTIONS:
if option == '-s':
old_style_single_reads = True
else:
only_old_style_options = False
if not only_old_style_options and old_style_single_reads:
warning("It is recommended to specify single reads with --pe<#>-s, --mp<#>-s, --hqmp<#>-s, "
"or --s<#> option instead of -s!", log)
def get_lib_ids_by_type(dataset_data, types):
if type(types) is not list:
types = [types]
lib_ids = []
for id, reads_library in enumerate(dataset_data):
if reads_library['type'] in types:
lib_ids.append(id)
return lib_ids
def get_libs_by_type(dataset_data, types):
ids = get_lib_ids_by_type(dataset_data, types)
result = []
for id in ids:
result.append(dataset_data[id])
return result
def rm_libs_by_type(dataset_data, types):
ids = get_lib_ids_by_type(dataset_data, types)
for id in sorted(ids, reverse=True):
del dataset_data[id]
return dataset_data
def dataset_is_empty(dataset_data):
for reads_library in dataset_data:
if reads_library:
return False
return True
def dataset_has_gzipped_reads(dataset_data):
for reads_library in dataset_data:
for key in reads_library:
if key.endswith('reads'):
for reads_file in reads_library[key]:
if reads_file.endswith('.gz'):
return True
return False
def dataset_has_interlaced_reads(dataset_data):
for reads_library in dataset_data:
if 'interlaced reads' in reads_library:
return True
return False
def dataset_has_additional_contigs(dataset_data):
for reads_library in dataset_data:
if reads_library['type'].endswith('contigs'):
return True
return False
def dataset_has_nxmate_reads(dataset_data):
for reads_library in dataset_data:
if reads_library['type'] == 'nxmate':
return True
return False
def process_Ns_in_additional_contigs(dataset_data, dst, log):
new_dataset_data = list()
for reads_library in dataset_data:
new_reads_library = dict(reads_library)
if reads_library["type"].endswith("contigs"):
new_entry = []
for contigs in reads_library["single reads"]:
if contigs in options_storage.dict_of_prefixes:
ext = options_storage.dict_of_prefixes[contigs]
basename = contigs
else:
basename, ext = os.path.splitext(contigs)
gzipped = False
if ext.endswith('.gz'):
gzipped = True
if contigs not in options_storage.dict_of_prefixes:
basename, _ = os.path.splitext(basename)
modified, new_fasta = break_scaffolds(contigs, options_storage.THRESHOLD_FOR_BREAKING_ADDITIONAL_CONTIGS,
replace_char='A', gzipped=gzipped)
if modified:
if not os.path.isdir(dst):
os.makedirs(dst)
new_filename = os.path.join(dst, os.path.basename(basename) + '.fasta')
if contigs in options_storage.dict_of_prefixes:
del options_storage.dict_of_prefixes[contigs]
log.info("== Processing additional contigs (%s): changing Ns to As and "
"splitting by continues (>= %d) Ns fragments (results are in %s directory)" % (contigs,
options_storage.THRESHOLD_FOR_BREAKING_ADDITIONAL_CONTIGS, dst))
write_fasta(new_filename, new_fasta)
new_entry.append(new_filename)
else:
new_entry.append(contigs)
new_reads_library["single reads"] = new_entry
new_dataset_data.append(new_reads_library)
return new_dataset_data
def split_interlaced_reads(dataset_data, dst, log):
def write_single_read(in_file, out_file, read_name=None, is_fastq=False, is_python3=False):
if read_name is None:
read_name = process_readline(in_file.readline(), is_python3)
if not read_name:
return '' # no next read
read_value = process_readline(in_file.readline(), is_python3)
line = process_readline(in_file.readline(), is_python3)
fpos = in_file.tell()
while (is_fastq and not line.startswith('+')) or (not is_fastq and not line.startswith('>')):
read_value += line
line = process_readline(in_file.readline(), is_python3)
if not line:
if fpos == in_file.tell():
break
fpos = in_file.tell()
out_file.write(read_name + '\n')
out_file.write(read_value + '\n')
if is_fastq:
read_quality = process_readline(in_file.readline(), is_python3)
line = process_readline(in_file.readline(), is_python3)
while not line.startswith('@'):
read_quality += line
line = process_readline(in_file.readline(), is_python3)
if not line:
if fpos == in_file.tell():
break
fpos = in_file.tell()
if len(read_value) != len(read_quality):
error('The length of sequence and quality lines should be the same! '
'Check read %s (SEQ length is %d, QUAL length is %d)' %
(read_name, len(read_value), len(read_quality)), log)
out_file.write('+\n')
out_file.write(read_quality + '\n')
return line # next read name or empty string
new_dataset_data = list()
for reads_library in dataset_data:
new_reads_library = dict(reads_library)
for key, value in reads_library.items():
if key == 'interlaced reads':
if 'left reads' not in new_reads_library:
new_reads_library['left reads'] = []
new_reads_library['right reads'] = []
for interlaced_reads in value:
if interlaced_reads in options_storage.dict_of_prefixes:
ext = options_storage.dict_of_prefixes[interlaced_reads]
else:
ext = os.path.splitext(interlaced_reads)[1]
was_compressed = False
if ext.endswith('.gz'):
was_compressed = True
input_file = gzip.open(interlaced_reads, 'r')
ungzipped = os.path.splitext(interlaced_reads)[0]
out_basename, ext = os.path.splitext(os.path.basename(ungzipped))
else:
input_file = open(interlaced_reads, 'r')
out_basename, ext = os.path.splitext(os.path.basename(interlaced_reads))
if interlaced_reads in options_storage.dict_of_prefixes:
ext = options_storage.dict_of_prefixes[interlaced_reads]
if ext.lower().startswith('.fq') or ext.lower().startswith('.fastq'):
is_fastq = True
ext = '.fastq'
else:
is_fastq = False
ext = '.fasta'
out_left_filename = os.path.join(dst, out_basename + "_1" + ext)
out_right_filename = os.path.join(dst, out_basename + "_2" + ext)
if not (options_storage.continue_mode and os.path.isfile(out_left_filename) and os.path.isfile(out_right_filename)):
options_storage.continue_mode = False
log.info("== Splitting " + interlaced_reads + " into left and right reads (in " + dst + " directory)")
out_files = [open(out_left_filename, 'w'), open(out_right_filename, 'w')]
i = 0
next_read_name = write_single_read(input_file, out_files[i], None, is_fastq,
sys.version.startswith('3.') and was_compressed)
while next_read_name:
i = (i + 1) % 2
next_read_name = write_single_read(input_file, out_files[i], next_read_name, is_fastq,
sys.version.startswith('3.') and was_compressed)
if i == 0:
error("The number of reads in file with interlaced reads (" + interlaced_reads + ") should be EVEN!", log)
out_files[0].close()
out_files[1].close()
input_file.close()
new_reads_library['left reads'].append(out_left_filename)
new_reads_library['right reads'].append(out_right_filename)
if interlaced_reads in options_storage.dict_of_prefixes:
del options_storage.dict_of_prefixes[interlaced_reads]
del new_reads_library['interlaced reads']
new_dataset_data.append(new_reads_library)
return new_dataset_data
def process_nxmate_reads(dataset_data, dst, log):
try:
import lucigen_nxmate
new_dataset_data = list()
for reads_library in dataset_data:
new_reads_library = dict(reads_library)
if new_reads_library['type'] == 'nxmate':
raw_left_reads = new_reads_library['left reads']
raw_right_reads = new_reads_library['right reads']
new_reads_library['left reads'] = []
new_reads_library['right reads'] = []
new_reads_library['single reads'] = []
for id, left_reads_fpath in enumerate(raw_left_reads):
right_reads_fpath = raw_right_reads[id]
processed_left_reads_fpath, processed_right_reads_fpath, single_reads_fpath = \
lucigen_nxmate.process_reads(left_reads_fpath, right_reads_fpath, dst, log)
new_reads_library['left reads'].append(processed_left_reads_fpath)
new_reads_library['right reads'].append(processed_right_reads_fpath)
new_reads_library['single reads'].append(single_reads_fpath)
new_reads_library['type'] = 'mate-pairs'
new_reads_library['orientation'] = 'fr'
new_dataset_data.append(new_reads_library)
return new_dataset_data
except ImportError:
error("Can't process Lucigen NxMate reads! lucigen_nxmate.py is missing!", log)
def pretty_print_reads(dataset_data, log, indent=' '):
READS_TYPES = ['left reads', 'right reads', 'interlaced reads', 'single reads']
for id, reads_library in enumerate(dataset_data):
log.info(indent + 'Library number: ' + str(id + 1) + ', library type: ' + reads_library['type'])
if 'orientation' in reads_library:
log.info(indent + ' orientation: ' + reads_library['orientation'])
for reads_type in READS_TYPES:
if reads_type not in reads_library:
value = 'not specified'
else:
value = str(reads_library[reads_type])
log.info(indent + ' ' + reads_type + ': ' + value)
### END: for processing YAML files
def read_fasta(filename, gzipped=False):
res_name = []
res_seq = []
first = True
seq = ''
if gzipped:
file_handler = gzip.open(filename)
else:
file_handler = open(filename)
for line in file_handler:
line = process_readline(line, gzipped and sys.version.startswith('3.'))
if not line:
continue
if line[0] == '>':
res_name.append(line.strip())
if not first:
res_seq.append(seq)
else:
first = False
seq = ''
else:
seq += line.strip()
res_seq.append(seq)
file_handler.close()
return zip(res_name, res_seq)
def write_fasta(filename, fasta):
outfile = open(filename, 'w')
for name, seq in fasta:
outfile.write(name + '\n')
for i in range(0, len(seq), 60):
outfile.write(seq[i : i + 60] + '\n')
outfile.close()
def break_scaffolds(input_filename, threshold, replace_char="N", gzipped=False):
new_fasta = []
modified = False
for id, (name, seq) in enumerate(read_fasta(input_filename, gzipped)):
i = 0
cur_contig_number = 1
cur_contig_start = 0
while (i < len(seq)) and (seq.find("N", i) != -1):
if replace_char != "N":
modified = True
start = seq.find("N", i)
end = start + 1
while (end != len(seq)) and (seq[end] == "N"):
end += 1
i = end + 1
if (end - start) >= threshold:
modified = True
if cur_contig_start != start:
new_fasta.append((name.split()[0] + "_" + str(cur_contig_number) + " " + " ".join(name.split()[1:]),
seq[cur_contig_start:start].replace("N", replace_char)))
cur_contig_number += 1
cur_contig_start = end
if cur_contig_start < len(seq):
new_fasta.append((name.split()[0] + "_" + str(cur_contig_number) + " " + " ".join(name.split()[1:]),
seq[cur_contig_start:].replace("N", replace_char)))
return modified, new_fasta
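# A minimal behaviour sketch (sequence is made up): with threshold=10 and
# replace_char='A', a FASTA record '>x' whose sequence is 'ACGT' + 'N' * 10 + 'ACGT'
# is split into two contigs named '>x_1' and '>x_2', both with sequence 'ACGT', and
# the function returns modified=True; runs of Ns shorter than the threshold are
# simply replaced by 'A' without splitting the record.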
def comp(letter):
return {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C', 'N': 'N'}[letter.upper()]
def rev_comp(seq):
return ''.join(map(comp, seq[::-1]))  # built-in map() works on both Python 2 and 3
def get_contig_id(s):
values = s.split("_")
if len(values) < 2 or (values[0] != ">NODE" and values[0] != "NODE"):
warning("Contig %s has unknown ID format" % (s))
return None
if s.find("'") != -1:
return (values[1] + "'")
return values[1]
def remove_fasta_pref(s):
if s.startswith(">"):
return s[1:]
return s
def is_float(value):
try:
float(value)
return True
except ValueError:
return False
def is_int(value):
try:
int(value)
return True
except ValueError:
return False
| INNUENDOWEB/INNUca | src/SPAdes-3.10.1-Linux/share/spades/spades_pipeline/support.py | Python | gpl-3.0 | 37,437 | ["BWA"] | b8ad5c581dd113ba5dff491e8d5251dd5d4eada9d2f1f3fef088d29b4097ca34 |
# proxy module
from __future__ import absolute_import
from mayavi.modules.glyph import *
| enthought/etsproxy | enthought/mayavi/modules/glyph.py | Python | bsd-3-clause | 89 | ["Mayavi"] | f8ee516a7c17e4a2605475412efd5c6beaf18f5c0d61f9ae7df7326977b22427 |
from sqlalchemy.sql import table, column, ClauseElement, operators
from sqlalchemy.sql.expression import _clone, _from_objects
from sqlalchemy import func, select, Integer, Table, \
Column, MetaData, extract, String, bindparam, tuple_, and_, union, text,\
case, ForeignKey, literal_column
from sqlalchemy.testing import fixtures, AssertsExecutionResults, \
AssertsCompiledSQL
from sqlalchemy import testing
from sqlalchemy.sql.visitors import ClauseVisitor, CloningVisitor, \
cloned_traverse, ReplacingCloningVisitor
from sqlalchemy import exc
from sqlalchemy.sql import util as sql_util
from sqlalchemy.testing import eq_, is_, assert_raises, assert_raises_message
A = B = t1 = t2 = t3 = table1 = table2 = table3 = table4 = None
class TraversalTest(fixtures.TestBase, AssertsExecutionResults):
"""test ClauseVisitor's traversal, particularly its
ability to copy and modify a ClauseElement in place."""
@classmethod
def setup_class(cls):
global A, B
# establish two fictitious ClauseElements.
# define deep equality semantics as well as deep
# identity semantics.
class A(ClauseElement):
__visit_name__ = 'a'
def __init__(self, expr):
self.expr = expr
def is_other(self, other):
return other is self
__hash__ = ClauseElement.__hash__
def __eq__(self, other):
return other.expr == self.expr
def __ne__(self, other):
return other.expr != self.expr
def __str__(self):
return "A(%s)" % repr(self.expr)
class B(ClauseElement):
__visit_name__ = 'b'
def __init__(self, *items):
self.items = items
def is_other(self, other):
if other is not self:
return False
for i1, i2 in zip(self.items, other.items):
if i1 is not i2:
return False
return True
__hash__ = ClauseElement.__hash__
def __eq__(self, other):
for i1, i2 in zip(self.items, other.items):
if i1 != i2:
return False
return True
def __ne__(self, other):
for i1, i2 in zip(self.items, other.items):
if i1 != i2:
return True
return False
def _copy_internals(self, clone=_clone):
self.items = [clone(i) for i in self.items]
def get_children(self, **kwargs):
return self.items
def __str__(self):
return "B(%s)" % repr([str(i) for i in self.items])
def test_test_classes(self):
a1 = A("expr1")
struct = B(a1, A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3"))
struct2 = B(a1, A("expr2"), B(A("expr1b"), A("expr2b")), A("expr3"))
struct3 = B(a1, A("expr2"), B(A("expr1b"),
A("expr2bmodified")), A("expr3"))
assert a1.is_other(a1)
assert struct.is_other(struct)
assert struct == struct2
assert struct != struct3
assert not struct.is_other(struct2)
assert not struct.is_other(struct3)
def test_clone(self):
struct = B(A("expr1"), A("expr2"), B(A("expr1b"),
A("expr2b")), A("expr3"))
class Vis(CloningVisitor):
def visit_a(self, a):
pass
def visit_b(self, b):
pass
vis = Vis()
s2 = vis.traverse(struct)
assert struct == s2
assert not struct.is_other(s2)
def test_no_clone(self):
struct = B(A("expr1"), A("expr2"), B(A("expr1b"),
A("expr2b")), A("expr3"))
class Vis(ClauseVisitor):
def visit_a(self, a):
pass
def visit_b(self, b):
pass
vis = Vis()
s2 = vis.traverse(struct)
assert struct == s2
assert struct.is_other(s2)
def test_clone_anon_label(self):
from sqlalchemy.sql.elements import Grouping
c1 = Grouping(literal_column('q'))
s1 = select([c1])
class Vis(CloningVisitor):
def visit_grouping(self, elem):
pass
vis = Vis()
s2 = vis.traverse(s1)
eq_(list(s2.inner_columns)[0].anon_label, c1.anon_label)
def test_change_in_place(self):
struct = B(A("expr1"), A("expr2"), B(A("expr1b"),
A("expr2b")), A("expr3"))
struct2 = B(A("expr1"), A("expr2modified"), B(A("expr1b"),
A("expr2b")), A("expr3"))
struct3 = B(A("expr1"), A("expr2"), B(A("expr1b"),
A("expr2bmodified")), A("expr3"))
class Vis(CloningVisitor):
def visit_a(self, a):
if a.expr == "expr2":
a.expr = "expr2modified"
def visit_b(self, b):
pass
vis = Vis()
s2 = vis.traverse(struct)
assert struct != s2
assert not struct.is_other(s2)
assert struct2 == s2
class Vis2(CloningVisitor):
def visit_a(self, a):
if a.expr == "expr2b":
a.expr = "expr2bmodified"
def visit_b(self, b):
pass
vis2 = Vis2()
s3 = vis2.traverse(struct)
assert struct != s3
assert struct3 == s3
def test_visit_name(self):
# override fns in testlib/schema.py
from sqlalchemy import Column
class CustomObj(Column):
pass
assert CustomObj.__visit_name__ == Column.__visit_name__ == 'column'
foo, bar = CustomObj('foo', String), CustomObj('bar', String)
bin = foo == bar
set(ClauseVisitor().iterate(bin))
assert set(ClauseVisitor().iterate(bin)) == set([foo, bar, bin])
class BinaryEndpointTraversalTest(fixtures.TestBase):
"""test the special binary product visit"""
def _assert_traversal(self, expr, expected):
canary = []
def visit(binary, l, r):
canary.append((binary.operator, l, r))
print(binary.operator, l, r)
sql_util.visit_binary_product(visit, expr)
eq_(
canary, expected
)
def test_basic(self):
a, b = column("a"), column("b")
self._assert_traversal(
a == b,
[
(operators.eq, a, b)
]
)
def test_with_tuples(self):
a, b, c, d, b1, b1a, b1b, e, f = (
column("a"),
column("b"),
column("c"),
column("d"),
column("b1"),
column("b1a"),
column("b1b"),
column("e"),
column("f")
)
expr = tuple_(
a, b, b1 == tuple_(b1a, b1b == d), c
) > tuple_(
func.go(e + f)
)
self._assert_traversal(
expr,
[
(operators.gt, a, e),
(operators.gt, a, f),
(operators.gt, b, e),
(operators.gt, b, f),
(operators.eq, b1, b1a),
(operators.eq, b1b, d),
(operators.gt, c, e),
(operators.gt, c, f)
]
)
def test_composed(self):
a, b, e, f, q, j, r = (
column("a"),
column("b"),
column("e"),
column("f"),
column("q"),
column("j"),
column("r"),
)
expr = and_(
(a + b) == q + func.sum(e + f),
and_(
j == r,
f == q
)
)
self._assert_traversal(
expr,
[
(operators.eq, a, q),
(operators.eq, a, e),
(operators.eq, a, f),
(operators.eq, b, q),
(operators.eq, b, e),
(operators.eq, b, f),
(operators.eq, j, r),
(operators.eq, f, q),
]
)
def test_subquery(self):
a, b, c = column("a"), column("b"), column("c")
subq = select([c]).where(c == a).as_scalar()
expr = and_(a == b, b == subq)
self._assert_traversal(
expr,
[
(operators.eq, a, b),
(operators.eq, b, subq),
]
)
class ClauseTest(fixtures.TestBase, AssertsCompiledSQL):
"""test copy-in-place behavior of various ClauseElements."""
__dialect__ = 'default'
@classmethod
def setup_class(cls):
global t1, t2, t3
t1 = table("table1",
column("col1"),
column("col2"),
column("col3"),
)
t2 = table("table2",
column("col1"),
column("col2"),
column("col3"),
)
t3 = Table('table3', MetaData(),
Column('col1', Integer),
Column('col2', Integer)
)
def test_binary(self):
clause = t1.c.col2 == t2.c.col2
eq_(str(clause), str(CloningVisitor().traverse(clause)))
def test_binary_anon_label_quirk(self):
t = table('t1', column('col1'))
f = t.c.col1 * 5
self.assert_compile(select([f]),
"SELECT t1.col1 * :col1_1 AS anon_1 FROM t1")
f.anon_label
a = t.alias()
f = sql_util.ClauseAdapter(a).traverse(f)
self.assert_compile(
select(
[f]),
"SELECT t1_1.col1 * :col1_1 AS anon_1 FROM t1 AS t1_1")
def test_join(self):
clause = t1.join(t2, t1.c.col2 == t2.c.col2)
c1 = str(clause)
assert str(clause) == str(CloningVisitor().traverse(clause))
class Vis(CloningVisitor):
def visit_binary(self, binary):
binary.right = t2.c.col3
clause2 = Vis().traverse(clause)
assert c1 == str(clause)
assert str(clause2) == str(t1.join(t2, t1.c.col2 == t2.c.col3))
def test_aliased_column_adapt(self):
clause = t1.select()
aliased = t1.select().alias()
aliased2 = t1.alias()
adapter = sql_util.ColumnAdapter(aliased)
f = select([
adapter.columns[c]
for c in aliased2.c
]).select_from(aliased)
s = select([aliased2]).select_from(aliased)
eq_(str(s), str(f))
f = select([
adapter.columns[func.count(aliased2.c.col1)]
]).select_from(aliased)
eq_(
str(select([func.count(aliased2.c.col1)]).select_from(aliased)),
str(f)
)
def test_aliased_cloned_column_adapt_inner(self):
clause = select([t1.c.col1, func.foo(t1.c.col2).label('foo')])
aliased1 = select([clause.c.col1, clause.c.foo])
aliased2 = clause
aliased2.c.col1, aliased2.c.foo
aliased3 = cloned_traverse(aliased2, {}, {})
# fixed by [ticket:2419]. the inside columns
# on aliased3 have _is_clone_of pointers to those of
# aliased2. corresponding_column checks these
# now.
adapter = sql_util.ColumnAdapter(aliased1)
f1 = select([
adapter.columns[c]
for c in aliased2._raw_columns
])
f2 = select([
adapter.columns[c]
for c in aliased3._raw_columns
])
eq_(
str(f1), str(f2)
)
def test_aliased_cloned_column_adapt_exported(self):
clause = select([t1.c.col1, func.foo(t1.c.col2).label('foo')])
aliased1 = select([clause.c.col1, clause.c.foo])
aliased2 = clause
aliased2.c.col1, aliased2.c.foo
aliased3 = cloned_traverse(aliased2, {}, {})
# also fixed by [ticket:2419]. When we look at the
# *outside* columns of aliased3, they previously did not
# have an _is_clone_of pointer. But we now modified _make_proxy
# to assign this.
adapter = sql_util.ColumnAdapter(aliased1)
f1 = select([
adapter.columns[c]
for c in aliased2.c
])
f2 = select([
adapter.columns[c]
for c in aliased3.c
])
eq_(
str(f1), str(f2)
)
def test_aliased_cloned_schema_column_adapt_exported(self):
clause = select([t3.c.col1, func.foo(t3.c.col2).label('foo')])
aliased1 = select([clause.c.col1, clause.c.foo])
aliased2 = clause
aliased2.c.col1, aliased2.c.foo
aliased3 = cloned_traverse(aliased2, {}, {})
# also fixed by [ticket:2419]. When we look at the
# *outside* columns of aliased3, they previously did not
# have an _is_clone_of pointer. But we now modified _make_proxy
# to assign this.
adapter = sql_util.ColumnAdapter(aliased1)
f1 = select([
adapter.columns[c]
for c in aliased2.c
])
f2 = select([
adapter.columns[c]
for c in aliased3.c
])
eq_(
str(f1), str(f2)
)
def test_text(self):
clause = text(
"select * from table where foo=:bar",
bindparams=[bindparam('bar')])
c1 = str(clause)
class Vis(CloningVisitor):
def visit_textclause(self, text):
text.text = text.text + " SOME MODIFIER=:lala"
text._bindparams['lala'] = bindparam('lala')
clause2 = Vis().traverse(clause)
assert c1 == str(clause)
assert str(clause2) == c1 + " SOME MODIFIER=:lala"
assert list(clause._bindparams.keys()) == ['bar']
assert set(clause2._bindparams.keys()) == set(['bar', 'lala'])
def test_select(self):
s2 = select([t1])
s2_assert = str(s2)
s3_assert = str(select([t1], t1.c.col2 == 7))
class Vis(CloningVisitor):
def visit_select(self, select):
select.append_whereclause(t1.c.col2 == 7)
s3 = Vis().traverse(s2)
assert str(s3) == s3_assert
assert str(s2) == s2_assert
print(str(s2))
print(str(s3))
class Vis(ClauseVisitor):
def visit_select(self, select):
select.append_whereclause(t1.c.col2 == 7)
Vis().traverse(s2)
assert str(s2) == s3_assert
s4_assert = str(select([t1], and_(t1.c.col2 == 7, t1.c.col3 == 9)))
class Vis(CloningVisitor):
def visit_select(self, select):
select.append_whereclause(t1.c.col3 == 9)
s4 = Vis().traverse(s3)
print(str(s3))
print(str(s4))
assert str(s4) == s4_assert
assert str(s3) == s3_assert
s5_assert = str(select([t1], and_(t1.c.col2 == 7, t1.c.col1 == 9)))
class Vis(CloningVisitor):
def visit_binary(self, binary):
if binary.left is t1.c.col3:
binary.left = t1.c.col1
binary.right = bindparam("col1", unique=True)
s5 = Vis().traverse(s4)
print(str(s4))
print(str(s5))
assert str(s5) == s5_assert
assert str(s4) == s4_assert
def test_union(self):
u = union(t1.select(), t2.select())
u2 = CloningVisitor().traverse(u)
assert str(u) == str(u2)
assert [str(c) for c in u2.c] == [str(c) for c in u.c]
u = union(t1.select(), t2.select())
cols = [str(c) for c in u.c]
u2 = CloningVisitor().traverse(u)
assert str(u) == str(u2)
assert [str(c) for c in u2.c] == cols
s1 = select([t1], t1.c.col1 == bindparam('id_param'))
s2 = select([t2])
u = union(s1, s2)
u2 = u.params(id_param=7)
u3 = u.params(id_param=10)
assert str(u) == str(u2) == str(u3)
assert u2.compile().params == {'id_param': 7}
assert u3.compile().params == {'id_param': 10}
def test_in(self):
expr = t1.c.col1.in_(['foo', 'bar'])
expr2 = CloningVisitor().traverse(expr)
assert str(expr) == str(expr2)
def test_over(self):
expr = func.row_number().over(order_by=t1.c.col1)
expr2 = CloningVisitor().traverse(expr)
assert str(expr) == str(expr2)
def test_adapt_union(self):
u = union(
t1.select().where(t1.c.col1 == 4),
t1.select().where(t1.c.col1 == 5)
).alias()
assert sql_util.ClauseAdapter(u).traverse(t1) is u
def test_binds(self):
"""test that unique bindparams change their name upon clone()
to prevent conflicts"""
s = select([t1], t1.c.col1 == bindparam(None, unique=True)).alias()
s2 = CloningVisitor().traverse(s).alias()
s3 = select([s], s.c.col2 == s2.c.col2)
self.assert_compile(
s3, "SELECT anon_1.col1, anon_1.col2, anon_1.col3 FROM "
"(SELECT table1.col1 AS col1, table1.col2 AS col2, "
"table1.col3 AS col3 FROM table1 WHERE table1.col1 = :param_1) "
"AS anon_1, "
"(SELECT table1.col1 AS col1, table1.col2 AS col2, table1.col3 "
"AS col3 FROM table1 WHERE table1.col1 = :param_2) AS anon_2 "
"WHERE anon_1.col2 = anon_2.col2")
s = select([t1], t1.c.col1 == 4).alias()
s2 = CloningVisitor().traverse(s).alias()
s3 = select([s], s.c.col2 == s2.c.col2)
self.assert_compile(
s3, "SELECT anon_1.col1, anon_1.col2, anon_1.col3 FROM "
"(SELECT table1.col1 AS col1, table1.col2 AS col2, "
"table1.col3 AS col3 FROM table1 WHERE table1.col1 = :col1_1) "
"AS anon_1, "
"(SELECT table1.col1 AS col1, table1.col2 AS col2, table1.col3 "
"AS col3 FROM table1 WHERE table1.col1 = :col1_2) AS anon_2 "
"WHERE anon_1.col2 = anon_2.col2")
def test_extract(self):
s = select([extract('foo', t1.c.col1).label('col1')])
self.assert_compile(
s,
"SELECT EXTRACT(foo FROM table1.col1) AS col1 FROM table1")
s2 = CloningVisitor().traverse(s).alias()
s3 = select([s2.c.col1])
self.assert_compile(
s,
"SELECT EXTRACT(foo FROM table1.col1) AS col1 FROM table1")
self.assert_compile(s3,
"SELECT anon_1.col1 FROM (SELECT EXTRACT(foo FROM "
"table1.col1) AS col1 FROM table1) AS anon_1")
@testing.emits_warning('.*replaced by another column with the same key')
def test_alias(self):
subq = t2.select().alias('subq')
s = select([t1.c.col1, subq.c.col1],
from_obj=[t1, subq,
t1.join(subq, t1.c.col1 == subq.c.col2)]
)
orig = str(s)
s2 = CloningVisitor().traverse(s)
assert orig == str(s) == str(s2)
s4 = CloningVisitor().traverse(s2)
assert orig == str(s) == str(s2) == str(s4)
s3 = sql_util.ClauseAdapter(table('foo')).traverse(s)
assert orig == str(s) == str(s3)
s4 = sql_util.ClauseAdapter(table('foo')).traverse(s3)
assert orig == str(s) == str(s3) == str(s4)
subq = subq.alias('subq')
s = select([t1.c.col1, subq.c.col1],
from_obj=[t1, subq,
t1.join(subq, t1.c.col1 == subq.c.col2)]
)
s5 = CloningVisitor().traverse(s)
assert orig == str(s) == str(s5)
def test_correlated_select(self):
s = select(['*'], t1.c.col1 == t2.c.col1,
from_obj=[t1, t2]).correlate(t2)
class Vis(CloningVisitor):
def visit_select(self, select):
select.append_whereclause(t1.c.col2 == 7)
self.assert_compile(
select([t2]).where(t2.c.col1 == Vis().traverse(s)),
"SELECT table2.col1, table2.col2, table2.col3 "
"FROM table2 WHERE table2.col1 = "
"(SELECT * FROM table1 WHERE table1.col1 = table2.col1 "
"AND table1.col2 = :col2_1)"
)
def test_this_thing(self):
s = select([t1]).where(t1.c.col1 == 'foo').alias()
s2 = select([s.c.col1])
self.assert_compile(s2,
'SELECT anon_1.col1 FROM (SELECT '
'table1.col1 AS col1, table1.col2 AS col2, '
'table1.col3 AS col3 FROM table1 WHERE '
'table1.col1 = :col1_1) AS anon_1')
t1a = t1.alias()
s2 = sql_util.ClauseAdapter(t1a).traverse(s2)
self.assert_compile(s2,
'SELECT anon_1.col1 FROM (SELECT '
'table1_1.col1 AS col1, table1_1.col2 AS '
'col2, table1_1.col3 AS col3 FROM table1 '
'AS table1_1 WHERE table1_1.col1 = '
':col1_1) AS anon_1')
def test_select_fromtwice_one(self):
t1a = t1.alias()
s = select([1], t1.c.col1 == t1a.c.col1, from_obj=t1a).correlate(t1a)
s = select([t1]).where(t1.c.col1 == s)
self.assert_compile(
s, "SELECT table1.col1, table1.col2, table1.col3 FROM table1 "
"WHERE table1.col1 = "
"(SELECT 1 FROM table1, table1 AS table1_1 "
"WHERE table1.col1 = table1_1.col1)")
s = CloningVisitor().traverse(s)
self.assert_compile(
s, "SELECT table1.col1, table1.col2, table1.col3 FROM table1 "
"WHERE table1.col1 = "
"(SELECT 1 FROM table1, table1 AS table1_1 "
"WHERE table1.col1 = table1_1.col1)")
def test_select_fromtwice_two(self):
s = select([t1]).where(t1.c.col1 == 'foo').alias()
s2 = select([1], t1.c.col1 == s.c.col1, from_obj=s).correlate(t1)
s3 = select([t1]).where(t1.c.col1 == s2)
self.assert_compile(
s3, "SELECT table1.col1, table1.col2, table1.col3 "
"FROM table1 WHERE table1.col1 = "
"(SELECT 1 FROM "
"(SELECT table1.col1 AS col1, table1.col2 AS col2, "
"table1.col3 AS col3 FROM table1 "
"WHERE table1.col1 = :col1_1) "
"AS anon_1 WHERE table1.col1 = anon_1.col1)")
s4 = ReplacingCloningVisitor().traverse(s3)
self.assert_compile(
s4, "SELECT table1.col1, table1.col2, table1.col3 "
"FROM table1 WHERE table1.col1 = "
"(SELECT 1 FROM "
"(SELECT table1.col1 AS col1, table1.col2 AS col2, "
"table1.col3 AS col3 FROM table1 "
"WHERE table1.col1 = :col1_1) "
"AS anon_1 WHERE table1.col1 = anon_1.col1)")
class ClauseAdapterTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
@classmethod
def setup_class(cls):
global t1, t2
t1 = table("table1",
column("col1"),
column("col2"),
column("col3"),
)
t2 = table("table2",
column("col1"),
column("col2"),
column("col3"),
)
def test_correlation_on_clone(self):
t1alias = t1.alias('t1alias')
t2alias = t2.alias('t2alias')
vis = sql_util.ClauseAdapter(t1alias)
s = select(['*'], from_obj=[t1alias, t2alias]).as_scalar()
assert t2alias in s._froms
assert t1alias in s._froms
self.assert_compile(select(['*'], t2alias.c.col1 == s),
'SELECT * FROM table2 AS t2alias WHERE '
't2alias.col1 = (SELECT * FROM table1 AS '
't1alias)')
s = vis.traverse(s)
assert t2alias not in s._froms # not present because it's been
# cloned
assert t1alias in s._froms # present because the adapter placed
# it there
# correlate list on "s" needs to take into account the full
# _cloned_set for each element in _froms when correlating
self.assert_compile(select(['*'], t2alias.c.col1 == s),
'SELECT * FROM table2 AS t2alias WHERE '
't2alias.col1 = (SELECT * FROM table1 AS '
't1alias)')
s = select(['*'], from_obj=[t1alias,
t2alias]).correlate(t2alias).as_scalar()
self.assert_compile(select(['*'], t2alias.c.col1 == s),
'SELECT * FROM table2 AS t2alias WHERE '
't2alias.col1 = (SELECT * FROM table1 AS '
't1alias)')
s = vis.traverse(s)
self.assert_compile(select(['*'], t2alias.c.col1 == s),
'SELECT * FROM table2 AS t2alias WHERE '
't2alias.col1 = (SELECT * FROM table1 AS '
't1alias)')
s = CloningVisitor().traverse(s)
self.assert_compile(select(['*'], t2alias.c.col1 == s),
'SELECT * FROM table2 AS t2alias WHERE '
't2alias.col1 = (SELECT * FROM table1 AS '
't1alias)')
s = select(['*']).where(t1.c.col1 == t2.c.col1).as_scalar()
self.assert_compile(select([t1.c.col1, s]),
'SELECT table1.col1, (SELECT * FROM table2 '
'WHERE table1.col1 = table2.col1) AS '
'anon_1 FROM table1')
vis = sql_util.ClauseAdapter(t1alias)
s = vis.traverse(s)
self.assert_compile(select([t1alias.c.col1, s]),
'SELECT t1alias.col1, (SELECT * FROM '
'table2 WHERE t1alias.col1 = table2.col1) '
'AS anon_1 FROM table1 AS t1alias')
s = CloningVisitor().traverse(s)
self.assert_compile(select([t1alias.c.col1, s]),
'SELECT t1alias.col1, (SELECT * FROM '
'table2 WHERE t1alias.col1 = table2.col1) '
'AS anon_1 FROM table1 AS t1alias')
s = select(['*']).where(t1.c.col1
== t2.c.col1).correlate(t1).as_scalar()
self.assert_compile(select([t1.c.col1, s]),
'SELECT table1.col1, (SELECT * FROM table2 '
'WHERE table1.col1 = table2.col1) AS '
'anon_1 FROM table1')
vis = sql_util.ClauseAdapter(t1alias)
s = vis.traverse(s)
self.assert_compile(select([t1alias.c.col1, s]),
'SELECT t1alias.col1, (SELECT * FROM '
'table2 WHERE t1alias.col1 = table2.col1) '
'AS anon_1 FROM table1 AS t1alias')
s = CloningVisitor().traverse(s)
self.assert_compile(select([t1alias.c.col1, s]),
'SELECT t1alias.col1, (SELECT * FROM '
'table2 WHERE t1alias.col1 = table2.col1) '
'AS anon_1 FROM table1 AS t1alias')
@testing.fails_on_everything_except()
def test_joins_dont_adapt(self):
# adapting to a join, i.e. ClauseAdapter(t1.join(t2)), doesn't
# make much sense. ClauseAdapter doesn't make any changes if
# it's against a straight join.
users = table('users', column('id'))
addresses = table('addresses', column('id'), column('user_id'))
ualias = users.alias()
s = select([func.count(addresses.c.id)], users.c.id
== addresses.c.user_id).correlate(users)
s = sql_util.ClauseAdapter(ualias).traverse(s)
j1 = addresses.join(ualias, addresses.c.user_id == ualias.c.id)
self.assert_compile(sql_util.ClauseAdapter(j1).traverse(s),
'SELECT count(addresses.id) AS count_1 '
'FROM addresses WHERE users_1.id = '
'addresses.user_id')
def test_table_to_alias_1(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
ff = vis.traverse(func.count(t1.c.col1).label('foo'))
assert list(_from_objects(ff)) == [t1alias]
def test_table_to_alias_2(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(vis.traverse(select(['*'], from_obj=[t1])),
'SELECT * FROM table1 AS t1alias')
def test_table_to_alias_3(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(select(['*'], t1.c.col1 == t2.c.col2),
'SELECT * FROM table1, table2 WHERE '
'table1.col1 = table2.col2')
def test_table_to_alias_4(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(vis.traverse(select(['*'], t1.c.col1
== t2.c.col2)),
'SELECT * FROM table1 AS t1alias, table2 '
'WHERE t1alias.col1 = table2.col2')
def test_table_to_alias_5(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(
vis.traverse(
select(
['*'],
t1.c.col1 == t2.c.col2,
from_obj=[
t1,
t2])),
'SELECT * FROM table1 AS t1alias, table2 '
'WHERE t1alias.col1 = table2.col2')
def test_table_to_alias_6(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(
select([t1alias, t2]).where(
t1alias.c.col1 == vis.traverse(
select(['*'], t1.c.col1 == t2.c.col2, from_obj=[t1, t2]).
correlate(t1)
)
),
"SELECT t1alias.col1, t1alias.col2, t1alias.col3, "
"table2.col1, table2.col2, table2.col3 "
"FROM table1 AS t1alias, table2 WHERE t1alias.col1 = "
"(SELECT * FROM table2 WHERE t1alias.col1 = table2.col2)"
)
def test_table_to_alias_7(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(
select([t1alias, t2]).
where(t1alias.c.col1 == vis.traverse(
select(['*'], t1.c.col1 == t2.c.col2, from_obj=[t1, t2]).
correlate(t2))),
"SELECT t1alias.col1, t1alias.col2, t1alias.col3, "
"table2.col1, table2.col2, table2.col3 "
"FROM table1 AS t1alias, table2 "
"WHERE t1alias.col1 = "
"(SELECT * FROM table1 AS t1alias "
"WHERE t1alias.col1 = table2.col2)")
def test_table_to_alias_8(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(
vis.traverse(case([(t1.c.col1 == 5, t1.c.col2)], else_=t1.c.col1)),
'CASE WHEN (t1alias.col1 = :col1_1) THEN '
't1alias.col2 ELSE t1alias.col1 END')
def test_table_to_alias_9(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(
vis.traverse(
case(
[
(5,
t1.c.col2)],
value=t1.c.col1,
else_=t1.c.col1)),
'CASE t1alias.col1 WHEN :param_1 THEN '
't1alias.col2 ELSE t1alias.col1 END')
def test_table_to_alias_10(self):
s = select(['*'], from_obj=[t1]).alias('foo')
self.assert_compile(s.select(),
'SELECT foo.* FROM (SELECT * FROM table1) '
'AS foo')
def test_table_to_alias_11(self):
s = select(['*'], from_obj=[t1]).alias('foo')
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(vis.traverse(s.select()),
'SELECT foo.* FROM (SELECT * FROM table1 '
'AS t1alias) AS foo')
def test_table_to_alias_12(self):
s = select(['*'], from_obj=[t1]).alias('foo')
self.assert_compile(s.select(),
'SELECT foo.* FROM (SELECT * FROM table1) '
'AS foo')
def test_table_to_alias_13(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
ff = vis.traverse(func.count(t1.c.col1).label('foo'))
self.assert_compile(select([ff]),
'SELECT count(t1alias.col1) AS foo FROM '
'table1 AS t1alias')
assert list(_from_objects(ff)) == [t1alias]
# def test_table_to_alias_2(self):
# TODO: self.assert_compile(vis.traverse(select([func.count(t1.c.col1).label('foo')]),
#       clone=True), "SELECT count(t1alias.col1) AS foo FROM table1 AS t1alias")
def test_table_to_alias_14(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
t2alias = t2.alias('t2alias')
vis.chain(sql_util.ClauseAdapter(t2alias))
self.assert_compile(vis.traverse(select(['*'], t1.c.col1
== t2.c.col2)),
'SELECT * FROM table1 AS t1alias, table2 '
'AS t2alias WHERE t1alias.col1 = '
't2alias.col2')
def test_table_to_alias_15(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
t2alias = t2.alias('t2alias')
vis.chain(sql_util.ClauseAdapter(t2alias))
self.assert_compile(
vis.traverse(
select(
['*'],
t1.c.col1 == t2.c.col2,
from_obj=[
t1,
t2])),
'SELECT * FROM table1 AS t1alias, table2 '
'AS t2alias WHERE t1alias.col1 = '
't2alias.col2')
def test_table_to_alias_16(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
t2alias = t2.alias('t2alias')
vis.chain(sql_util.ClauseAdapter(t2alias))
self.assert_compile(
select([t1alias, t2alias]).where(
t1alias.c.col1 ==
vis.traverse(select(['*'],
t1.c.col1 == t2.c.col2,
from_obj=[t1, t2]).correlate(t1))
),
"SELECT t1alias.col1, t1alias.col2, t1alias.col3, "
"t2alias.col1, t2alias.col2, t2alias.col3 "
"FROM table1 AS t1alias, table2 AS t2alias "
"WHERE t1alias.col1 = "
"(SELECT * FROM table2 AS t2alias "
"WHERE t1alias.col1 = t2alias.col2)"
)
def test_table_to_alias_17(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
t2alias = t2.alias('t2alias')
vis.chain(sql_util.ClauseAdapter(t2alias))
self.assert_compile(
t2alias.select().where(
t2alias.c.col2 == vis.traverse(
select(
['*'],
t1.c.col1 == t2.c.col2,
from_obj=[
t1,
t2]).correlate(t2))),
'SELECT t2alias.col1, t2alias.col2, t2alias.col3 '
'FROM table2 AS t2alias WHERE t2alias.col2 = '
'(SELECT * FROM table1 AS t1alias WHERE '
't1alias.col1 = t2alias.col2)')
def test_include_exclude(self):
m = MetaData()
a = Table('a', m,
Column('id', Integer, primary_key=True),
Column('xxx_id', Integer,
ForeignKey('a.id', name='adf', use_alter=True)
)
)
e = (a.c.id == a.c.xxx_id)
assert str(e) == "a.id = a.xxx_id"
b = a.alias()
e = sql_util.ClauseAdapter(b, include=set([a.c.id]),
equivalents={a.c.id: set([a.c.id])}
).traverse(e)
assert str(e) == "a_1.id = a.xxx_id"
def test_recursive_equivalents(self):
m = MetaData()
a = Table('a', m, Column('x', Integer), Column('y', Integer))
b = Table('b', m, Column('x', Integer), Column('y', Integer))
c = Table('c', m, Column('x', Integer), Column('y', Integer))
# force a recursion overflow, by linking a.c.x<->c.c.x, and
# asking for a nonexistent col. corresponding_column should prevent
# endless depth.
adapt = sql_util.ClauseAdapter(
b, equivalents={a.c.x: set([c.c.x]), c.c.x: set([a.c.x])})
assert adapt._corresponding_column(a.c.x, False) is None
def test_multilevel_equivalents(self):
m = MetaData()
a = Table('a', m, Column('x', Integer), Column('y', Integer))
b = Table('b', m, Column('x', Integer), Column('y', Integer))
c = Table('c', m, Column('x', Integer), Column('y', Integer))
alias = select([a]).select_from(a.join(b, a.c.x == b.c.x)).alias()
# two levels of indirection from c.x->b.x->a.x, requires recursive
# corresponding_column call
adapt = sql_util.ClauseAdapter(
alias, equivalents={b.c.x: set([a.c.x]), c.c.x: set([b.c.x])})
assert adapt._corresponding_column(a.c.x, False) is alias.c.x
assert adapt._corresponding_column(c.c.x, False) is alias.c.x
def test_join_to_alias(self):
metadata = MetaData()
a = Table('a', metadata,
Column('id', Integer, primary_key=True))
b = Table('b', metadata,
Column('id', Integer, primary_key=True),
Column('aid', Integer, ForeignKey('a.id')),
)
c = Table('c', metadata,
Column('id', Integer, primary_key=True),
Column('bid', Integer, ForeignKey('b.id')),
)
d = Table('d', metadata,
Column('id', Integer, primary_key=True),
Column('aid', Integer, ForeignKey('a.id')),
)
j1 = a.outerjoin(b)
j2 = select([j1], use_labels=True)
j3 = c.join(j2, j2.c.b_id == c.c.bid)
j4 = j3.outerjoin(d)
self.assert_compile(j4,
'c JOIN (SELECT a.id AS a_id, b.id AS '
'b_id, b.aid AS b_aid FROM a LEFT OUTER '
'JOIN b ON a.id = b.aid) ON b_id = c.bid '
'LEFT OUTER JOIN d ON a_id = d.aid')
j5 = j3.alias('foo')
j6 = sql_util.ClauseAdapter(j5).copy_and_process([j4])[0]
# this statement takes c join(a join b), wraps it inside an
# aliased "select * from c join(a join b) AS foo". the outermost
# right side "left outer join d" stays the same, except "d"
# joins against foo.a_id instead of plain "a_id"
self.assert_compile(j6,
'(SELECT c.id AS c_id, c.bid AS c_bid, '
'a_id AS a_id, b_id AS b_id, b_aid AS '
'b_aid FROM c JOIN (SELECT a.id AS a_id, '
'b.id AS b_id, b.aid AS b_aid FROM a LEFT '
'OUTER JOIN b ON a.id = b.aid) ON b_id = '
'c.bid) AS foo LEFT OUTER JOIN d ON '
'foo.a_id = d.aid')
def test_derived_from(self):
assert select([t1]).is_derived_from(t1)
assert not select([t2]).is_derived_from(t1)
assert not t1.is_derived_from(select([t1]))
assert t1.alias().is_derived_from(t1)
s1 = select([t1, t2]).alias('foo')
s2 = select([s1]).limit(5).offset(10).alias()
assert s2.is_derived_from(s1)
s2 = s2._clone()
assert s2.is_derived_from(s1)
def test_aliasedselect_to_aliasedselect_straight(self):
# original issue from ticket #904
s1 = select([t1]).alias('foo')
s2 = select([s1]).limit(5).offset(10).alias()
self.assert_compile(sql_util.ClauseAdapter(s2).traverse(s1),
'SELECT foo.col1, foo.col2, foo.col3 FROM '
'(SELECT table1.col1 AS col1, table1.col2 '
'AS col2, table1.col3 AS col3 FROM table1) '
'AS foo LIMIT :param_1 OFFSET :param_2',
{'param_1': 5, 'param_2': 10})
def test_aliasedselect_to_aliasedselect_join(self):
s1 = select([t1]).alias('foo')
s2 = select([s1]).limit(5).offset(10).alias()
j = s1.outerjoin(t2, s1.c.col1 == t2.c.col1)
self.assert_compile(sql_util.ClauseAdapter(s2).traverse(j).select(),
'SELECT anon_1.col1, anon_1.col2, '
'anon_1.col3, table2.col1, table2.col2, '
'table2.col3 FROM (SELECT foo.col1 AS '
'col1, foo.col2 AS col2, foo.col3 AS col3 '
'FROM (SELECT table1.col1 AS col1, '
'table1.col2 AS col2, table1.col3 AS col3 '
'FROM table1) AS foo LIMIT :param_1 OFFSET '
':param_2) AS anon_1 LEFT OUTER JOIN '
'table2 ON anon_1.col1 = table2.col1',
{'param_1': 5, 'param_2': 10})
def test_aliasedselect_to_aliasedselect_join_nested_table(self):
s1 = select([t1]).alias('foo')
s2 = select([s1]).limit(5).offset(10).alias()
talias = t1.alias('bar')
assert not s2.is_derived_from(talias)
j = s1.outerjoin(talias, s1.c.col1 == talias.c.col1)
self.assert_compile(sql_util.ClauseAdapter(s2).traverse(j).select(),
'SELECT anon_1.col1, anon_1.col2, '
'anon_1.col3, bar.col1, bar.col2, bar.col3 '
'FROM (SELECT foo.col1 AS col1, foo.col2 '
'AS col2, foo.col3 AS col3 FROM (SELECT '
'table1.col1 AS col1, table1.col2 AS col2, '
'table1.col3 AS col3 FROM table1) AS foo '
'LIMIT :param_1 OFFSET :param_2) AS anon_1 '
'LEFT OUTER JOIN table1 AS bar ON '
'anon_1.col1 = bar.col1', {'param_1': 5,
'param_2': 10})
def test_functions(self):
self.assert_compile(
sql_util.ClauseAdapter(t1.alias()).
traverse(func.count(t1.c.col1)),
'count(table1_1.col1)')
s = select([func.count(t1.c.col1)])
self.assert_compile(sql_util.ClauseAdapter(t1.alias()).traverse(s),
'SELECT count(table1_1.col1) AS count_1 '
'FROM table1 AS table1_1')
def test_recursive(self):
metadata = MetaData()
a = Table('a', metadata,
Column('id', Integer, primary_key=True))
b = Table('b', metadata,
Column('id', Integer, primary_key=True),
Column('aid', Integer, ForeignKey('a.id')),
)
c = Table('c', metadata,
Column('id', Integer, primary_key=True),
Column('bid', Integer, ForeignKey('b.id')),
)
d = Table('d', metadata,
Column('id', Integer, primary_key=True),
Column('aid', Integer, ForeignKey('a.id')),
)
u = union(
a.join(b).select().apply_labels(),
a.join(d).select().apply_labels()
).alias()
self.assert_compile(
sql_util.ClauseAdapter(u).
traverse(select([c.c.bid]).where(c.c.bid == u.c.b_aid)),
"SELECT c.bid "
"FROM c, (SELECT a.id AS a_id, b.id AS b_id, b.aid AS b_aid "
"FROM a JOIN b ON a.id = b.aid UNION SELECT a.id AS a_id, d.id "
"AS d_id, d.aid AS d_aid "
"FROM a JOIN d ON a.id = d.aid) AS anon_1 "
"WHERE c.bid = anon_1.b_aid"
)
class SpliceJoinsTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
@classmethod
def setup_class(cls):
global table1, table2, table3, table4
def _table(name):
return table(name, column('col1'), column('col2'),
column('col3'))
table1, table2, table3, table4 = [
_table(name) for name in (
'table1', 'table2', 'table3', 'table4')]
def test_splice(self):
t1, t2, t3, t4 = table1, table2, table1.alias(), table2.alias()
j = t1.join(
t2,
t1.c.col1 == t2.c.col1).join(
t3,
t2.c.col1 == t3.c.col1).join(
t4,
t4.c.col1 == t1.c.col1)
s = select([t1]).where(t1.c.col2 < 5).alias()
self.assert_compile(sql_util.splice_joins(s, j),
'(SELECT table1.col1 AS col1, table1.col2 '
'AS col2, table1.col3 AS col3 FROM table1 '
'WHERE table1.col2 < :col2_1) AS anon_1 '
'JOIN table2 ON anon_1.col1 = table2.col1 '
'JOIN table1 AS table1_1 ON table2.col1 = '
'table1_1.col1 JOIN table2 AS table2_1 ON '
'table2_1.col1 = anon_1.col1')
def test_stop_on(self):
t1, t2, t3 = table1, table2, table3
j1 = t1.join(t2, t1.c.col1 == t2.c.col1)
j2 = j1.join(t3, t2.c.col1 == t3.c.col1)
s = select([t1]).select_from(j1).alias()
self.assert_compile(sql_util.splice_joins(s, j2),
'(SELECT table1.col1 AS col1, table1.col2 '
'AS col2, table1.col3 AS col3 FROM table1 '
'JOIN table2 ON table1.col1 = table2.col1) '
'AS anon_1 JOIN table2 ON anon_1.col1 = '
'table2.col1 JOIN table3 ON table2.col1 = '
'table3.col1')
self.assert_compile(sql_util.splice_joins(s, j2, j1),
'(SELECT table1.col1 AS col1, table1.col2 '
'AS col2, table1.col3 AS col3 FROM table1 '
'JOIN table2 ON table1.col1 = table2.col1) '
'AS anon_1 JOIN table3 ON table2.col1 = '
'table3.col1')
def test_splice_2(self):
t2a = table2.alias()
t3a = table3.alias()
j1 = table1.join(
t2a,
table1.c.col1 == t2a.c.col1).join(
t3a,
t2a.c.col2 == t3a.c.col2)
t2b = table4.alias()
j2 = table1.join(t2b, table1.c.col3 == t2b.c.col3)
self.assert_compile(sql_util.splice_joins(table1, j1),
'table1 JOIN table2 AS table2_1 ON '
'table1.col1 = table2_1.col1 JOIN table3 '
'AS table3_1 ON table2_1.col2 = '
'table3_1.col2')
self.assert_compile(sql_util.splice_joins(table1, j2),
'table1 JOIN table4 AS table4_1 ON '
'table1.col3 = table4_1.col3')
self.assert_compile(
sql_util.splice_joins(
sql_util.splice_joins(
table1,
j1),
j2),
'table1 JOIN table2 AS table2_1 ON '
'table1.col1 = table2_1.col1 JOIN table3 '
'AS table3_1 ON table2_1.col2 = '
'table3_1.col2 JOIN table4 AS table4_1 ON '
'table1.col3 = table4_1.col3')
class SelectTest(fixtures.TestBase, AssertsCompiledSQL):
"""tests the generative capability of Select"""
__dialect__ = 'default'
@classmethod
def setup_class(cls):
global t1, t2
t1 = table("table1",
column("col1"),
column("col2"),
column("col3"),
)
t2 = table("table2",
column("col1"),
column("col2"),
column("col3"),
)
def test_columns(self):
s = t1.select()
self.assert_compile(s,
'SELECT table1.col1, table1.col2, '
'table1.col3 FROM table1')
select_copy = s.column('yyy')
self.assert_compile(select_copy,
'SELECT table1.col1, table1.col2, '
'table1.col3, yyy FROM table1')
assert s.columns is not select_copy.columns
assert s._columns is not select_copy._columns
assert s._raw_columns is not select_copy._raw_columns
self.assert_compile(s,
'SELECT table1.col1, table1.col2, '
'table1.col3 FROM table1')
def test_froms(self):
s = t1.select()
self.assert_compile(s,
'SELECT table1.col1, table1.col2, '
'table1.col3 FROM table1')
select_copy = s.select_from(t2)
self.assert_compile(select_copy,
'SELECT table1.col1, table1.col2, '
'table1.col3 FROM table1, table2')
assert s._froms is not select_copy._froms
self.assert_compile(s,
'SELECT table1.col1, table1.col2, '
'table1.col3 FROM table1')
def test_prefixes(self):
s = t1.select()
self.assert_compile(s,
'SELECT table1.col1, table1.col2, '
'table1.col3 FROM table1')
select_copy = s.prefix_with('FOOBER')
self.assert_compile(select_copy,
'SELECT FOOBER table1.col1, table1.col2, '
'table1.col3 FROM table1')
self.assert_compile(s,
'SELECT table1.col1, table1.col2, '
'table1.col3 FROM table1')
def test_execution_options(self):
s = select().execution_options(foo='bar')
s2 = s.execution_options(bar='baz')
s3 = s.execution_options(foo='not bar')
# The original select should not be modified.
assert s._execution_options == dict(foo='bar')
# s2 should have its execution_options based on s, though.
assert s2._execution_options == dict(foo='bar', bar='baz')
assert s3._execution_options == dict(foo='not bar')
def test_invalid_options(self):
assert_raises(
exc.ArgumentError,
select().execution_options, compiled_cache={}
)
assert_raises(
exc.ArgumentError,
select().execution_options,
isolation_level='READ_COMMITTED'
)
# this feature not available yet
def _NOTYET_test_execution_options_in_kwargs(self):
s = select(execution_options=dict(foo='bar'))
s2 = s.execution_options(bar='baz')
# The original select should not be modified.
assert s._execution_options == dict(foo='bar')
# s2 should have its execution_options based on s, though.
assert s2._execution_options == dict(foo='bar', bar='baz')
# this feature not available yet
def _NOTYET_test_execution_options_in_text(self):
s = text('select 42', execution_options=dict(foo='bar'))
assert s._execution_options == dict(foo='bar')
class ValuesBaseTest(fixtures.TestBase, AssertsCompiledSQL):
"""Tests the generative capability of Insert, Update"""
__dialect__ = 'default'
# fixme: consolidate coverage from elsewhere here and expand
@classmethod
def setup_class(cls):
global t1, t2
t1 = table("table1",
column("col1"),
column("col2"),
column("col3"),
)
t2 = table("table2",
column("col1"),
column("col2"),
column("col3"),
)
def test_prefixes(self):
i = t1.insert()
self.assert_compile(i,
"INSERT INTO table1 (col1, col2, col3) "
"VALUES (:col1, :col2, :col3)")
gen = i.prefix_with("foober")
self.assert_compile(gen,
"INSERT foober INTO table1 (col1, col2, col3) "
"VALUES (:col1, :col2, :col3)")
self.assert_compile(i,
"INSERT INTO table1 (col1, col2, col3) "
"VALUES (:col1, :col2, :col3)")
i2 = t1.insert(prefixes=['squiznart'])
self.assert_compile(i2,
"INSERT squiznart INTO table1 (col1, col2, col3) "
"VALUES (:col1, :col2, :col3)")
gen2 = i2.prefix_with("quux")
self.assert_compile(gen2,
"INSERT squiznart quux INTO "
"table1 (col1, col2, col3) "
"VALUES (:col1, :col2, :col3)")
def test_add_kwarg(self):
i = t1.insert()
eq_(i.parameters, None)
i = i.values(col1=5)
eq_(i.parameters, {"col1": 5})
i = i.values(col2=7)
eq_(i.parameters, {"col1": 5, "col2": 7})
def test_via_tuple_single(self):
i = t1.insert()
eq_(i.parameters, None)
i = i.values((5, 6, 7))
eq_(i.parameters, {"col1": 5, "col2": 6, "col3": 7})
def test_kw_and_dict_simulatenously_single(self):
i = t1.insert()
i = i.values({"col1": 5}, col2=7)
eq_(i.parameters, {"col1": 5, "col2": 7})
def test_via_tuple_multi(self):
i = t1.insert()
eq_(i.parameters, None)
i = i.values([(5, 6, 7), (8, 9, 10)])
eq_(i.parameters, [
{"col1": 5, "col2": 6, "col3": 7},
{"col1": 8, "col2": 9, "col3": 10},
]
)
def test_inline_values_single(self):
i = t1.insert(values={"col1": 5})
eq_(i.parameters, {"col1": 5})
is_(i._has_multi_parameters, False)
def test_inline_values_multi(self):
i = t1.insert(values=[{"col1": 5}, {"col1": 6}])
eq_(i.parameters, [{"col1": 5}, {"col1": 6}])
is_(i._has_multi_parameters, True)
def test_add_dictionary(self):
i = t1.insert()
eq_(i.parameters, None)
i = i.values({"col1": 5})
eq_(i.parameters, {"col1": 5})
is_(i._has_multi_parameters, False)
i = i.values({"col1": 6})
# note replaces
eq_(i.parameters, {"col1": 6})
is_(i._has_multi_parameters, False)
i = i.values({"col2": 7})
eq_(i.parameters, {"col1": 6, "col2": 7})
is_(i._has_multi_parameters, False)
def test_add_kwarg_disallowed_multi(self):
i = t1.insert()
i = i.values([{"col1": 5}, {"col1": 7}])
assert_raises_message(
exc.InvalidRequestError,
"This construct already has multiple parameter sets.",
i.values, col2=7
)
def test_cant_mix_single_multi_formats_dict_to_list(self):
i = t1.insert().values(col1=5)
assert_raises_message(
exc.ArgumentError,
"Can't mix single-values and multiple values "
"formats in one statement",
i.values, [{"col1": 6}]
)
def test_cant_mix_single_multi_formats_list_to_dict(self):
i = t1.insert().values([{"col1": 6}])
assert_raises_message(
exc.ArgumentError,
"Can't mix single-values and multiple values "
"formats in one statement",
i.values, {"col1": 5}
)
def test_erroneous_multi_args_dicts(self):
i = t1.insert()
assert_raises_message(
exc.ArgumentError,
"Only a single dictionary/tuple or list of "
"dictionaries/tuples is accepted positionally.",
i.values, {"col1": 5}, {"col1": 7}
)
def test_erroneous_multi_args_tuples(self):
i = t1.insert()
assert_raises_message(
exc.ArgumentError,
"Only a single dictionary/tuple or list of "
"dictionaries/tuples is accepted positionally.",
i.values, (5, 6, 7), (8, 9, 10)
)
def test_erroneous_multi_args_plus_kw(self):
i = t1.insert()
assert_raises_message(
exc.ArgumentError,
"Can't pass kwargs and multiple parameter sets simultaenously",
i.values, [{"col1": 5}], col2=7
)
def test_update_no_support_multi_values(self):
u = t1.update()
assert_raises_message(
exc.InvalidRequestError,
"This construct does not support multiple parameter sets.",
u.values, [{"col1": 5}, {"col1": 7}]
)
def test_update_no_support_multi_constructor(self):
assert_raises_message(
exc.InvalidRequestError,
"This construct does not support multiple parameter sets.",
t1.update, values=[{"col1": 5}, {"col1": 7}]
)
|
Abi1ity/uniclust2.0
|
SQLAlchemy-0.9.9/test/sql/test_generative.py
|
Python
|
bsd-3-clause
| 58,459
|
[
"ADF",
"VisIt"
] |
a69173cfdf2f7f8d30b50bbfbbf67f4107b72817ede0dafa6b98536869c9718c
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Johan Gonqvist <johan.gronqvist@gmail.com>
# Copyright (C) 2007-2009 Gary Burton <gary.burton@zen.co.uk>
# Copyright (C) 2007-2009 Stephane Charette <stephanecharette@gmail.com>
# Copyright (C) 2008-2009 Brian G. Matherly
# Copyright (C) 2008 Jason M. Simanek <jason@bohemianalps.com>
# Copyright (C) 2008-2011 Rob G. Healey <robhealey1@gmail.com>
# Copyright (C) 2010 Doug Blank <doug.blank@gmail.com>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010-2017 Serge Noiraud
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2013 Benny Malengier
# Copyright (C) 2016 Allen Crider
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Narrative Web Page generator.
Class:
SurnameListPage - Index for first letters of surname
"""
#------------------------------------------------
# python modules
#------------------------------------------------
from decimal import getcontext
import logging
#------------------------------------------------
# Gramps module
#------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.plugins.lib.libhtml import Html
#------------------------------------------------
# specific narrative web import
#------------------------------------------------
from gramps.plugins.webreport.basepage import BasePage
from gramps.plugins.webreport.common import (get_first_letters, _KEYPERSON,
alphabet_navigation, html_escape,
sort_people, name_to_md5,
first_letter, get_index_letter,
primary_difference, FULLCLEAR)
_ = glocale.translation.sgettext
LOG = logging.getLogger(".NarrativeWeb")
getcontext().prec = 8
#################################################
#
# Creates the Surname List page
#
#################################################
class SurnameListPage(BasePage):
"""
This class is responsible for displaying the list of Surnames
"""
ORDER_BY_NAME = 0
ORDER_BY_COUNT = 1
def __init__(self, report, title, ppl_handle_list,
order_by=ORDER_BY_NAME, filename="surnames"):
"""
@param: report -- The instance of the main report class for
this report
@param: title -- Is the title of the web page
@param: ppl_handle_list -- The list of people for whom we need to create
a page.
@param: order_by -- The way to sort surnames :
Surnames or Surnames count
@param: filename -- The name to use for the Surnames page
"""
BasePage.__init__(self, report, title)
prev_surname = ""
prev_letter = " "
if order_by == self.ORDER_BY_NAME:
output_file, sio = self.report.create_file(filename)
surnamelistpage, head, body = self.write_header(self._('Surnames'))
else:
output_file, sio = self.report.create_file("surnames_count")
(surnamelistpage, head,
body) = self.write_header(self._('Surnames by person count'))
# begin surnames division
with Html("div", class_="content", id="surnames") as surnamelist:
body += surnamelist
# page message
msg = self._('This page contains an index of all the '
'surnames in the database. Selecting a link '
'will lead to a list of individuals in the '
'database with this same surname.')
surnamelist += Html("p", msg, id="description")
# add alphabet navigation...
# only if surname list not surname count
if order_by == self.ORDER_BY_NAME:
index_list = get_first_letters(self.r_db, ppl_handle_list,
_KEYPERSON, rlocale=self.rlocale)
alpha_nav = alphabet_navigation(index_list, self.rlocale)
if alpha_nav is not None:
surnamelist += alpha_nav
if order_by == self.ORDER_BY_COUNT:
table_id = 'SortByCount'
else:
table_id = 'SortByName'
# begin surnamelist table and table head
with Html("table", class_="infolist primobjlist surnamelist",
id=table_id) as table:
surnamelist += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
trow += Html("th", self._("Letter"), class_="ColumnLetter",
inline=True)
# create table header surname hyperlink
fname = self.report.surname_fname + self.ext
tcell = Html("th", class_="ColumnSurname", inline=True)
trow += tcell
hyper = Html("a", self._("Surname"),
href=fname, title=self._("Surnames"))
tcell += hyper
# create table header number of people hyperlink
fname = "surnames_count" + self.ext
tcell = Html("th", class_="ColumnQuantity", inline=True)
trow += tcell
num_people = self._("Number of People")
hyper = Html("a", num_people, href=fname, title=num_people)
tcell += hyper
# begin table body
with Html("tbody") as tbody:
table += tbody
ppl_handle_list = sort_people(self.r_db, ppl_handle_list,
self.rlocale)
if order_by == self.ORDER_BY_COUNT:
temp_list = {}
for (surname, data_list) in ppl_handle_list:
index_val = "%90d_%s" % (999999999-len(data_list),
surname)
temp_list[index_val] = (surname, data_list)
lkey = self.rlocale.sort_key
ppl_handle_list = (temp_list[key]
for key in sorted(temp_list,
key=lkey))
first = True
first_surname = True
for (surname, data_list) in ppl_handle_list:
if surname and not surname.isspace():
letter = first_letter(surname)
if order_by == self.ORDER_BY_NAME:
# There will only be an alphabetic index list if
# the ORDER_BY_NAME page is being generated
letter = get_index_letter(letter, index_list,
self.rlocale)
else:
letter = ' '
surname = self._("<absent>")
trow = Html("tr")
tbody += trow
tcell = Html("td", class_="ColumnLetter", inline=True)
trow += tcell
if first or primary_difference(letter, prev_letter,
self.rlocale):
first = False
prev_letter = letter
trow.attr = 'class = "BeginLetter"'
ttle = self._("Surnames beginning with "
"letter %s") % letter
hyper = Html("a", letter, name=letter,
title=ttle, inline=True)
tcell += hyper
elif first_surname or surname != prev_surname:
first_surname = False
tcell += " "
prev_surname = surname
trow += Html("td",
self.surname_link(name_to_md5(surname),
#html_escape(surname)),
surname),
class_="ColumnSurname", inline=True)
trow += Html("td", len(data_list),
class_="ColumnQuantity", inline=True)
# create footer section
# add clearline for proper styling
footer = self.write_footer(None)
body += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(surnamelistpage,
output_file, sio, 0) # 0 => current date modification
def surname_link(self, fname, name, opt_val=None, uplink=False):
"""
Create a link to the surname page.
@param: fname -- Path to the file name
@param: name -- Name to see in the link
@param: opt_val -- Option value to use
@param: uplink -- If True, then "../../../" is inserted in front of
the result.
"""
url = self.report.build_url_fname_html(fname, "srn", uplink)
hyper = Html("a", html_escape(name), href=url,
title=name, inline=True)
if opt_val is not None:
hyper += opt_val
# return hyperlink to its caller
return hyper
|
jralls/gramps
|
gramps/plugins/webreport/surnamelist.py
|
Python
|
gpl-2.0
| 10,613
|
[
"Brian"
] |
a9b7fb452f9b42b841a149b3b364f5e97c65a2299132e2d9ddaa68d2bb8b1c5e
|
import ocl
import camvtk
import time
import vtk
import datetime
import math
def drawLoops(myscreen, loops, loopcolor):
nloop = 0
for lop in loops:
n = 0
N = len(lop)
first_point=ocl.Point(-1,-1,5)
previous=ocl.Point(-1,-1,5)
for p in lop:
if n==0: # don't draw anything on the first iteration
previous=p
first_point = p
elif n== (N-1): # the last point
myscreen.addActor( camvtk.Line(p1=(previous.x,previous.y,previous.z),p2=(p.x,p.y,p.z),color=loopcolor) ) # the normal line
# and a line from p to the first point
myscreen.addActor( camvtk.Line(p1=(p.x,p.y,p.z),p2=(first_point.x,first_point.y,first_point.z),color=loopcolor) )
else:
myscreen.addActor( camvtk.Line(p1=(previous.x,previous.y,previous.z),p2=(p.x,p.y,p.z),color=loopcolor) )
previous=p
n=n+1
print "rendered loop ",nloop, " with ", len(lop), " points"
if len(lop) == 2:
for p in lop:
print p
myscreen.addActor( camvtk.Sphere(center=(p.x,p.y,p.z),radius=0.1,color=camvtk.green) )
nloop = nloop+1
if __name__ == "__main__":
print ocl.revision()
myscreen = camvtk.VTKScreen()
#stl = camvtk.STLSurf("../../stl/demo.stl")
#stl = camvtk.STLSurf("../../stl/gnu_tux_mod.stl")
#stl = camvtk.STLSurf("../../stl/porche.stl")
#stl = camvtk.STLSurf("../../stl/ktoolcav.stl")
#stl = camvtk.STLSurf("../../stl/ktoolcor.stl")
stl = camvtk.STLSurf("../../stl/waterline1.stl")
#myscreen.addActor(stl)
#stl.SetWireframe() # render tux as wireframe
#stl.SetSurface() # render tux as surface
#stl.SetColor(camvtk.cyan)
polydata = stl.src.GetOutput() # get polydata from vtk-surface
s = ocl.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s) #put triangles on ocl-surface
#s.rotate(-math.pi/2,math.pi,0)
stl2 = camvtk.STLSurf(triangleList= s.getTriangles() )
myscreen.addActor(stl2)
stl2.SetSurface()
stl2.SetColor(camvtk.cyan)
print "STL surface read,", s.size(), "triangles"
zh=-0.5
zheights=[ -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, -0.05] # for cavity
zheights=[ -0.1, 0.0, 0.1, 0.2, 0.3, 0.4 , 0.5, 0.6, 0.7] # for core
zheights=[ 10, 20, 30, 40, 50, 60] # for waterline1.stl
diam = 6
length = 100
loops = []
#cutter = ocl.CylCutter( diam , length )
cutter = ocl.BallCutter( diam , length )
#cutter = ocl.BullCutter( diam , diam/5, length )
"""
wl = ocl.Waterline()
wl.setSTL(s)
wl.setCutter(cutter)
wl.setZ(zh)
wl.setSampling(0.02)
#wl.setThreads(5)
t_before = time.time()
wl.run2()
t_after = time.time()
calctime = t_after-t_before
print " Waterline done in ", calctime," s"
cutter_loops = wl.getLoops()
for l in cutter_loops:
loops.append(l)
"""
sampling=1
minSampling=0.1
aloops = []
for zh in zheights:
awl = ocl.AdaptiveWaterline()
awl.setSTL(s)
awl.setCutter(cutter)
awl.setZ(zh)
awl.setSampling(sampling)
awl.setMinSampling(minSampling)
#wl.setThreads(5)
t_before = time.time()
awl.run()
t_after = time.time()
calctime = t_after-t_before
print " AdaptiveWaterline done in ", calctime," s"
acutter_loops = awl.getLoops()
for l in acutter_loops:
aloops.append(l)
drawLoops(myscreen, aloops, camvtk.red)
print "done."
myscreen.camera.SetPosition(185, 153, 167)
myscreen.camera.SetFocalPoint(5, 5, 0)
camvtk.drawArrows(myscreen,center=(0,-4,0))
camvtk.drawOCLtext(myscreen)
myscreen.render()
myscreen.iren.Start()
#raw_input("Press Enter to terminate")
|
AlanZatarain/opencamlib
|
scripts/waterline/waterline_7_cavity.py
|
Python
|
gpl-3.0
| 3,887
|
[
"VTK"
] |
0810e9272c75c1dad0a60267b8116b83262db810215108d04ead1b8d09953ce4
|
""" Module that holds DISET Authorization class for services
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from DIRAC.ConfigurationSystem.Client.Config import gConfig
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
from DIRAC.FrameworkSystem.Client.Logger import gLogger
from DIRAC.Core.Security import Properties
from DIRAC.Core.Utilities import List
__RCSID__ = "$Id$"
class AuthManager(object):
""" Handle Service Authorization
"""
__authLogger = gLogger.getSubLogger("Authorization")
KW_HOSTS_GROUP = 'hosts'
KW_DN = 'DN'
KW_GROUP = 'group'
KW_EXTRA_CREDENTIALS = 'extraCredentials'
KW_PROPERTIES = 'properties'
KW_USERNAME = 'username'
def __init__(self, authSection):
"""
Constructor
:type authSection: string
:param authSection: Section containing the authorization rules
"""
self.authSection = authSection
def authQuery(self, methodQuery, credDict, defaultProperties=False):
"""
Check if the query is authorized for a credentials dictionary
:type methodQuery: string
:param methodQuery: Method to test
:type credDict: dictionary
:param credDict: dictionary containing credentials for test. The dictionary can contain the DN
and selected group.
:return: Boolean result of test
"""
userString = ""
if self.KW_DN in credDict:
userString += "DN=%s" % credDict[self.KW_DN]
if self.KW_GROUP in credDict:
userString += " group=%s" % credDict[self.KW_GROUP]
if self.KW_EXTRA_CREDENTIALS in credDict:
userString += " extraCredentials=%s" % str(credDict[self.KW_EXTRA_CREDENTIALS])
self.__authLogger.debug("Trying to authenticate %s" % userString)
# Get properties
requiredProperties = self.getValidPropertiesForMethod(methodQuery, defaultProperties)
# Extract valid groups
validGroups = self.getValidGroups(requiredProperties)
lowerCaseProperties = [prop.lower() for prop in requiredProperties]
if not lowerCaseProperties:
lowerCaseProperties = ['any']
allowAll = "any" in lowerCaseProperties or "all" in lowerCaseProperties
# Set no properties by default
credDict[self.KW_PROPERTIES] = []
# Check non secure backends
if self.KW_DN not in credDict or not credDict[self.KW_DN]:
if allowAll and not validGroups:
self.__authLogger.debug("Accepted request from unsecure transport")
return True
else:
self.__authLogger.debug(
"Explicit property required and query seems to be coming through an unsecure transport")
return False
# Check if query comes though a gateway/web server
if self.forwardedCredentials(credDict):
self.__authLogger.debug("Query comes from a gateway")
self.unpackForwardedCredentials(credDict)
return self.authQuery(methodQuery, credDict, requiredProperties)
# Get the properties
# Check for invalid forwarding
if self.KW_EXTRA_CREDENTIALS in credDict:
# Invalid forwarding?
if not isinstance(credDict[self.KW_EXTRA_CREDENTIALS], six.string_types):
self.__authLogger.debug("The credentials seem to be forwarded by a host, but it is not a trusted one")
return False
# Is it a host?
if self.KW_EXTRA_CREDENTIALS in credDict and credDict[self.KW_EXTRA_CREDENTIALS] == self.KW_HOSTS_GROUP:
# Get the nickname of the host
credDict[self.KW_GROUP] = credDict[self.KW_EXTRA_CREDENTIALS]
# HACK TO MAINTAIN COMPATIBILITY
else:
if self.KW_EXTRA_CREDENTIALS in credDict and self.KW_GROUP not in credDict:
credDict[self.KW_GROUP] = credDict[self.KW_EXTRA_CREDENTIALS]
# END OF HACK
# Get the username
if self.KW_DN in credDict and credDict[self.KW_DN]:
if self.KW_GROUP not in credDict:
result = Registry.findDefaultGroupForDN(credDict[self.KW_DN])
if not result['OK']:
credDict[self.KW_USERNAME] = "anonymous"
credDict[self.KW_GROUP] = "visitor"
else:
credDict[self.KW_GROUP] = result['Value']
if credDict[self.KW_GROUP] == self.KW_HOSTS_GROUP:
# For host
if not self.getHostNickName(credDict):
self.__authLogger.warn("Host is invalid")
if not allowAll:
return False
# If all, then set anon credentials
credDict[self.KW_USERNAME] = "anonymous"
credDict[self.KW_GROUP] = "visitor"
else:
# For users
username = self.getUsername(credDict)
suspended = self.isUserSuspended(credDict)
if not username:
self.__authLogger.warn("User is invalid or does not belong to the group it's saying")
if suspended:
self.__authLogger.warn("User is Suspended")
if not username or suspended:
if not allowAll:
return False
# If all, then set anon credentials
credDict[self.KW_USERNAME] = "anonymous"
credDict[self.KW_GROUP] = "visitor"
else:
if not allowAll:
return False
credDict[self.KW_USERNAME] = "anonymous"
credDict[self.KW_GROUP] = "visitor"
# If any or all in the props, allow
allowGroup = not validGroups or credDict[self.KW_GROUP] in validGroups
if allowAll and allowGroup:
return True
# Check authorized groups
if "authenticated" in lowerCaseProperties and allowGroup:
return True
if not self.matchProperties(credDict, requiredProperties):
self.__authLogger.warn("Client is not authorized\nValid properties: %s\nClient: %s" %
(requiredProperties, credDict))
return False
elif not allowGroup:
self.__authLogger.warn("Client is not authorized\nValid groups: %s\nClient: %s" %
(validGroups, credDict))
return False
return True
def getHostNickName(self, credDict):
"""
Discover the host nickname associated to the DN.
The nickname will be included in the credentials dictionary.
:type credDict: dictionary
:param credDict: Credentials to check
:return: Boolean specifying whether the nickname was found
"""
if self.KW_DN not in credDict:
return True
if self.KW_GROUP not in credDict:
return False
retVal = Registry.getHostnameForDN(credDict[self.KW_DN])
if not retVal['OK']:
gLogger.warn("Cannot find hostname for DN %s: %s" % (credDict[self.KW_DN], retVal['Message']))
return False
credDict[self.KW_USERNAME] = retVal['Value']
credDict[self.KW_PROPERTIES] = Registry.getPropertiesForHost(credDict[self.KW_USERNAME], [])
return True
def getValidPropertiesForMethod(self, method, defaultProperties=False):
"""
Get all authorized groups for calling a method
:type method: string
:param method: Method to test
:return: List containing the allowed groups
"""
authProps = gConfig.getValue("%s/%s" % (self.authSection, method), [])
if authProps:
return authProps
if defaultProperties:
self.__authLogger.debug("Using hardcoded properties for method %s : %s" % (method, defaultProperties))
if not isinstance(defaultProperties, (list, tuple)):
return List.fromChar(defaultProperties)
return defaultProperties
defaultPath = "%s/Default" % "/".join(method.split("/")[:-1])
authProps = gConfig.getValue("%s/%s" % (self.authSection, defaultPath), [])
if authProps:
self.__authLogger.debug("Method %s has no properties defined using %s" % (method, defaultPath))
return authProps
self.__authLogger.debug("Method %s has no authorization rules defined. Allowing no properties" % method)
return []
def getValidGroups(self, rawProperties):
""" Get valid groups as specified in the method authorization rules
:param rawProperties: all method properties
:type rawProperties: python:list
:return: list of allowed groups or []
"""
validGroups = []
for prop in list(rawProperties):
if prop.startswith('group:'):
rawProperties.remove(prop)
prop = prop.replace('group:', '')
validGroups.append(prop)
elif prop.startswith('vo:'):
rawProperties.remove(prop)
vo = prop.replace('vo:', '')
result = Registry.getGroupsForVO(vo)
if result['OK']:
validGroups.extend(result['Value'])
validGroups = list(set(validGroups))
return validGroups
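# Illustrative note (added, not part of the original module): only entries
# prefixed with 'group:' or 'vo:' are consumed here. With a hypothetical
#     rawProperties = ['NormalUser', 'group:dirac_admin', 'vo:biomed']
# the two prefixed entries are removed from rawProperties in place,
# 'dirac_admin' is returned directly, the groups registered for the 'biomed'
# VO are added via Registry.getGroupsForVO(), and 'NormalUser' remains in
# rawProperties as an ordinary property requirement.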
def forwardedCredentials(self, credDict):
"""
Check whether the credentials are being forwarded by a valid source
:type credDict: dictionary
:param credDict: Credentials to check
:return: Boolean with the result
"""
if self.KW_EXTRA_CREDENTIALS in credDict and isinstance(credDict[self.KW_EXTRA_CREDENTIALS], (tuple, list)):
if self.KW_DN in credDict:
retVal = Registry.getHostnameForDN(credDict[self.KW_DN])
if retVal['OK']:
hostname = retVal['Value']
if Properties.TRUSTED_HOST in Registry.getPropertiesForHost(hostname, []):
return True
return False
def unpackForwardedCredentials(self, credDict):
"""
Extract the forwarded credentials
:type credDict: dictionary
:param credDict: Credentials to unpack
"""
credDict[self.KW_DN] = credDict[self.KW_EXTRA_CREDENTIALS][0]
credDict[self.KW_GROUP] = credDict[self.KW_EXTRA_CREDENTIALS][1]
del(credDict[self.KW_EXTRA_CREDENTIALS])
def getUsername(self, credDict):
"""
Discover the username associated to the DN. It will check if the selected group is valid.
The username will be included in the credentials dictionary.
:type credDict: dictionary
:param credDict: Credentials to check
:return: Boolean specifying whether the username was found
"""
if self.KW_DN not in credDict:
return True
if self.KW_GROUP not in credDict:
result = Registry.findDefaultGroupForDN(credDict[self.KW_DN])
if not result['OK']:
return False
credDict[self.KW_GROUP] = result['Value']
credDict[self.KW_PROPERTIES] = Registry.getPropertiesForGroup(credDict[self.KW_GROUP], [])
usersInGroup = Registry.getUsersInGroup(credDict[self.KW_GROUP], [])
if not usersInGroup:
return False
retVal = Registry.getUsernameForDN(credDict[self.KW_DN], usersInGroup)
if retVal['OK']:
credDict[self.KW_USERNAME] = retVal['Value']
return True
return False
def isUserSuspended(self, credDict):
""" Discover if the user is in Suspended status
:param dict credDict: Credentials to check
:return: Boolean True if user is Suspended
"""
# Update credDict if the username is not there
if self.KW_USERNAME not in credDict:
self.getUsername(credDict)
# If username or group is not known we can not judge if the user is suspended
# These cases are treated elsewhere anyway
if self.KW_USERNAME not in credDict or self.KW_GROUP not in credDict:
return False
suspendedVOList = Registry.getUserOption(credDict[self.KW_USERNAME], 'Suspended', [])
if not suspendedVOList:
return False
vo = Registry.getVOForGroup(credDict[self.KW_GROUP])
if vo in suspendedVOList:
return True
return False
def matchProperties(self, credDict, validProps, caseSensitive=False):
"""
Return True if one or more properties are in the valid list of properties
:type credDict: dictionary
:param credDict: Credentials whose group properties are matched
:type validProps: list
:param validProps: List of valid properties
:return: Boolean specifying whether any property has matched the valid ones
"""
# HACK: Map lower case properties to properties to make the check in lowercase but return the proper case
if not caseSensitive:
validProps = dict((prop.lower(), prop) for prop in validProps)
else:
validProps = dict((prop, prop) for prop in validProps)
groupProperties = credDict[self.KW_PROPERTIES]
foundProps = []
for prop in groupProperties:
if not caseSensitive:
prop = prop.lower()
if prop in validProps:
foundProps.append(validProps[prop])
credDict[self.KW_PROPERTIES] = foundProps
return foundProps
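# --- Illustrative sketch (added note, not part of the original module) ---
# A typical call from a DISET service handler; the section path, method name
# and DN below are placeholders, the real values come from the service
# configuration and the client's credentials:
#     auth = AuthManager('/Systems/Framework/Production/Services/Example/Authorization')
#     credDict = {'DN': '/O=Example/CN=some.user', 'group': 'dirac_user'}
#     if not auth.authQuery('exportExampleMethod', credDict):
#         pass  # reject the request; on success credDict gains 'username'
#               # and 'properties' entries filled in by the checks above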
|
yujikato/DIRAC
|
src/DIRAC/Core/DISET/AuthManager.py
|
Python
|
gpl-3.0
| 12,276
|
[
"DIRAC"
] |
afe9064007b490991d8a3453ae7e3746247db27b4dce177c0076e115f1d41f58
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
# Run this test like so:
# vtkpython TestGlobFileNames.py -D $VTK_DATA_ROOT
import re
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class TestGlobFileNames(vtk.test.Testing.vtkTest):
def testGlobFileNames(self):
globFileNames = vtk.vtkGlobFileNames()
globFileNames.SetDirectory(VTK_DATA_ROOT + "/Data/")
# globs do not include Kleene star support for pattern repetitions thus
# we insert a pattern for both single and double digit file extensions.
globFileNames.AddFileNames("headsq/quarter.[1-9]")
globFileNames.AddFileNames("headsq/quarter.[1-9][0-9]")
fileNames = globFileNames.GetFileNames()
n = globFileNames.GetNumberOfFileNames()
if n != 93:
for i in range(0, n):
print "File:", i, " ", fileNames.GetValue(i)
print "GetNumberOfValues should return 93, returned", n
print"Listing of ", VTK_DATA_ROOT, "/Data/headsq"
directory = vtk.vtkDirectory()
directory.Open(VTK_DATA_ROOT + "/Data/headsq")
m = directory.GetNumberOfFiles()
for j in range(0, m):
print directory.GetFile(j)
exit(1)
for i in range(0, n):
filename = fileNames.GetValue(i)
if filename != globFileNames.GetNthFileName(i):
print "mismatched filename for pattern quarter.*:", filename
exit(1)
m = re.search("[\w|\W]*quarter.*", filename)
if m == None:
print "string does not match pattern quarter.*:", filename
# check that we can re-use the Glob object
globFileNames.Reset()
globFileNames.SetDirectory(VTK_DATA_ROOT + "/Data/")
globFileNames.AddFileNames(VTK_DATA_ROOT + "/Data/financial.*")
fileNames = globFileNames.GetFileNames()
n = fileNames.GetNumberOfValues()
for i in range(0, n):
filename = fileNames.GetValue(i)
if filename != globFileNames.GetNthFileName(i):
print "mismatched filename for pattern financial.*: ", filename
exit(1)
m = re.search("[\w|\W]*financial.*", filename)
if m == None:
print "string does not match pattern financial.*:", filename
exit(1)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(TestGlobFileNames, 'test')])
|
ashray/VTK-EVM
|
IO/Core/Testing/Python/TestGlobFileNames.py
|
Python
|
bsd-3-clause
| 3,167
|
[
"VTK"
] |
6ce6ceee969683d5710e9fbd2599a15ef9d0f85251e8078191b4d57ebb38e12c
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2017- Serge Noiraud
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Package providing filter rules for Gramps.
"""
from ._allplaces import AllPlaces
from ._hascitation import HasCitation
from ._hasgallery import HasGallery
from ._hasidof import HasIdOf
from ._regexpidof import RegExpIdOf
from ._hasnote import HasNote
from ._hasnoteregexp import HasNoteRegexp
from ._hasnotematchingsubstringof import HasNoteMatchingSubstringOf
from ._hasreferencecountof import HasReferenceCountOf
from ._hassourcecount import HasSourceCount
from ._hassourceof import HasSourceOf
from ._placeprivate import PlacePrivate
from ._matchesfilter import MatchesFilter
from ._hasplace import HasPlace
from ._hasdata import HasData
from ._hasnolatorlon import HasNoLatOrLon
from ._inlatlonneighborhood import InLatLonNeighborhood
from ._isenclosedby import IsEnclosedBy
from ._matcheseventfilter import MatchesEventFilter
from ._matchessourceconfidence import MatchesSourceConfidence
from ._changedsince import ChangedSince
from ._hastag import HasTag
from ._hastitle import HasTitle
from ._withinarea import WithinArea
editor_rule_list = [
AllPlaces,
HasCitation,
HasGallery,
HasIdOf,
RegExpIdOf,
HasNote,
HasNoteRegexp,
HasReferenceCountOf,
HasSourceCount,
HasSourceOf,
PlacePrivate,
MatchesFilter,
MatchesSourceConfidence,
HasData,
HasNoLatOrLon,
InLatLonNeighborhood,
MatchesEventFilter,
ChangedSince,
HasTag,
HasTitle,
WithinArea,
IsEnclosedBy
]
|
gramps-project/gramps
|
gramps/gen/filters/rules/place/__init__.py
|
Python
|
gpl-2.0
| 2,348
|
[
"Brian"
] |
dbd8fbf06f5e5513c0dbc2d242126aa3dc3ac7cea0d40f15d40aa96d87dd6e1c
|
# Copyright (c) 2012-2014 Stephanie T. Douglas
# under the MIT License (see LICENSE.txt for full details)
import numpy as np
import cPickle
import emcee
import matplotlib.pyplot as plt
def quantile(x,quantiles):
""" Calculates quantiles - taken from DFM's triangle.py """
xsorted = sorted(x)
qvalues = [xsorted[int(q * len(xsorted))] for q in quantiles]
return zip(quantiles,qvalues)
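# Added note (illustrative): for a concrete feel of the estimator above,
# quantile(range(100), [.16, .5, .84]) returns
# [(0.16, 16), (0.5, 50), (0.84, 84)] under Python 2, i.e. the values at the
# 16th, 50th and 84th percentiles of the sorted sample.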
def rossby_model(p,Ro):
"""
computes the saturated/unsaturated activity model for a given parameter set
For Ro < turnover, the model values are equal to the saturation level
For Ro >= turnover, the model values follow a power-law with slope beta
Input
-----
p : array-like (3)
parameters for the model: saturation level, turnover, beta
Ro : array-like
Rossby number values. The model L_{whatever}/L_{bol} values will
be computed for these Rossby numbers
Output
------
y : numpy.ndarray (same size as Ro)
Model L_{whatever}/L_{bol} values corresponding to input Ro
"""
sat_level,turnover,beta = p[0],p[1],p[2]
y = np.ones(len(Ro))*sat_level
constant = sat_level/(turnover**beta)
un_sat = np.where(Ro>=turnover)[0]
y[un_sat] = constant*(Ro[un_sat]**beta)
return y
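# --- Illustrative sketch (added note, not part of the original script) ---
# rossby_model() is piecewise: flat at sat_level below the turnover, then a
# power law with slope beta above it. With hypothetical parameters
# p = [1e-4, 0.1, -2.0]:
#     rossby_model([1e-4, 0.1, -2.0], np.array([0.01, 0.1, 1.0]))
# returns approximately [1e-4, 1e-4, 1e-6]: the first two points sit at the
# saturation level, the last is suppressed by a factor (1.0 / 0.1)**-2 = 0.01.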
def lnprob(p,rossby_no,lha_lbol,err_ll):
"""
Calculates the natural log of the posterior probability for a given model
The probability calculation uses chi-squared.
Input
-----
p : array-like (3)
parameters for the model: saturation level, turnover, beta
rossby_no : array-like
Data Rossby number values
lha_lbol : array-like
Data activity values (L_{whatever}/L_{bol} - in my case
I was using L_{Halpha}/L_{bol})
error_ll : array-like
Uncertainties in the data activity values.
Output
------
lnprob : float
natural log of the posterior probability of p given the data
"""
sat_level,turnover,beta = p[0],p[1],p[2]
if ((sat_level>1e-1) or (sat_level<1e-8) or (turnover<0.001)
or (turnover>2) or (beta>2) or (beta<-6)):
return -np.inf
model_ll = rossby_model(p,rossby_no)
ln_prob = -0.5*(np.sum((lha_lbol-model_ll)**2/(err_ll**2)))
return ln_prob
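# Added note (illustrative): lnprob() combines hard uniform prior bounds with
# a chi-squared likelihood. A parameter set inside all bounds, e.g. the
# hypothetical p = [1e-4, 0.1, -2.0], returns
#     -0.5 * sum((lha_lbol - rossby_model(p, rossby_no))**2 / err_ll**2)
# while p = [1e-4, 0.1, -7.0] falls below the beta > -6 bound and returns
# -np.inf.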
def run_rossby_fit(start_p,data_rossby,data_ll,data_ull,
nwalkers=100,nsteps=2500):
"""
Sets up the emcee ensemble sampler, runs it, prints out the results,
then returns the samples.
Input
-----
start_p : (3)
starting guesses for the three model parameters
saturation level, turnover point, and power-law slope (beta)
data_rossby : array-like (ndata)
Data Rossby number values
data_ll : array-like (ndata)
Data activity values (L_{whatever}/L_{bol} - in my case
I was using L_{Halpha}/L_{bol})
data_ull : array-like (ndata)
Uncertainties in the data activity values.
Output
------
samples : array-like (nwalkers*nsteps,3)
all the samples from all the emcee walkers, reshaped so there's
just one column per parameter
"""
ndim = 3
p0 = np.zeros((nwalkers,ndim))
# initialize the walkers in a tiny gaussian ball around the starting point
for i in range(nwalkers):
p0[i] = start_p + (1e-2*np.random.randn(ndim)*start_p)
sampler = emcee.EnsembleSampler(nwalkers,ndim,lnprob,
args=[data_rossby,data_ll,data_ull])
pos,prob,state=sampler.run_mcmc(p0,nsteps/10)
sampler.reset()
pos,prob,state=sampler.run_mcmc(pos,nsteps)
sl_mcmc = quantile(sampler.flatchain[:,0],[.16,.5,.84])
to_mcmc = quantile(sampler.flatchain[:,1],[.16,.5,.84])
be_mcmc = quantile(sampler.flatchain[:,2],[.16,.5,.84])
print 'sat_level={0:.7f} +{1:.7f}/-{2:.7f}'.format(
sl_mcmc[1][1],sl_mcmc[1][1]-sl_mcmc[0][1],sl_mcmc[2][1]-sl_mcmc[1][1])
print 'turnover={0:.3f} +{1:.3f}/-{2:.3f}'.format(
to_mcmc[1][1],to_mcmc[1][1]-to_mcmc[0][1],to_mcmc[2][1]-to_mcmc[1][1])
print 'beta={0:.3f} +{1:.3f}/-{2:.3f}'.format(
be_mcmc[1][1],be_mcmc[1][1]-be_mcmc[0][1],be_mcmc[2][1]-be_mcmc[1][1])
samples = sampler.flatchain
return samples
def plot_rossby(samples,data_rossby,data_ll,data_ull,
plotfilename=None,ylabel=r'$L_{H\alpha}/L_{bol}$'):
"""
Plot fit results with data
Input
-----
samples : array-like (nwalkers*nsteps,3)
all the samples from all the emcee walkers, reshaped so there's
just one column per parameter
data_rossby : array-like (ndata)
Data Rossby number values
data_ll : array-like (ndata)
Data activity values (L_{whatever}/L_{bol} - in my case
I was using L_{Halpha}/L_{bol})
data_ull : array-like (ndata)
Uncertainties in the data activity values.
plotfilename : string (optional; default=None)
if not None, the plot will be saved using this filename
"""
sl_mcmc = quantile(samples[:,0],[.16,.5,.84])
to_mcmc = quantile(samples[:,1],[.16,.5,.84])
be_mcmc = quantile(samples[:,2],[.16,.5,.84])
plt.figure()
ax = plt.subplot(111)
ax.set_xscale('log')
ax.set_yscale('log')
# Just trying to reduce the number of plotted points...
xl = np.append(np.append(0.001,np.arange(0.08,0.15,0.001)),2.0)
# xl = np.arange(0.001,2.0,0.005)
for p in samples[np.random.randint(len(samples), size=100)]:
ax.plot(xl,rossby_model(p,xl),color='LightGrey')
sat_level = sl_mcmc[1][1]
turnover = to_mcmc[1][1]
x = np.asarray([turnover,2.0])
# x = np.arange(turnover,2.0,0.001)
constant = sat_level/(turnover**-1.)
ax.plot(x,constant*(x**-1.),'k--',lw=1.5,label=r'$\beta=\ -1$')
constant = sat_level/(turnover**-2.1)
ax.plot(x,constant*(x**-2.1),'k-.',lw=1.5,label=r'$\beta=\ -2.1$')
constant = sat_level/(turnover**-2.7)
ax.plot(x,constant*(x**-2.7),'k:',lw=2,label=r'$\beta=\ -2.7$')
star_color = 'BlueViolet'
ax.errorbar(data_rossby,data_ll,data_ull,color=star_color,fmt='*',capsize=0,
ms=12,mec=star_color)
ax.plot(xl,rossby_model([sl_mcmc[1][1],to_mcmc[1][1],be_mcmc[1][1]],xl),
'k-',lw=2,label=r'$\beta=\ {0:.1f}$'.format(be_mcmc[1][1]))
ax.set_ylabel(ylabel,fontsize='xx-large')
ax.set_xlabel('R_o',fontsize='x-large')
ax.set_xlim(1e-3,2)
ax.tick_params(labelsize='x-large')
ax.set_xticklabels((0.001,0.01,0.1,1))
handles, labels = ax.get_legend_handles_labels()
new_handles = np.append(handles[-1],handles[0:-1])
new_labels = np.append(labels[-1],labels[0:-1])
ax.legend(new_handles,new_labels,loc=3,
title=ylabel+r'\ \propto\ R_o^{\beta}$')
if plotfilename!=None:
plt.savefig(plotfilename)
def print_pdf(cropchain,filename,col_names=["sat_level,turnover,beta"]):
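# Write the flattened MCMC chain to a plain-text file: a commented header row
# with the column names, then one comma-separated line per sample in cropchain.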
f = open(filename,"w")
f.write("# {}".format(col_names[0]))
for cname in col_names[1:]:
f.write(",{}".format(cname))
f.write("\n")
for i,p in enumerate(cropchain):
#print p
f.write(str(p[0]))
for this_p in p[1:]:
f.write(",{}".format(this_p))
f.write("\n")
f.close()
|
stephtdouglas/fit-rossby
|
fit_rossby.py
|
Python
|
mit
| 7,182
|
[
"Gaussian"
] |
299b963d6039af1fbe9fc96e44395b82dcd916f99a24f54e08b19aba272d7317
|
"""**Utilities for storage module**
"""
import os
import re
import copy
import numpy
import math
from osgeo import ogr
from safe.common.numerics import ensure_numeric
from safe.common.utilities import verify
# Default attribute to assign to vector layers
DEFAULT_ATTRIBUTE = 'Affected'
# Spatial layer file extensions that are recognised in Risiko
# FIXME: Perhaps add '.gml', '.zip', ...
LAYER_TYPES = ['.shp', '.asc', '.tif', '.tiff', '.geotif', '.geotiff']
# Map between extensions and OGR drivers
DRIVER_MAP = {'.sqlite': 'SQLITE',
'.shp': 'ESRI Shapefile',
'.gml': 'GML',
'.tif': 'GTiff',
'.asc': 'AAIGrid'}
# Map between Python types and OGR field types
# FIXME (Ole): I can't find a double precision type for OGR
TYPE_MAP = {type(None): ogr.OFTString, # What else should this be?
type(''): ogr.OFTString,
type(True): ogr.OFTInteger,
type(0): ogr.OFTInteger,
type(0.0): ogr.OFTReal,
type(numpy.array([0.0])[0]): ogr.OFTReal, # numpy.float64
type(numpy.array([[0.0]])[0]): ogr.OFTReal} # numpy.ndarray
# Map between verbose types and OGR geometry types
INVERSE_GEOMETRY_TYPE_MAP = {'point': ogr.wkbPoint,
'line': ogr.wkbLineString,
'polygon': ogr.wkbPolygon}
# Miscellaneous auxiliary functions
def _keywords_to_string(keywords, sublayer=None):
"""Create a string from a keywords dict.
Args:
* keywords: A required dictionary containing the keywords to stringify.
* sublayer: str optional group marker for a sub layer.
Returns:
str: a String containing the rendered keywords list
Raises:
Any exceptions are propagated.
.. note: Only simple keyword dicts should be passed here, not multilayer
dicts.
For example you pass a dict like this::
{'datatype': 'osm',
'category': 'exposure',
'title': 'buildings_osm_4326',
'subcategory': 'building',
'purpose': 'dki'}
and the following string would be returned:
datatype: osm
category: exposure
title: buildings_osm_4326
subcategory: building
purpose: dki
If sublayer is provided, e.g. _keywords_to_string(keywords, sublayer='foo'),
the following would be returned:
[foo]
datatype: osm
category: exposure
title: buildings_osm_4326
subcategory: building
purpose: dki
"""
# Write
result = ''
if sublayer is not None:
result = '[%s]\n' % sublayer
for k, v in keywords.items():
# Create key
msg = ('Key in keywords dictionary must be a string. '
'I got %s with type %s' % (k, str(type(k))[1:-1]))
verify(isinstance(k, basestring), msg)
key = k
msg = ('Key in keywords dictionary must not contain the ":" '
'character. I got "%s"' % key)
verify(':' not in key, msg)
# Create value
msg = ('Value in keywords dictionary must be convertible to a string. '
'For key %s, I got %s with type %s'
% (k, v, str(type(v))[1:-1]))
try:
val = str(v)
except:
raise Exception(msg)
# Store
result += '%s: %s\n' % (key, val)
return result
def write_keywords(keywords, filename, sublayer=None):
"""Write keywords dictonary to file
Args:
* keywords: Dictionary of keyword, value pairs
filename: Name of keywords file. Extension expected to be
.keywords
* sublayer: str Optional sublayer applicable only to multilayer formats
such as sqlite or netcdf which can potentially hold more than
one layer. The string should map to the layer group as per the
example below. **If the keywords file contains sublayer
definitions but no sublayer was defined, keywords file content
will be removed and replaced with only the keywords provided
here.**
Returns: None
Raises: None
A keyword file with sublayers may look like this:
[osm_buildings]
datatype: osm
category: exposure
subcategory: building
purpose: dki
title: buildings_osm_4326
[osm_flood]
datatype: flood
category: hazard
subcategory: building
title: flood_osm_4326
Keys must be strings not containing the ":" character
Values can be anything that can be converted to a string (using
Python's str function)
Surrounding whitespace is removed from values, but keys are unmodified
The reason being that keys must always be valid for the dictionary they
came from. For values we have decided to be flexible and treat entries like
'unit:m' the same as 'unit: m', or indeed 'unit: m '.
Otherwise, unintentional whitespace in values would lead to surprising
errors in the application.
"""
# Input checks
basename, ext = os.path.splitext(filename)
msg = ('Unknown extension for file %s. '
'Expected %s.keywords' % (filename, basename))
verify(ext == '.keywords', msg)
# First read any keywords out of the file so that we can retain
# keywords for other sublayers
existing_keywords = read_keywords(filename, all_blocks=True)
first_value = None
if len(existing_keywords) > 0:
first_value = existing_keywords[existing_keywords.keys()[0]]
multilayer_flag = type(first_value) == dict
handle = file(filename, 'wt')
if multilayer_flag:
if sublayer is not None and sublayer != '':
#replace existing keywords / add new for this layer
existing_keywords[sublayer] = keywords
for key, value in existing_keywords.iteritems():
handle.write(_keywords_to_string(value, sublayer=key))
handle.write('\n')
else:
# It is currently a multilayer but we will replace it with
# a single keyword block since the user passed no sublayer
handle.write(_keywords_to_string(keywords))
else:
#currently a simple layer so replace it with our content
handle.write(_keywords_to_string(keywords, sublayer=sublayer))
handle.close()
def read_keywords(filename, sublayer=None, all_blocks=False):
"""Read keywords dictionary from file
Args:
* filename: Name of keywords file. Extension expected to be .keywords
The format of one line is expected to be either
string: string or string
* sublayer: str Optional sublayer applicable only to multilayer formats
such as sqlite or netcdf which can potentially hold more than
one layer. The string should map to the layer group as per the
example below. If the keywords file contains sublayer definitions
but no sublayer was defined, the first layer group will be
returned.
* all_blocks: bool Optional, defaults to False. If True will return
a dict of dicts, where the top level dict entries each represent
a sublayer, and the values of that dict will be dicts of keyword
entries.
Returns:
keywords: Dictionary of keyword, value pairs
Raises: None
A keyword layer with sublayers may look like this:
[osm_buildings]
datatype: osm
category: exposure
subcategory: building
purpose: dki
title: buildings_osm_4326
[osm_flood]
datatype: flood
category: hazard
subcategory: building
title: flood_osm_4326
Whereas a simple keywords file would look like this
datatype: flood
category: hazard
subcategory: building
title: flood_osm_4326
If filename does not exist, an empty dictionary is returned
Blank lines are ignored
Surrounding whitespace is removed from values, but keys are unmodified
If there are no ':', then the keyword is treated as a key with no value
"""
# Input checks
basename, ext = os.path.splitext(filename)
msg = ('Unknown extension for file %s. '
'Expected %s.keywords' % (filename, basename))
verify(ext == '.keywords', msg)
if not os.path.isfile(filename):
return {}
# Read all entries
blocks = {}
keywords = {}
fid = open(filename, 'r')
current_block = None
first_keywords = None
for line in fid.readlines():
# Remove trailing (but not preceding!) whitespace
text = line.rstrip()
# Ignore blank lines
if text == '':
continue
# Check if it is an ini style group header
block_flag = re.search(r'^\[.*]$', text, re.M | re.I)
if block_flag:
# Write the old block if it exists - must have a current
# block to prevent orphans
if len(keywords) > 0 and current_block is not None:
blocks[current_block] = keywords
if first_keywords is None and len(keywords) > 0:
first_keywords = keywords
# now set up for a new block
current_block = text[1:-1]
# reset the keywords each time we encounter a new block
# until we know we are on the desired one
keywords = {}
continue
if ':' not in text:
key = text.strip()
val = None
else:
# Get splitting point
idx = text.find(':')
# Take key as everything up to the first ':'
key = text[:idx]
# Take value as everything after the first ':'
val = text[idx + 1:].strip()
# Add entry to dictionary
keywords[key] = val
fid.close()
# Write out any unfinalised block data
if len(keywords) > 0 and current_block is not None:
blocks[current_block] = keywords
if first_keywords is None:
first_keywords = keywords
# Ok we have generated a structure that looks like this:
# blocks = {'foo': {'a': 'b', 'c': 'd'},
#           'bar': {'d': 'e', 'f': 'g'}}
# where foo and bar are sublayers and their dicts are the sublayer keywords
if all_blocks:
return blocks
if sublayer is not None:
if sublayer in blocks:
return blocks[sublayer]
else:
return first_keywords
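# Example usage sketch (hypothetical helper and filename; the .keywords file
# is assumed to exist and to follow the sublayer format documented above):
def _example_read_keywords_usage():
    """Purely illustrative: fetch one sublayer, then every sublayer."""
    block = read_keywords('buildings.keywords', sublayer='osm_buildings')
    all_blocks = read_keywords('buildings.keywords', all_blocks=True)
    return block, all_blocks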
def geotransform2bbox(geotransform, columns, rows):
"""Convert geotransform to bounding box
Args :
* geotransform: GDAL geotransform (6-tuple).
(top left x, w-e pixel resolution, rotation,
top left y, rotation, n-s pixel resolution).
See e.g. http://www.gdal.org/gdal_tutorial.html
* columns: Number of columns in grid
* rows: Number of rows in grid
Returns:
* bbox: Bounding box as a list of geographic coordinates
[west, south, east, north]
Note:
Rows and columns are needed to determine eastern and northern bounds.
FIXME: Not sure if the pixel vs gridline registration issue is observed
correctly here. Need to check against gdal > v1.7
"""
x_origin = geotransform[0] # top left x
y_origin = geotransform[3] # top left y
x_res = geotransform[1] # w-e pixel resolution
y_res = geotransform[5] # n-s pixel resolution
x_pix = columns
y_pix = rows
minx = x_origin
maxx = x_origin + (x_pix * x_res)
miny = y_origin + (y_pix * y_res)
maxy = y_origin
return [minx, miny, maxx, maxy]
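# Worked example (illustrative helper): a 0.1 degree grid with its top left
# corner at (105.0, -5.0), 100 columns and 50 rows spans
# [105.0, -10.0, 115.0, -5.0] as [west, south, east, north].
def _example_geotransform2bbox():
    geotransform = (105.0, 0.1, 0.0, -5.0, 0.0, -0.1)
    return geotransform2bbox(geotransform, columns=100, rows=50)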
def geotransform2resolution(geotransform, isotropic=False,
rtol=1.0e-6, atol=1.0e-8):
"""Convert geotransform to resolution
Args:
* geotransform: GDAL geotransform (6-tuple).
(top left x, w-e pixel resolution, rotation,
top left y, rotation, n-s pixel resolution).
See e.g. http://www.gdal.org/gdal_tutorial.html
* isotropic: If True, verify that dx == dy and return dx
If False (default) return 2-tuple (dx, dy)
* rtol, atol: Used to control how close dx and dy must be
to qualify as isotropic. These are passed on to
numpy.allclose for comparison.
Returns:
* resolution: grid spacing (resx, resy) in (positive) decimal
degrees ordered as longitude first, then latitude.
or resx (if isotropic is True)
"""
resx = geotransform[1] # w-e pixel resolution
resy = -geotransform[5] # n-s pixel resolution (always negative)
if isotropic:
msg = ('Resolution requested with '
'isotropic=True, but '
'resolutions in the horizontal and vertical '
'are different: resx = %.12f, resy = %.12f. '
% (resx, resy))
verify(numpy.allclose(resx, resy,
rtol=rtol, atol=atol), msg)
return resx
else:
return resx, resy
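# Illustrative sketch (hypothetical helper): the same style of geotransform
# yields (0.1, 0.1) as (resx, resy), or simply 0.1 when isotropic=True since
# the two resolutions agree.
def _example_geotransform2resolution():
    geotransform = (105.0, 0.1, 0.0, -5.0, 0.0, -0.1)
    resx, resy = geotransform2resolution(geotransform)
    res = geotransform2resolution(geotransform, isotropic=True)
    return resx, resy, res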
def raster_geometry2geotransform(longitudes, latitudes):
"""Convert vectors of longitudes and latitudes to geotransform
Note:
This is the inverse operation of Raster.get_geometry().
Args:
* longitudes, latitudes: Vectors of geographic coordinates
Returns:
* geotransform: 6-tuple (top left x, w-e pixel resolution, rotation,
top left y, rotation, n-s pixel resolution)
"""
nx = len(longitudes)
ny = len(latitudes)
msg = ('You must specify more than 1 longitude to make geotransform: '
'I got %s' % str(longitudes))
verify(nx > 1, msg)
msg = ('You must specify more than 1 latitude to make geotransform: '
'I got %s' % str(latitudes))
verify(ny > 1, msg)
dx = float(longitudes[1] - longitudes[0]) # Longitudinal resolution
dy = float(latitudes[0] - latitudes[1]) # Latitudinal resolution (neg)
# Define pixel centers along each direction
# This is to achieve pixel registration rather
# than gridline registration
dx2 = dx / 2
dy2 = dy / 2
geotransform = (longitudes[0] - dx2, # Longitude of upper left corner
dx, # w-e pixel resolution
0, # rotation
latitudes[-1] - dy2, # Latitude of upper left corner
0, # rotation
dy) # n-s pixel resolution
return geotransform
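# Illustrative round trip (hypothetical helper): pixel centre coordinates with
# 0.1 degree spacing map back to a gridline registered geotransform of
# approximately (0.0, 0.1, 0, 0.3, 0, -0.1).
def _example_raster_geometry2geotransform():
    longitudes = [0.05, 0.15, 0.25]
    latitudes = [0.05, 0.15, 0.25]
    return raster_geometry2geotransform(longitudes, latitudes)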
def bbox_intersection(*args):
"""Compute intersection between two or more bounding boxes
Args:
* args: two or more bounding boxes.
Each is assumed to be a list or a tuple with
four coordinates (W, S, E, N)
Returns:
* result: The minimal common bounding box
"""
msg = 'Function bbox_intersection must take at least 2 arguments.'
verify(len(args) > 1, msg)
result = [-180, -90, 180, 90]
for a in args:
msg = ('Bounding box expected to be a list of the '
'form [W, S, E, N]. '
'Instead i got "%s"' % str(a))
try:
box = list(a)
except:
raise Exception(msg)
verify(len(box) == 4, msg)
msg = 'Western boundary must be less than eastern. I got %s' % box
verify(box[0] < box[2], msg)
msg = 'Southern boundary must be less than northern. I got %s' % box
verify(box[1] < box[3], msg)
# Compute intersection
# West and South
for i in [0, 1]:
result[i] = max(result[i], box[i])
# East and North
for i in [2, 3]:
result[i] = min(result[i], box[i])
# Check validity and return
if result[0] < result[2] and result[1] < result[3]:
return result
else:
return None
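# Illustrative sketch (hypothetical helper): two overlapping boxes share
# [100, -8, 110, -5]; disjoint boxes yield None.
def _example_bbox_intersection():
    common = bbox_intersection([95, -10, 110, -5], [100, -8, 120, 0])
    disjoint = bbox_intersection([0, 0, 10, 10], [20, 20, 30, 30])
    return common, disjoint  # ([100, -8, 110, -5], None)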
def minimal_bounding_box(bbox, min_res, eps=1.0e-6):
"""Grow bounding box to exceed specified resolution if needed
Args:
* bbox: Bounding box with format [W, S, E, N]
* min_res: Minimal acceptable resolution to exceed
* eps: Optional tolerance that will be applied to 'buffer' result
Returns:
* Adjusted bounding box guaranteed to exceed specified resolution
"""
# FIXME (Ole): Probably obsolete now
bbox = copy.copy(list(bbox))
delta_x = bbox[2] - bbox[0]
delta_y = bbox[3] - bbox[1]
if delta_x < min_res:
dx = (min_res - delta_x) / 2 + eps
bbox[0] -= dx
bbox[2] += dx
if delta_y < min_res:
dy = (min_res - delta_y) / 2 + eps
bbox[1] -= dy
bbox[3] += dy
return bbox
def buffered_bounding_box(bbox, resolution):
"""Grow bounding box with one unit of resolution in each direction
Note:
This will ensure there are enough pixels to robustly provide
interpolated values without having to painstakingly deal with
all corner cases such as 1 x 1, 1 x 2 and 2 x 1 arrays.
The border will also make sure that points that would otherwise fall
outside the domain (as defined by a tight bounding box) get assigned
values.
Args:
* bbox: Bounding box with format [W, S, E, N]
* resolution: (resx, resy) - Raster resolution in each direction.
res - Raster resolution in either direction
If resolution is None bbox is returned unchanged.
Returns:
* Adjusted bounding box
Note:
Case in point: Interpolation point O would fall outside this domain
even though there are enough grid points to support it
--------------
| |
| * * | * *
| O|
| |
| * * | * *
--------------
"""
bbox = copy.copy(list(bbox))
if resolution is None:
return bbox
try:
resx, resy = resolution
except TypeError:
resx = resy = resolution
bbox[0] -= resx
bbox[1] -= resy
bbox[2] += resx
bbox[3] += resy
return bbox
def get_geometry_type(geometry, geometry_type):
"""Determine geometry type based on data
Args:
* geometry: A list of either point coordinates [lon, lat] or polygons
which are assumed to be numpy arrays of coordinates
* geometry_type: Optional type - 'point', 'line', 'polygon' or None
Returns:
* geometry_type: Either ogr.wkbPoint, ogr.wkbLineString or
ogr.wkbPolygon
Note:
If geometry type cannot be determined an Exception is raised.
There is no consistency check across all entries of the
geometry list, only the first element is used in this determination.
"""
# FIXME (Ole): Perhaps use OGR's own symbols
msg = ('Argument geometry_type must be either "point", "line", '
'"polygon" or None')
verify(geometry_type is None or
geometry_type in [1, 2, 3] or
geometry_type.lower() in ['point', 'line', 'polygon'], msg)
if geometry_type is not None:
if isinstance(geometry_type, basestring):
return INVERSE_GEOMETRY_TYPE_MAP[geometry_type.lower()]
else:
return geometry_type
# FIXME (Ole): Should add some additional checks to see if choice
# makes sense
msg = 'Argument geometry must be a sequence. I got %s ' % type(geometry)
verify(is_sequence(geometry), msg)
if len(geometry) == 0:
# Default to point if there is no data
return ogr.wkbPoint
msg = ('The first element in geometry must be a sequence of length >= 2. '
'I got %s ' % str(geometry[0]))
verify(is_sequence(geometry[0]), msg)
verify(len(geometry[0]) >= 2, msg)
if len(geometry[0]) == 2:
try:
float(geometry[0][0])
float(geometry[0][1])
except (ValueError, TypeError, IndexError):
pass
else:
# This geometry appears to be point data
geometry_type = ogr.wkbPoint
elif len(geometry[0]) > 2:
try:
x = numpy.array(geometry[0])
except ValueError:
pass
else:
# This geometry appears to be polygon data
if x.shape[0] > 2 and x.shape[1] == 2:
geometry_type = ogr.wkbPolygon
if geometry_type is None:
msg = 'Could not determine geometry type'
raise Exception(msg)
return geometry_type
def is_sequence(x):
"""Determine if x behaves like a true sequence but not a string
Note:
This will for example return True for lists, tuples and numpy arrays
but False for strings and dictionaries.
"""
if isinstance(x, basestring):
return False
try:
list(x)
except TypeError:
return False
else:
return True
def array2wkt(A, geom_type='POLYGON'):
"""Convert coordinates to wkt format
Args:
* A: Nx2 Array of coordinates representing either a polygon or a line.
A can be either a numpy array or a list of coordinates.
* geom_type: Determines output keyword 'POLYGON' or 'LINESTRING'
Returns:
* wkt: geometry in the format known to ogr: Examples
Note:
POLYGON((1020 1030,1020 1045,1050 1045,1050 1030,1020 1030))
LINESTRING(1000 1000, 1100 1050)
"""
try:
A = ensure_numeric(A, numpy.float)
except Exception, e:
msg = ('Array (%s) could not be converted to numeric array. '
'I got type %s. Error message: %s'
% (geom_type, str(type(A)), e))
raise Exception(msg)
msg = 'Array must be a 2d array of vertices. I got %s' % (str(A.shape))
verify(len(A.shape) == 2, msg)
msg = 'Array A must have two columns. I got %s' % (str(A.shape[1]))
verify(A.shape[1] == 2, msg)
if geom_type == 'LINESTRING':
# One bracket
n = 1
elif geom_type == 'POLYGON':
# Two brackets (tsk tsk)
n = 2
else:
msg = 'Unknown geom_type: %s' % geom_type
raise Exception(msg)
wkt_string = geom_type + '(' * n
N = len(A)
for i in range(N):
# Works for both lists and arrays
wkt_string += '%f %f, ' % tuple(A[i])
return wkt_string[:-2] + ')' * n
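# Illustrative sketch (hypothetical helper): a closed triangle renders as a
# POLYGON string, e.g. 'POLYGON((0.000000 0.000000, 1.000000 0.000000, ...))',
# while an open two point segment renders as a LINESTRING.
def _example_array2wkt():
    triangle = [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 0.0]]
    poly = array2wkt(triangle, geom_type='POLYGON')
    line = array2wkt([[0.0, 0.0], [1.0, 1.0]], geom_type='LINESTRING')
    return poly, line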
# Map of ogr numerical geometry types to their textual representation
# FIXME (Ole): Some of them don't exist, even though they show up
# when doing dir(ogr) - Why?:
geometry_type_map = {ogr.wkbPoint: 'Point',
ogr.wkbPoint25D: 'Point25D',
ogr.wkbPolygon: 'Polygon',
ogr.wkbPolygon25D: 'Polygon25D',
#ogr.wkbLinePoint: 'LinePoint', # ??
ogr.wkbGeometryCollection: 'GeometryCollection',
ogr.wkbGeometryCollection25D: 'GeometryCollection25D',
ogr.wkbLineString: 'LineString',
ogr.wkbLineString25D: 'LineString25D',
ogr.wkbLinearRing: 'LinearRing',
ogr.wkbMultiLineString: 'MultiLineString',
ogr.wkbMultiLineString25D: 'MultiLineString25D',
ogr.wkbMultiPoint: 'MultiPoint',
ogr.wkbMultiPoint25D: 'MultiPoint25D',
ogr.wkbMultiPolygon: 'MultiPolygon',
ogr.wkbMultiPolygon25D: 'MultiPolygon25D',
ogr.wkbNDR: 'NDR',
ogr.wkbNone: 'None',
ogr.wkbUnknown: 'Unknown'}
def geometrytype2string(g_type):
"""Provides string representation of numeric geometry types
FIXME (Ole): I can't find anything like this in OGR. Why?
"""
if g_type in geometry_type_map:
return geometry_type_map[g_type]
elif g_type is None:
return 'No geometry type assigned'
else:
return 'Unknown geometry type: %s' % str(g_type)
# FIXME: Move to common numerics area along with polygon.py
def calculate_polygon_area(polygon, signed=False):
"""Calculate the signed area of non-self-intersecting polygon
Args:
* polygon: Numeric array of points (longitude, latitude). It is assumed
to be closed, i.e. first and last points are identical
* signed: Optional flag deciding whether returned area retains its
sign:
If points are ordered counter clockwise, the signed area
will be positive.
If points are ordered clockwise, it will be negative
Default is False which means that the area is always
positive.
Returns:
* area: Area of polygon (subject to the value of argument signed)
Note:
Sources
http://paulbourke.net/geometry/polyarea/
http://en.wikipedia.org/wiki/Centroid
"""
# Make sure it is numeric
P = numpy.array(polygon)
msg = ('Polygon is assumed to consist of coordinate pairs. '
'I got second dimension %i instead of 2' % P.shape[1])
verify(P.shape[1] == 2, msg)
x = P[:, 0]
y = P[:, 1]
# Calculate 0.5 sum_{i=0}^{N-1} (x_i y_{i+1} - x_{i+1} y_i)
a = x[:-1] * y[1:]
b = y[:-1] * x[1:]
A = numpy.sum(a - b) / 2.
if signed:
return A
else:
return abs(A)
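# Worked example (illustrative helper): the closed unit square traversed
# counter clockwise has signed area +1.0; traversed clockwise it would be -1.0.
def _example_calculate_polygon_area():
    unit_square = [[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]
    return calculate_polygon_area(unit_square, signed=True)  # 1.0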
def calculate_polygon_centroid(polygon):
"""Calculate the centroid of non-self-intersecting polygon
Args:
* polygon: Numeric array of points (longitude, latitude). It is assumed
to be closed, i.e. first and last points are identical
Note:
Sources
http://paulbourke.net/geometry/polyarea/
http://en.wikipedia.org/wiki/Centroid
"""
# Make sure it is numeric
P = numpy.array(polygon)
# Normalise to ensure numerical accuracy.
# This requirement is backed by tests in test_io.py and without it
# centroids at building footprint level may get shifted outside the
# polygon!
P_origin = numpy.amin(P, axis=0)
P = P - P_origin
# Get area. This calculation could be incorporated to save time
# if necessary as the two formulas are very similar.
A = calculate_polygon_area(polygon, signed=True)
x = P[:, 0]
y = P[:, 1]
# Calculate
# Cx = sum_{i=0}^{N-1} (x_i + x_{i+1})(x_i y_{i+1} - x_{i+1} y_i)/(6A)
# Cy = sum_{i=0}^{N-1} (y_i + y_{i+1})(x_i y_{i+1} - x_{i+1} y_i)/(6A)
a = x[:-1] * y[1:]
b = y[:-1] * x[1:]
cx = x[:-1] + x[1:]
cy = y[:-1] + y[1:]
Cx = numpy.sum(cx * (a - b)) / (6. * A)
Cy = numpy.sum(cy * (a - b)) / (6. * A)
# Translate back to real location
C = numpy.array([Cx, Cy]) + P_origin
return C
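# Illustrative check (hypothetical helper): the centroid of the closed unit
# square is (0.5, 0.5).
def _example_calculate_polygon_centroid():
    unit_square = [[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]
    return calculate_polygon_centroid(unit_square)  # approx [0.5, 0.5]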
def points_between_points(point1, point2, delta):
"""Creates an array of points between two points given a delta
Note:
u = (x1-x0, y1-y0)/L, where
L=sqrt( (x1-x0)^2 + (y1-y0)^2).
If r is the resolution, then the
points will be given by
(x0, y0) + u * n * r for n = 1, 2, ....
while len(n*u*r) < L
"""
x0, y0 = point1
x1, y1 = point2
L = math.sqrt(math.pow((x1 - x0), 2) + math.pow((y1 - y0), 2))
pieces = int(L / delta)
uu = numpy.array([x1 - x0, y1 - y0]) / L
points = [point1]
for nn in range(pieces):
point = point1 + uu * (nn + 1) * delta
points.append(point)
return numpy.array(points)
def points_along_line(line, delta):
"""Calculate a list of points along a line with a given delta
Args:
* line: Numeric array of points (longitude, latitude).
* delta: Decimal number to be used as step
Returns:
* V: Numeric array of points (longitude, latitude).
Note:
Sources
http://paulbourke.net/geometry/polyarea/
http://en.wikipedia.org/wiki/Centroid
"""
# Make sure it is numeric
P = numpy.array(line)
points = []
for i in range(len(P) - 1):
pts = points_between_points(P[i], P[i + 1], delta)
# If the first point of this list is the same
# as the last one recorded, do not use it
if len(points) > 0:
if numpy.allclose(points[-1], pts[0]):
pts = pts[1:]
points.extend(pts)
C = numpy.array(points)
return C
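# Illustrative sketch (hypothetical helper): stepping along the segment
# (0, 0) - (1, 0) with delta 0.25 yields (0, 0), (0.25, 0), (0.5, 0),
# (0.75, 0) and (1, 0).
def _example_points_along_line():
    line = [[0.0, 0.0], [1.0, 0.0]]
    return points_along_line(line, 0.25)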
|
ingenieroariel/inasafe
|
safe/storage/utilities.py
|
Python
|
gpl-3.0
| 28,632
|
[
"NetCDF"
] |
66969c8375af8a60d6b37535023292428c778ae1b190977a2f730aa63a329fe9
|
from pybrain.structure import FeedForwardNetwork
from pybrain.structure import LinearLayer, SigmoidLayer
from pybrain.structure import BiasUnit,TanhLayer
from pybrain.structure import FullConnection
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer, RPropMinusTrainer
from pybrain.tools.validation import ModuleValidator,Validator
from pybrain.utilities import percentError
from pybrain.tools.customxml import NetworkWriter
import numpy
import pylab
import os
def myplot(trns,ctrns,tsts = None,ctsts = None,iter = 0):
plotdir = os.path.join(os.getcwd(),'plot')
pylab.clf()
try:
assert len(tsts) > 1
tstsplot = True
except:
tstsplot = False
try:
assert len(ctsts) > 1
ctstsplot = True
except:
ctstsplot = False
if tstsplot:
pylab.plot(tsts['input'],tsts['target'],c='b')
pylab.scatter(trns['input'],trns['target'],c='r')
pylab.scatter(trns['input'],ctrns,c='y')
if tstsplot and ctstsplot:
pylab.plot(tsts['input'], ctsts,c='g')
pylab.xlabel('x')
pylab.ylabel('y')
pylab.title('Neuron Number:'+str(nneuron))
pylab.grid(True)
plotname = os.path.join(plotdir,('jpq2layers_plot'+ str(iter)))
pylab.savefig(plotname)
# set-up the neural network
nneuron = 5
mom = 0.98
netname="LSL-"+str(nneuron)+"-"+str(mom)
mv=ModuleValidator()
v = Validator()
n=FeedForwardNetwork(name=netname)
inLayer = LinearLayer(1,name='in')
hiddenLayer = SigmoidLayer(nneuron,name='hidden0')
outLayer = LinearLayer(1,name='out')
biasinUnit = BiasUnit(name="bhidden0")
biasoutUnit = BiasUnit(name="bout")
n.addInputModule(inLayer)
n.addModule(hiddenLayer)
n.addModule(biasinUnit)
n.addModule(biasoutUnit)
n.addOutputModule(outLayer)
in_to_hidden = FullConnection(inLayer,hiddenLayer)
bias_to_hidden = FullConnection(biasinUnit,hiddenLayer)
bias_to_out = FullConnection(biasoutUnit,outLayer)
hidden_to_out = FullConnection(hiddenLayer,outLayer)
n.addConnection(in_to_hidden)
n.addConnection(bias_to_hidden)
n.addConnection(bias_to_out)
n.addConnection(hidden_to_out)
n.sortModules()
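# Note (illustrative, untested sketch): a comparable 1-5-1 topology could
# probably also be obtained with pybrain's shortcut helper, e.g.
#   from pybrain.tools.shortcuts import buildNetwork
#   n2 = buildNetwork(1, nneuron, 1, hiddenclass=SigmoidLayer,
#                     outclass=LinearLayer, bias=True)
# The explicit construction above is kept because it names each module and
# exposes the individual connection objects.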
n.reset()
#read the initial weight values from myparam2.txt
filetoopen = os.path.join(os.getcwd(),'myparam2.txt')
if os.path.isfile(filetoopen):
myfile = open('myparam2.txt','r')
c=[]
for line in myfile:
c.append(float(line))
n._setParameters(c)
else:
myfile = open('myparam2.txt','w')
for i in n.params:
myfile.write(str(i)+'\n')
myfile.close()
#activate the neural networks
act = SupervisedDataSet(1,1)
act.addSample((0.2,),(0.880422606518061,))
n.activateOnDataset(act)
#create the test DataSet
x = numpy.arange(0.0, 1.0+0.01, 0.01)
s = 0.5+0.4*numpy.sin(2*numpy.pi*x)
tsts = SupervisedDataSet(1,1)
tsts.setField('input',x.reshape(len(x),1))
tsts.setField('target',s.reshape(len(s),1))
#read the train DataSet from file
trndata = SupervisedDataSet.loadFromFile(os.path.join(os.getcwd(),'trndata'))
#create the trainer
t = BackpropTrainer(n, learningrate = 0.01 ,
momentum = mom)
#train the neural network from the train DataSet
cterrori=1.0
print "trainer momentum:"+str(mom)
for iter in range(25):
t.trainOnDataset(trndata, 1000)
ctrndata = mv.calculateModuleOutput(n,trndata)
cterr = v.MSE(ctrndata,trndata['target'])
relerr = abs(cterr-cterrori)
cterrori = cterr
print 'iteration:',iter+1,'MSE error:',cterr
myplot(trndata,ctrndata,iter=iter+1)
if cterr < 1.e-5 or relerr < 1.e-7:
break
#write the network using xml file
myneuralnet = os.path.join(os.getcwd(),'myneuralnet.xml')
if os.path.isfile(myneuralnet):
NetworkWriter.appendToFile(n,myneuralnet)
else:
NetworkWriter.writeToFile(n,myneuralnet)
#calculate the test DataSet based on the trained Neural Network
ctsts = mv.calculateModuleOutput(n,tsts)
tserr = v.MSE(ctsts,tsts['target'])
print 'MSE error on TSTS:',tserr
myplot(trndata,ctrndata,tsts,ctsts)
pylab.show()
|
fxsjy/pybrain
|
examples/supervised/test_network_read_write/jpq2layersWriter.py
|
Python
|
bsd-3-clause
| 3,945
|
[
"NEURON"
] |
a4784f247035f5074ea9cf769258045093caf317aa9fbb4625f36699d9670a83
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
tutorial, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
"@TUTORIALS_DIR@/01-lennard_jones/01-lennard_jones.py")
@skipIfMissingFeatures
class Tutorial(ut.TestCase):
system = tutorial.system
def test_global_variables(self):
self.assertLess(tutorial.standard_error_total_energy, 2.5)
if __name__ == "__main__":
ut.main()
|
psci2195/espresso-ffans
|
testsuite/scripts/tutorials/test_01-lennard_jones.py
|
Python
|
gpl-3.0
| 1,119
|
[
"ESPResSo"
] |
556c1826c4bf483eb7f397d4e138f9fd06f50d9dd0b6b84965d35b8c666d4d7b
|
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007-2012 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2014 Paul Franklin
# Copyright (C) 2010-2015 Craig J. Anderson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Reports/Graphical Reports/Ancestor Tree"""
#------------------------------------------------------------------------
#
# Python modules
#
#------------------------------------------------------------------------
#------------------------------------------------------------------------
#
# Gramps modules
#
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.gen.errors import ReportError
from gramps.gen.plug.menu import (TextOption, NumberOption, BooleanOption,
EnumeratedListOption, StringOption,
PersonOption)
from gramps.gen.plug.report import Report, MenuReportOptions, stdoptions
from gramps.gen.plug.report import utils
from gramps.gen.plug.docgen import (FontStyle, ParagraphStyle, GraphicsStyle,
FONT_SANS_SERIF, PARA_ALIGN_CENTER)
from gramps.plugins.lib.libtreebase import *
from gramps.plugins.lib.librecurse import AscendPerson
from gramps.gen.proxy import CacheProxyDb
from gramps.gen.display.name import displayer as _nd
PT2CM = utils.pt2cm
#cm2pt = utils.cm2pt
#------------------------------------------------------------------------
#
# Constants
#
#------------------------------------------------------------------------
_BORN = _("birth abbreviation|b."),
_DIED = _("death abbreviation|d."),
_MARR = _("marriage abbreviation|m."),
LVL_GEN, LVL_INDX, LVL_Y = range(3)
#------------------------------------------------------------------------
#
# Box classes
#
#------------------------------------------------------------------------
class PersonBox(BoxBase):
"""
Calculates information about the box that will print on a page
"""
def __init__(self, level):
BoxBase.__init__(self)
self.boxstr = "AC2-box"
#self.level = (level[0]-1, level[1])
self.level = level
def __lt__(self, other):
return self.level[LVL_Y] < other.level[LVL_Y]
class FamilyBox(BoxBase):
"""
Calculates information about the box that will print on a page
"""
def __init__(self, level):
BoxBase.__init__(self)
self.boxstr = "AC2-fam-box"
#self.level = (level[0]-1, level[1])
self.level = level
def __lt__(self, other):
return self.level[LVL_Y] < other.level[LVL_Y]
#------------------------------------------------------------------------
#
# Titles Class(es)
#
#------------------------------------------------------------------------
class TitleN(TitleNoDisplay):
"""No Title class for the report """
def __init__(self, doc, locale):
TitleNoDisplay.__init__(self, doc, "AC2-Title-box")
self._ = locale.translation.sgettext
def calc_title(self, center):
"""Calculate the title of the report"""
#we want no text, but need a text for the TOC in a book!
self.mark_text = self._("Ancestor Graph")
self.text = ''
class TitleA(TitleBox):
"""Title class for the report """
def __init__(self, doc, locale, name_displayer):
self._nd = name_displayer
TitleBox.__init__(self, doc, "AC2-Title-box")
self._ = locale.translation.sgettext
def calc_title(self, center):
"""Calculate the title of the report"""
name = ""
if center is not None:
name = self._nd.display(center)
# feature request 2356: avoid genitive form
self.text = self._("Ancestor Graph for %s") % name
self.set_box_height_width()
#------------------------------------------------------------------------
#
# CalcItems (helper class to calculate text)
# make_ancestor_tree (main recursive functions)
#
#------------------------------------------------------------------------
class CalcItems:
""" A helper class to calculate the default box text
and text for each person / marriage
"""
def __init__(self, dbase):
_gui = GUIConnect()
self._gui = _gui
#calculate the printed lines for each box
#str = ""
#if self.get_val('miss_val'):
# str = "_____"
display_repl = _gui.get_val("replace_list")
self.__calc_l = CalcLines(dbase, display_repl, _gui.locale, _gui.n_d)
self.__blank_father = None
self.__blank_mother = None
self.__blank_father = \
self.__calc_l.calc_lines(None, None, _gui.get_val("father_disp"))
self.__blank_mother = \
self.__calc_l.calc_lines(None, None, _gui.get_val("mother_disp"))
self.center_use = _gui.get_val("center_uses")
self.disp_father = _gui.get_val("father_disp")
self.disp_mother = _gui.get_val("mother_disp")
self.disp_marr = [_gui.get_val("marr_disp")]
self.__blank_marriage = \
self.__calc_l.calc_lines(None, None, self.disp_marr)
def calc_person(self, index, indi_handle, fams_handle):
working_lines = ""
if index[1] % 2 == 0 or (index[1] == 1 and self.center_use == 0):
if indi_handle == fams_handle == None:
working_lines = self.__calc_l.calc_lines(
None, None, self._gui.get_val("father_disp"))
else:
working_lines = self.disp_father
else:
if indi_handle == fams_handle == None:
working_lines = self.__calc_l.calc_lines(
None, None, self._gui.get_val("mother_disp"))
else:
working_lines = self.disp_mother
if indi_handle == fams_handle == None:
return working_lines
else:
return self.__calc_l.calc_lines(indi_handle, fams_handle,
working_lines)
def calc_marriage(self, indi_handle, fams_handle):
if indi_handle == fams_handle == None:
return self.__blank_marriage
else:
return self.__calc_l.calc_lines(indi_handle, fams_handle,
self.disp_marr)
class MakeAncestorTree(AscendPerson):
"""
The main procedure that uses recursion to make the tree based off of a person.
The order in which people are inserted into Persons is important;
this class makes sure that order is done correctly.
"""
def __init__(self, dbase, canvas):
_gui = GUIConnect()
AscendPerson.__init__(self, dbase, _gui.maxgen(), _gui.fill_out())
self.database = dbase
self.canvas = canvas
self.inlc_marr = _gui.inc_marr()
self.inc_sib = _gui.inc_sib()
self.compress_tree = _gui.compress_tree()
self.center_family = None
self.lines = [None] * (_gui.maxgen() + 1)
self.max_generation = 0
self.calc_items = CalcItems(self.database)
def add_person(self, index, indi_handle, fams_handle):
""" Makes a person box and add that person into the Canvas. """
#print str(index) + " add_person " + str(indi_handle)
myself = PersonBox((index[0]-1,) + index[1:])
if index[LVL_GEN] == 1: #Center Person
self.center_family = fams_handle
if index[LVL_GEN] > self.max_generation:
self.max_generation = index[LVL_GEN]
myself.text = self.calc_items.calc_person(index,
indi_handle, fams_handle)
# myself.text[0] = myself.text[0] + ' ' + repr(index) # for debugging
if indi_handle is not None: # None is legal for an empty box
myself.add_mark(self.database,
self.database.get_person_from_handle(indi_handle))
self.canvas.add_box(myself)
#make the lines
indx = index[LVL_GEN]
self.lines[indx] = myself
if indx > 1:
if self.lines[indx-1].line_to is None:
line = LineBase(self.lines[indx-1])
self.lines[indx-1].line_to = line
self.canvas.add_line(line)
else:
line = self.lines[indx-1].line_to
line.add_to(myself)
return myself
def add_person_again(self, index, indi_handle, fams_handle):
self.add_person(index, indi_handle, fams_handle)
def add_marriage(self, index, indi_handle, fams_handle):
""" Makes a marriage box and add that person into the Canvas. """
if not self.inlc_marr:
return
myself = FamilyBox((index[0]-1,) + index[1:])
#calculate the text.
myself.text = self.calc_items.calc_marriage(indi_handle, fams_handle)
self.canvas.add_box(myself)
def y_index(self, x_level, index):
""" Calculate the column or generation that this person is in.
x_level -> 0 to max_gen-1
index -> 1 to (self.max_generation**2)-1
"""
#Calculate which row in the column of people.
tmp_y = index - (2**x_level)
#Calculate which row in the table (yes table) of people.
delta = (2**self.max_generation) // (2**(x_level))
return int((delta/2) + (tmp_y*delta)) -1
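# Illustrative trace (hypothetical numbers): with max_generation = 3, a box
# at x_level = 1, index = 2 gives tmp_y = 0, delta = 8 // 2 = 4 and a
# y_index of int(4/2 + 0) - 1 = 1, i.e. the second of the 2**3 = 8 rows.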
def do_y_indx(self):
''' Make the y_index for all boxes
first from a formula, then remove blank areas around the edges,
then compress the tree if desired
'''
min_y = self.y_index(self.canvas.boxes[0].level[LVL_GEN],
self.canvas.boxes[0].level[LVL_INDX])
for box in self.canvas.boxes:
if "fam" in box.boxstr:
box.level = box.level + \
(self.y_index(box.level[LVL_GEN]-1,
int(box.level[LVL_INDX]/2)),)
else:
box.level = box.level + \
(self.y_index(box.level[LVL_GEN], box.level[LVL_INDX]),)
min_y = min(min_y, box.level[LVL_Y])
#print (str(box.level))
#if a last father (of fathers) does not have a father/parents
#Then there could be a gap. Remove this gap
if min_y > 0:
for box in self.canvas.boxes:
box.level = box.level[:LVL_Y] + (box.level[LVL_Y]-min_y,)
#Now that we have y_index, lets see if we need to squish the tree
self.canvas.boxes.sort() #Sort them on the y_index
if not self.compress_tree:
return
#boxes are already in top down [LVL_Y] form so lets
#set the box in the correct y level depending on compress_tree
y_level = 0
current_y = self.canvas.boxes[0].level[LVL_Y]
for box in self.canvas.boxes:
y_index = box.level[LVL_Y]
if y_index > current_y:
current_y = y_index
y_level += 1
box.level = box.level[:LVL_Y] + (y_level,)
def do_sibs(self):
if not self.inc_sib or self.center_family is None:
return
family = self.database.get_family_from_handle(self.center_family)
mykids = [kid.ref for kid in family.get_child_ref_list()]
if len(mykids) == 1: # No other siblings. Don't do anything.
return
# The first person is the center person and he/she already has our information
center = self.canvas.boxes.pop(self.canvas.boxes.index(self.lines[1]))
line = center.line_to
level = center.level[LVL_Y]
move = level - (len(mykids)//2) + ((len(mykids)+1)%2)
if move < 0:
# more kids than parents. ran off the page. Move them all down
for box in self.canvas.boxes:
box.level = (box.level[0], box.level[1], box.level[2]-move)
move = 0
line.start = []
rrr = -1 # if len(mykids)%2 == 1 else 0
for kid in mykids:
rrr += 1
mee = self.add_person((1, 1, move+rrr), kid, self.center_family)
line.add_from(mee)
#mee.level = (0, 1, level - (len(mykids)//2)+rrr)
def start(self, person_id):
""" go ahead and make it happen """
center = self.database.get_person_from_gramps_id(person_id)
if center is None:
raise ReportError(
_("Person %s is not in the Database") % person_id)
center_h = center.get_handle()
#Step 1. Get the people
self.recurse(center_h)
#Step 2. Calculate the y_index for everyone
self.do_y_indx()
#Step 3. Siblings of the center person
self.do_sibs()
#------------------------------------------------------------------------
#
# Transform Classes
#
#------------------------------------------------------------------------
#------------------------------------------------------------------------
# Class lr_Transform
#------------------------------------------------------------------------
class LRTransform:
"""
setup all of the boxes on the canvas in for a left/right report
"""
def __init__(self, canvas, max_generations):
self.canvas = canvas
self.rept_opts = canvas.report_opts
self.y_offset = self.rept_opts.littleoffset*2 + self.canvas.title.height
def _place(self, box):
""" put the box in it's correct spot """
#1. cm_x
box.x_cm = self.rept_opts.littleoffset
box.x_cm += (box.level[LVL_GEN] *
(self.rept_opts.col_width + self.rept_opts.max_box_width))
#2. cm_y
box.y_cm = self.rept_opts.max_box_height + self.rept_opts.box_pgap
box.y_cm *= box.level[LVL_Y]
box.y_cm += self.y_offset
#if box.height < self.rept_opts.max_box_height:
# box.y_cm += ((self.rept_opts.max_box_height - box.height) /2)
def place(self):
""" Step through boxes so they can be put in the right spot """
#prime the pump
self.__last_y_level = self.canvas.boxes[0].level[LVL_Y]
#go
for box in self.canvas.boxes:
self._place(box)
#------------------------------------------------------------------------
#
# class make_report
#
#------------------------------------------------------------------------
class MakeReport:
def __init__(self, dbase, doc, canvas, font_normal):
self.database = dbase
self.doc = doc
self.canvas = canvas
self.font_normal = font_normal
_gui = GUIConnect()
self.inlc_marr = _gui.inc_marr()
self.compress_tree = _gui.compress_tree()
self.mother_ht = self.father_ht = 0
self.max_generations = 0
def get_height_width(self, box):
"""
obtain width information for each level (x)
obtain height information for each item
"""
self.canvas.set_box_height_width(box)
if box.width > self.canvas.report_opts.max_box_width:
self.canvas.report_opts.max_box_width = box.width #+ box.shadow
if box.level[LVL_Y] > 0:
if box.level[LVL_INDX] % 2 == 0 and box.height > self.father_ht:
self.father_ht = box.height
elif box.level[LVL_INDX] % 2 == 1 and box.height > self.mother_ht:
self.mother_ht = box.height
if box.level[LVL_GEN] > self.max_generations:
self.max_generations = box.level[LVL_GEN]
def get_generations(self):
return self.max_generations
def start(self):
## __gui = GUIConnect()
# 1.
#set the sizes for each box and get the max_generations.
self.father_ht = 0.0
self.mother_ht = 0.0
for box in self.canvas.boxes:
self.get_height_width(box)
if self.compress_tree and not self.inlc_marr:
self.canvas.report_opts.max_box_height = \
min(self.father_ht, self.mother_ht)
else:
self.canvas.report_opts.max_box_height = \
max(self.father_ht, self.mother_ht)
#At this point we know everything we need to make the report.
#Size of each column of people - self.rept_opt.box_width
#size of each column (or row) of lines - self.rept_opt.col_width
#size of each row - self.rept_opt.box_height
#go ahead and set it now.
for box in self.canvas.boxes:
box.width = self.canvas.report_opts.max_box_width
# 2.
#setup the transform class to move around the boxes on the canvas
transform = LRTransform(self.canvas, self.max_generations)
transform.place()
class GUIConnect:
""" This is a BORG object. There is ONLY one.
This gives some common routines that EVERYONE can use, like
getting the value of a GUI variable.
"""
__shared_state = {}
def __init__(self): #We are BORG!
self.__dict__ = self.__shared_state
def set__opts(self, options, locale, name_displayer):
""" Set only once as we are BORG. """
self.__opts = options
self.locale = locale
self.n_d = name_displayer
def get_val(self, val):
""" Get a GUI value. """
value = self.__opts.get_option_by_name(val)
if value:
return value.get_value()
else:
return False
def title_class(self, doc):
""" Return a class that holds the proper title based off of the
GUI options """
title_type = self.get_val('report_title')
if title_type:
return TitleA(doc, self.locale, self.n_d)
else:
return TitleN(doc, self.locale)
def inc_marr(self):
return self.get_val("inc_marr")
def inc_sib(self):
return self.get_val("inc_siblings")
def maxgen(self):
return self.get_val("maxgen")
def fill_out(self):
return self.get_val("fill_out")
def compress_tree(self):
return self.get_val("compress_tree")
#------------------------------------------------------------------------
#
# AncestorTree
#
#------------------------------------------------------------------------
class AncestorTree(Report):
""" AncestorTree Report """
def __init__(self, database, options, user):
"""
Create AncestorTree object that produces the report.
The arguments are:
database - the Gramps database instance
options - instance of the Options class for this report
user - a gen.user.User() instance
"""
Report.__init__(self, database, options, user)
self.options = options
self._user = user
self.set_locale(options.menu.get_option_by_name('trans').get_value())
stdoptions.run_date_format_option(self, options.menu)
stdoptions.run_private_data_option(self, options.menu)
stdoptions.run_living_people_option(self, options.menu, self._locale)
self.database = CacheProxyDb(self.database)
stdoptions.run_name_format_option(self, options.menu)
self._nd = self._name_display
def begin_report(self):
"""
This report needs the following parameters (class variables)
that come in the options class.
max_generations - Maximum number of generations to include.
pagebbg - Whether to include page breaks between generations.
dispf - Display format for the output box.
scale_report - Whether to scale the report to fit the width or all.
incblank - Whether to include blank pages.
compress - Whether to compress chart.
incl_private - Whether to include private data
living_people - How to handle living people
years_past_death - Consider as living this many years after death
We will make
1. a canvas in its full one-page size
2. a page that we wish to print on
and scale up/down either or both of the above as needed/desired.
Almost all of this should be moved into Canvas!
"""
database = self.database
self.connect = GUIConnect()
self.connect.set__opts(self.options.menu, self._locale, self._nd)
#Set up the canvas that we will print on.
style_sheet = self.doc.get_style_sheet()
font_normal = style_sheet.get_paragraph_style("AC2-Normal").get_font()
#The canvas that we will put our report on and print off of
self.canvas = Canvas(self.doc,
ReportOptions(self.doc, font_normal, 'AC2-line'))
self.canvas.report_opts.box_shadow *= \
self.connect.get_val('shadowscale')
self.canvas.report_opts.box_pgap *= self.connect.get_val('box_Yscale')
self.canvas.report_opts.box_mgap *= self.connect.get_val('box_Yscale')
with self._user.progress(_('Ancestor Tree'),
_('Making the Tree...'), 4) as step:
#make the tree onto the canvas
## inlc_marr = self.connect.get_val("inc_marr")
self.max_generations = self.connect.get_val('maxgen')
tree = MakeAncestorTree(database, self.canvas)
tree.start(self.connect.get_val('pid'))
tree = None
step()
#Title
title = self.connect.title_class(self.doc)
center = self.database.get_person_from_gramps_id(
self.connect.get_val('pid'))
title.calc_title(center)
self.canvas.add_title(title)
#make the report as big as it wants to be.
report = MakeReport(database, self.doc, self.canvas, font_normal)
report.start()
self.max_generations = report.get_generations() #already know
report = None
step()
#Note?
if self.connect.get_val("inc_note"):
note_box = NoteBox(self.doc, "AC2-note-box",
self.connect.get_val("note_place"))
subst = SubstKeywords(self.database, self._locale, self._nd,
None, None)
note_box.text = subst.replace_and_clean(
self.connect.get_val('note_disp'))
self.canvas.add_note(note_box)
#Now we have the report in its full size.
#Do we want to scale the report?
one_page = self.connect.get_val("resize_page")
scale_report = self.connect.get_val("scale_tree")
scale = self.canvas.scale_report(one_page,
scale_report != 0,
scale_report == 2)
step()
if scale != 1 or self.connect.get_val('shadowscale') != 1.0:
self.scale_styles(scale)
def write_report(self):
one_page = self.connect.get_val("resize_page")
#scale_report = self.connect.get_val("scale_tree")
#inlc_marr = self.connect.get_val("inc_marr")
inc_border = self.connect.get_val('inc_border')
incblank = self.connect.get_val("inc_blank")
prnnum = self.connect.get_val("inc_pagenum")
#####################
#Setup page information
colsperpage = self.doc.get_usable_width()
colsperpage += self.canvas.report_opts.col_width
colsperpage = int(colsperpage / (self.canvas.report_opts.max_box_width +
self.canvas.report_opts.col_width))
colsperpage = colsperpage or 1
#####################
#Vars
if prnnum:
page_num_box = PageNumberBox(self.doc, 'AC2-box', self._locale)
#TODO - Here
#####################
#ok, everyone is now ready to print on the canvas. Paginate?
self.canvas.paginate(colsperpage, one_page)
#####################
#Yeah!!!
#lets finally make some pages!!!
#####################
pages = self.canvas.page_count(incblank)
with self._user.progress(_('Ancestor Tree'),
_('Printing the Tree...'), pages) as step:
for page in self.canvas.page_iter_gen(incblank):
self.doc.start_page()
#do we need to print a border?
if inc_border:
page.draw_border('AC2-line')
#Do we need to print the page number?
if prnnum:
page_num_box.display(page)
#Print the individual people and lines
page.display()
step()
self.doc.end_page()
def scale_styles(self, scale):
"""
Scale the styles for this report.
"""
style_sheet = self.doc.get_style_sheet()
graph_style = style_sheet.get_draw_style("AC2-box")
graph_style.set_shadow(graph_style.get_shadow(),
self.canvas.report_opts.box_shadow * scale)
graph_style.set_line_width(graph_style.get_line_width() * scale)
style_sheet.add_draw_style("AC2-box", graph_style)
graph_style = style_sheet.get_draw_style("AC2-fam-box")
graph_style.set_shadow(graph_style.get_shadow(),
self.canvas.report_opts.box_shadow * scale)
graph_style.set_line_width(graph_style.get_line_width() * scale)
style_sheet.add_draw_style("AC2-fam-box", graph_style)
graph_style = style_sheet.get_draw_style("AC2-note-box")
#graph_style.set_shadow(graph_style.get_shadow(),
# self.canvas.report_opts.box_shadow * scale)
graph_style.set_line_width(graph_style.get_line_width() * scale)
style_sheet.add_draw_style("AC2-note-box", graph_style)
para_style = style_sheet.get_paragraph_style("AC2-Normal")
font = para_style.get_font()
font.set_size(font.get_size() * scale)
para_style.set_font(font)
style_sheet.add_paragraph_style("AC2-Normal", para_style)
para_style = style_sheet.get_paragraph_style("AC2-Note")
font = para_style.get_font()
font.set_size(font.get_size() * scale)
para_style.set_font(font)
style_sheet.add_paragraph_style("AC2-Note", para_style)
para_style = style_sheet.get_paragraph_style("AC2-Title")
font = para_style.get_font()
font.set_size(font.get_size() * scale)
para_style.set_font(font)
style_sheet.add_paragraph_style("AC2-Title", para_style)
graph_style = GraphicsStyle()
width = graph_style.get_line_width()
width = width * scale
graph_style.set_line_width(width)
style_sheet.add_draw_style("AC2-line", graph_style)
self.doc.set_style_sheet(style_sheet)
#------------------------------------------------------------------------
#
# AncestorTreeOptions
#
#------------------------------------------------------------------------
class AncestorTreeOptions(MenuReportOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, dbase):
self.__db = dbase
self.__pid = None
self.box_Y_sf = None
self.box_shadow_sf = None
MenuReportOptions.__init__(self, name, dbase)
def get_subject(self):
""" Return a string that describes the subject of the report. """
gid = self.__pid.get_value()
person = self.__db.get_person_from_gramps_id(gid)
return _nd.display(person)
def add_menu_options(self, menu):
##################
category_name = _("Tree Options")
self.__pid = PersonOption(_("Center Person"))
self.__pid.set_help(_("The center person for the tree"))
menu.add_option(category_name, "pid", self.__pid)
siblings = BooleanOption(
_('Include siblings of the center person'), False)
siblings.set_help(
_("Whether to only display the center person or all "
"of his/her siblings too"))
menu.add_option(category_name, "inc_siblings", siblings)
self.max_gen = NumberOption(_("Generations"), 10, 1, 50)
self.max_gen.set_help(_("The number of generations to include "
"in the tree"))
menu.add_option(category_name, "maxgen", self.max_gen)
self.fillout = EnumeratedListOption(_("Display unknown\ngenerations"),
0)
self.fillout.set_help(_("The number of generations of empty "
"boxes that will be displayed"))
menu.add_option(category_name, "fill_out", self.fillout)
self.max_gen.connect('value-changed', self.__fillout_vals)
self.__fillout_vals()
compress = BooleanOption(_('Compress tree'), True)
compress.set_help(
_("Whether to remove any extra blank spaces set "
"aside for people that are unknown"))
menu.add_option(category_name, "compress_tree", compress)
#better to 'Show siblings of\nthe center person
#Spouse_disp = EnumeratedListOption(_("Show spouses of\nthe center "
# "person"), 0)
#Spouse_disp.add_item(0, _("No. Do not show Spouses"))
#Spouse_disp.add_item(1, _("Yes, and use the Main Display Format"))
#Spouse_disp.add_item(2, _("Yes, and use the Secondary "
# "Display Format"))
#Spouse_disp.set_help(_("Show spouses of the center person?"))
#menu.add_option(category_name, "Spouse_disp", Spouse_disp)
##################
category_name = _("Report Options")
self.title = EnumeratedListOption(_("Report Title"), 0)
self.title.add_item(0, _("Do not include a title"))
self.title.add_item(1, _("Include Report Title"))
self.title.set_help(_("Choose a title for the report"))
menu.add_option(category_name, "report_title", self.title)
border = BooleanOption(_('Include a border'), False)
border.set_help(_("Whether to make a border around the report."))
menu.add_option(category_name, "inc_border", border)
prnnum = BooleanOption(_('Include Page Numbers'), False)
prnnum.set_help(_("Whether to print page numbers on each page."))
menu.add_option(category_name, "inc_pagenum", prnnum)
self.scale = EnumeratedListOption(_("Scale tree to fit"), 0)
self.scale.add_item(0, _("Do not scale tree"))
self.scale.add_item(1, _("Scale tree to fit page width only"))
self.scale.add_item(2, _("Scale tree to fit the size of the page"))
self.scale.set_help(
_("Whether to scale the tree to fit a specific paper size")
)
menu.add_option(category_name, "scale_tree", self.scale)
self.scale.connect('value-changed', self.__check_blank)
if "BKI" not in self.name.split(","):
self.__onepage = BooleanOption(
_("Resize Page to Fit Tree size\n"
"\n"
"Note: Overrides options in the 'Paper Option' tab"
),
False)
self.__onepage.set_help(
_("Whether to resize the page to fit the size \n"
"of the tree. Note: the page will have a \n"
"non standard size.\n"
"\n"
"With this option selected, the following will happen:\n"
"\n"
"With the 'Do not scale tree' option the page\n"
" is resized to the height/width of the tree\n"
"\n"
"With 'Scale tree to fit page width only' the height of\n"
" the page is resized to the height of the tree\n"
"\n"
"With 'Scale tree to fit the size of the page' the page\n"
" is resized to remove any gap in either height or width"
))
menu.add_option(category_name, "resize_page", self.__onepage)
self.__onepage.connect('value-changed', self.__check_blank)
else:
self.__onepage = None
self.__blank = BooleanOption(_('Include Blank Pages'), True)
self.__blank.set_help(_("Whether to include pages that are blank."))
menu.add_option(category_name, "inc_blank", self.__blank)
self.__check_blank()
##################
category_name = _("Report Options (2)")
stdoptions.add_name_format_option(menu, category_name)
stdoptions.add_living_people_option(menu, category_name)
stdoptions.add_private_data_option(menu, category_name)
locale_opt = stdoptions.add_localization_option(menu, category_name)
stdoptions.add_date_format_option(menu, category_name, locale_opt)
##################
category_name = _("Display")
disp = TextOption(_("Father\nDisplay Format"),
["$n",
"%s $b" %_BORN,
"-{%s $d}" %_DIED])
disp.set_help(_("Display format for the fathers box."))
menu.add_option(category_name, "father_disp", disp)
#Will add when libsubstkeyword supports it.
#missing = EnumeratedListOption(_("Replace missing\nplaces\\dates \
# with"), 0)
#missing.add_item(0, _("Does not display anything"))
#missing.add_item(1, _("Displays '_____'"))
#missing.set_help(_("What will print when information is not known"))
#menu.add_option(category_name, "miss_val", missing)
disp_mom = TextOption(_("Mother\nDisplay Format"),
["$n",
"%s $b" %_BORN,
"%s $m" %_MARR,
"-{%s $d}" %_DIED])
disp_mom.set_help(_("Display format for the mothers box."))
menu.add_option(category_name, "mother_disp", disp_mom)
center_disp = EnumeratedListOption(_("Center person uses\n"
"which format"), 0)
center_disp.add_item(0, _("Use Fathers Display format"))
center_disp.add_item(1, _("Use Mothers display format"))
center_disp.set_help(_("The display format for the center person"))
menu.add_option(category_name, "center_uses", center_disp)
self.incmarr = BooleanOption(_('Include Marriage box'), False)
self.incmarr.set_help(
_("Whether to include a separate marital box in the report"))
menu.add_option(category_name, "inc_marr", self.incmarr)
self.incmarr.connect('value-changed', self._incmarr_changed)
self.marrdisp = StringOption(_("Marriage\nDisplay Format"),
"%s $m" % _MARR)
self.marrdisp.set_help(_("Display format for the marital box."))
menu.add_option(category_name, "marr_disp", self.marrdisp)
self._incmarr_changed()
##################
category_name = _("Advanced")
repldisp = TextOption(
_("Replace Display Format:\n'Replace this'/' with this'"),
[])
repldisp.set_help(_("i.e.\nUnited States of America/U.S.A"))
menu.add_option(category_name, "replace_list", repldisp)
# TODO this code is never used and so I conclude it is for future use
# self.__include_images = BooleanOption(
# _('Include thumbnail images of people'), False)
# self.__include_images.set_help(
# _("Whether to include thumbnails of people."))
# menu.add_option(category_name, "includeImages", self.__include_images)
self.usenote = BooleanOption(_('Include a note'), False)
self.usenote.set_help(_("Whether to include a note on the report."))
menu.add_option(category_name, "inc_note", self.usenote)
self.usenote.connect('value-changed', self._usenote_changed)
self.notedisp = TextOption(_("Note"), [])
self.notedisp.set_help(_("Add a note\n\n"
"$T inserts today's date"))
menu.add_option(category_name, "note_disp", self.notedisp)
locales = NoteType(0, 1)
self.notelocal = EnumeratedListOption(_("Note Location"), 0)
for num, text in locales.note_locals():
self.notelocal.add_item(num, text)
self.notelocal.set_help(_("Where to place the note."))
menu.add_option(category_name, "note_place", self.notelocal)
self._usenote_changed()
self.box_Y_sf = NumberOption(_("inter-box scale factor"),
1.00, 0.10, 2.00, 0.01)
self.box_Y_sf.set_help(
_("Make the inter-box spacing bigger or smaller"))
menu.add_option(category_name, "box_Yscale", self.box_Y_sf)
self.box_shadow_sf = NumberOption(_("box shadow scale factor"),
1.00, 0.00, 2.00, 0.01) # down to 0
self.box_shadow_sf.set_help(_("Make the box shadow bigger or smaller"))
menu.add_option(category_name, "shadowscale", self.box_shadow_sf)
def _incmarr_changed(self):
"""
If Marriage box is not enabled, disable Marriage Display Format box
"""
value = self.incmarr.get_value()
self.marrdisp.set_available(value)
def _usenote_changed(self):
"""
If Note box is not enabled, disable Note Location box
"""
value = self.usenote.get_value()
self.notelocal.set_available(value)
def __check_blank(self):
if self.__onepage:
value = not self.__onepage.get_value()
else:
value = True
off = value and (self.scale.get_value() != 2)
self.__blank.set_available(off)
def __fillout_vals(self):
max_gen = self.max_gen.get_value()
old_val = self.fillout.get_value()
item_list = []
item_list.append([0, _("No generations of empty boxes "
"for unknown ancestors")])
if max_gen > 1:
item_list.append([1, _("One Generation of empty boxes "
"for unknown ancestors")])
item_list.extend(
[itr, str(itr) +
_(" Generations of empty boxes for unknown ancestors")]
for itr in range(2, max_gen))
self.fillout.set_items(item_list)
if old_val+2 > len(item_list):
self.fillout.set_value(len(item_list) -2)
def make_default_style(self, default_style):
"""Make the default output style for the Ancestor Tree."""
## Paragraph Styles:
font = FontStyle()
font.set_size(9)
font.set_type_face(FONT_SANS_SERIF)
para_style = ParagraphStyle()
para_style.set_font(font)
para_style.set_description(
_('The basic style used for the text display.'))
default_style.add_paragraph_style("AC2-Normal", para_style)
box_shadow = PT2CM(font.get_size()) * .6
font = FontStyle()
font.set_size(9)
font.set_type_face(FONT_SANS_SERIF)
para_style = ParagraphStyle()
para_style.set_font(font)
para_style.set_description(
_('The basic style used for the note display.'))
default_style.add_paragraph_style("AC2-Note", para_style)
font = FontStyle()
font.set_size(16)
font.set_type_face(FONT_SANS_SERIF)
para_style = ParagraphStyle()
para_style.set_font(font)
para_style.set_alignment(PARA_ALIGN_CENTER)
para_style.set_description(_('The style used for the title.'))
default_style.add_paragraph_style("AC2-Title", para_style)
## Draw styles
graph_style = GraphicsStyle()
graph_style.set_paragraph_style("AC2-Normal")
graph_style.set_shadow(1, box_shadow) #shadow set by text size
graph_style.set_fill_color((255, 255, 255))
default_style.add_draw_style("AC2-box", graph_style)
graph_style = GraphicsStyle()
graph_style.set_paragraph_style("AC2-Normal")
#graph_style.set_shadow(0, PT2CM(9)) #shadow set by text size
graph_style.set_fill_color((255, 255, 255))
default_style.add_draw_style("AC2-fam-box", graph_style)
graph_style = GraphicsStyle()
graph_style.set_paragraph_style("AC2-Note")
graph_style.set_fill_color((255, 255, 255))
default_style.add_draw_style("AC2-note-box", graph_style)
# TODO this seems meaningless, as only the text is displayed
graph_style = GraphicsStyle()
graph_style.set_paragraph_style("AC2-Title")
graph_style.set_color((0, 0, 0))
graph_style.set_fill_color((255, 255, 255))
graph_style.set_line_width(0)
graph_style.set_description(_("Cannot edit this reference"))
default_style.add_draw_style("AC2-Title-box", graph_style)
graph_style = GraphicsStyle()
default_style.add_draw_style("AC2-line", graph_style)
#=====================================
#But even if you should suffer for what is right, you are blessed.
#"Do not fear what they fear ; do not be frightened."
#Take Courage
#1 Peter 3:14
|
jralls/gramps
|
gramps/plugins/drawreport/ancestortree.py
|
Python
|
gpl-2.0
| 42,285
|
[
"Brian"
] |
7e2c0053b31cca3ab901ba7415757a109451ca74c3887fa3e39c505307b370b0
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for Chromium.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import re
import subprocess
import sys
_EXCLUDED_PATHS = (
r"^breakpad[\\\/].*",
r"^native_client_sdk[\\\/]src[\\\/]build_tools[\\\/]make_rules.py",
r"^native_client_sdk[\\\/]src[\\\/]build_tools[\\\/]make_simple.py",
r"^net[\\\/]tools[\\\/]spdyshark[\\\/].*",
r"^skia[\\\/].*",
r"^v8[\\\/].*",
r".*MakeFile$",
r".+_autogen\.h$",
r"^cc[\\\/].*",
r"^webkit[\\\/]compositor_bindings[\\\/].*",
r".+[\\\/]pnacl_shim\.c$",
)
_TEST_ONLY_WARNING = (
'You might be calling functions intended only for testing from\n'
'production code. It is OK to ignore this warning if you know what\n'
'you are doing, as the heuristics used to detect the situation are\n'
'not perfect. The commit queue will not block on this warning.\n'
'Email joi@chromium.org if you have questions.')
_BANNED_OBJC_FUNCTIONS = (
(
'addTrackingRect:',
(
'The use of -[NSView addTrackingRect:owner:userData:assumeInside:] is'
'prohibited. Please use CrTrackingArea instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
False,
),
(
'NSTrackingArea',
(
'The use of NSTrackingAreas is prohibited. Please use CrTrackingArea',
'instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
False,
),
(
'convertPointFromBase:',
(
'The use of -[NSView convertPointFromBase:] is almost certainly wrong.',
'Please use |convertPoint:(point) fromView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
(
'convertPointToBase:',
(
'The use of -[NSView convertPointToBase:] is almost certainly wrong.',
'Please use |convertPoint:(point) toView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
(
'convertRectFromBase:',
(
'The use of -[NSView convertRectFromBase:] is almost certainly wrong.',
'Please use |convertRect:(point) fromView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
(
'convertRectToBase:',
(
'The use of -[NSView convertRectToBase:] is almost certainly wrong.',
'Please use |convertRect:(point) toView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
(
'convertSizeFromBase:',
(
'The use of -[NSView convertSizeFromBase:] is almost certainly wrong.',
'Please use |convertSize:(point) fromView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
(
'convertSizeToBase:',
(
'The use of -[NSView convertSizeToBase:] is almost certainly wrong.',
'Please use |convertSize:(point) toView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
)
_BANNED_CPP_FUNCTIONS = (
# Make sure that gtest's FRIEND_TEST() macro is not used; the
# FRIEND_TEST_ALL_PREFIXES() macro from base/gtest_prod_util.h should be
# used instead since that allows for FLAKY_, FAILS_ and DISABLED_ prefixes.
(
'FRIEND_TEST(',
(
'Chromium code should not use gtest\'s FRIEND_TEST() macro. Include',
'base/gtest_prod_util.h and use FRIEND_TEST_ALL_PREFIXES() instead.',
),
False,
),
(
'ScopedAllowIO',
(
'New code should not use ScopedAllowIO. Post a task to the blocking',
'pool or the FILE thread instead.',
),
True,
),
(
'FilePathWatcher::Delegate',
(
'New code should not use FilePathWatcher::Delegate. Use the callback',
'interface instead.',
),
False,
),
(
'browser::FindLastActiveWithProfile',
(
'This function is deprecated and we\'re working on removing it. Pass',
'more context to get a Browser*, like a WebContents, window, or session',
'id. Talk to ben@ or jam@ for more information.',
),
True,
),
(
'browser::FindAnyBrowser',
(
'This function is deprecated and we\'re working on removing it. Pass',
'more context to get a Browser*, like a WebContents, window, or session',
'id. Talk to ben@ or jam@ for more information.',
),
True,
),
(
'browser::FindOrCreateTabbedBrowser',
(
'This function is deprecated and we\'re working on removing it. Pass',
'more context to get a Browser*, like a WebContents, window, or session',
'id. Talk to ben@ or jam@ for more information.',
),
True,
),
(
'browser::FindTabbedBrowser',
(
'This function is deprecated and we\'re working on removing it. Pass',
'more context to get a Browser*, like a WebContents, window, or session',
'id. Talk to ben@ or jam@ for more information.',
),
True,
),
)
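# Added commentary (not in the original file): each entry in the two banned-
# function tables above is a 3-tuple of (substring to look for in changed
# lines, tuple of message lines to print, treat_as_error flag). In
# _CheckNoBannedFunctions below, entries whose flag is True produce a
# presubmit error; entries with False only produce a prompt warning.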
def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
"""Attempts to prevent use of functions intended only for testing in
non-testing code. For now this is just a best-effort implementation
that ignores header files and may have some false positives. A
better implementation would probably need a proper C++ parser.
"""
# We only scan .cc files and the like, as the declaration of
# for-testing functions in header files are hard to distinguish from
# calls to such functions without a proper C++ parser.
platform_specifiers = r'(_(android|chromeos|gtk|mac|posix|win))?'
source_extensions = r'\.(cc|cpp|cxx|mm)$'
file_inclusion_pattern = r'.+%s' % source_extensions
file_exclusion_patterns = (
r'.*[/\\](fake_|test_|mock_).+%s' % source_extensions,
r'.+_test_(base|support|util)%s' % source_extensions,
r'.+_(api|browser|perf|unit|ui)?test%s%s' % (platform_specifiers,
source_extensions),
r'.+profile_sync_service_harness%s' % source_extensions,
)
path_exclusion_patterns = (
r'.*[/\\](test|tool(s)?)[/\\].*',
# At request of folks maintaining this folder.
r'chrome[/\\]browser[/\\]automation[/\\].*',
)
base_function_pattern = r'ForTest(ing)?|for_test(ing)?'
inclusion_pattern = input_api.re.compile(r'(%s)\s*\(' % base_function_pattern)
exclusion_pattern = input_api.re.compile(
r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % (
base_function_pattern, base_function_pattern))
def FilterFile(affected_file):
black_list = (file_exclusion_patterns + path_exclusion_patterns +
_EXCLUDED_PATHS + input_api.DEFAULT_BLACK_LIST)
return input_api.FilterSourceFile(
affected_file,
white_list=(file_inclusion_pattern, ),
black_list=black_list)
problems = []
for f in input_api.AffectedSourceFiles(FilterFile):
local_path = f.LocalPath()
lines = input_api.ReadFile(f).splitlines()
line_number = 0
for line in lines:
if (inclusion_pattern.search(line) and
not exclusion_pattern.search(line)):
problems.append(
'%s:%d\n %s' % (local_path, line_number, line.strip()))
line_number += 1
if problems:
if not input_api.is_committing:
return [output_api.PresubmitPromptWarning(_TEST_ONLY_WARNING, problems)]
else:
# We don't warn on commit, to avoid stopping commits going through CQ.
return [output_api.PresubmitNotifyResult(_TEST_ONLY_WARNING, problems)]
else:
return []
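# Illustrative examples (added commentary; the lines are hypothetical) of how
# the patterns above classify changed source lines:
#   flagged:  widget->InitForTesting(args);       matches inclusion_pattern
#   skipped:  void Widget::ResetForTesting() {    matches exclusion_pattern
#   skipped:  anything in excluded files such as foo_unittest.cc, or under
#             test/ or tools/ paths (see the file/path exclusion patterns).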
def _CheckNoIOStreamInHeaders(input_api, output_api):
"""Checks to make sure no .h files include <iostream>."""
files = []
pattern = input_api.re.compile(r'^#include\s*<iostream>',
input_api.re.MULTILINE)
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if not f.LocalPath().endswith('.h'):
continue
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
if len(files):
return [ output_api.PresubmitError(
'Do not #include <iostream> in header files, since it inserts static '
'initialization into every file including the header. Instead, '
'#include <ostream>. See http://crbug.com/94794',
files) ]
return []
def _CheckNoUNIT_TESTInSourceFiles(input_api, output_api):
"""Checks to make sure no source files use UNIT_TEST"""
problems = []
for f in input_api.AffectedFiles():
if (not f.LocalPath().endswith(('.cc', '.mm'))):
continue
for line_num, line in f.ChangedContents():
if 'UNIT_TEST' in line:
problems.append(' %s:%d' % (f.LocalPath(), line_num))
if not problems:
return []
return [output_api.PresubmitPromptWarning('UNIT_TEST is only for headers.\n' +
'\n'.join(problems))]
def _CheckNoNewWStrings(input_api, output_api):
"""Checks to make sure we don't introduce use of wstrings."""
problems = []
for f in input_api.AffectedFiles():
if (not f.LocalPath().endswith(('.cc', '.h')) or
f.LocalPath().endswith('test.cc')):
continue
allowWString = False
for line_num, line in f.ChangedContents():
if 'presubmit: allow wstring' in line:
allowWString = True
elif not allowWString and 'wstring' in line:
problems.append(' %s:%d' % (f.LocalPath(), line_num))
allowWString = False
else:
allowWString = False
if not problems:
return []
return [output_api.PresubmitPromptWarning('New code should not use wstrings.'
' If you are calling a cross-platform API that accepts a wstring, '
'fix the API.\n' +
'\n'.join(problems))]
def _CheckNoDEPSGIT(input_api, output_api):
"""Make sure .DEPS.git is never modified manually."""
if any(f.LocalPath().endswith('.DEPS.git') for f in
input_api.AffectedFiles()):
return [output_api.PresubmitError(
'Never commit changes to .DEPS.git. This file is maintained by an\n'
'automated system based on what\'s in DEPS and your changes will be\n'
'overwritten.\n'
'See http://code.google.com/p/chromium/wiki/UsingNewGit#Rolling_DEPS\n'
'for more information')]
return []
def _CheckNoBannedFunctions(input_api, output_api):
"""Make sure that banned functions are not used."""
warnings = []
errors = []
file_filter = lambda f: f.LocalPath().endswith(('.mm', '.m', '.h'))
for f in input_api.AffectedFiles(file_filter=file_filter):
for line_num, line in f.ChangedContents():
for func_name, message, error in _BANNED_OBJC_FUNCTIONS:
if func_name in line:
problems = warnings
if error:
problems = errors
problems.append(' %s:%d:' % (f.LocalPath(), line_num))
for message_line in message:
problems.append(' %s' % message_line)
file_filter = lambda f: f.LocalPath().endswith(('.cc', '.mm', '.h'))
for f in input_api.AffectedFiles(file_filter=file_filter):
for line_num, line in f.ChangedContents():
for func_name, message, error in _BANNED_CPP_FUNCTIONS:
if func_name in line:
problems = warnings
if error:
problems = errors
problems.append(' %s:%d:' % (f.LocalPath(), line_num))
for message_line in message:
problems.append(' %s' % message_line)
result = []
if (warnings):
result.append(output_api.PresubmitPromptWarning(
'Banned functions were used.\n' + '\n'.join(warnings)))
if (errors):
result.append(output_api.PresubmitError(
'Banned functions were used.\n' + '\n'.join(errors)))
return result
def _CheckNoPragmaOnce(input_api, output_api):
"""Make sure that banned functions are not used."""
files = []
pattern = input_api.re.compile(r'^#pragma\s+once',
input_api.re.MULTILINE)
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if not f.LocalPath().endswith('.h'):
continue
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
if files:
return [output_api.PresubmitError(
'Do not use #pragma once in header files.\n'
'See http://www.chromium.org/developers/coding-style#TOC-File-headers',
files)]
return []
def _CheckNoTrinaryTrueFalse(input_api, output_api):
"""Checks to make sure we don't introduce use of foo ? true : false."""
problems = []
pattern = input_api.re.compile(r'\?\s*(true|false)\s*:\s*(true|false)')
for f in input_api.AffectedFiles():
if not f.LocalPath().endswith(('.cc', '.h', '.inl', '.m', '.mm')):
continue
for line_num, line in f.ChangedContents():
if pattern.match(line):
problems.append(' %s:%d' % (f.LocalPath(), line_num))
if not problems:
return []
return [output_api.PresubmitPromptWarning(
'Please consider avoiding the "? true : false" pattern if possible.\n' +
'\n'.join(problems))]
def _CheckUnwantedDependencies(input_api, output_api):
"""Runs checkdeps on #include statements added in this
change. Breaking - rules is an error, breaking ! rules is a
warning.
"""
# We need to wait until we have an input_api object and use this
# roundabout construct to import checkdeps because this file is
# eval-ed and thus doesn't have __file__.
original_sys_path = sys.path
try:
sys.path = sys.path + [input_api.os_path.join(
input_api.PresubmitLocalPath(), 'tools', 'checkdeps')]
import checkdeps
from cpp_checker import CppChecker
from rules import Rule
finally:
# Restore sys.path to what it was before.
sys.path = original_sys_path
added_includes = []
for f in input_api.AffectedFiles():
if not CppChecker.IsCppFile(f.LocalPath()):
continue
changed_lines = [line for line_num, line in f.ChangedContents()]
added_includes.append([f.LocalPath(), changed_lines])
deps_checker = checkdeps.DepsChecker()
error_descriptions = []
warning_descriptions = []
for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes(
added_includes):
description_with_path = '%s\n %s' % (path, rule_description)
if rule_type == Rule.DISALLOW:
error_descriptions.append(description_with_path)
else:
warning_descriptions.append(description_with_path)
results = []
if error_descriptions:
results.append(output_api.PresubmitError(
'You added one or more #includes that violate checkdeps rules.',
error_descriptions))
if warning_descriptions:
if not input_api.is_committing:
warning_factory = output_api.PresubmitPromptWarning
else:
# We don't want to block use of the CQ when there is a warning
# of this kind, so we only show a message when committing.
warning_factory = output_api.PresubmitNotifyResult
results.append(warning_factory(
'You added one or more #includes of files that are temporarily\n'
'allowed but being removed. Can you avoid introducing the\n'
'#include? See relevant DEPS file(s) for details and contacts.',
warning_descriptions))
return results
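# Example (added commentary; the rules are hypothetical) of the checkdeps rule
# syntax this check evaluates, as written in DEPS include_rules:
#   "+base"            allowed include prefix
#   "-content/public"  disallowed: a new #include here is reported as an error
#   "!net/foo"         temporarily allowed: reported with the removal warning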
def _CheckFilePermissions(input_api, output_api):
"""Check that all files have their permissions properly set."""
args = [sys.executable, 'tools/checkperms/checkperms.py', '--root',
input_api.change.RepositoryRoot()]
for f in input_api.AffectedFiles():
args += ['--file', f.LocalPath()]
errors = []
(errors, stderrdata) = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()
results = []
if errors:
results.append(output_api.PresubmitError('checkperms.py failed.',
errors))
return results
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
results.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, excluded_paths=_EXCLUDED_PATHS))
results.extend(_CheckAuthorizedAuthor(input_api, output_api))
results.extend(
_CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api))
results.extend(_CheckNoIOStreamInHeaders(input_api, output_api))
results.extend(_CheckNoUNIT_TESTInSourceFiles(input_api, output_api))
results.extend(_CheckNoNewWStrings(input_api, output_api))
results.extend(_CheckNoDEPSGIT(input_api, output_api))
results.extend(_CheckNoBannedFunctions(input_api, output_api))
results.extend(_CheckNoPragmaOnce(input_api, output_api))
results.extend(_CheckNoTrinaryTrueFalse(input_api, output_api))
results.extend(_CheckUnwantedDependencies(input_api, output_api))
results.extend(_CheckFilePermissions(input_api, output_api))
return results
def _CheckSubversionConfig(input_api, output_api):
"""Verifies the subversion config file is correctly setup.
Checks that autoprops are enabled, returns an error otherwise.
"""
join = input_api.os_path.join
if input_api.platform == 'win32':
appdata = input_api.environ.get('APPDATA', '')
if not appdata:
return [output_api.PresubmitError('%APPDATA% is not configured.')]
path = join(appdata, 'Subversion', 'config')
else:
home = input_api.environ.get('HOME', '')
if not home:
return [output_api.PresubmitError('$HOME is not configured.')]
path = join(home, '.subversion', 'config')
error_msg = (
'Please look at http://dev.chromium.org/developers/coding-style to\n'
'configure your subversion configuration file. This enables automatic\n'
'properties to simplify the project maintenance.\n'
'Pro-tip: just download and install\n'
'http://src.chromium.org/viewvc/chrome/trunk/tools/build/slave/config\n')
try:
lines = open(path, 'r').read().splitlines()
# Make sure auto-props is enabled and check for 2 Chromium standard
# auto-prop.
if (not '*.cc = svn:eol-style=LF' in lines or
not '*.pdf = svn:mime-type=application/pdf' in lines or
not 'enable-auto-props = yes' in lines):
return [
output_api.PresubmitNotifyResult(
'It looks like you have not configured your subversion config '
'file or it is not up-to-date.\n' + error_msg)
]
except (OSError, IOError):
return [
output_api.PresubmitNotifyResult(
'Can\'t find your subversion config file.\n' + error_msg)
]
return []
def _CheckAuthorizedAuthor(input_api, output_api):
"""For non-googler/chromites committers, verify the author's email address is
in AUTHORS.
"""
# TODO(maruel): Add it to input_api?
import fnmatch
author = input_api.change.author_email
if not author:
input_api.logging.info('No author, skipping AUTHOR check')
return []
authors_path = input_api.os_path.join(
input_api.PresubmitLocalPath(), 'AUTHORS')
valid_authors = (
input_api.re.match(r'[^#]+\s+\<(.+?)\>\s*$', line)
for line in open(authors_path))
valid_authors = [item.group(1).lower() for item in valid_authors if item]
if input_api.verbose:
print 'Valid authors are %s' % ', '.join(valid_authors)
if not any(fnmatch.fnmatch(author.lower(), valid) for valid in valid_authors):
return [output_api.PresubmitPromptWarning(
('%s is not in AUTHORS file. If you are a new contributor, please visit'
'\n'
'http://www.chromium.org/developers/contributing-code and read the '
'"Legal" section\n'
'If you are a chromite, verify the contributor signed the CLA.') %
author)]
return []
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
# TODO(thestig) temporarily disabled, doesn't work in third_party/
#results.extend(input_api.canned_checks.CheckSvnModifiedDirectories(
# input_api, output_api, sources))
# Make sure the tree is 'open'.
results.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api,
output_api,
json_url='http://chromium-status.appspot.com/current?format=json'))
results.extend(input_api.canned_checks.CheckRietveldTryJobExecution(input_api,
output_api, 'http://codereview.chromium.org',
('win_rel', 'linux_rel', 'mac_rel, win:compile'),
'tryserver@chromium.org'))
results.extend(input_api.canned_checks.CheckChangeHasBugField(
input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
results.extend(_CheckSubversionConfig(input_api, output_api))
return results
def GetPreferredTrySlaves(project, change):
files = change.LocalPaths()
if not files:
return []
if all(re.search('\.(m|mm)$|(^|[/_])mac[/_.]', f) for f in files):
return ['mac_rel', 'mac_asan']
if all(re.search('(^|[/_])win[/_.]', f) for f in files):
return ['win_rel']
if all(re.search('(^|[/_])android[/_.]', f) for f in files):
return ['android_dbg']
if all(re.search('^native_client_sdk', f) for f in files):
return ['linux_nacl_sdk', 'win_nacl_sdk', 'mac_nacl_sdk']
if all(re.search('[/_]ios[/_.]', f) for f in files):
return ['ios_rel_device', 'ios_dbg_simulator']
trybots = ['win_rel', 'linux_rel', 'mac_rel', 'linux_clang:compile',
'linux_chromeos', 'android_dbg', 'linux_asan', 'mac_asan',
'ios_rel_device', 'ios_dbg_simulator']
# Match things like path/aura/file.cc and path/file_aura.cc.
# Same for ash and chromeos.
if any(re.search('[/_](ash|aura)', f) for f in files):
trybots += ['linux_chromeos', 'linux_chromeos_clang:compile', 'win_aura',
'linux_chromeos_asan']
else:
if any(re.search('[/_]chromeos', f) for f in files):
trybots += ['linux_chromeos', 'linux_chromeos_clang:compile',
'linux_chromeos_asan']
return trybots
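# Added commentary (hypothetical change list): a CL touching only
# ['chrome/browser/ui/cocoa/foo.mm', 'chrome/browser/ui/cocoa/foo_mac.cc']
# matches the mac-only branch above and would get ['mac_rel', 'mac_asan'];
# a CL mixing platforms falls through to the default trybot list.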
|
junmin-zhu/chromium-rivertrail
|
PRESUBMIT.py
|
Python
|
bsd-3-clause
| 22,478
|
[
"VisIt"
] |
3e4def88c932d64e279a957b216b71af5f2005eff62654b687c7dd9b9a590f63
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Foster-Boys localization
'''
import numpy
from functools import reduce
from pyscf import lib
from pyscf.lib import logger
from pyscf.soscf import ciah
from pyscf.lo import orth, cholesky_mos
from pyscf import __config__
def kernel(localizer, mo_coeff=None, callback=None, verbose=None):
from pyscf.tools import mo_mapping
if mo_coeff is not None:
localizer.mo_coeff = numpy.asarray(mo_coeff, order='C')
if localizer.mo_coeff.shape[1] <= 1:
return localizer.mo_coeff
if localizer.verbose >= logger.WARN:
localizer.check_sanity()
localizer.dump_flags()
cput0 = (logger.process_clock(), logger.perf_counter())
log = logger.new_logger(localizer, verbose=verbose)
if localizer.conv_tol_grad is None:
conv_tol_grad = numpy.sqrt(localizer.conv_tol*.1)
log.info('Set conv_tol_grad to %g', conv_tol_grad)
else:
conv_tol_grad = localizer.conv_tol_grad
if mo_coeff is None:
if getattr(localizer, 'mol', None) and localizer.mol.natm == 0:
# For customized Hamiltonian
u0 = localizer.get_init_guess('random')
else:
u0 = localizer.get_init_guess(localizer.init_guess)
else:
u0 = localizer.get_init_guess(None)
rotaiter = ciah.rotate_orb_cc(localizer, u0, conv_tol_grad, verbose=log)
u, g_orb, stat = next(rotaiter)
cput1 = log.timer('initializing CIAH', *cput0)
tot_kf = stat.tot_kf
tot_hop = stat.tot_hop
conv = False
e_last = 0
for imacro in range(localizer.max_cycle):
norm_gorb = numpy.linalg.norm(g_orb)
u0 = lib.dot(u0, u)
e = localizer.cost_function(u0)
e_last, de = e, e-e_last
log.info('macro= %d f(x)= %.14g delta_f= %g |g|= %g %d KF %d Hx',
imacro+1, e, de, norm_gorb, stat.tot_kf+1, stat.tot_hop)
cput1 = log.timer('cycle= %d'%(imacro+1), *cput1)
if (norm_gorb < conv_tol_grad and abs(de) < localizer.conv_tol):
conv = True
if callable(callback):
callback(locals())
if conv:
break
u, g_orb, stat = rotaiter.send(u0)
tot_kf += stat.tot_kf
tot_hop += stat.tot_hop
rotaiter.close()
log.info('macro X = %d f(x)= %.14g |g|= %g %d intor %d KF %d Hx',
imacro+1, e, norm_gorb,
(imacro+1)*2, tot_kf+imacro+1, tot_hop)
# Sort the localized orbitals, to make each localized orbital as close as
# possible to the corresponding input orbital
sorted_idx = mo_mapping.mo_1to1map(u0)
localizer.mo_coeff = lib.dot(localizer.mo_coeff, u0[:,sorted_idx])
return localizer.mo_coeff
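# Minimal usage sketch (added commentary, not part of the module; molecule,
# basis and orbital slice are arbitrary examples):
#
#   from pyscf import gto, scf
#   from pyscf.lo.boys import Boys
#   mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='sto-3g')
#   mf = scf.RHF(mol).run()
#   loc_orbs = Boys(mol, mf.mo_coeff[:, :5]).kernel()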
def dipole_integral(mol, mo_coeff):
# The gauge origin has no effect on the maximization of |<r>|^2
# Set to charge center for physical significance of <r>
charge_center = numpy.einsum('z,zx->x', mol.atom_charges(), mol.atom_coords())
with mol.with_common_origin(charge_center):
dip = numpy.asarray([reduce(lib.dot, (mo_coeff.conj().T, x, mo_coeff))
for x in mol.intor_symmetric('int1e_r', comp=3)])
return dip
def atomic_init_guess(mol, mo_coeff):
s = mol.intor_symmetric('int1e_ovlp')
c = orth.orth_ao(mol, s=s)
mo = reduce(numpy.dot, (c.conj().T, s, mo_coeff))
# Find the AOs which have largest overlap to MOs
idx = numpy.argsort(numpy.einsum('pi,pi->p', mo.conj(), mo))
nmo = mo.shape[1]
idx = sorted(idx[-nmo:])
# Rotate mo_coeff, make it as close as possible to AOs
u, w, vh = numpy.linalg.svd(mo[idx])
return lib.dot(u, vh).conj().T
class Boys(ciah.CIAHOptimizer):
r'''
The Foster-Boys localization optimizer that maximizes the orbital dipole
\sum_i | <i| r |i> |^2
Args:
mol : Mole object
Kwargs:
mo_coeff : size (N,N) np.array
The orbital space to localize for Boys localization.
When initializing the localization optimizer with ``bopt = Boys(mol, mo_coeff)``,
note that these orbitals ``mo_coeff`` may or may not be used as the initial
guess, depending on the attribute ``.init_guess``. If ``.init_guess``
is set to None, the ``mo_coeff`` will be used as initial guess. If
``.init_guess`` is 'atomic', a few atomic orbitals will be
constructed inside the space of the input orbitals and the atomic
orbitals will be used as initial guess.
Note when calling .kernel(orb) method with a set of orbitals as
argument, the orbitals will be used as initial guess regardless of
the value of the attributes .mo_coeff and .init_guess.
Attributes for Boys class:
verbose : int
Print level. Default value equals to :class:`Mole.verbose`.
max_memory : float or int
Allowed memory in MB. Default value equals to :class:`Mole.max_memory`.
conv_tol : float
Converge threshold. Default 1e-6
conv_tol_grad : float
Converge threshold for orbital rotation gradients. Default 1e-3
max_cycle : int
The max. number of macro iterations. Default 100
max_iters : int
The max. number of iterations in each macro iteration. Default 20
max_stepsize : float
The step size for orbital rotation. A small step (0.005 - 0.05) is preferred.
Default 0.03.
init_guess : str or None
Initial guess for optimization. If set to None, orbitals defined
by the attribute .mo_coeff will be used as initial guess. If set
to 'atomic', atomic orbitals will be used as initial guess. If set
to 'cholesky', then cholesky orbitals will be used as the initial guess.
Default is 'atomic'.
Saved results
mo_coeff : ndarray
Localized orbitals
'''
conv_tol = getattr(__config__, 'lo_boys_Boys_conv_tol', 1e-6)
conv_tol_grad = getattr(__config__, 'lo_boys_Boys_conv_tol_grad', None)
max_cycle = getattr(__config__, 'lo_boys_Boys_max_cycle', 100)
max_iters = getattr(__config__, 'lo_boys_Boys_max_iters', 20)
max_stepsize = getattr(__config__, 'lo_boys_Boys_max_stepsize', .05)
ah_trust_region = getattr(__config__, 'lo_boys_Boys_ah_trust_region', 3)
ah_start_tol = getattr(__config__, 'lo_boys_Boys_ah_start_tol', 1e9)
ah_max_cycle = getattr(__config__, 'lo_boys_Boys_ah_max_cycle', 40)
init_guess = getattr(__config__, 'lo_boys_Boys_init_guess', 'atomic')
def __init__(self, mol, mo_coeff=None):
ciah.CIAHOptimizer.__init__(self)
self.mol = mol
self.stdout = mol.stdout
self.verbose = mol.verbose
self.mo_coeff = mo_coeff
keys = set(('conv_tol', 'conv_tol_grad', 'max_cycle', 'max_iters',
'max_stepsize', 'ah_trust_region', 'ah_start_tol',
'ah_max_cycle', 'init_guess'))
self._keys = set(self.__dict__.keys()).union(keys)
def dump_flags(self, verbose=None):
log = logger.new_logger(self, verbose)
log.info('\n')
log.info('******** %s ********', self.__class__)
log.info('conv_tol = %s' , self.conv_tol )
log.info('conv_tol_grad = %s' , self.conv_tol_grad )
log.info('max_cycle = %s' , self.max_cycle )
log.info('max_stepsize = %s' , self.max_stepsize )
log.info('max_iters = %s' , self.max_iters )
log.info('kf_interval = %s' , self.kf_interval )
log.info('kf_trust_region = %s', self.kf_trust_region)
log.info('ah_start_tol = %s' , self.ah_start_tol )
log.info('ah_start_cycle = %s' , self.ah_start_cycle )
log.info('ah_level_shift = %s' , self.ah_level_shift )
log.info('ah_conv_tol = %s' , self.ah_conv_tol )
log.info('ah_lindep = %s' , self.ah_lindep )
log.info('ah_max_cycle = %s' , self.ah_max_cycle )
log.info('ah_trust_region = %s', self.ah_trust_region)
log.info('init_guess = %s' , self.init_guess )
def gen_g_hop(self, u):
mo_coeff = lib.dot(self.mo_coeff, u)
dip = dipole_integral(self.mol, mo_coeff)
g0 = numpy.einsum('xii,xip->pi', dip, dip)
g = -self.pack_uniq_var(g0-g0.conj().T) * 2
h_diag = numpy.einsum('xii,xpp->pi', dip, dip) * 2
h_diag-= g0.diagonal() + g0.diagonal().reshape(-1,1)
h_diag+= numpy.einsum('xip,xip->pi', dip, dip) * 2
h_diag+= numpy.einsum('xip,xpi->pi', dip, dip) * 2
h_diag = -self.pack_uniq_var(h_diag) * 2
#:nmo = mo_coeff.shape[1]
#:h = numpy.einsum('xjj,xjq,pk->pjqk', dip, dip, numpy.eye(nmo))
#:h+= numpy.einsum('xqq,xjq,pk->pjqk', dip, dip, numpy.eye(nmo))
#:h+= numpy.einsum('xjq,xjp,jk->pjqk', dip, dip, numpy.eye(nmo))
#:h+= numpy.einsum('xjp,xkp,pq->pjqk', dip, dip, numpy.eye(nmo))
#:h-= numpy.einsum('xjj,xkp,jq->pjqk', dip, dip, numpy.eye(nmo))
#:h-= numpy.einsum('xpp,xjq,pk->pjqk', dip, dip, numpy.eye(nmo))
#:h-= numpy.einsum('xjp,xpq,pk->pjqk', dip, dip, numpy.eye(nmo))*2
#:h = h - h.transpose(0,1,3,2)
#:h = h - h.transpose(1,0,2,3)
#:h = h + h.transpose(2,3,0,1)
#:h *= -.5
#:idx = numpy.tril_indices(nmo, -1)
#:h = h[idx][:,idx[0],idx[1]]
g0 = g0 + g0.conj().T
def h_op(x):
x = self.unpack_uniq_var(x)
norb = x.shape[0]
#:hx = numpy.einsum('qp,xjj,xjq->pj', x, dip, dip)
#:hx+= numpy.einsum('qp,xqq,xjq->pj', x, dip, dip)
#:hx+= numpy.einsum('jk,xkk,xkp->pj', x, dip, dip)
#:hx+= numpy.einsum('jk,xpp,xkp->pj', x, dip, dip)
#:hx+= numpy.einsum('qj,xjq,xjp->pj', x, dip, dip)
#:hx+= numpy.einsum('pk,xjp,xkp->pj', x, dip, dip)
#:hx-= numpy.einsum('qp,xpp,xjq->pj', x, dip, dip) * 2
#:hx-= numpy.einsum('qp,xjp,xpq->pj', x, dip, dip) * 2
#:hx+= numpy.einsum('qj,xjp,xjq->pj', x, dip, dip)
#:hx+= numpy.einsum('pk,xkp,xjp->pj', x, dip, dip)
#:hx-= numpy.einsum('jk,xjj,xkp->pj', x, dip, dip) * 2
#:hx-= numpy.einsum('jk,xkj,xjp->pj', x, dip, dip) * 2
#:return -self.pack_uniq_var(hx)
#:hx = numpy.einsum('iq,qp->pi', g0, x)
hx = lib.dot(x.T, g0.T).conj()
#:hx+= numpy.einsum('qi,xiq,xip->pi', x, dip, dip) * 2
hx+= numpy.einsum('xip,xi->pi', dip, numpy.einsum('qi,xiq->xi', x, dip)) * 2
#:hx-= numpy.einsum('qp,xpp,xiq->pi', x, dip, dip) * 2
hx-= numpy.einsum('xpp,xip->pi', dip,
lib.dot(dip.reshape(-1,norb), x).reshape(3,norb,norb)) * 2
#:hx-= numpy.einsum('qp,xip,xpq->pi', x, dip, dip) * 2
hx-= numpy.einsum('xip,xp->pi', dip, numpy.einsum('qp,xpq->xp', x, dip)) * 2
return -self.pack_uniq_var(hx-hx.conj().T)
return g, h_op, h_diag
def get_grad(self, u=None):
if u is None: u = numpy.eye(self.mo_coeff.shape[1])
mo_coeff = lib.dot(self.mo_coeff, u)
dip = dipole_integral(self.mol, mo_coeff)
g0 = numpy.einsum('xii,xip->pi', dip, dip)
g = -self.pack_uniq_var(g0-g0.conj().T) * 2
return g
def cost_function(self, u=None):
if u is None: u = numpy.eye(self.mo_coeff.shape[1])
mo_coeff = lib.dot(self.mo_coeff, u)
dip = dipole_integral(self.mol, mo_coeff)
r2 = self.mol.intor_symmetric('int1e_r2')
r2 = numpy.einsum('pi,pi->', mo_coeff, lib.dot(r2, mo_coeff))
val = r2 - numpy.einsum('xii,xii->', dip, dip) * 2
return val
def get_init_guess(self, key='atomic'):
'''Generate initial guess for localization.
Kwargs:
key : str or bool
If key is 'atomic', the initial guess is based on the projected
atomic orbitals. If key starts with 'cho', Cholesky orbitals are used.
Otherwise an identity matrix is used; for 'random' (or when the gradient
at the identity is nearly zero) a small random rotation is added.
'''
nmo = self.mo_coeff.shape[1]
if isinstance(key, str) and key.lower() == 'atomic':
u0 = atomic_init_guess(self.mol, self.mo_coeff)
elif isinstance(key, str) and key.lower().startswith('cho'):
mo_init = cholesky_mos(self.mo_coeff)
S = self.mol.intor_symmetric('int1e_ovlp')
u0 = numpy.linalg.multi_dot([self.mo_coeff.T, S, mo_init])
else:
u0 = numpy.eye(nmo)
if (isinstance(key, str) and key.lower().startswith('rand')
or numpy.linalg.norm(self.get_grad(u0)) < 1e-5):
# Add noise to kick initial guess out of saddle point
dr = numpy.cos(numpy.arange((nmo-1)*nmo//2)) * 1e-3
u0 = self.extract_rotation(dr)
return u0
kernel = kernel
FB = BF = Boys
if __name__ == '__main__':
from pyscf import gto, scf
mol = gto.Mole()
mol.atom = '''
O 0. 0. 0.2
H 0. -0.5 -0.4
H 0. 0.7 -0.2
'''
mol.basis = 'ccpvdz'
mol.build()
mf = scf.RHF(mol).run()
mo = mf.mo_coeff[:,:3]
loc = Boys(mol, mo)
u0 = numpy.eye(3)
dx = 1e-5
g_num = []
hdiag_num = []
h_op, hdiag = loc.gen_g_hop(u0)[1:]
for i in range(3):
dr = numpy.zeros(3)
dr[i] = dx
u = loc.extract_rotation(dr)
cf1 =-loc.cost_function(u0)
cf2 =-loc.cost_function(u0.dot(u))
cg1 = loc.get_grad(u0)
cg2 = loc.get_grad(u0.dot(u))
g_num.append((cf2-cf1)/dx)
print('hx', abs(cg2-cg1-h_op(dr)).sum())
hdiag_num.append(h_op(dr/dx)[i])
print('g', numpy.array(g_num), loc.get_grad(u0)*2)
print('hdiag', numpy.array(hdiag_num), hdiag)
mo = Boys(mol).kernel(mf.mo_coeff[:,5:9], verbose=4)
|
sunqm/pyscf
|
pyscf/lo/boys.py
|
Python
|
apache-2.0
| 14,458
|
[
"PySCF"
] |
a9afe3efabc3e89220b44598e5c9d9fa4a27df9a2dbedc11495ef8c69bd7ec20
|
"""Support for control of Elk-M1 connected thermostats."""
from __future__ import annotations
from elkm1_lib.const import ThermostatFan, ThermostatMode, ThermostatSetting
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_AUX_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import PRECISION_WHOLE, STATE_ON
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import ElkEntity, create_elk_entities
from .const import DOMAIN
SUPPORT_HVAC = [
HVAC_MODE_OFF,
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_AUTO,
HVAC_MODE_FAN_ONLY,
]
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Create the Elk-M1 thermostat platform."""
elk_data = hass.data[DOMAIN][config_entry.entry_id]
entities: list[ElkThermostat] = []
elk = elk_data["elk"]
create_elk_entities(
elk_data, elk.thermostats, "thermostat", ElkThermostat, entities
)
async_add_entities(entities, True)
class ElkThermostat(ElkEntity, ClimateEntity):
"""Representation of an Elk-M1 Thermostat."""
def __init__(self, element, elk, elk_data):
"""Initialize climate entity."""
super().__init__(element, elk, elk_data)
self._state = None
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FAN_MODE | SUPPORT_AUX_HEAT | SUPPORT_TARGET_TEMPERATURE_RANGE
@property
def temperature_unit(self):
"""Return the temperature unit."""
return self._temperature_unit
@property
def current_temperature(self):
"""Return the current temperature."""
return self._element.current_temp
@property
def target_temperature(self):
"""Return the temperature we are trying to reach."""
if self._element.mode in (
ThermostatMode.HEAT.value,
ThermostatMode.EMERGENCY_HEAT.value,
):
return self._element.heat_setpoint
if self._element.mode == ThermostatMode.COOL.value:
return self._element.cool_setpoint
return None
@property
def target_temperature_high(self):
"""Return the high target temperature."""
return self._element.cool_setpoint
@property
def target_temperature_low(self):
"""Return the low target temperature."""
return self._element.heat_setpoint
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return 1
@property
def current_humidity(self):
"""Return the current humidity."""
return self._element.humidity
@property
def hvac_mode(self):
"""Return current operation ie. heat, cool, idle."""
return self._state
@property
def hvac_modes(self):
"""Return the list of available operation modes."""
return SUPPORT_HVAC
@property
def precision(self):
"""Return the precision of the system."""
return PRECISION_WHOLE
@property
def is_aux_heat(self):
"""Return if aux heater is on."""
return self._element.mode == ThermostatMode.EMERGENCY_HEAT.value
@property
def min_temp(self):
"""Return the minimum temperature supported."""
return 1
@property
def max_temp(self):
"""Return the maximum temperature supported."""
return 99
@property
def fan_mode(self):
"""Return the fan setting."""
if self._element.fan == ThermostatFan.AUTO.value:
return HVAC_MODE_AUTO
if self._element.fan == ThermostatFan.ON.value:
return STATE_ON
return None
def _elk_set(self, mode, fan):
if mode is not None:
self._element.set(ThermostatSetting.MODE.value, mode)
if fan is not None:
self._element.set(ThermostatSetting.FAN.value, fan)
async def async_set_hvac_mode(self, hvac_mode):
"""Set thermostat operation mode."""
settings = {
HVAC_MODE_OFF: (ThermostatMode.OFF.value, ThermostatFan.AUTO.value),
HVAC_MODE_HEAT: (ThermostatMode.HEAT.value, None),
HVAC_MODE_COOL: (ThermostatMode.COOL.value, None),
HVAC_MODE_AUTO: (ThermostatMode.AUTO.value, None),
HVAC_MODE_FAN_ONLY: (ThermostatMode.OFF.value, ThermostatFan.ON.value),
}
self._elk_set(settings[hvac_mode][0], settings[hvac_mode][1])
async def async_turn_aux_heat_on(self):
"""Turn auxiliary heater on."""
self._elk_set(ThermostatMode.EMERGENCY_HEAT.value, None)
async def async_turn_aux_heat_off(self):
"""Turn auxiliary heater off."""
self._elk_set(ThermostatMode.HEAT.value, None)
@property
def fan_modes(self):
"""Return the list of available fan modes."""
return [HVAC_MODE_AUTO, STATE_ON]
async def async_set_fan_mode(self, fan_mode):
"""Set new target fan mode."""
if fan_mode == HVAC_MODE_AUTO:
self._elk_set(None, ThermostatFan.AUTO.value)
elif fan_mode == STATE_ON:
self._elk_set(None, ThermostatFan.ON.value)
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
low_temp = kwargs.get(ATTR_TARGET_TEMP_LOW)
high_temp = kwargs.get(ATTR_TARGET_TEMP_HIGH)
if low_temp is not None:
self._element.set(ThermostatSetting.HEAT_SETPOINT.value, round(low_temp))
if high_temp is not None:
self._element.set(ThermostatSetting.COOL_SETPOINT.value, round(high_temp))
def _element_changed(self, element, changeset):
mode_to_state = {
ThermostatMode.OFF.value: HVAC_MODE_OFF,
ThermostatMode.COOL.value: HVAC_MODE_COOL,
ThermostatMode.HEAT.value: HVAC_MODE_HEAT,
ThermostatMode.EMERGENCY_HEAT.value: HVAC_MODE_HEAT,
ThermostatMode.AUTO.value: HVAC_MODE_AUTO,
}
self._state = mode_to_state.get(self._element.mode)
if self._state == HVAC_MODE_OFF and self._element.fan == ThermostatFan.ON.value:
self._state = HVAC_MODE_FAN_ONLY
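# Added commentary (not in the original integration): HVAC_MODE_FAN_ONLY is
# represented as ThermostatMode.OFF plus ThermostatFan.ON, both when setting
# the mode (async_set_hvac_mode) and when reporting it back (_element_changed).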
|
rohitranjan1991/home-assistant
|
homeassistant/components/elkm1/climate.py
|
Python
|
mit
| 6,616
|
[
"Elk"
] |
c14c317141eb0a2b5bd146f7fe5bbecaa67e96b97334ce800d68112205eabe88
|
#!/usr/bin/env python
# general imports
from numpy import *
from random import *
# imp general
import IMP
# our project
from IMP.isd import *
# unit testing framework
import IMP.test
class MockFunc:
def __init__(self, setval, evaluate, evalargs=1, update=None):
self.__set = setval
self.__eval = evaluate
self.__update = update
self.__evalargs = evalargs
def set_evalargs(self, evalargs):
self.__evalargs = evalargs
def __call__(self, value):
self.__set(value)
if self.__update:
self.__update()
return self.__eval(self.__evalargs)
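# Added commentary: MockFunc adapts "set a nuisance value, optionally update
# the score, then evaluate something" into a plain single-argument callable,
# so IMP.test.numerical_derivative can differentiate the score (or a gradient
# component) numerically in the tests below.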
class Tests(IMP.test.TestCase):
"""test of the GPI restraint with two data points, linear prior mean and
Gaussian prior covariances. Sigma is not optimized.
"""
def setUp(self):
IMP.test.TestCase.setUp(self)
# IMP.set_log_level(IMP.TERSE)
IMP.set_log_level(0)
self.m = IMP.Model()
data = open(self.get_input_file_name('lyzexp_gpir.dat')).readlines()
data = [list(map(float, d.split())) for d in data]
self.q = [[i[0]] for i in data]
self.I = [i[1] for i in data]
self.err = [i[2] for i in data]
self.N = 10
self.G = Scale.setup_particle(IMP.Particle(self.m), 3.0)
self.G.set_nuisance_is_optimized(True)
self.Rg = Scale.setup_particle(IMP.Particle(self.m), 10.0)
self.Rg.set_nuisance_is_optimized(True)
# put d=15 so we don't use the porod region
self.d = Scale.setup_particle(IMP.Particle(self.m), 15.0)
self.d.set_nuisance_is_optimized(False)
self.s = Scale.setup_particle(IMP.Particle(self.m), 0.0)
self.s.set_nuisance_is_optimized(False)
self.A = Scale.setup_particle(IMP.Particle(self.m), 0.0)
self.A.set_nuisance_is_optimized(False)
self.mean = GeneralizedGuinierPorodFunction(
self.G, self.Rg, self.d, self.s, self.A)
self.tau = Switching.setup_particle(IMP.Particle(self.m), 1.0)
self.tau.set_nuisance_is_optimized(True)
self.lam = Scale.setup_particle(IMP.Particle(self.m), 1.0)
self.lam.set_nuisance_is_optimized(True)
self.sig = Scale.setup_particle(IMP.Particle(self.m), 1.0)
self.sig.set_nuisance_is_optimized(False)
self.cov = Covariance1DFunction(self.tau, self.lam, 2.0)
self.gpi = IMP.isd.GaussianProcessInterpolation(self.q, self.I,
self.err, self.N, self.mean, self.cov, self.sig)
self.gpr = IMP.isd.GaussianProcessInterpolationRestraint(
self.m, self.gpi)
self.sf = IMP.core.RestraintsScoringFunction([self.gpr])
self.particles = [
self.G,
self.Rg,
self.d,
self.s,
self.sig,
self.tau,
self.lam]
def shuffle_particle_values(self):
particles = [(self.alpha, -10, 10),
(self.beta, -10, 10),
(self.tau, 0.001, 10),
(self.lam, 0.1, 10),
(self.sig, 0.1, 10)]
# number of shuffled values
for i in range(randint(0, 5)):
# which particle
p, imin, imax = particles.pop(randint(0, len(particles) - 1))
p.set_nuisance(uniform(imin, imax))
def testDerivNumericG(self):
"""
test the derivatives of the gpi numerically for G
"""
pnum = 0
values = linspace(1, 10)
particle = self.particles[pnum]
PFunc = MockFunc(particle.set_nuisance, self.sf.evaluate, False)
for val in values:
particle.set_nuisance(val)
ene = self.sf.evaluate(True)
observed = particle.get_nuisance_derivative()
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-3)
def testDerivNumericRg(self):
"""
test the derivatives of the gpi numerically for Rg
"""
pnum = 1
values = linspace(1, 10)
particle = self.particles[pnum]
PFunc = MockFunc(particle.set_nuisance, self.sf.evaluate, False)
for val in values:
particle.set_nuisance(val)
ene = self.sf.evaluate(True)
observed = particle.get_nuisance_derivative()
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-1)
def testDerivNumericTau(self):
"""
test the derivatives of the gpi numerically for Tau
"""
pnum = 5
values = linspace(.1, .9)
particle = self.particles[pnum]
PFunc = MockFunc(particle.set_nuisance, self.sf.evaluate, False)
for val in values:
particle.set_nuisance(val)
ene = self.sf.evaluate(True)
observed = particle.get_nuisance_derivative()
expected = IMP.test.numerical_derivative(PFunc, val, .01)
self.assertAlmostEqual(expected, observed, delta=5e-2)
def testDerivNumericLambda(self):
"""
test the derivatives of the gpi numerically for Lambda
"""
pnum = 6
values = linspace(.3, 2)
particle = self.particles[pnum]
PFunc = MockFunc(particle.set_nuisance, self.sf.evaluate, False)
for val in values:
particle.set_nuisance(val)
ene = self.sf.evaluate(True)
observed = particle.get_nuisance_derivative()
expected = IMP.test.numerical_derivative(PFunc, val, .02)
self.assertAlmostEqual(expected, observed, delta=1e-2)
def testHessianNumericGG(self):
"""
test the Hessian of the function numerically wrt G and G
"""
pa = 0
pb = 0
values = range(1, 5)
ppa = self.particles[pa]
ppb = self.particles[pb]
PFunc = MockFunc(ppb.set_nuisance,
lambda a: ppa.get_nuisance_derivative(), None,
update=lambda: self.sf.evaluate(True))
for val in values:
ppb.set_nuisance(val)
# IMP.set_log_level(IMP.TERSE)
observed = self.gpr.get_hessian(False)[pa][pb]
# IMP.set_log_level(0)
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-3)
def testHessianNumericGRg(self):
"""
test the Hessian of the function numerically wrt G and Rg
"""
pa = 1
pb = 0
values = linspace(1, 10)
ppa = self.particles[pa]
ppb = self.particles[pb]
PFunc = MockFunc(ppb.set_nuisance,
lambda a: ppa.get_nuisance_derivative(), None,
update=lambda: self.sf.evaluate(True))
for val in values:
ppb.set_nuisance(val)
# IMP.set_log_level(IMP.TERSE)
observed = self.gpr.get_hessian(False)[pa][pb]
# IMP.set_log_level(0)
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-3)
def testHessianNumericRgRg(self):
"""
test the Hessian of the function numerically wrt Rg and Rg
"""
pa = 1
pb = 1
values = linspace(1, 10)
ppa = self.particles[pa]
ppb = self.particles[pb]
PFunc = MockFunc(ppb.set_nuisance,
lambda a: ppa.get_nuisance_derivative(), None,
update=lambda: self.sf.evaluate(True))
for val in values:
ppb.set_nuisance(val)
# IMP.set_log_level(IMP.TERSE)
observed = self.gpr.get_hessian(False)[pa][pb]
# IMP.set_log_level(0)
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-3)
def testHessianNumericRgG(self):
"""
test the Hessian of the function numerically wrt Rg and G
"""
pa = 1
pb = 0
values = linspace(1, 10)
ppa = self.particles[pa]
ppb = self.particles[pb]
PFunc = MockFunc(ppb.set_nuisance,
lambda a: ppa.get_nuisance_derivative(), None,
update=lambda: self.sf.evaluate(True))
for val in values:
ppb.set_nuisance(val)
# IMP.set_log_level(IMP.TERSE)
observed = self.gpr.get_hessian(False)[pa][pb]
# IMP.set_log_level(0)
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-3)
def testHessianNumericTauTau(self):
"""
test the Hessian of the function numerically wrt Tau and Tau
"""
pa = 5
pb = 5
values = linspace(.1, .9)
ppa = self.particles[pa]
ppb = self.particles[pb]
PFunc = MockFunc(ppb.set_nuisance,
lambda a: ppa.get_nuisance_derivative(), None,
update=lambda: self.sf.evaluate(True))
for val in values:
ppb.set_nuisance(val)
# IMP.set_log_level(IMP.TERSE)
# s and d not opt
observed = self.gpr.get_hessian(False)[pa - 3][pb - 3]
# IMP.set_log_level(0)
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-2)
def testHessianNumericTauLambda(self):
"""
test the Hessian of the function numerically wrt Tau and Lambda
"""
pa = 5
pb = 6
values = linspace(.1, .9)
ppa = self.particles[pa]
ppb = self.particles[pb]
PFunc = MockFunc(ppb.set_nuisance,
lambda a: ppa.get_nuisance_derivative(), None,
update=lambda: self.sf.evaluate(True))
for val in values:
ppb.set_nuisance(val)
# IMP.set_log_level(IMP.TERSE)
# s and d not opt
observed = self.gpr.get_hessian(False)[pa - 3][pb - 3]
# IMP.set_log_level(0)
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-2)
def testHessianNumericLambdaLambda(self):
"""
test the Hessian of the function numerically wrt Lambda and Lambda
"""
pa = 6
pb = 6
values = linspace(1, 10)
ppa = self.particles[pa]
ppb = self.particles[pb]
PFunc = MockFunc(ppb.set_nuisance,
lambda a: ppa.get_nuisance_derivative(), None,
update=lambda: self.sf.evaluate(True))
for val in values:
ppb.set_nuisance(val)
# IMP.set_log_level(IMP.TERSE)
# s and d not opt
observed = self.gpr.get_hessian(False)[pa - 3][pb - 3]
# IMP.set_log_level(0)
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-3)
def testHessianNumericGTau(self):
"""
test the Hessian of the function numerically wrt G and Tau
"""
pa = 0
pb = 5
values = linspace(.1, .9)
ppa = self.particles[pa]
ppb = self.particles[pb]
PFunc = MockFunc(ppb.set_nuisance,
lambda a: ppa.get_nuisance_derivative(), None,
update=lambda: self.sf.evaluate(True))
for val in values:
ppb.set_nuisance(val)
# IMP.set_log_level(IMP.TERSE)
# s and d not opt
observed = self.gpr.get_hessian(False)[pa][pb - 3]
# IMP.set_log_level(0)
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-2)
def testHessianNumericRgTau(self):
"""
test the Hessian of the function numerically wrt Rg and Tau
"""
pa = 1
pb = 5
values = linspace(.1, .9)
ppa = self.particles[pa]
ppb = self.particles[pb]
PFunc = MockFunc(ppb.set_nuisance,
lambda a: ppa.get_nuisance_derivative(), None,
update=lambda: self.sf.evaluate(True))
for val in values:
ppb.set_nuisance(val)
# IMP.set_log_level(IMP.TERSE)
# s and d not opt
observed = self.gpr.get_hessian(False)[pa][pb - 3]
# IMP.set_log_level(0)
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-2)
def testHessianNumericGLambda(self):
"""
test the Hessian of the function numerically wrt G and Lambda
"""
pa = 0
pb = 6
values = linspace(.1, .9)
ppa = self.particles[pa]
ppb = self.particles[pb]
PFunc = MockFunc(ppb.set_nuisance,
lambda a: ppa.get_nuisance_derivative(), None,
update=lambda: self.sf.evaluate(True))
for val in values:
ppb.set_nuisance(val)
# IMP.set_log_level(IMP.TERSE)
# s and d not opt
observed = self.gpr.get_hessian(False)[pa][pb - 3]
# IMP.set_log_level(0)
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-2)
def testHessianNumericRgLambda(self):
"""
test the Hessian of the function numerically wrt Rg and Lambda
"""
pa = 1
pb = 6
values = linspace(.1, .9)
ppa = self.particles[pa]
ppb = self.particles[pb]
PFunc = MockFunc(ppb.set_nuisance,
lambda a: ppa.get_nuisance_derivative(), None,
update=lambda: self.sf.evaluate(True))
for val in values:
ppb.set_nuisance(val)
# IMP.set_log_level(IMP.TERSE)
# s and d not opt
observed = self.gpr.get_hessian(False)[pa][pb - 3]
# IMP.set_log_level(0)
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-2)
def testHessianNumericLambdaRg(self):
"""
test the Hessian of the function numerically wrt Rg and Lambda
"""
pa = 6
pb = 1
values = linspace(.1, .9)
ppa = self.particles[pa]
ppb = self.particles[pb]
PFunc = MockFunc(ppb.set_nuisance,
lambda a: ppa.get_nuisance_derivative(), None,
update=lambda: self.sf.evaluate(True))
for val in values:
ppb.set_nuisance(val)
# IMP.set_log_level(IMP.TERSE)
# s and d not opt
observed = self.gpr.get_hessian(False)[pa - 3][pb]
# IMP.set_log_level(0)
expected = IMP.test.numerical_derivative(PFunc, val, 0.01)
self.assertAlmostEqual(expected, observed, delta=1e-2)
if __name__ == '__main__':
IMP.test.main()
|
shanot/imp
|
modules/isd/test/medium_test_GaussianProcessInterpolationRestraintNumericallyNoSigma.py
|
Python
|
gpl-3.0
| 15,384
|
[
"Gaussian"
] |
bb405f9a49c50a647f2fba96143709a46ad3b5ec41f0835717a298bbe79aba5f
|
#!/usr/bin/env python
"""
function:
parse the block of thermo data in a lammps logfile and perform auto- and
cross correlation of the specified column data. The total sum of the
correlation is also computed which can be converted to an integral by
multiplying by the timestep.
output:
standard output contains column data for the auto- & cross correlations
plus the total sum of each. Note, only the upper triangle of the
correlation matrix is computed.
usage:
correlate.py -c col [-c col2 ...] [-s max_correlation_time] logfile
"""
import sys
import re
import array
# parse command line
maxCorrelationTime = 0
cols = array.array("I")
nCols = 0
args = sys.argv[1:]
index = 0
while index < len(args):
arg = args[index]
index += 1
if (arg == "-c"):
cols.append(int(args[index])-1)
nCols += 1
index += 1
elif (arg == "-s"):
maxCorrelationTime = int(args[index])
index += 1
else :
filename = arg
if (nCols < 1): raise RuntimeError, 'no data columns requested'
data = [array.array("d")]
for s in range(1,nCols) : data.append( array.array("d") )
# read data block from log file
start = False
input = open(filename)
nSamples = 0
pattern = re.compile('\d')
line = input.readline()
while line :
columns = line.split()
if (columns and pattern.match(columns[0])) :
for i in range(nCols):
data[i].append( float(columns[cols[i]]) )
nSamples += 1
start = True
else :
if (start) : break
line = input.readline()
print "# read :",nSamples," samples of ", nCols," data"
if (maxCorrelationTime < 1): maxCorrelationTime = int(nSamples/2)
# correlate and integrate
correlationPairs = []
for i in range(0,nCols):
for j in range(i,nCols): # note only upper triangle of the correlation matrix
correlationPairs.append([i,j])
header = "# "
for k in range(len(correlationPairs)):
i = str(correlationPairs[k][0]+1)
j = str(correlationPairs[k][1]+1)
header += " C"+i+j+" sum_C"+i+j
print header
nCorrelationPairs = len(correlationPairs)
sum = [0.0] * nCorrelationPairs
for s in range(maxCorrelationTime) :
correlation = [0.0] * nCorrelationPairs
nt = nSamples-s
for t in range(0,nt) :
for p in range(nCorrelationPairs):
i = correlationPairs[p][0]
j = correlationPairs[p][1]
correlation[p] += data[i][t]*data[j][s+t]
output = ""
for p in range(0,nCorrelationPairs):
correlation[p] /= nt
sum[p] += correlation[p]
output += str(correlation[p]) + " " + str(sum[p]) + " "
print output
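# Added commentary: per the module docstring, each running-sum column becomes
# a time integral of the corresponding correlation function after multiplying
# by the sampling interval of the thermo output, e.g. integral_C11 ~= dt * sum_C11.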
|
qipa/lammps
|
doc/doc2/Scripts/correlate.py
|
Python
|
gpl-2.0
| 2,537
|
[
"LAMMPS"
] |
24e9a46deae3ad00db5a6dfbffdd38b9a71f313e52c8264c1ef234de14400ec7
|
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
# User management
url(r'^users/', include('blog_backend.users.urls', namespace='users')),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
# This allows the error pages to be debugged during development: just visit
# these urls in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
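# Added commentary: the __debug__/ routes are prepended so they take
# precedence over any earlier catch-all URL patterns.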
|
markliederbach/blog-backend
|
config/urls.py
|
Python
|
mit
| 1,552
|
[
"VisIt"
] |
7351c2742df27069f88e85e8ae511c5f1977b62a3ffda4b50cd1359c29d68ba1
|
from .fgir import *
from .optimize import FlowgraphOptimization
from .error import Warn
import asyncio
class PCodeOp(object):
'''A class interface for creating coroutines.
This helps us keep track of valid computational elements. Every coroutine in
a PCode object should be a method of PCodeOp.'''
@staticmethod
async def _node(in_qs, out_qs, func):
'''A helper function to create coroutines.
`in_qs`: an ordered list of asyncio.Queues() which hold the node's inputs.
`out_qs`: a list of asyncio.Queues() into which the function's output should go
`func`: the function to apply to the inputs which produces the output value'''
# hint: look at asyncio.gather
input_generator = (in_q.get() for in_q in in_qs)
inputs = await asyncio.gather(*input_generator)
# hint: the same return value of the function is put in every output queue
outputs = func(*inputs)
for out_q in out_qs:
await out_q.put(outputs)
@staticmethod
async def forward(in_qs, out_qs):
def f(input):
return input
await PCodeOp._node(in_qs, out_qs, f)
@staticmethod
async def libraryfunction(in_qs, out_qs, function_ref):
def f(*inputs):
return function_ref(*inputs)
await PCodeOp._node(in_qs, out_qs, f)
@staticmethod
async def librarymethod(in_qs, out_qs, method_ref):
def f(*inputs):
return method_ref.__get__(inputs[0])(*inputs[1:])
await PCodeOp._node(in_qs, out_qs, f)
@staticmethod
async def input(in_qs, out_qs):
def f(input):
return input
await PCodeOp._node(in_qs, out_qs, f)
@staticmethod
async def literal(out_qs, value_ref):
def f(*inputs):
return value_ref
# literal nodes have no inputs, so pass an empty list of input queues
await PCodeOp._node([], out_qs, f)
class PCode(object):
def __init__(self):
self.inputs = [] # ordered
self.outputs = [] # ordered
self.ops = [] # unordered
self.retvals = None
def add_op(self, pcode_op_coroutine):
self.ops.append( pcode_op_coroutine )
async def input_generator(self,input_args):
gen_coroutines = [q.put(i) for q,i in zip(self.inputs, input_args)]
await asyncio.gather(*gen_coroutines)
async def output_collector(self, future):
col_coroutines = [q.get() for q in self.outputs]
output_args = await asyncio.gather(*col_coroutines)
self.retvals = output_args
return output_args
async def driver(self, input_args, future):
_,value,*_ = await asyncio.gather(self.input_generator(input_args), self.output_collector(future), *self.ops)
future.set_result(value)
def run(self, *input_args):
return_future = asyncio.Future()
asyncio.ensure_future(self.driver(input_args, return_future))
loop = asyncio.get_event_loop()
loop.set_debug(True)
loop.run_until_complete(return_future)
return return_future.result()[0]
class PCodeGenerator(FlowgraphOptimization):
def __init__(self):
self.pcodes = {}
def visit(self, flowgraph):
pc = PCode()
# Create asyncio queues for every edge
# qs is indexed by tuples of the source and destination node ids
# for the inputs of a component, the source should be None
qs = {} # { (src,dst)=>asyncio.Queue(), ... }
# Populate qs by iterating over inputs of every node
# hint: destination nodes should be in flowgraph nodes
# hint: sources are their inputs
visited_nodes = set()
def pop_qs(dest_nodes):
src_nodes = set()
for dest_node in dest_nodes:
input_nodes = set(flowgraph.pre(dest_node))
for input_node in input_nodes:
qs[(input_node, dest_node)] = asyncio.Queue()
src_nodes = src_nodes.union(input_nodes)
visited_nodes.add(dest_node)
# Treat src nodes as the new destination nodes and call pop_qs recursively
new_dest_nodes = src_nodes.difference(visited_nodes)
if new_dest_nodes:
pop_qs(new_dest_nodes)
pop_qs(flowgraph.outputs)
# Add an extra input queue for each component input
component_inputs = []
for dst in flowgraph.inputs:
q = asyncio.Queue()
component_inputs.append(q)
qs[(None,dst)] = q
qs[(None,dst)]._endpoints = (None,dst)
pc.inputs = component_inputs
# Now create all the coroutines from the nodes.
for (node_id,node) in flowgraph.nodes.items():
node_in_qs = [qs[src_id,node_id] for src_id in node.inputs]
out_ids = [i for (i,n) in flowgraph.nodes.items() if node_id in n.inputs]
node_out_qs = [qs[node_id,dst_id] for dst_id in out_ids]
if node.type==FGNodeType.forward:
pc.add_op( PCodeOp.forward(node_in_qs, node_out_qs) )
elif node.type==FGNodeType.libraryfunction:
pc.add_op( PCodeOp.libraryfunction(node_in_qs, node_out_qs, node.ref) )
elif node.type==FGNodeType.librarymethod:
pc.add_op( PCodeOp.librarymethod(node_in_qs, node_out_qs, node.ref) )
elif node.type==FGNodeType.input:
# Add an extra input queue for each component input
node_in_q = qs[(None,node_id)]
pc.add_op( PCodeOp.input([node_in_q], node_out_qs) )
elif node.type==FGNodeType.output:
# Remove the output node and just use its input queues directly.
pc.outputs = node_in_qs
elif node.type==FGNodeType.literal:
pc.add_op( PCodeOp.literal(node_out_qs, node.ref) )
self.pcodes[flowgraph.name] = pc
self.queues = qs
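# Usage sketch (added commentary; component name and argument are hypothetical):
# after PCodeGenerator has visited a flowgraph named 'standardize', the
# resulting dataflow program is executed with
#
#   result = pcode_generator.pcodes['standardize'].run(my_timeseries)
#
# run() feeds the arguments into the input queues, schedules every node
# coroutine, and blocks on the event loop until the output future resolves.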
|
cs207-project/TimeSeries
|
pype/pcode.py
|
Python
|
mit
| 5,337
|
[
"VisIt"
] |
9ae87e3a8fca58898b5268c7ca6f23cacc1d0405d22e90293e19db3eabe22bb9
|
# Copyright (C) 2017 Alex Nitz
#
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
import numpy, pycbc.psd
from pycbc.types import TimeSeries, FrequencySeries, complex_same_precision_as
from numpy.random import RandomState
# These need to be constant to be able to recover identical results.
# The hope is that nobody needs a higher resolution
SAMPLE_RATE = 16384
BLOCK_SIZE = 100
FILTER_LENGTH = 128
def block(seed):
""" Return block of normal random numbers
Parameters
----------
seed : {None, int}
        The seed to generate the noise.
Returns
--------
noise : numpy.ndarray
Array of random numbers
"""
num = SAMPLE_RATE * BLOCK_SIZE
rng = RandomState(seed % 2**32)
variance = SAMPLE_RATE / 2
return rng.normal(size=num, scale=variance**0.5)
def normal(start, end, seed=0):
""" Generate data with a white Gaussian (normal) distribution
Parameters
----------
    start : int
        Start time in GPS seconds to generate noise
    end : int
        End time in GPS seconds to generate noise
seed : {None, int}
The seed to generate the noise.
Returns
--------
noise : TimeSeries
A TimeSeries containing gaussian noise
"""
    # This is reproducible because we use fixed seeds derived from known values
s = int(start / BLOCK_SIZE)
e = int(end / BLOCK_SIZE)
    # If the end falls exactly on a block boundary, the last block would be superfluous
if end % BLOCK_SIZE == 0:
e -= 1
sv = RandomState(seed).randint(-2**50, 2**50)
data = numpy.concatenate([block(i + sv) for i in numpy.arange(s, e + 1, 1)])
ts = TimeSeries(data, delta_t=1.0 / SAMPLE_RATE, epoch=start)
return ts.time_slice(start, end)
def colored_noise(psd, start_time, end_time, seed=0, low_frequency_cutoff=1.0):
""" Create noise from a PSD
Return noise from the chosen PSD. Note that if unique noise is desired
a unique seed should be provided.
Parameters
----------
psd : pycbc.types.FrequencySeries
PSD to color the noise
start_time : int
Start time in GPS seconds to generate noise
end_time : int
        End time in GPS seconds to generate noise
seed : {None, int}
The seed to generate the noise.
    low_frequency_cutoff : {1.0, float}
The low frequency cutoff to pass to the PSD generation.
Returns
--------
noise : TimeSeries
A TimeSeries containing gaussian noise colored by the given psd.
"""
psd = psd.copy()
flen = int(SAMPLE_RATE / psd.delta_f) / 2 + 1
oldlen = len(psd)
psd.resize(flen)
# Want to avoid zeroes in PSD.
max_val = psd.max()
for i in xrange(len(psd)):
if i >= (oldlen-1):
psd.data[i] = psd[oldlen - 2]
if psd[i] == 0:
psd.data[i] = max_val
wn_dur = int(end_time - start_time) + 2*FILTER_LENGTH
if psd.delta_f >= 1. / (2.*FILTER_LENGTH):
# If the PSD is short enough, this method is less memory intensive than
# resizing and then calling inverse_spectrum_truncation
psd = pycbc.psd.interpolate(psd, 1.0 / (2.*FILTER_LENGTH))
# inverse_spectrum_truncation truncates the inverted PSD. To truncate
# the non-inverted PSD we give it the inverted PSD to truncate and then
# invert the output.
psd = 1. / pycbc.psd.inverse_spectrum_truncation(1./psd,
FILTER_LENGTH * SAMPLE_RATE,
low_frequency_cutoff=low_frequency_cutoff,
trunc_method='hann')
psd = psd.astype(complex_same_precision_as(psd))
# Zero-pad the time-domain PSD to desired length. Zeroes must be added
# in the middle, so some rolling between a resize is used.
psd = psd.to_timeseries()
psd.roll(SAMPLE_RATE * FILTER_LENGTH)
psd.resize(wn_dur * SAMPLE_RATE)
psd.roll(-SAMPLE_RATE * FILTER_LENGTH)
        # As the time series is still mirrored, the complex frequency components are
        # 0. Convert to real by using abs, as in inverse_spectrum_truncation
psd = psd.to_frequencyseries()
else:
psd = pycbc.psd.interpolate(psd, 1.0 / wn_dur)
psd = 1. / pycbc.psd.inverse_spectrum_truncation(1./psd,
FILTER_LENGTH * SAMPLE_RATE,
low_frequency_cutoff=low_frequency_cutoff,
trunc_method='hann')
kmin = int(low_frequency_cutoff / psd.delta_f)
psd[:kmin].clear()
asd = (psd.real())**0.5
del psd
white_noise = normal(start_time - FILTER_LENGTH, end_time + FILTER_LENGTH,
seed=seed)
white_noise = white_noise.to_frequencyseries()
# Here we color. Do not want to duplicate memory here though so use '*='
white_noise *= asd
del asd
colored = white_noise.to_timeseries()
del white_noise
return colored.time_slice(start_time, end_time)
def noise_from_string(psd_name, start_time, end_time, seed=0, low_frequency_cutoff=1.0):
""" Create noise from an analytic PSD
Return noise from the chosen PSD. Note that if unique noise is desired
a unique seed should be provided.
Parameters
----------
psd_name : str
Name of the analytic PSD to use.
start_time : int
Start time in GPS seconds to generate noise
end_time : int
        End time in GPS seconds to generate noise
seed : {None, int}
The seed to generate the noise.
    low_frequency_cutoff : {1.0, float}
The low frequency cutoff to pass to the PSD generation.
Returns
--------
noise : TimeSeries
A TimeSeries containing gaussian noise colored by the given psd.
"""
delta_f = 1.0 / FILTER_LENGTH
flen = int(SAMPLE_RATE / delta_f) / 2 + 1
psd = pycbc.psd.from_string(psd_name, flen, delta_f, low_frequency_cutoff)
return colored_noise(psd, start_time, end_time,
seed=seed,
low_frequency_cutoff=low_frequency_cutoff)
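# A minimal usage sketch (not part of the original module): generate a short
# stretch of noise colored by an analytic PSD. The PSD name below is assumed
# to be one of the analytic models understood by pycbc.psd.from_string.
if __name__ == '__main__':
    ts = noise_from_string('aLIGOZeroDetHighPower', 0, 16,
                           seed=0, low_frequency_cutoff=10.0)
    print('duration: %.1f s, sample rate: %d Hz' % (ts.duration, int(1.0 / ts.delta_t)))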
|
hagabbar/pycbc_copy
|
pycbc/noise/reproduceable.py
|
Python
|
gpl-3.0
| 6,953
|
[
"Gaussian"
] |
b4f4f24b548b6f27b57b112009df959191856425ee2121b4afc2614a2e7183bc
|
import vtk
import math
vtk_major_version = vtk.VTK_MAJOR_VERSION
"""
vtk.vtkPoints(): A set of points can be collected in a vtkPoints
object by first setting the number of points with the
vtk_points.SetNumberOfPoints(npts) method and then enumerating
through the points and using the vtk_points.SetPoint(i, point) method.
"""
npts = 100
vtk_points = vtk.vtkPoints()
vtk_points.SetNumberOfPoints(npts)
for i in range (npts):
x = math.sin(math.pi*i/20.)
y = math.cos(math.pi*i/20.)
z = 2*i/float(npts)
vtk_points.SetPoint(i, (x,y,z))
"""
vtk.vtkCellArray(): the ordering of the points or the connectivity of
the points can be captured with a vtkCellArray object. This is done by
first declaring that you will be inserting a cell with a set number of
points using the vtk_cell_array.InsertNextCell(npts) and then looping
over the range of the number of points with the following loop,
for i in range(npts):
vtk_cell_array.InsertCellPoint(i)
"""
vtk_cell_array = vtk.vtkCellArray()
vtk_cell_array.InsertNextCell(npts)
for i in range(npts):
vtk_cell_array.InsertCellPoint(i)
"""
vtk.vtkFloatArray(): A set of values associated with the initial
points, be it force magnitudes at the point or bending moments, can be
captured as a scalar array of floating point values with the
vtkFloatArray object. This is done by first declaring the number of
scalars with the vtk_float_array.SetNumberOfValues(npts) method and then
enumerating through the points and using the
vtk_float_array.SetValue(i, value) method.
"""
value = lambda i: math.fabs(math.sin(math.pi*i/30.))
vtk_float_array = vtk.vtkFloatArray()
vtk_float_array.SetNumberOfValues(npts)
for i in range(npts):
vtk_float_array.SetValue(i, value(i))
"""
vtk.vtkPolyData(): The point connectivity and scalar values can be
encapsulated in the vtkPolyData object this is done by using the,
vtk_poly_data.SetPoints(vtk_points), vtk_poly_data.SetLines(vtk_cell_array) and
vtk_poly_data.GetPointData().SetScalars(vtk_float_array) methods
"""
vtk_poly_data = vtk.vtkPolyData()
vtk_poly_data.SetPoints(vtk_points)
vtk_poly_data.SetLines(vtk_cell_array)
vtk_poly_data.GetPointData().SetScalars(vtk_float_array)
"""
vtk.vtkSplineFilter(): The data can be smoothly interpolated across a
number of subdivisions using the vtkSplineFilter object. This
is accomplished by first setting the input of the spline filter to be
the previously constructed vtkPolyData object using the
vtk_spline_filter.SetInput(vtk_poly_data) method, and then setting the
number of desired subdivisions to create a smooth curve with the
vtk_spline_filter.SetNumberOfSubdivisions(int), and finally call the
vtk_spline_filter.Update() method.
"""
vtk_spline_filter = vtk.vtkSplineFilter()
if vtk_major_version >= 6:
    vtk_spline_filter.SetInputData(vtk_poly_data)
else:
    # VTK 5 and older (the original ~2009 example) used SetInput instead
    # http://www.patinaed.org/sdk/2009/09/vtk-vtktubefilter-example-in-python.html
    vtk_spline_filter.SetInput(vtk_poly_data)
vtk_spline_filter.SetNumberOfSubdivisions(5*npts)
vtk_spline_filter.Update()
"""
vtk.vtkTubeFilter(): The data can be visualized as a colored tube using
the vtkTubeFilter. This is accomplished by first setting the
vtkSplineFilter output port as the input connection to the
vtkTubeFilter with the
vtk_tube_filter.SetInputConnection(vtk_spline_filter.GetOutputPort())
statement. The radius of the tube can be set with the
vtk_tube_filter.SetRadius(radius) method. The smoothness around the tube
can be adjusted with the vtk_tube_filter.SetNumberOfSides(nSides)
method. If it is desired to have the end caps on the tube this can be
done with the vtk_tube_filter.CappingOn() method.
"""
vtk_tube_filter = vtk.vtkTubeFilter()
vtk_tube_filter.SetInputConnection(vtk_spline_filter.GetOutputPort())
vtk_tube_filter.SetRadius(0.15)
vtk_tube_filter.SetNumberOfSides(20)
vtk_tube_filter.CappingOn()
"""
vtk.vtkPolyDataMapper(): The output of the vtkTubeFilter can be
visualized by creating a vtkPolyDataMapper object and then setting the
vtkTubeFilters output port as the input connection of the
vtkPolyDataMapper with the
vtk_poly_data_mapper.SetInputConnection(vtk_tube_filter.GetOutputPort())
method.
"""
vtk_poly_data_mapper = vtk.vtkPolyDataMapper()
vtk_poly_data_mapper.SetInputConnection(vtk_tube_filter.GetOutputPort())
"""
vtk.vtkActor(): the vtkPolyDataMapper can be piped into
a vtkActor object using the vtk_actor.SetMapper(vtk_poly_data_mapper)
method.
"""
vtk_actor = vtk.vtkActor()
vtk_actor.SetMapper(vtk_poly_data_mapper)
"""
vtk.vtkRenderer(): The vtkActor can be rendered for the screen
by using the vtkRenderer.AddActor(vtk_actor) method.
"""
vtk_renderer = vtk.vtkRenderer()
vtk_renderer.AddActor(vtk_actor)
"""
vtk.vtkRenderWindow(): the vtkRenderer can be added to a render window
with the vtk_render_window.AddRenderer(vtk_renderer) method. The final
rendering can be executed with the vtk_render_window.Render() method
"""
vtk_render_window = vtk.vtkRenderWindow()
vtk_render_window.AddRenderer(vtk_renderer)
vtk_render_window.Render()
"""
vtk.vtkRenderWindowInteractor(): The render window can be kept on
the screen by attaching it to an interactor with the
vtk_render_window_interactor.SetRenderWindow(vtk_render_window) method. Then you
must start and initialize the vtkRenderWindowInteractor with the
vtk_render_window_interactor.Initialize() and
vtk_render_window_interactor.Start() methods.
"""
vtk_render_window_interactor = vtk.vtkRenderWindowInteractor()
vtk_render_window_interactor.SetRenderWindow(vtk_render_window)
vtk_render_window_interactor.Initialize()
vtk_render_window_interactor.Start()
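# Optional extra (not part of the original example): a minimal sketch of
# saving the generated tube geometry to disk with vtkXMLPolyDataWriter.
# The output file name below is a hypothetical placeholder.
vtk_writer = vtk.vtkXMLPolyDataWriter()
vtk_writer.SetFileName("tube_filter_helix.vtp")
vtk_writer.SetInputConnection(vtk_tube_filter.GetOutputPort())
vtk_writer.Write()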
|
saullocastro/pyNastran
|
pyNastran/gui/vtk_examples/tube_filter_helix.py
|
Python
|
lgpl-3.0
| 5,741
|
[
"VTK"
] |
44c86c01ad35c6b3ccd66c32845f18eb9f65318d68cab35b6400e9e32d621f0c
|
from selenium import webdriver
from torProfile import TorProfile
from pyvirtualdisplay import Display
import selenium
from random import randint
import subprocess
import os
import signal
import sys
import time
training_data = "Dumps/training/"
experiment_data = "Dumps/experiment/"
iface = "eth1"
sleep_time = 2.0
load_timeout = 120.0
def createPageList(in_file, n, random):
with open(in_file, "r") as f:
if random:
sites = [""]*n
all_sites = [next(f).split(",")[1].rstrip() for x in xrange(1000)]
for i in range(n):
                sites[i] = all_sites[randint(0, len(all_sites)-1)]  # pick uniformly from all fetched sites
else:
sites = [next(f).split(",")[1].rstrip() for x in xrange(n)]
f.close()
return sites
def mkdir(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def getFilename(dir):
max_i = -1
try:
max_i = max([int(x.split(".")[0]) for x in os.listdir(dir)])
except:
pass
return "%d.cap" % (max_i + 1)
def startTshark(f_path):
command = "tshark -f tcp -i %s -w %s" % (iface, f_path)
FNULL = open(os.devnull, 'w')
tshark_proc = subprocess.Popen(command, stdout=FNULL, close_fds=True, stderr=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
return tshark_proc.pid
def stopTshark(pid):
try:
os.killpg(pid, signal.SIGTERM)
except:
print "Could not stop tshark process"
FNULL = open(os.devnull, 'w')
subprocess.Popen("killall tshark", stdout=FNULL, close_fds=True, stderr=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
def loadPage(url):
driver = webdriver.Firefox(firefox_profile=TorProfile().p)
driver.set_page_load_timeout(load_timeout)
try:
driver.get("http://"+url)
driver.close()
time.sleep(sleep_time)
except selenium.common.exceptions.TimeoutException:
print "Error lading page: timed out"
time.sleep(sleep_time)
driver.close()
return -1
except (KeyboardInterrupt, SystemExit):
driver.close()
raise
except:
print "Unexpected error when loading page:", sys.exc_info()[0]
time.sleep(sleep_time)
driver.close()
raise
def removeFile(f_path):
os.remove(f_path)
def capturePage(folder, page):
# Create directory for page
folder = folder + page.split("/")[0]
mkdir(folder)
f_path = "%s/%s" % (folder, getFilename(folder))
tshark_pid = startTshark(f_path)
try:
s = loadPage(page)
stopTshark(tshark_pid)
if s == -1:
removeFile(f_path)
except (KeyboardInterrupt, SystemExit):
stopTshark(tshark_pid)
removeFile(f_path)
sys.exit()
except:
print "Unexpected error when capturing website:", sys.exc_info()[0]
stopTshark(tshark_pid)
removeFile(f_path)
raise
if __name__ == "__main__":
manual = False
try:
plist = sys.argv[1]
if plist == "manual":
manual = True
elif not os.path.isfile(plist):
print "ERROR: File %s not found" % plist
raise
else:
n = int(sys.argv[2])
iface = sys.argv[3]
t = int(sys.argv[4])
except:
print "Usage:\tpython %s <web page list> <number of pages to visit> <network interface> <training data (0/1)> OR" % sys.argv[0]
print "\tpython %s manual <network interface> <training_data (0/1)> <web page(s)>" % sys.argv[0]
print "Example: python %s alexa.csv 100 eth1 1 (capture training data from the first 100 pages of list alexa.csv on eth1)" % sys.argv[0]
sys.exit()
if manual:
iface = sys.argv[2]
t = int(sys.argv[3])
page_list = []
cnt = 4
while True:
try:
page_list.append(sys.argv[cnt])
cnt += 1
except:
break
else:
page_list = createPageList(plist, n, False)
display = Display(visible=0, size=(800, 600))
display.start()
p = training_data if t else experiment_data
for i,page in enumerate(page_list):
print "Capturing web page %d/%d: %s" % (i+1, len(page_list), page)
capturePage(p, page)
display.stop()
|
chhans/tor-automation
|
capture.py
|
Python
|
mit
| 3,679
|
[
"VisIt"
] |
4be440b8e9d901f16ce595be7812812627d006ea29b13f7f10b6454c0844be9f
|
# Copyright (c) 2017, Henrique Miranda
# All rights reserved.
#
# This file is part of the yambopy project
#
from yambopy import *
ha2ev = 27.211396132
class YamboGreenDB():
"""
Read the green's functions calculated using yambo
These green's functions describe the spectral function of the quasiparticles.
The quasi-particles can be from electron-phonon or GW calculations
"""
def __init__(self,save='SAVE',filename='ndb.G'):
self.filename = "%s/%s"%(save,filename)
#read em1s database
try:
database = Dataset(self.filename, 'r')
except:
raise IOError("Error opening %s in YamboGreenDB"%self.filename)
#read the Green's functions energies
re,im = database['Green_Functions_Energies'][:]*ha2ev
self.energies = (re+im*1j).T
#read the Green's Functions
re,im = database['Green_Functions'][:]*ha2ev
self.green = (re+im*1j).T
#read the self-energy operator
re,im = database['SE_Operator'][:]*ha2ev
self.se = (re+im*1j).T
self.nqps, self.nenergies = self.green.shape
#read QP_table
qptable = database['QP_table'][:]
self.band1, self.band2, self.kindex = qptable
#read QP_kpts
kpts = database['QP_kpts'][:].T
self.qpoints = kpts.shape
def plot(self,ax,nqp=0,nb=0,what='SE',**kwargs):
"""
Plot quantities from this database
"""
x = self.energies[nqp]
options = {'SE':self.se,
'green':self.green}
y = options[what][nqp]
ax.plot(x.real,y.real,label='Re(%s)'%what,**kwargs)
ax.plot(x.real,y.imag,label='Im(%s)'%what,**kwargs)
def getQP(self,e0,debug=False):
"""
Get quasiparticle states
Arguments:
e0 -> bare eigenvalues
"""
from scipy.optimize import bisect
from scipy.interpolate import interp1d
from scipy.misc import derivative
#check if the eigenvalues have the correct dimensions
if len(e0) != self.nqps:
raise ValueError('Wrong dimensions in bare eigenvalues')
#in case something is strange we plot the stuff
def error(nqp):
ax = plt.gca()
#plot 0
ax.axhline(0,c='k',lw=1)
#se limits
semin = min(self.se[nqp].real)
semax = max(self.se[nqp].real)
plt.ylim(semin,semax)
#plot self energy
self.plot(ax,nqp=nqp)
#plot omega-e0
emin = min(self.energies[nqp].real)
emax = max(self.energies[nqp].real)
x = np.arange(emin,emax,(emax-emin)/100)
plt.plot(x,x-e0[nqp])
            #plot imaginary part of the Green's function
x = self.energies[nqp].real
y = self.green[nqp].imag
plt.plot(x,y/max(y)*semax)
#plot eqp
#plt.axvline(self.eqp[nqp],lw=1)
#plt.axvline(e0[nqp],lw=1)
plt.legend(frameon=False)
plt.show()
self.eqp = np.zeros([self.nqps],dtype=complex)
self.z = np.zeros([self.nqps],dtype=complex)
for nqp in xrange(self.nqps):
#get x and y
x = self.energies[nqp].real
y = self.se[nqp]
#interpolate real part of function
f = interp1d(x,y.real-x+e0[nqp],kind='slinear')
#find zero
eqp = bisect(f,min(x),max(x))
#interpolate whole function
f = interp1d(x,y)
#calculate Z factors
#Z = (1-dSE/de)^(-1)
dse = derivative(f,eqp,dx=1e-8)
z = 1./(1-dse)
#find Im(Se(EQP)) which corresponds to the lifetime
lif = f(eqp).imag
eqp += 1j*lif
#store values
self.eqp[nqp] = eqp
self.z[nqp] = z
            #check for potential errors
if z>1 and debug:
error(nqp)
return self.eqp, self.z
def __str__(self):
s = ""
s += "nenergies: %d\n"%self.nenergies
s += "nqps: %d"%self.nqps
return s
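# A minimal usage sketch (not part of yambopy): read an ndb.G database from a
# hypothetical 'SAVE' folder and print a summary of its contents.
if __name__ == "__main__":
    ydb = YamboGreenDB(save='SAVE', filename='ndb.G')
    print(ydb)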
|
alexmoratalla/yambo-py
|
yambopy/dbs/greendb.py
|
Python
|
bsd-3-clause
| 4,251
|
[
"Yambo"
] |
9c68e436736ca51d07e2b325faf25ca17c978153c24561a087152290b542f1e9
|
APP_NAME = "mvp_chw_visits"
CHW_VISIT_ACTIVE_CASES_INDICATORS = dict(
app=APP_NAME,
indicators=dict(
all_cases=dict(
household_cases_90days=dict(
description="No. of active households in the past 90 days",
title="# Households in past 90 days",
indicator_key="",
case_type="household",
fixed_datespan_months=3,
),
household_cases_30days=dict(
description="No. of Households in the past 30 days",
title="# Households in the past 30 days",
indicator_key="",
case_type="household",
fixed_datespan_months=1,
),
household_cases=dict(
description="No. of Active Households ",
title="# Households in specified time period",
indicator_key="",
case_type="household",
),
pregnancy_cases_6weeks=dict(
description="No. of Active Pregnancies in the Past 6 Weeks",
title="# pregnancies in last 6 weeks",
indicator_key="",
case_type="pregnancy",
fixed_datespan_days=42,
),
pregnancy_cases_30days=dict(
description="No. of Active Pregnancies in the Past 30 Days",
title="# pregnancies in the last 30 days",
indicator_key="",
case_type="pregnancy",
fixed_datespan_months=1,
),
pregnancy_cases=dict(
description="No. of Active Pregnancies",
title="# Pregnancies in specified time period",
indicator_key="",
case_type="pregnancy",
),
)
)
)
CHW_VISIT_INDICATORS = dict(
app=APP_NAME,
indicators=dict(
all_visit_forms=dict(
household_visits=dict(
description="No. of household visits",
title="# Household Visits in specified time period",
indicator_key="household",
),
),
),
)
CHW_VISITS_UNIQUE_COUNT_INDICATORS = dict(
app=APP_NAME,
indicators=dict(
all_visit_forms=dict(
household_visits_90days=dict(
description="No. of household visits in the past 90 days.",
title="# household visits in past 90 days",
indicator_key="household",
fixed_datespan_months=3,
),
household_visits_30days=dict(
description="No. of household visits",
title="# Household Visits",
indicator_key="household",
fixed_datespan_months=1,
),
pregnancy_visits_6weeks=dict(
description="No. of pregnancy visits in the past 6 weeks",
title="# Pregnancy Visits in Past 6 Weeks",
indicator_key="pregnancy",
fixed_datespan_days=42,
),
pregnancy_visits_30days=dict(
description="No. of Pregnancy Visits in the last 30 days",
title="# Pregnancy Visits in Past 30 days",
indicator_key="pregnancy",
fixed_datespan_months=1,
),
under5_visits_30days=dict(
description="No. of Under5 visits",
title="# Under5 Visits",
indicator_key="child under5",
fixed_datespan_months=1,
),
neonate_visits_7days=dict(
description="No. of Neonate visits",
title="# Neonate Visits",
indicator_key="child neonate",
fixed_datespan_days=7,
),
under1_visits=dict(
description="No. of children Under-1 receiving on-time scheduled check-ups during the time period",
title="# Under-1 receiving check-ups",
indicator_key="child under1",
),
under1_visits_6weeks=dict(
description="No. of children Under-1 receiving on-time scheduled check-ups during the time period",
title="# Under-1 receiving check-ups",
fixed_datespan_days=42,
indicator_key="child under1",
),
under1_immunization_up_to_date=dict(
description="No. of children Under-1 with up-to-date immunizations at visit during this time period",
title="# Under-1 up-to-date immunizations",
indicator_key="child under1 immunized",
),
newborn_visits=dict(
description="No. of newborns visited 7 days after birth",
title="# Newborns visited 7 days after birth",
indicator_key="child 7days",
),
under6month_exclusive_breastfeeding=dict(
description="No. of children under 6 months reported as exclusively breast-fed during visit",
title="# Under-6-Months reported as exclusively breast-fed during visit",
indicator_key="child under6mo_ex_breast",
),
under6month_visits=dict(
description="No. of children receiving visit who were under 6 months",
title="# Under-6-Month Visits",
indicator_key="child under6mo",
),
household_num_func_bednets=dict(
description="No. of households ASSESSED with at least one functioning bednet per sleeping site",
title="# of households with at least one functioning bednet per sleeping site",
indicator_key="household atleastonebednet",
),
household_num_bednets=dict(
description="No. of households ASSESSED for functioning bednet",
title="# of households ASSESSED for functioning bednet",
indicator_key="household bednet",
),
num_handwashing_latrine=dict(
description="No. of households ASSESSED with handwashing station within 10m of the latrine",
title="# of households with handwashing station within 10m of the latrine",
indicator_key="household handwashing10metres",
),
num_handwashing=dict(
description="No. of households ASSESSED with handwashing station",
title="# of households with handwashing station",
indicator_key="household handwashing",
),
)
)
)
|
puttarajubr/commcare-hq
|
custom/_legacy/mvp/static_definitions/couch/chw_visits.py
|
Python
|
bsd-3-clause
| 6,673
|
[
"VisIt"
] |
05c83a578bf489faa101803f08f4299eb1ede52f187a96759360f1b92219e324
|
"""
This is the Data Integrity Client which allows the simple reporting of
problematic file and replicas to the IntegrityDB and their status
correctly updated in the FileCatalog.
"""
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.Core.Base.Client import Client
__RCSID__ = "$Id$"
class DataIntegrityClient( Client ):
"""
The following methods are supported in the service but are not mentioned explicitly here:
getProblematic()
Obtains a problematic file from the IntegrityDB based on the LastUpdate time
getPrognosisProblematics(prognosis)
Obtains all the problematics of a particular prognosis from the integrityDB
getProblematicsSummary()
Obtains a count of the number of problematics for each prognosis found
getDistinctPrognosis()
Obtains the distinct prognosis found in the integrityDB
getTransformationProblematics(prodID)
Obtains the problematics for a given production
incrementProblematicRetry(fileID)
Increments the retry count for the supplied file ID
changeProblematicPrognosis(fileID,newPrognosis)
Changes the prognosis of the supplied file to the new prognosis
setProblematicStatus(fileID,status)
Updates the status of a problematic in the integrityDB
removeProblematic(self,fileID)
This removes the specified file ID from the integrity DB
insertProblematic(sourceComponent,fileMetadata)
Inserts file with supplied metadata into the integrity DB
"""
def __init__( self, **kwargs ):
super( DataIntegrityClient, self ).__init__( **kwargs )
self.setServer( 'DataManagement/DataIntegrity' )
self.dm = DataManager()
self.fc = FileCatalog()
def setFileProblematic( self, lfn, reason, sourceComponent = '' ):
""" This method updates the status of the file in the FileCatalog and the IntegrityDB
lfn - the lfn of the file
reason - this is given to the integrity DB and should reflect the problem observed with the file
sourceComponent is the component issuing the request.
"""
if isinstance( lfn, list ):
lfns = lfn
elif isinstance( lfn, basestring ):
lfns = [lfn]
else:
errStr = "DataIntegrityClient.setFileProblematic: Supplied file info must be list or a single LFN."
gLogger.error( errStr )
return S_ERROR( errStr )
gLogger.info( "DataIntegrityClient.setFileProblematic: Attempting to update %s files." % len( lfns ) )
fileMetadata = {}
for lfn in lfns:
fileMetadata[lfn] = {'Prognosis':reason, 'LFN':lfn, 'PFN':'', 'SE':''}
res = self.insertProblematic( sourceComponent, fileMetadata )
if not res['OK']:
gLogger.error( "DataIntegrityClient.setReplicaProblematic: Failed to insert problematics to integrity DB" )
return res
def reportProblematicReplicas( self, replicaTuple, se, reason ):
""" Simple wrapper function around setReplicaProblematic """
gLogger.info( 'The following %s files had %s at %s' % ( len( replicaTuple ), reason, se ) )
for lfn, _pfn, se, reason in sorted( replicaTuple ):
if lfn:
gLogger.info( lfn )
res = self.setReplicaProblematic( replicaTuple, sourceComponent = 'DataIntegrityClient' )
if not res['OK']:
gLogger.info( 'Failed to update integrity DB with replicas', res['Message'] )
else:
gLogger.info( 'Successfully updated integrity DB with replicas' )
def setReplicaProblematic( self, replicaTuple, sourceComponent = '' ):
""" This method updates the status of the replica in the FileCatalog and the IntegrityDB
        The supplied replicaDict should be of the form {lfn :{'PFN':pfn,'SE':se,'Prognosis':prognosis}}
lfn - the lfn of the file
pfn - the pfn if available (otherwise '')
se - the storage element of the problematic replica (otherwise '')
prognosis - this is given to the integrity DB and should reflect the problem observed with the file
sourceComponent is the component issuing the request.
"""
if isinstance( replicaTuple, tuple ):
replicaTuple = [replicaTuple]
elif isinstance( replicaTuple, list ):
pass
else:
errStr = "DataIntegrityClient.setReplicaProblematic: Supplied replica info must be a tuple or list of tuples."
gLogger.error( errStr )
return S_ERROR( errStr )
gLogger.info( "DataIntegrityClient.setReplicaProblematic: Attempting to update %s replicas." % len( replicaTuple ) )
replicaDict = {}
for lfn, pfn, se, reason in replicaTuple:
replicaDict[lfn] = {'Prognosis':reason, 'LFN':lfn, 'PFN':pfn, 'SE':se}
res = self.insertProblematic( sourceComponent, replicaDict )
if not res['OK']:
gLogger.error( "DataIntegrityClient.setReplicaProblematic: Failed to insert problematic to integrity DB" )
return res
for lfn in replicaDict.keys():
replicaDict[lfn]['Status'] = 'Problematic'
res = self.fc.setReplicaStatus( replicaDict )
if not res['OK']:
errStr = "DataIntegrityClient.setReplicaProblematic: Completely failed to update replicas."
gLogger.error( errStr, res['Message'] )
return res
failed = res['Value']['Failed']
successful = res['Value']['Successful']
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
##########################################################################
#
# This section contains the resolution methods for various prognoses
#
def __updateCompletedFiles( self, prognosis, fileID ):
gLogger.info( "%s file (%d) is resolved" % ( prognosis, fileID ) )
return self.setProblematicStatus( fileID, 'Resolved' )
def __returnProblematicError( self, fileID, res ):
self.incrementProblematicRetry( fileID )
gLogger.error( 'DataIntegrityClient failure', res['Message'] )
return res
def __updateReplicaToChecked( self, problematicDict ):
lfn = problematicDict['LFN']
fileID = problematicDict['FileID']
prognosis = problematicDict['Prognosis']
problematicDict['Status'] = 'Checked'
res = returnSingleResult( self.fc.setReplicaStatus( {lfn:problematicDict} ) )
if not res['OK']:
return self.__returnProblematicError( fileID, res )
gLogger.info( "%s replica (%d) is updated to Checked status" % ( prognosis, fileID ) )
return self.__updateCompletedFiles( prognosis, fileID )
def resolveCatalogPFNSizeMismatch( self, problematicDict ):
""" This takes the problematic dictionary returned by the integrity DB and resolved the CatalogPFNSizeMismatch prognosis
"""
lfn = problematicDict['LFN']
se = problematicDict['SE']
fileID = problematicDict['FileID']
res = returnSingleResult( self.fc.getFileSize( lfn ) )
if not res['OK']:
return self.__returnProblematicError( fileID, res )
catalogSize = res['Value']
res = returnSingleResult( StorageElement( se ).getFileSize( lfn ) )
if not res['OK']:
return self.__returnProblematicError( fileID, res )
storageSize = res['Value']
bkKCatalog = FileCatalog( ['BookkeepingDB'] )
res = returnSingleResult( bkKCatalog.getFileSize( lfn ) )
if not res['OK']:
return self.__returnProblematicError( fileID, res )
bookkeepingSize = res['Value']
if bookkeepingSize == catalogSize == storageSize:
gLogger.info( "CatalogPFNSizeMismatch replica (%d) matched all registered sizes." % fileID )
return self.__updateReplicaToChecked( problematicDict )
if catalogSize == bookkeepingSize:
gLogger.info( "CatalogPFNSizeMismatch replica (%d) found to mismatch the bookkeeping also" % fileID )
res = returnSingleResult( self.fc.getReplicas( lfn ) )
if not res['OK']:
return self.__returnProblematicError( fileID, res )
if len( res['Value'] ) <= 1:
gLogger.info( "CatalogPFNSizeMismatch replica (%d) has no other replicas." % fileID )
return S_ERROR( "Not removing catalog file mismatch since the only replica" )
else:
gLogger.info( "CatalogPFNSizeMismatch replica (%d) has other replicas. Removing..." % fileID )
res = self.dm.removeReplica( se, lfn )
if not res['OK']:
return self.__returnProblematicError( fileID, res )
return self.__updateCompletedFiles( 'CatalogPFNSizeMismatch', fileID )
if ( catalogSize != bookkeepingSize ) and ( bookkeepingSize == storageSize ):
gLogger.info( "CatalogPFNSizeMismatch replica (%d) found to match the bookkeeping size" % fileID )
res = self.__updateReplicaToChecked( problematicDict )
if not res['OK']:
return self.__returnProblematicError( fileID, res )
return self.changeProblematicPrognosis( fileID, 'BKCatalogSizeMismatch' )
gLogger.info( "CatalogPFNSizeMismatch replica (%d) all sizes found mismatch. Updating retry count" % fileID )
return self.incrementProblematicRetry( fileID )
############################################################################################
def _reportProblematicFiles( self, lfns, reason ):
""" Simple wrapper function around setFileProblematic
"""
gLogger.info( 'The following %s files were found with %s' % ( len( lfns ), reason ) )
for lfn in sorted( lfns ):
gLogger.info( lfn )
res = self.setFileProblematic( lfns, reason, sourceComponent = 'DataIntegrityClient' )
if not res['OK']:
gLogger.info( 'Failed to update integrity DB with files', res['Message'] )
else:
gLogger.info( 'Successfully updated integrity DB with files' )
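# A minimal usage sketch (not part of DIRAC itself, assuming a properly
# configured DIRAC installation): flag a hypothetical replica as problematic.
#
#   client = DataIntegrityClient()
#   result = client.setReplicaProblematic( ( '/some/lfn/file.dst', '', 'SOME-SE',
#                                             'CatalogPFNSizeMismatch' ),
#                                           sourceComponent = 'ExampleScript' )
#   if not result['OK']:
#     gLogger.error( result['Message'] )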
|
andresailer/DIRAC
|
DataManagementSystem/Client/DataIntegrityClient.py
|
Python
|
gpl-3.0
| 10,020
|
[
"DIRAC"
] |
5bf11fbe4e44bfd7eaeb6dca867a69c139dd4c10625b2bf088f30b7c49f7e2e4
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 7 14:56:14 2017
@author: yesf
"""
##############################################################################
# To ease comparison with the logistic-regression results, the scorecard is
# "reversed": the higher the score, the more likely the default.
# The basic idea of divergence maximization is to write the scorecard as a
# linear combination of the features and then maximize the divergence between
# the good and the bad populations, defined as:
#   objective: max integral [f(s,G)-f(s,B)]*ln[f(s,G)/f(s,B)] ds  (Divergence)
# Simplified forms are the divergence under a normality assumption
# (Divergence_Normal) and the Mahalanobis distance (Mahal_Dist).
# The scorecard itself is still a linear combination: sum(C_j*X_ij)
##############################################################################
import sys
import os
sys.path.append("allinpay projects/creditscoredivmax")
from imp import reload
import creditscore
reload(creditscore)
from creditscore import CreditScore
import numpy as np
import pandas as pd
import time
from scipy.optimize import minimize
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import RFECV
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SelectKBest
class CreditScoreDivergenceMax(CreditScore):
def DivMax_trainandtest(self, testsize, cv, feature_sel, varthreshold, nclusters, \
cmethod, resmethod):
#分割数据集为训练集和测试集
data_feature = self.data.ix[:, self.data.columns != 'default']
data_target = self.data['default']
X_train, X_test, y_train, y_test = train_test_split(data_feature, data_target, test_size=testsize, random_state=0)
        # coarse-bin the training-set variables and apply the WOE transform, then apply the same binning and WOE to the test set
X_train, X_test = self.binandwoe_traintest(X_train, y_train, X_test, nclusters, cmethod)
        # feature selection on the training set using sklearn.feature_selection methods
if feature_sel == "VarianceThreshold":
selector = VarianceThreshold(threshold = varthreshold)
X_train1 = pd.DataFrame(selector.fit_transform(X_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
elif feature_sel == "RFECV":
estimator = LogisticRegression()
selector = RFECV(estimator, step=1, cv=cv)
X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
elif feature_sel == "SelectFromModel":
estimator = LogisticRegression()
selector = SelectFromModel(estimator)
X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
elif feature_sel == "SelectKBest":
selector = SelectKBest()
X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
X_train1.columns = X_train.columns[selector.get_support(True)]
X_test1 = X_test[X_train1.columns]
else:
X_train1, X_test1 = X_train, X_test
        # resampling to deal with class imbalance
X_train1, y_train = self.imbalanceddata (X_train1, y_train, resmethod)
X_train_good = []
X_train_bad = []
for i in range(X_train1.shape[0]):
if y_train.iloc[i] == 0:
X_train_good.append(list(X_train1.iloc[i,:]))
elif y_train.iloc[i] == 1:
X_train_bad.append(list(X_train1.iloc[i,:]))
else:
print('error')
        # objective function: maximize the normal-approximation divergence between good and bad scores
fun = lambda x: -Divergence_Normal(np.dot(X_train_good,x), np.dot(X_train_bad,x))
init = [0.5]*X_train1.shape[1]
res = minimize(fun, init, method = "CG", tol = 1e-9, options={'gtol': 1e-6, 'disp': True})
predcoeff = res.x[:X_train1.shape[1]]
for i in range(0,len(predcoeff)):
print(i+1, predcoeff[i])
score = np.dot(X_test1,predcoeff)
        # a mapping from the score distribution to a probability is needed; a simple linear rescaling is used below
probability = (score-score.min())/(score.max()-score.min())
predresult = pd.DataFrame({'target' : y_test, 'probability' : probability})
return predresult
def Divergence(list1, list2):
# This function calculates the divergence of good points (in list1) and bad points (in list2)
max_list = max(max(list1),max(list2))
min_list = min(min(list1),min(list2))
sample_size = len(list1) + len(list2)
num_cat = max(int(sample_size / 20),1)
breakpoints = np.arange(min_list, max_list, (max_list-min_list)/num_cat)
breakpoints = np.append(breakpoints, max_list)
labels = np.arange(len(breakpoints) - 1)
datacut1 = pd.cut(list1,bins=breakpoints,right=True,labels=labels,include_lowest=True)
datacut2 = pd.cut(list2,bins=breakpoints,right=True,labels=labels,include_lowest=True)
div = 0
for cat in labels:
dist1 = (datacut1 == cat).sum() / len(list1)
dist2 = (datacut2 == cat).sum() / len(list2)
if dist1 == 0:
dist1 = 0.0001
if dist2 == 0:
dist2 = 0.0001
div = div + (dist1 - dist2) * np.log (dist1/dist2)
return div
def Divergence_Normal(list1, list2):
# This function calculates the divergence of good points (in list1) and bad points (in list2)
# by assuming the good and bad points follow the gaussian distribution
mean1 = np.mean(list1)
mean2 = np.mean(list2)
stdev1 = np.std(list1)
stdev2 = np.std(list2)
dist = ((stdev1**2 + stdev2**2)*(mean1 - mean2)**2 + (stdev1**2 - stdev2**2)**2) / (2 * stdev1**2 * stdev2**2)
return dist
def Mahal_Dist(list1, list2):
# This function calculates the mahal distance of good points (in list1) and bad points (in list2)
# by assuming the good and bad points follow the gaussian distribution and same stdev
mean1 = np.mean(list1)
mean2 = np.mean(list2)
stdev1 = np.std(list1)
stdev2 = np.std(list2)
stdev = (stdev1 + stdev2)/2
dist = abs(mean1 - mean2) / stdev
return dist
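# A minimal sanity-check sketch (not part of the original module): compare the
# three divergence measures on synthetic good/bad score samples.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    good_scores = rng.normal(loc=0.0, scale=1.0, size=1000)  # scores of good accounts
    bad_scores = rng.normal(loc=1.0, scale=1.2, size=1000)   # scores of bad accounts
    print('Divergence        : %.4f' % Divergence(good_scores, bad_scores))
    print('Divergence_Normal : %.4f' % Divergence_Normal(good_scores, bad_scores))
    print('Mahal_Dist        : %.4f' % Mahal_Dist(good_scores, bad_scores))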
|
allinpaybusiness/ACS
|
allinpay projects/creditscoredivmax/classdivmax.py
|
Python
|
apache-2.0
| 6,745
|
[
"Gaussian"
] |
06bc26beadfb86935d4fe91a031c66e7a5ca262c46537213046bee1680118de0
|
#!/usr/bin/env python
from __future__ import print_function
from optparse import OptionParser
import sys
import numpy as np
import pandas as pd
import pysam
from dna_io import dna_one_hot
################################################################################
# bvcf.py
#
# Methods and classes to support .vcf SNP analysis.
################################################################################
def cap_allele(allele, cap=5):
''' Cap the length of an allele in the figures '''
if len(allele) > cap:
allele = allele[:cap] + '*'
return allele
def snps_seq1(snps, seq_len, genome_fasta, return_seqs=False):
''' Produce an array of one hot coded sequences for a list of SNPs.
Attrs:
snps [SNP] : list of SNPs
seq_len (int) : sequence length to code
genome_fasta (str) : genome FASTA file
Return:
seq_vecs (array) : one hot coded sequences surrounding the SNPs
seq_headers [str] : headers for sequences
seq_snps [SNP] : list of used SNPs
'''
left_len = seq_len//2 - 1
right_len = seq_len//2
# open genome FASTA
genome = pysam.Fastafile(genome_fasta)
# initialize one hot coded vector list
seq_vecs_list = []
# save successful SNPs
seq_snps = []
# save sequence strings, too
seqs = []
# name sequences
seq_headers = []
for snp in snps:
# specify positions in GFF-style 1-based
seq_start = snp.pos - left_len
seq_end = snp.pos + right_len + len(snp.ref_allele) - snp.longest_alt()
# extract sequence as BED style
if seq_start < 0:
seq = 'N'*(-seq_start) + genome.fetch(snp.chrom, 0, seq_end).upper()
else:
seq = genome.fetch(snp.chrom, seq_start-1, seq_end).upper()
# extend to full length
if len(seq) < seq_end - seq_start:
seq += 'N'*(seq_end-seq_start-len(seq))
# verify that ref allele matches ref sequence
seq_ref = seq[left_len:left_len+len(snp.ref_allele)]
if seq_ref != snp.ref_allele:
if seq_ref not in snp.alt_alleles:
print('WARNING: Skipping %s - neither allele matches reference genome: %s vs %s' % (snp.rsid, snp.ref_allele, seq_ref), file=sys.stderr)
continue
else:
print('WARNING: %s - alt (as opposed to ref) allele matches reference genome; changing reference genome to match.' % (snp.rsid), file=sys.stderr)
# remove alt allele and include ref allele
seq = seq[:left_len] + snp.ref_allele + seq[left_len+len(seq_ref):]
            # note that this won't work for indels, but they will be sent to the
            # skipping code above because seq_ref will have a different length
            # than the proper alternative allele
seq_snps.append(snp)
# one hot code ref allele
seq_vecs_ref, seq_ref = dna_length_1hot(seq, seq_len)
seq_vecs_list.append(seq_vecs_ref)
if return_seqs:
seqs.append(seq_ref)
# name ref allele
seq_headers.append('%s_%s' % (snp.rsid, cap_allele(snp.ref_allele)))
for alt_al in snp.alt_alleles:
# remove ref allele and include alt allele
seq_alt = seq[:left_len] + alt_al + seq[left_len+len(snp.ref_allele):]
# one hot code
seq_vecs_alt, seq_alt = dna_length_1hot(seq_alt, seq_len)
seq_vecs_list.append(seq_vecs_alt)
if return_seqs:
seqs.append(seq_alt)
# name
seq_headers.append('%s_%s' % (snp.rsid, cap_allele(alt_al)))
# stack
seq_vecs = np.vstack(seq_vecs_list)
if return_seqs:
return seq_vecs, seq_headers, seq_snps, seqs
else:
return seq_vecs, seq_headers, seq_snps
def snps2_seq1(snps, seq_len, genome1_fasta, genome2_fasta, return_seqs=False):
''' Produce an array of one hot coded sequences for a list of SNPs.
Attrs:
snps [SNP] : list of SNPs
seq_len (int) : sequence length to code
      genome1_fasta (str) : major allele genome FASTA file
genome2_fasta (str) : minor allele genome FASTA file
Return:
seq_vecs (array) : one hot coded sequences surrounding the SNPs
seq_headers [str] : headers for sequences
seq_snps [SNP] : list of used SNPs
'''
    left_len = seq_len//2 - 1
    right_len = seq_len//2
# open genome FASTA
genome1 = pysam.Fastafile(genome1_fasta)
genome2 = pysam.Fastafile(genome2_fasta)
# initialize one hot coded vector list
seq_vecs_list = []
# save successful SNPs
seq_snps = []
# save sequence strings, too
seqs = []
# name sequences
seq_headers = []
for snp in snps:
if len(snp.alt_alleles) > 1:
print('Major/minor genome mode requires only two alleles: %s' % snp.rsid, file=sys.stderr)
exit(1)
alt_al = snp.alt_alleles[0]
# specify positions in GFF-style 1-based
seq_start = snp.pos - left_len
seq_end = snp.pos + right_len + len(snp.ref_allele)
# extract sequence as BED style
if seq_start < 0:
seq_ref = 'N'*(-seq_start) + genome1.fetch(snp.chrom, 0, seq_end).upper()
else:
seq_ref = genome1.fetch(snp.chrom, seq_start-1, seq_end).upper()
# extend to full length
if len(seq_ref) < seq_end - seq_start:
seq_ref += 'N'*(seq_end-seq_start-len(seq_ref))
# verify that ref allele matches ref sequence
seq_ref_snp = seq_ref[left_len:left_len+len(snp.ref_allele)]
if seq_ref_snp != snp.ref_allele:
            print('WARNING: Major allele SNP %s does not match reference genome: %s vs %s' % (snp.rsid, snp.ref_allele, seq_ref_snp), file=sys.stderr)
exit(1)
# specify positions in GFF-style 1-based
seq_start = snp.pos2 - left_len
seq_end = snp.pos2 + right_len + len(alt_al)
# extract sequence as BED style
if seq_start < 0:
seq_alt = 'N'*(-seq_start) + genome2.fetch(snp.chrom, 0, seq_end).upper()
else:
seq_alt = genome2.fetch(snp.chrom, seq_start-1, seq_end).upper()
# extend to full length
if len(seq_alt) < seq_end - seq_start:
seq_alt += 'N'*(seq_end-seq_start-len(seq_alt))
# verify that ref allele matches ref sequence
seq_alt_snp = seq_alt[left_len:left_len+len(alt_al)]
if seq_alt_snp != alt_al:
            print('WARNING: Minor allele SNP %s does not match reference genome: %s vs %s' % (snp.rsid, snp.alt_alleles[0], seq_alt_snp), file=sys.stderr)
exit(1)
seq_snps.append(snp)
# one hot code ref allele
seq_vecs_ref, seq_ref = dna_length_1hot(seq_ref, seq_len)
seq_vecs_list.append(seq_vecs_ref)
if return_seqs:
seqs.append(seq_ref)
# name ref allele
seq_headers.append('%s_%s' % (snp.rsid, cap_allele(snp.ref_allele)))
# one hot code alt allele
seq_vecs_alt, seq_alt = dna_length_1hot(seq_alt, seq_len)
seq_vecs_list.append(seq_vecs_alt)
if return_seqs:
seqs.append(seq_alt)
# name
seq_headers.append('%s_%s' % (snp.rsid, cap_allele(alt_al)))
# stack
seq_vecs = np.vstack(seq_vecs_list)
if return_seqs:
return seq_vecs, seq_headers, seq_snps, seqs
else:
return seq_vecs, seq_headers, seq_snps
def dna_length_1hot(seq, length):
''' Adjust the sequence length and compute
a 1hot coding. '''
if length < len(seq):
# trim the sequence
seq_trim = (len(seq)-length)//2
seq = seq[seq_trim:seq_trim+length]
elif length > len(seq):
# extend with N's
nfront = (length-len(seq))//2
nback = length - len(seq) - nfront
seq = 'N'*nfront + seq + 'N'*nback
seq_1hot = dna_one_hot(seq)
return seq_1hot, seq
def vcf_snps(vcf_file, index_snp=False, score=False, pos2=False):
''' Load SNPs from a VCF file '''
vcf_in = open(vcf_file)
# read through header
line = vcf_in.readline()
while line[0] == '#':
line = vcf_in.readline()
# read in SNPs
snps = []
while line:
snps.append(SNP(line, index_snp, score, pos2))
line = vcf_in.readline()
return snps
class SNP:
''' SNP
Represent SNPs read in from a VCF file
Attributes:
vcf_line (str)
'''
def __init__(self, vcf_line, index_snp=False, score=False, pos2=False):
a = vcf_line.split()
if a[0].startswith('chr'):
self.chrom = a[0]
else:
self.chrom = 'chr%s' % a[0]
self.pos = int(a[1])
self.rsid = a[2]
self.ref_allele = a[3]
self.alt_alleles = a[4].split(',')
self.index_snp = '.'
if index_snp:
self.index_snp = a[5]
self.score = None
if score:
self.score = float(a[6])
self.pos2 = None
if pos2:
self.pos2 = int(a[5])
def get_alleles(self):
''' Return a list of all alleles '''
alleles = [self.ref_allele] + self.alt_alleles
return alleles
def longest_alt(self):
''' Return the longest alt allele. '''
return max([len(al) for al in self.alt_alleles])
def __str__(self):
return 'SNP(%s, %s:%d, %s/%s)' % (self.rsid, self.chrom, self.pos, self.ref_allele, ','.join(self.alt_alleles))
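# A minimal usage sketch (not part of the original module): load SNPs from a
# VCF and one-hot code the sequence around each allele. The file names below
# ('snps.vcf', 'genome.fa') are hypothetical placeholders; the FASTA must be
# indexed so pysam.Fastafile can open it.
if __name__ == '__main__':
    snps = vcf_snps('snps.vcf')
    seq_vecs, seq_headers, seq_snps = snps_seq1(snps, 600, 'genome.fa')
    print('one-hot array shape: %s, %d sequence headers' % (str(seq_vecs.shape), len(seq_headers)))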
|
davek44/Basset
|
src/bvcf.py
|
Python
|
mit
| 9,551
|
[
"pysam"
] |
1382ddd8214fbbbfb644e7f80098c36b521bfc91c3e86c66262f7690167fe59f
|
#!/usr/bin/env python
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install sampleproject
#
# And where it will live on PyPI: https://pypi.org/project/sampleproject/
#
# There are some restrictions on what makes a valid project name
# specification here:
# https://packaging.python.org/specifications/core-metadata/#name
name='fooltrader', # Required
# Versions should comply with PEP 440:
# https://www.python.org/dev/peps/pep-0440/
#
# For a discussion on single-sourcing the version across setup.py and the
# project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.0.1a1', # Required
# This is a one-line description or tagline of what your project does. This
# corresponds to the "Summary" metadata field:
# https://packaging.python.org/specifications/core-metadata/#summary
description='Open source quantitative framework for Humans', # Required
# This is an optional longer description of your project that represents
# the body of text which users will see when they visit PyPI.
#
# Often, this is the same as your README, so you can just read it in from
# that file directly (as we have already done above)
#
# This field corresponds to the "Description" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description=long_description, # Optional
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url='https://github.com/foolcage/fooltrader', # Optional
# This should be your name or the name of the organization which owns the
# project.
author='foolcage', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='5533061@qq.com', # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Customer Service',
'Intended Audience :: Education',
'Intended Audience :: Financial and Insurance Industry',
'Topic :: Software Development :: Build Tools',
'Topic :: Office/Business :: Financial :: Investment',
# Pick your license as you wish
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a string of words separated by whitespace, not a list.
keywords='stock finance fintech big-data tushare vnpy technical-analysis trading-platform elasticsearch kafka pandas fundamental-analysis quant', # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=find_packages(exclude=['contrib', 'docs', 'tests']), # Required
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['requests', 'Scrapy','Twisted','demjson','elasticsearch-dsl','kafka_python','openpyxl','xlrd','pandas','pytdx','apscheduler'], # Optional
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
# extras_require={ # Optional
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here.
#
# If using Python 2.6 or earlier, then these have to be included in
# MANIFEST.in as well.
# package_data={ # Optional
# 'sample': ['package_data.dat'],
# },
    # Although 'package_data' is the preferred approach, in some cases you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])], # Optional
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
entry_points={ # Optional
'console_scripts': [
'datamanager=datamanager:main',
],
},
# List additional URLs that are relevant to your project as a dict.
#
# This field corresponds to the "Project-URL" metadata fields:
# https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use
#
# Examples listed include a pattern for specifying where the package tracks
# issues, where the source is hosted, where to say thanks to the package
# maintainers, and where to support the project financially. The key is
# what's used to render the link text on PyPI.
project_urls={ # Optional
'Bug Reports': 'https://github.com/foolcage/fooltrader/issues',
'Funding': 'https://github.com/foolcage/fooltrader',
'Say Thanks!': 'https://saythanks.io/to/foolcage',
'Source': 'https://github.com/foolcage/fooltrader',
},
)
|
foolcage/fooltrader
|
setup.py
|
Python
|
mit
| 7,553
|
[
"VisIt"
] |
4eb877916e5aabb0c3c18dac26b893ac8afdf3b9c2d09dab78818abd8bf35699
|
# -*- coding: utf-8 -*-
# QuickFF is a code to quickly derive accurate force fields from ab initio input.
# Copyright (C) 2012 - 2019 Louis Vanduyfhuys <Louis.Vanduyfhuys@UGent.be>
# Steven Vandenbrande <Steven.Vandenbrande@UGent.be>,
# Jelle Wieme <Jelle.Wieme@UGent.be>,
# Toon Verstraelen <Toon.Verstraelen@UGent.be>, Center for Molecular Modeling
# (CMM), Ghent University, Ghent, Belgium; all rights reserved unless otherwise
# stated.
#
# This file is part of QuickFF.
#
# QuickFF is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# QuickFF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
import numpy as np
import os
from molmod.units import *
from molmod.constants import lightspeed
from molmod.periodic import periodic as pt
from molmod.io import load_chk
from yaff import System
from quickff.tools import set_ffatypes
from quickff.program import DeriveFF
from quickff.settings import Settings
from quickff.reference import SecondOrderTaylor
from .common import log, read_system, tmpdir
try:
from importlib.resources import path
except ImportError:
from importlib_resources import path
def test_h2():
#frequency of H2 stretch mode in gaussian.fchk calculation is 4416.656/cm
#and an equilibrium bond length of 0.7442380 A. This test checks if the
#force field predicts the same values
r0 = 0.7442380*angstrom
freq = (2*np.pi)*4416.65640485*lightspeed/centimeter
mass = pt['H'].mass/2 #reduced mass for the H2 stretch mode
#Load system, model and pert. theory and estimate ff
with log.section('NOSETST', 2):
system, ai = read_system('H2/gaussian.fchk')
set_ffatypes(system, 'low')
program = DeriveFF(system, ai, Settings())
program.do_pt_generate()
program.do_pt_estimate()
K_pt, rv_pt = program.valence.get_params(0, only='all')
program.do_hc_estimatefc(['HC_FC_DIAG'])
K_hc, rv_hc = program.valence.get_params(0, only='all')
#print results
print('')
print('AI : K = %.3f kjmol/A^2 q0 = %.6f A' %(mass*freq**2/(kjmol/angstrom**2), r0/angstrom))
print('FF (PT): K = %.3f kjmol/A^2 q0 = %.6f A' %(K_pt/(kjmol/angstrom**2), rv_pt/angstrom))
print('FF (HC): K = %.3f kjmol/A^2 q0 = %.6f A' %(K_hc/(kjmol/angstrom**2), rv_hc/angstrom))
print('')
#perform assertion checks
assert abs(K_pt/(mass*freq**2)-1.0) < 1e-3
assert abs(rv_pt/r0-1.0) < 1e-3
assert abs(K_hc/(mass*freq**2)-1.0) < 1e-3
assert abs(rv_hc/r0-1.0) < 1e-3
assert abs(K_hc/K_pt-1.0) < 1e-6
assert abs(rv_hc/rv_pt-1.0) < 1e-6
def test_output_charmm22():
with log.section('NOSETST', 2):
system, ai = read_system('ethanol/gaussian.fchk')
set_ffatypes(system, 'low')
with tmpdir('test_output_charmm22') as dn:
fn_yaff = os.path.join(dn, 'pars_cov.txt')
fn_charmm22_prm = os.path.join(dn, 'test.prm')
fn_charmm22_psf = os.path.join(dn, 'test.psf')
fn_sys = os.path.join(dn, 'system.chk')
settings = Settings(
do_cross_ASS=False, do_cross_ASA=False,
fn_yaff=fn_yaff, fn_sys=fn_sys,
fn_charmm22_prm=fn_charmm22_prm,
fn_charmm22_psf=fn_charmm22_psf,
)
program = DeriveFF(system, ai, settings)
program.run()
assert os.path.isfile(fn_yaff)
assert os.path.isfile(fn_charmm22_prm)
assert os.path.isfile(fn_charmm22_psf)
assert os.path.isfile(fn_sys)
# Count the number of BOND, ANGLES and DIHEDRAL lines in the PRM file.
counts = {}
with open(fn_charmm22_prm, 'r') as f:
for line in f:
print(line)
line = line[:line.find('!')].strip()
if len(line) == 0:
continue
if line in ['BONDS','ANGLES', 'DIHEDRALS', 'IMPROPER']:
key = line
counts[key] = 0
else:
counts[key] += 1
assert counts['BONDS'] == 4
assert counts['ANGLES'] == 5
assert counts['DIHEDRALS'] == 3
assert counts['IMPROPER'] == 0
# Count the number atoms, bonds, angles and dihedrals in the PSF file and
# check for consistency.
with open(fn_charmm22_psf, 'r') as f:
natom = 0
assert next(f) == 'PSF\n'
for line in f:
if '!NATOM' in line:
natom = int(line.split()[0])
break
assert natom == system.natom
for iatom in range(natom+1):
next(f)
line = next(f)
assert '!NBOND: bonds' in line
nbond = int(line.split()[0])
nline = int(np.ceil(nbond/4.0))
numbers = (''.join([next(f) for iline in range(nline)])).split()
assert len(numbers) == nbond*2
next(f)
line = next(f)
assert '!NTHETA: angles' in line
ntheta = int(line.split()[0])
nline = int(np.ceil(ntheta/3.0))
numbers = (''.join([next(f) for iline in range(nline)])).split()
assert len(numbers) == ntheta*3
next(f)
line = next(f)
assert '!NPHI: dihedrals' in line
nphi = int(line.split()[0])
nline = int(np.ceil(nphi/2.0))
numbers = (''.join([next(f) for iline in range(nline)])).split()
assert len(numbers) == nphi*4
next(f)
line = next(f)
assert '!NIMPHI: impropers' in line
nimphi = int(line.split()[0])
assert nimphi == 0
def compare_crossterm_rest_values(program,equal=True):
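    """Print the rest value of every cross term next to the rest values of its
    diagonal terms and assert that they are equal (or not, depending on ``equal``,
    i.e. on whether consistent_cross_rvs was used); torsion rest values are
    expected to match in both cases."""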
print("%50s %15s %15s %15s"%("Basename","Cross RV","Diag RV","Delta"))
for term in program.valence.terms:
if not term.is_master(): continue
if term.basename.startswith('Cross'):
for i in [0,1]:
rv0 = program.valence.get_params(term.index, only='rv%d'%i)
if program.valence.terms[term.diag_term_indexes[i]].basename.startswith('Tors'):
rv0_diag = -program.valence.get_params(term.diag_term_indexes[i], only='sign')
assert (rv0==rv0_diag) # Torsion rest values are always the same
else:
rv0_diag = program.valence.get_params(term.diag_term_indexes[i], only='rv')
assert (rv0==rv0_diag)==equal # Other rest values are only the
# same if consistent_cross_rvs was set to True
print("%50s %15.6f %15.6f %+15.2e" % (term.basename,rv0,rv0_diag,rv0-rv0_diag))
def test_benzene_consistent_crossterms():
with log.section('NOSETEST', 2):
system, ai = read_system('benzene/gaussian.fchk')
set_ffatypes(system, 'high')
for consistent in [False, True]:
with tmpdir('test_benzene_%s'%('consistent' if consistent else 'inconsistent')) as dn:
fn_yaff = os.path.join(dn, 'pars_cov.txt')
fn_sys = os.path.join(dn, 'system.chk')
program = DeriveFF(system, ai, Settings(consistent_cross_rvs=consistent,
fn_yaff=fn_yaff,fn_sys=fn_sys,do_cross_DSS=True,do_cross_DSD=True,
do_cross_DAA=True,do_cross_DAD=True))
program.run()
compare_crossterm_rest_values(program,equal=consistent)
def test_methane_consistent_crossterms():
with log.section('NOSETEST', 2):
system, ai = read_system('methane/gaussian.fchk')
set_ffatypes(system, 'high')
for consistent in [False, True]:
with tmpdir('test_methane_%s'%('consistent' if consistent else 'inconsistent')) as dn:
fn_yaff = os.path.join(dn, 'pars_cov.txt')
fn_sys = os.path.join(dn, 'system.chk')
program = DeriveFF(system, ai, Settings(consistent_cross_rvs=consistent,
fn_yaff=fn_yaff,fn_sys=fn_sys,do_cross_DSS=True,do_cross_DSD=True,
do_cross_DAA=True,do_cross_DAD=True))
program.run()
compare_crossterm_rest_values(program,equal=consistent)
def test_uio66zrbrick_crossterms():
with log.section('NOSETEST', 2):
# Load input data for a ficticious system of an isolated
# UiO-66 brick
with path('quickff.data.systems.uio66-zr-brick', 'system.chk') as fn:
data = load_chk(fn)
system = System(data['numbers'],data['pos'],charges=data['charges'],
ffatypes=data['ffatypes'],bonds=data['bonds'],radii=data['radii'])
system.set_standard_masses()
ai = SecondOrderTaylor('ai', coords=system.pos.copy(),
grad=data['gradient'], hess=data['hessian'])
# Run QuickFF
with tmpdir('test_uio66') as dn:
fn_yaff = os.path.join(dn, 'pars_cov.txt')
fn_sys = os.path.join(dn, 'system.chk')
fn_log = os.path.join(dn, 'quickff.log')
program = DeriveFF(system, ai, Settings(consistent_cross_rvs=True,
remove_dysfunctional_cross=True,fn_yaff=fn_yaff,fn_sys=fn_sys,log_file=fn_log))
program.run()
# Check force constants of cross terms and corresponding diagonal terms
print("%50s %15s %15s"%("Basename","Cross FC","Diag FC"))
for term in program.valence.terms:
if not term.is_master(): continue
if term.basename.startswith('Cross'):
fc = program.valence.get_params(term.index, only='fc')
for i in [0,1]:
fc_diag = program.valence.get_params(term.diag_term_indexes[i], only='fc')
print("%50s %15.6f %15.6f %50s" % (term.basename,fc,fc_diag,program.valence.terms[term.diag_term_indexes[i]].basename))
if fc_diag==0.0: assert fc==0.0
|
molmod/QuickFF
|
quickff/tests/test_examples.py
|
Python
|
gpl-3.0
| 10,660
|
[
"Gaussian"
] |
7dc4e21c4aa6c65d6505e8108ec6bc641339c1cf7da084ef26a0b9f18218e781
|
BASAL_NAME_FOLDER = "RUN_EVOEVO"
BASAL_NAME_PARENT_FOLDER = "./RESULTS"
FILE_NAME_SEPARATOR = "_"
SEP_IN_FILE = "\t"
END_OF_LINE_FILE = "\n"
COMMENTS_IN_FILE = '# '
GAUSSIAN = 0
UNIFORM = 1
TRANSITION_MATRIX = 2
GENE_SIZE = 4
FITNESS_COPY_FUNCTION_WITH_DOTS = 0
FITNESS_COPY_FUNCTION_TRIANGLES = 1
FITNESS_AEVOL = 2
FITNESS_CLUSTER_1 = 3
FITNESS_CLUSTER_2 = 4
FITNESS_CLUSTER_3 = 11
FITNESS_RANDOM = 5
FITNESS_MODE_CLUSTERING_RADIAL_D = 6
FITNESS_MODE_CLUSTERING_COSINE_D = 7
FITNESS_MODE_CLUSTERING_INNER_PRODUCT = 8
FITNESS_MODE_CLUSTERING_INNER_PRODUCT_RADIAL = 9
FITNESS_MODE_CLUSTERING_INNER_PRODUCT_COSINE = 10
PSEUDOGENE_GENE_TYPE = 0
CENTROID_GENE_TYPE = 1
DIMENSION_GENE_TYPE = 2
POSITION_GENE_TYPE = 3
MAX_INT = 100000000
MIN_INT = -100000000
STATS_FITNESS = 0
STATS_CODING_RATIO = 1
STATS_GENOME_LENGTH = 2
STATS_FITNESS_INDEX_POS = 3
STATS_PARENT = 4
STATS_INDEX = 5
STATS_FEATURES = ["fitness","coding_ratio","genome_length","fitness_pos","parent_index","index"]
STATS_TO_DROP = ["fitness_pos"]
STD_AGGREGATION_COLUMNS = ["fitness","coding_ratio","genome_length","best_fitness","best_coding_ratio","best_genome_length","best_nb_core_points","best_avg_core_points_dim","best_nb_clusters","best_avg_clusters_dim","entropy","accuracy","f1","CE"]
STD_USEFULL_STATS_AGGREGATION = ["fitness","coding_ratio","genome_length"]
STD_FUNCTIONAL_EVALUATION_COLUMNS = ["Entropy", "Accuracy", "F1", "CE","RNIA","CE_SSC","Coverage"]
STD_STRUCTURAL_EVALUATION_COLUMNS = ["nb_core_points", "core_points_avg_dim", "nb_clusters", "clusters_avg_dim"]
STD_FITNESS_RANDOM_WALK_STUDY = ["rho_f","tau_f"]
STD_Fw_RANDOM_WALK_STUDY = ["rho_fw","tau_fw"]
STD_Fv_RANDOM_WALK_STUDY = ["rho_fv","tau_fv"]
STD_Fb_RANDOM_WALK_STUDY = ["rho_fb","tau_fb"]
STD_FW_FV_FB_COLUMNS = ["fw","fv","fb"]
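# 2x2 transition matrices between the pseudogene and gene states (row = current
# state, column = next state); judging by their names, they presumably encode
# which of the two states, if any, is absorbing.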
NONABSORBINGSTATE = [[0,1],
[1,0]]
PSEUDOGENEABSORBING = [[1,0],
[1,0]]
GENEABSORBING = [[0,1],
[0,1]]
BOTHABSORBING = [[1,0],
[0,1]]
STD_MUT_RATE = 0.005
STD_GENE_SIZE = 4
STD_INIT_GENES = 100
STD_DATA_BUFFER = 1024
STD_SIZE_DATA_POINT = 1024
STD_SIZE_POP_MEMO = 100
STD_GENOME_LIM_SIZE = 10000
STD_CASCADE_MUT = 0
STD_POP_SIZE = 100
STD_SELECT_PRESSURE = 0.8
STD_KMEAN_ITERATIONS = 10
STD_NORM_EXPONENT = 1
STD_FITNESS_MODE = FITNESS_CLUSTER_2
STD_CURRENT_GENERATION_INDEX = 0
STD_LOG_FILE = "test.PICKLE"
STD_PRNG_SEED = 0
STD_SORT_BY_POSITION = 1
STD_ORDER_FOR_INTERGENIC_CUT_OFF = [0,1,2,3]
STD_SAVE_BEST_INDIVIDUAL = 1
STD_NON_CODING_GENOME_FITNESS = -100000000
STD_DIM_GEOMETRIC_WEIGHT = 1.0
STD_RES_DISTANCE_WEIGHT = 0.5
STD_SHUFFLE_GENOME = 0
STD_GENE_MUTATION_PROBABILITIES = [0.25,0.25,0.25,0.25]
STD_PRECISION = 1000
|
speignier/suppositoire
|
definitions.py
|
Python
|
gpl-2.0
| 3,228
|
[
"Gaussian"
] |
11b4746855294abae24a2f4d7d9fddd5874bb3ba40287c0e26746799d6d20140
|
import collections
import os
from xml.etree import ElementTree
from galaxy.util.submodules import submodules
from galaxy import util
from ..metrics import formatting
import logging
log = logging.getLogger( __name__ )
DEFAULT_FORMATTER = formatting.JobMetricFormatter()
class JobMetrics( object ):
def __init__( self, conf_file=None, **kwargs ):
"""
"""
self.plugin_classes = self.__plugins_dict()
self.default_job_instrumenter = JobInstrumenter.from_file( self.plugin_classes, conf_file, **kwargs )
self.job_instrumenters = collections.defaultdict( lambda: self.default_job_instrumenter )
def format( self, plugin, key, value ):
if plugin in self.plugin_classes:
plugin_class = self.plugin_classes[ plugin ]
formatter = plugin_class.formatter
else:
formatter = DEFAULT_FORMATTER
return formatter.format( key, value )
def set_destination_conf_file( self, destination_id, conf_file ):
instrumenter = JobInstrumenter.from_file( self.plugin_classes, conf_file )
self.set_destination_instrumenter( destination_id, instrumenter )
def set_destination_conf_element( self, destination_id, element ):
instrumenter = JobInstrumenter( self.plugin_classes, element )
self.set_destination_instrumenter( destination_id, instrumenter )
def set_destination_instrumenter( self, destination_id, job_instrumenter=None ):
if job_instrumenter is None:
job_instrumenter = NULL_JOB_INSTRUMENTER
self.job_instrumenters[ destination_id ] = job_instrumenter
def collect_properties( self, destination_id, job_id, job_directory ):
return self.job_instrumenters[ destination_id ].collect_properties( job_id, job_directory )
def __plugins_dict( self ):
plugin_dict = {}
for plugin_module in self.__plugin_modules():
for clazz in plugin_module.__all__:
plugin_type = getattr( clazz, 'plugin_type', None )
if plugin_type:
plugin_dict[ plugin_type ] = clazz
return plugin_dict
def __plugin_modules( self ):
import galaxy.jobs.metrics.instrumenters
return submodules( galaxy.jobs.metrics.instrumenters )
class NullJobInstrumenter( object ):
def pre_execute_commands( self, job_directory ):
return None
def post_execute_commands( self, job_directory ):
return None
def collect_properties( self, job_id, job_directory ):
return {}
NULL_JOB_INSTRUMENTER = NullJobInstrumenter()
class JobInstrumenter( object ):
def __init__( self, plugin_classes, metrics_element, **kwargs ):
self.extra_kwargs = kwargs
self.plugin_classes = plugin_classes
self.plugins = self.__plugins_for_element( metrics_element )
def pre_execute_commands( self, job_directory ):
commands = []
for plugin in self.plugins:
try:
plugin_commands = plugin.pre_execute_instrument( job_directory )
if plugin_commands:
commands.extend( util.listify( plugin_commands ) )
except Exception:
log.exception( "Failed to generate pre-execute commands for plugin %s" % plugin )
return "\n".join( [ c for c in commands if c ] )
def post_execute_commands( self, job_directory ):
commands = []
for plugin in self.plugins:
try:
plugin_commands = plugin.post_execute_instrument( job_directory )
if plugin_commands:
commands.extend( util.listify( plugin_commands ) )
except Exception:
log.exception( "Failed to generate post-execute commands for plugin %s" % plugin )
return "\n".join( [ c for c in commands if c ] )
def collect_properties( self, job_id, job_directory ):
per_plugin_properites = {}
for plugin in self.plugins:
try:
properties = plugin.job_properties( job_id, job_directory )
if properties:
per_plugin_properites[ plugin.plugin_type ] = properties
except Exception:
log.exception( "Failed to collect job properties for plugin %s" % plugin )
return per_plugin_properites
def __plugins_for_element( self, plugins_element ):
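        # Instantiate one plugin per child element of the metrics configuration:
        # the element tag selects the plugin class and the element attributes
        # (plus any extra keyword arguments) become constructor parameters.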
plugins = []
for plugin_element in plugins_element.getchildren():
plugin_type = plugin_element.tag
plugin_kwds = dict( plugin_element.items() )
plugin_kwds.update( self.extra_kwargs )
plugin = self.plugin_classes[ plugin_type ]( **plugin_kwds )
plugins.append( plugin )
return plugins
@staticmethod
def from_file( plugin_classes, conf_file, **kwargs ):
if not conf_file or not os.path.exists( conf_file ):
return NULL_JOB_INSTRUMENTER
plugins_element = ElementTree.parse( conf_file ).getroot()
return JobInstrumenter( plugin_classes, plugins_element, **kwargs )
|
jmchilton/lwr
|
galaxy/jobs/metrics/__init__.py
|
Python
|
apache-2.0
| 5,101
|
[
"Galaxy"
] |
f813e73d970db18e0f8d70e80e12b91a2d55a180c2b9eb62591885c41944bcae
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
# import funkcí z jiného adresáře
import os.path
import sys
path_to_script = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(path_to_script, "../extern/sed3/"))
# from ..extern.sed3 import sed3
# import featurevector
from loguru import logger
# logger = logging.getLogger()
import numpy as np
import scipy.ndimage
# import vtk
import argparse
# @TODO remove logger debug message from the header
logger.debug("before morphology import")
from skimage import morphology
# from PyQt4 import QtCore, QtGui
# from PyQt4.QtGui import *
# from PyQt4.QtCore import Qt
# from PyQt4.QtGui import QApplication
# from PyQt4.QtGui import QApplication, QMainWindow, QWidget,\
# QGridLayout, QLabel, QPushButton, QFrame, QFileDialog,\
# QFont, QInputDialog, QComboBox, QRadioButton, QButtonGroup
# ----------------- my scripts --------
from . import misc
import sed3
# import show3
from . import qmisc
from . import data_manipulation
import imma.image_manipulation as ima
def resection(data, name=None, method='PV',
interactivity=True, seeds=None, **kwargs):
"""
Main resection function.
    :param data: dictionary with data3d, segmentation and slab keys.
    :param method: "PV", "planar" or "PV_new"
:param interactivity: True or False, use seeds if interactivity is False
:param seeds: used as initial interactivity state
:param kwargs: other parameters for resection algorithm
:return:
"""
    if method == 'PV':
        return resection_old(data, interactivity=interactivity, seeds=seeds)
    elif method == 'planar':
        return resection_planar(data, interactivity=interactivity, seeds=seeds)
    elif method == "PV_new":
return resection_portal_vein_new(data, interactivity=interactivity, seeds=seeds, organ_label=data["slab"]["liver"], vein_label=data["slab"]["porta"])
# return resection_portal_vein_new(data, interactivity=interactivity, seeds=seeds, **kwargs)
else:
return resection_with_3d_visualization(data, **kwargs)
def Rez_podle_roviny(plane, data, voxel):
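    """Cut the volume along a plane ("rez podle roviny" = cut along a plane).

    Converts the plane normal and origin (VTK-style plane object) into voxel
    coordinates, assigns every voxel to one side of the plane and returns the
    masked left-hand part of ``data`` together with the percentage of the
    object that would be removed.
    """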
a = plane.GetNormal()[0] * voxel[0]
b = plane.GetNormal()[1] * voxel[1]
c = plane.GetNormal()[2] * voxel[2]
xx = plane.GetOrigin()[0] / voxel[0]
yy = plane.GetOrigin()[1] / voxel[1]
zz = plane.GetOrigin()[2] / voxel[2]
d = -(a * xx) - (b * yy) - (c * zz)
mensi = 0
vetsi = 0
mensi_objekt = 0
vetsi_objekt = 0
print('x: ', a, ' y: ', b, ' z: ', c)
    print('Computing the cut...')
prava_strana = np.ones((data.shape[0], data.shape[1], data.shape[2]))
leva_strana = np.ones((data.shape[0], data.shape[1], data.shape[2]))
dimension = data.shape
for x in range(dimension[0]):
for y in range(dimension[1]):
for z in range(dimension[2]):
rovnice = a * x + b * y + c * z + d
if((rovnice) <= 0):
mensi = mensi + 1
if(data[x][y][z] == 1):
mensi_objekt = mensi_objekt + 1
leva_strana[x][y][z] = 0
else:
vetsi = vetsi + 1
if(data[x][y][z] == 1):
vetsi_objekt = vetsi_objekt + 1
prava_strana[x][y][z] = 0
leva_strana = leva_strana * data
objekt = mensi_objekt + vetsi_objekt
odstraneni_procenta = ((100 * mensi_objekt) / objekt)
print(leva_strana)
return leva_strana, odstraneni_procenta
# ----------------------------------------------------------
def cut_editor_old(data, label=None):
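    """Open the sed3 slice editor on data['data3d'] with the selected
    segmentation label as contour and return the seeds drawn by the user."""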
logger.debug("editor input label: " + str(label))
if label is None:
contour=data['segmentation']
else:
if type(label) == str:
label = data['slab'][label]
contour=(data['segmentation'] == label).astype(np.int8)
pyed = sed3.sed3qt(data['data3d'], contour=contour)
pyed.exec_()
return pyed.seeds
def split_vessel(datap, seeds, vessel_volume_threshold=0.95, dilatation_iterations=1, input_label="porta",
output_label1 = 1, output_label2 = 2, input_seeds_cut_label=1,
input_seeds_separate_label=3,
input_seeds_label2=None,
method="reach volume",
):
"""
:param datap: data plus format with data3d, segmentation, slab ...
    :param seeds: 3d ndarray of the same size as data3d; label 1 marks the place where the vessel
    should be cut. Label 2 points to the vessel part that gets output label 1 after the segmentation.
    :param vessel_volume_threshold: defines the iteration stop rule if the "reach volume" method is selected
    :param dilatation_iterations: number of dilation iterations applied to the cut seeds in each step
    :param input_label: which vessel should be split
    :param output_label1: output label for the vessel part marked with the right button (if it is used)
    :param output_label2: output label for the unmarked vessel part
    :param method: "separate labels" or "reach volume". The first method needs 3 input seeds and is more stable.
    :param input_seeds_separate_label: after the segmentation, the object containing this label in seeds is
    labeled with output_label1
    :param input_seeds_label2: used if the method is "separate labels". After the segmentation, the object
    containing this label in seeds is labeled with output_label2.
:return:
"""
split_obj0 = (seeds == input_seeds_cut_label).astype(np.int8)
split_obj = split_obj0.copy()
# numeric_label = imma.get_nlabel(datap["slab"], input_label)
if method == "separate labels":
input_label = np.max(datap["segmentation"][seeds == input_seeds_label2])
vessels = ima.select_labels(datap["segmentation"], input_label, slab=datap["slab"])
# if type(input_label) is str:
# numeric_label = datap['slab'][input_label]
# else:
# numeric_label = input_label
# vessels = datap['segmentation'] == numeric_label
vesselstmp = vessels
sumall = np.sum(vessels == 1)
# split_obj = scipy.ndimage.binary_dilation(split_obj, iterations = 5 )
# vesselstmp = vessels * (1 - split_obj)
lab, n_obj = scipy.ndimage.label(vesselstmp)
logger.debug("number of objects " + str(n_obj))
# while n_obj < 2 :
    # keep dilating the cut until a sufficiently large part of the whole object has been chipped off
not_complete = True
while not_complete:
if method == "reach volume":
not_complete = np.sum(lab == qmisc.max_area_index(lab, n_obj)) > (vessel_volume_threshold * sumall)
elif method == "separate labels":
# misc.
# imma.get_nlabel(datap["slab"], )
# imma.select_labels(seeds,input_seeds_separate_label)
seglab1 = np.max(lab[seeds == input_seeds_separate_label])
seglab2 = np.max(lab[seeds == input_seeds_label2])
if (seglab1 > 0) and (seglab2 > 0) and (seglab1 != seglab2):
not_complete = False
else:
IOError("Unknown method " + str(method))
split_obj = scipy.ndimage.binary_dilation(split_obj, iterations=dilatation_iterations)
vesselstmp = vessels * (1 - split_obj)
lab, n_obj = scipy.ndimage.label(vesselstmp)
if method == "reach volume":
        # all the objects the vessel has fallen apart into
# pyed = sed3.sed3(lab)
# pyed.show()
obj1 = get_biggest_object(lab)
        # remove the biggest one
lab[obj1 == 1] = 0
obj2 = get_biggest_object(lab)
pixel = 0
pixels = obj1[seeds == input_seeds_separate_label]
if len(pixels) > 0:
pixel = pixels[0]
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
# import ipdb; ipdb.set_trace() # BREAKPOINT
if pixel > 0:
ol1 = output_label1
ol2 = output_label2
else:
ol2 = output_label1
ol1 = output_label2
# first selected pixel with right button
lab = ol1 * obj1 + ol2 * obj2
elif method == "separate labels":
lab = (lab == seglab1) * output_label1 + (lab == seglab2) * output_label2
cut_by_user = split_obj0
return lab, cut_by_user
def Resekce_podle_bodu(data, seeds):
lab, cut = split_vessel(data, seeds)
segm, dist1, dist2 = split_organ_by_two_vessels(data, lab)
data = virtual_resection_visualization(data, segm, dist1, dist2, cut)
return data
def cut_editor(data, inputfile):
    # @TODO handle the viewer module, see issue #69
import viewer3
# global normal,coordinates
viewer = viewer3.Viewer(inputfile, 'View')
    # display the liver in the code
viewer.prohlizej(data, 'View', 'liver')
# mesh = viewer.generate_mesh(segmentation,voxelsize_mm,degrad)
# viewer.View(mesh,False)
# viewer.buttons(window,grid)
# print(viewer.normal)
# print(viewer.coordinates)
'''
    The function returns a three-dimensional array similar to data['segmentation'];
    data['slab'] describes what each value means.
labels = []
segmentation = segmentation[::degrad,::degrad,::degrad]
print("Generuji data...")
segmentation = segmentation[:,::-1,:]
mesh_data = seg2fem.gen_mesh_from_voxels_mc(segmentation,
voxelsize_mm*degrad)
print("Done")
if True:
mesh_data.coors = seg2fem.smooth_mesh(mesh_data)
vtk_file = "mesh_geom.vtk"
mesh_data.write(vtk_file)
app = QApplication(sys.argv)
#view = viewer3.QVTKViewer(vtk_file,'Cut')
'''
# normal = viewer3.normal_and_coordinates().set_normal()
# coordinates = viewer3.normal_and_coordinates().set_coordinates()
# return normal,coordinates
pass
def change(data, name):
# data['segmentation'][vessels == 2] = data['slab']['porta']
segmentation = data['segmentation']
cut_editor(segmentation == data['slab'][name])
def velikosti(a):
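    """Return the voxel counts of labels 1, 2 and 3 in ``a`` ("velikosti" = sizes)."""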
# a_index = [0, 0, 0]
# for x in range(0, len(a)):
# for y in range(0, len(a[0])):
# for z in range(0, len(a[0][0])):
# if a[x][y][z] == 1:
# a_index[0] += 1
# elif a[x][y][z] == 2:
# a_index[1] += 1
# elif a[x][y][z] == 3:
# a_index[2] += 1
mx = np.max(a)
a_index = []
for i in range(1, 4): # for i in range(1, mx + 1):
sm = np.sum(a == i)
a_index.append(sm)
return a_index
def nejnizsi(a, b, c):
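    """Return 1, 2 or 3 according to which of a, b, c is the smallest
    ("nejnizsi" = lowest); prints an error when no strict ordering is found."""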
if a > b:
if b > c:
return 3
else:
return 2
elif b > c:
if c > a:
return 1
else:
return 3
elif c > a:
if a > b:
return 2
else:
return 1
else:
print("chyba")
def resection_portal_vein_new(data, interactivity=False, seeds=None, organ_label=1, vein_label=2):
"""
New function for portal vein segmentation
:param data:
:param interactivity:
:param seeds:
:param kwargs:
:return:
"""
# ed = sed3.sed3(a)
# ed.show()
# from PyQt4 import QtGui
# from PyQt4.QtGui import QApplication, QMainWindow, QVBoxLayout, QHBoxLayout, QLabel, QPushButton, QFrame, \
# QFont, QPixmap, QFileDialog
#
# window = QtGui.QWidget()
# mainLayout = QVBoxLayout()
# window.setLayout(mainLayout)
# mainLayout.addWidget(sed3.sed3qtWidget(data['data3d'], contour=data['segmentation']))
    # keep the original data
segmentation = data["segmentation"]
data3d = data["data3d"]
    # data containing only the segmentations
segm = ((data["segmentation"] == organ_label) * organ_label +
(data["segmentation"] == vein_label) * vein_label)
# ed = sed3.sed3(segm)
# ed.show()
    # crop the segmentation
crinfo = qmisc.crinfo_from_specific_data(segm, [0])
data["segmentation"] = qmisc.crop(segm, crinfo)
data["data3d"] = qmisc.crop(data3d, crinfo)
if seeds is not None:
seeds = qmisc.crop(seeds, crinfo)
    # @TODO replace the bowel here with something more meaningful
if interactivity:
print("Select cut")
# seeds = cut_editor_old(data)
seeds = cut_editor_old(data)
elif seeds is None:
logger.error('seeds is None and interactivity is False')
return None
lab, cut = split_vessel(data, seeds)
segm, dist1, dist2 = split_organ_by_two_vessels(data, lab)
    # liver split into 3 pieces
    a = morphology.label(segm, background=0)
    ### the condition does not work reliably
    if 3 in a:  # check whether a 3rd piece appears in the segmentation
        print("it works :) :) :P")
a_index = velikosti(segm)
print(a_index)
i = nejnizsi(a_index[0], a_index[1], a_index[2])
segm = ((a == i) * (segm == 1).astype('int8') +
(a != i)*(segm == 2).astype('int8') +
(segm != 0).astype('int8'))
# TODO split this function from visualization
data = virtual_resection_visualization(data, segm, dist1,
dist2, cut,
interactivity=interactivity)
    # restore the original data and merge them with the modified data
data["data3d"] = data3d
# orig_shape = (len(segmentation), len(segmentation[0]), len(segmentation[1]))
data["segmentation"] = qmisc.uncrop(data["segmentation"], crinfo, orig_shape=segmentation.shape)
#segmentation = segmentation == vein
data["segmentation"] = (data["segmentation"] +
(segmentation != organ_label) * segmentation) - (segmentation == vein_label) * vein_label
return data
def resection_old(data, interactivity=True, seeds=None):
if interactivity:
print("Select cut")
seeds = cut_editor_old(data)
elif seeds is None:
logger.error('seeds is None and interactivity is False')
return None
logger.debug("unique(seeds) " + str(np.unique(seeds)))
# seeds[56][60][78] = 1
lab, cut = split_vessel(data, seeds)
segm, dist1, dist2 = split_organ_by_two_vessels(data, lab)
# TODO split this function from visualization
data = virtual_resection_visualization(data, segm, dist1,
dist2, cut,
interactivity=interactivity)
return data
def resection_planar(data, interactivity, seeds=None):
"""
Based on input seeds the cutting plane is constructed
:param data:
:param interactivity:
:param seeds:
:return:
"""
if seeds is None:
if interactivity:
print("Select cut")
seeds = cut_editor_old(data)
else:
logger.error('seeds is None and interactivity is False')
return None
segm, dist1, dist2 = split_organ_by_plane(data, seeds)
cut = dist1**2 < 2
# TODO split this function from visualization
data = virtual_resection_visualization(data, segm, dist1,
dist2, cut,
interactivity=interactivity)
return data
def split_organ_by_plane(data, seeds):
"""
    Based on the seeds, split the nonzero segmentation with a plane
:param data:
:param seeds:
:return:
"""
from . import geometry3d
from . import data_manipulation
l1 = 1
l2 = 2
point, vector = geometry3d.plane_fit(seeds.nonzero())
dist1 = data_manipulation.split_with_plane(point, vector, data['data3d'].shape)
dist2 = dist1 * -1
segm = (((data['segmentation'] != 0) * (dist1 < dist2)).astype('int8') +
(data['segmentation'] != 0).astype('int8'))
return segm, dist1, dist2
def split_tissue_on_labeled_tree(labeled_branches,
trunk_label, branch_labels,
tissue_segmentation, neighbors_list=None,
ignore_labels=None,
ignore_trunk=True,
on_missed_branch="split",
):
"""
    Based on a pre-labeled vessel tree, split the surrounding tissue into two parts.
The connected sub tree is computed and used internally.
:param labeled_branches: ndimage with labeled volumetric vessel tree.
:param trunk_label: int
:param branch_labels: list of ints
:param tissue_segmentation: ndimage with bool type. Organ is True, the rest is False.
:param ignore_trunk: True or False
:param ignore_labels: list of labels which will be ignored
    :param on_missed_branch: str, one of ["split", "orig", "exception"]. A missed label is a label directly connected
    to the trunk but with no branch label inside.
    "split" will ignore the missed label.
    "orig" will leave the original area label.
    "exception" will raise an exception.
:return:
"""
# bl = lisa.virtual_resection.branch_labels(oseg, "porta")
import imma.measure
import imma.image_manipulation
import imma.image_manipulation as ima
if ignore_labels is None:
ignore_labels = []
ignore_labels = list(ignore_labels)
if ignore_trunk:
ignore_labels.append(trunk_label)
if neighbors_list is None:
exclude = [0]
exclude.extend(ignore_labels)
neighbors_list = imma.measure.neighbors_list(
labeled_branches,
None,
# [seglabel1, seglabel2, seglabel3],
exclude=exclude)
#exclude=[imma.image_manipulation.get_nlabels(slab, ["liver"]), 0])
# ex
# print(neighbors_list)
# find whole branche
# segmentations = [None] * len(branch_labels)
segmentation = np.zeros_like(labeled_branches, dtype=int)
new_branches = []
connected = [None] * len(branch_labels)
for i, branch_label in enumerate(branch_labels):
import copy
ignore_other_branches = copy.copy(branch_labels)
ignore_other_branches.pop(i)
ignore_labels_i = [0]
ignore_labels_i.extend(ignore_other_branches)
ignore_labels_i.extend(ignore_labels)
connected_i = imma.measure.get_connected_labels(
neighbors_list, branch_label, ignore_labels_i)
# segmentations[i] = ima.select_labels(labeled_branches, connected_i).astype(np.int8)
select = ima.select_labels(labeled_branches, connected_i).astype(np.int8)
select = select > 0
if np.max(segmentation[select]) > 0:
logger.debug("Missing branch connected to branch and other branch or trunk.")
union = (segmentation * select) > 0
segmentation[select] = i + 1
if on_missed_branch == "split":
segmentation[union] = 0
elif on_missed_branch == "orig":
new_branche_label = len(branch_labels) + len(new_branches) + 1
logger.debug("new branch label {}".format(new_branche_label))
segmentation[union] = new_branche_label
new_branches.append(new_branche_label)
elif on_missed_branch == "exception":
raise ValueError("Missing one vessel")
else:
raise ValueError("Unknown 'on_missed_label' parameter.")
else:
segmentation[select] = i + 1
# error
# else:
# segmentation[select] = i + 1
connected[i] = connected_i
seg = segmentation
# if np.max(np.sum(segmentations, 0)) > 1:
# raise ValueError("Missing one vessel")
#
# for i, branch_label in enumerate(branch_labels):
# segmentations[i] = segmentations[i] * (i + 1)
# seg = np.sum(segmentations, 0)
# ignore_labels1 = [0, trunk_label, branch_label2]
# ignore_labels1.extend(ignore_labels)
# ignore_labels2 = [0, trunk_label, branch_label]
# ignore_labels2.extend(ignore_labels)
# connected2 = imma.measure.get_connected_labels(
# neighbors_list, branch_label, ignore_labels1)
# connected3 = imma.measure.get_connected_labels(
# neighbors_list, branch_label2, ignore_labels2)
#
# # seg = ima.select_labels(segmentation, organ_label, slab).astype(np.int8)
# seg1 = ima.select_labels(labeled_branches, connected2).astype(np.int8)
# seg2 = ima.select_labels(labeled_branches, connected3).astype(np.int8)
# seg = seg1 + seg2 * 2
# if np.max(seg) > 2:
# ValueError("Missing one vessel")
dseg = ima.distance_segmentation(seg)
logger.debug("output unique labels {}".format(np.unique(dseg)))
# organseg = ima.select_labels(segmentation, organ_label, slab).astype(np.int8)
    dseg[~tissue_segmentation.astype(bool)] = 0
return dseg, connected
def split_organ_by_two_vessels(datap,
seeds, organ_label=1,
seed_label1=1, seed_label2=2,
weight1=1, weight2=1):
"""
    Input of the function is an ndarray with two labeled vessels and the data.
    Output is the organ segmented by the vessels using a minimum-distance criterion.
:param datap: dictionary with 3d data, segmentation, and other information
"data3d": 3d-ndarray with intensity data
"voxelsize_mm",
"segmentation": 3d ndarray with image segmentation
"slab": segmentation labels
:param seeds: ndarray with same size as data3d
1: first part of portal vein (or defined in seed1_label)
2: second part of portal vein (or defined in seed2_label)
:param weight1: distance weight from seed_label1
:param weight2: distance weight from seed_label2
"""
weight1 = 1 if weight1 is None else weight1
slab = datap["slab"]
segmentation = datap["segmentation"]
if type(seed_label1) != list:
seed_label1 = [seed_label1]
if type(seed_label2) != list:
seed_label2 = [seed_label2]
    # the distance transform is computed from the zero voxels, i.e. only within the ones
dist1 = scipy.ndimage.distance_transform_edt(
1 - ima.select_labels(seeds, seed_label1, slab),
# seeds != seed_label1,
sampling=datap['voxelsize_mm']
)
dist2 = scipy.ndimage.distance_transform_edt(
1 - ima.select_labels(seeds, seed_label2, slab),
# seeds != seed_label2,
sampling=datap['voxelsize_mm']
)
# import skfmm
# dist1 = skfmm.distance(
# labeled != l1,
# dx=datap['voxelsize_mm']
# )
# dist2 = skfmm.distance(
# labeled != l2,
# dx=datap['voxelsize_mm']
# )
# print 'skfmm'
# from PyQt4.QtCore import pyqtRemoveInputHook; pyqtRemoveInputHook()
# import ipdb; ipdb.set_trace()
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
# import ipdb; ipdb.set_trace() # BREAKPOINT
# segm = (dist1 < dist2) * (data['segmentation'] != data['slab']['none'])
target_organ_segmentation = ima.select_labels(segmentation, organ_label, slab)
segm = ((target_organ_segmentation * ((dist1 / weight1) > (dist2 / weight2))).astype('int8') +
target_organ_segmentation.astype('int8'))
return segm, dist1, dist2
def virtual_resection_visualization(data, segm, dist1, dist2, cut,
interactivity=True):
v1, v2 = liver_spit_volume_mm3(segm, data['voxelsize_mm'])
if interactivity:
print("Liver volume: %.4g l" % ((v1 + v2) * 1e-6))
print("volume1: %.4g l (%.3g %%)" % (
(v1) * 1e-6, 100 * v1 / (v1 + v2)))
print("volume2: %.4g l (%.3g %%)" % (
(v2) * 1e-6, 100 * v2 / (v1 + v2)))
# pyed = sed3.sed3(segm)
# pyed.show()
# import pdb; pdb.set_trace()
linie = (((data['segmentation'] != 0) *
(np.abs(dist1 - dist2) < 1))).astype(np.int8)
linie_vis = 2 * linie
linie_vis[cut == 1] = 1
linie_vis = linie_vis.astype(np.int8)
if interactivity:
pyed = sed3.sed3qt(
data['data3d'],
seeds=linie_vis,
contour=(data['segmentation'] != 0))
# pyed.show()
pyed.exec_()
# import pdb; pdb.set_trace()
# show3.show3(data['segmentation'])
slab = {
'liver': 1,
'porta': 2,
'resected_liver': 3,
'resected_porta': 4}
slab.update(data['slab'])
data['slab'] = slab
data['slab']['resected_liver'] = 3
data['slab']['resected_porta'] = 4
mask_resected_liver = (
(segm == 1) & (data['segmentation'] == data['slab']['liver']))
mask_resected_porta = (
(segm == 1) & (data['segmentation'] == data['slab']['porta']))
data['segmentation'][mask_resected_liver] = \
data['slab']['resected_liver']
data['segmentation'][mask_resected_porta] = \
data['slab']['resected_porta']
logger.debug('resection_old() end')
return data
def resection_with_3d_visualization(data, name):
# data['segmentation'][vessels == 2] = data['slab']['porta']
# segmentation = data['segmentation']
# print(data['slab'])
change(data, name)
# print data["slab"]
# change(segmentation == data['slab']['porta'])
# lab = cut_editor(segmentation == data['slab']['porta'])
def get_biggest_object(data):
return qmisc.get_one_biggest_object(data)
def liver_spit_volume_mm3(segm, voxelsize_mm):
"""
segm: 0 - nothing, 1 - remaining tissue, 2 - resected tissue
"""
voxelsize_mm3 = np.prod(voxelsize_mm)
v1 = np.sum(segm == 1) * voxelsize_mm3
v2 = np.sum(segm == 2) * voxelsize_mm3
return v1, v2
def View(name):
data = misc.obj_from_file("out", filetype='pickle')
resection(data, name)
def label_volumetric_vessel_tree(oseg, vessel_label=None, write_to_oseg=True, new_label_str_format="{}{:03d}"):
"""
Split vessel by branches and put it in segmentation and slab.
:param oseg: OrganSegmentation object with segmentation, voxelsize_mm and slab
:param vessel_label: int or string label with vessel. Everything above zero is used if vessel_label is set None.
:param write_to_oseg: Store output into oseg.segmentation if True. The slab is also updated.
:param new_label_str_format: format of new slab
:return:
"""
logger.debug("vessel_label {}".format(vessel_label))
logger.debug("python version {} {}".format(sys.version_info, sys.executable))
import skelet3d
if vessel_label is None:
vessel_volume = oseg.segmentation > 0
else:
vessel_volume = oseg.select_label(vessel_label)
# print(np.unique(vessel_volume))
skel = skelet3d.skelet3d(vessel_volume)
skan = skelet3d.SkeletonAnalyser(skel, volume_data=vessel_volume)
skan.skeleton_analysis()
bl = skan.get_branch_label()
un = np.unique(bl)
logger.debug("skelet3d branch label min: {}, max: {}, dtype: {}".format(np.min(bl), np.max(bl), bl.dtype))
if write_to_oseg:
if 127 < np.max(bl) and ((oseg.segmentation.dtype == np.int8) or (oseg.segmentation.dtype == np.uint8)):
oseg.segmentation = oseg.segmentation.astype(np.int16)
for lb in un:
if lb != 0:
new_slabel = new_label_str_format.format(vessel_label, lb)
new_nlabel = oseg.nlabels(new_slabel)
oseg.segmentation[bl == lb] = new_nlabel
# ima.distance_segmentation(oseg.select_label(vessel_label))
return bl
if __name__ == "__main__":
# # logger = logging.getLogger()
# logger = logging.getLogger()
    # logger is the loguru logger imported above; configure verbosity via the loguru API
    logger.remove()
    logger.add(sys.stderr, level="WARNING")
# SectorDisplay2__()
# logger.debug('input params')
# input parser
parser = argparse.ArgumentParser(description='Segment vessels from liver')
parser.add_argument('-pkl', '--picklefile',
help='input file from organ_segmentation')
parser.add_argument('-oe', '--use_old_editor', action='store_true',
help='use an old editor for vessel cut')
parser.add_argument('-o', '--outputfile', default=None,
help='output file')
parser.add_argument('-oo', '--defaultoutputfile', action='store_true',
help='"vessels.pickle" as output file')
parser.add_argument('-d', '--debug', action='store_true',
help='Debug mode')
args = parser.parse_args()
    if args.picklefile is None:
raise IOError('No input data!')
data = misc.obj_from_file(args.picklefile, filetype='pickle')
ds = data['segmentation'] == data['slab']['liver']
pozice = np.where(ds == 1)
a = pozice[0][0]
b = pozice[1][0]
c = pozice[2][0]
ds = False
# print "vs ", data['voxelsize_mm']
# print "vs ", data['voxelsize_mm']
if args.debug:
        logger.remove()
        logger.add(sys.stderr, level="DEBUG")
# seg = np.zeros([100,100,100])
# seg [50:80, 50:80, 60:75] = 1
# seg[58:60, 56:72, 66:68]=2
# dat = np.random.rand(100,100,100)
# dat [50:80, 50:80, 60:75] = dat [50:80, 50:80, 60:75] + 1
# dat [58:60, 56:72, 66:68] = dat [58:60, 56:72, 66:68] + 1
# slab = {'liver':1, 'porta':2, 'portaa':3, 'portab':4}
# data = {'segmentation':seg, 'data3d':dat, 'slab':slab}
name = 'porta'
# cut_editor(data,args.inputfile)
if args.use_old_editor:
resection(data, name, method=args.use_old_editor)
else:
cut_editor(data, args.picklefile)
# print normal
# print coordinates
defaultoutputfile = "05-resection.pkl"
if args.defaultoutputfile:
args.outputfile = defaultoutputfile
if args.outputfile is None:
        savestring = input('Save output data? (y/n): ')
if savestring in ['Y', 'y']:
misc.obj_to_file(data, defaultoutputfile, filetype='pickle')
else:
misc.obj_to_file(data, args.outputfile, filetype='pickle')
|
mjirik/lisa
|
lisa/virtual_resection.py
|
Python
|
bsd-3-clause
| 29,738
|
[
"VTK"
] |
8f692d9d64cbc504b5dd9e5bff16a23a202a8dd9725708921adcd88e6746109f
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The fsl module provides classes for interfacing with the `FSL
<http://www.fmrib.ox.ac.uk/fsl/index.html>`_ command line tools. This
was written to work with FSL version 4.1.4.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
import os
from glob import glob
import warnings
from shutil import rmtree
import numpy as np
from nibabel import load
from ... import LooseVersion
from .base import (FSLCommand, FSLCommandInputSpec, Info)
from ..base import (load_template, File, traits, isdefined,
TraitedSpec, BaseInterface, Directory,
InputMultiPath, OutputMultiPath,
BaseInterfaceInputSpec)
from ...utils.filemanip import (list_to_filename, filename_to_list)
from ...utils.misc import human_order_sorted
warn = warnings.warn
warnings.filterwarnings('always', category=UserWarning)
class Level1DesignInputSpec(BaseInterfaceInputSpec):
interscan_interval = traits.Float(mandatory=True,
desc='Interscan interval (in secs)')
session_info = traits.Any(mandatory=True,
desc='Session specific information generated by ``modelgen.SpecifyModel``')
bases = traits.Either(
traits.Dict(traits.Enum(
'dgamma'), traits.Dict(traits.Enum('derivs'), traits.Bool)),
traits.Dict(traits.Enum('gamma'), traits.Dict(
traits.Enum('derivs'), traits.Bool)),
traits.Dict(traits.Enum('none'), traits.Enum(None)),
mandatory=True,
desc="name of basis function and options e.g., {'dgamma': {'derivs': True}}")
model_serial_correlations = traits.Bool(
desc="Option to model serial correlations using an \
autoregressive estimator (order 1). Setting this option is only \
useful in the context of the fsf file. If you set this to False, you need to repeat \
this option for FILMGLS by setting autocorr_noestimate to True", mandatory=True)
contrasts = traits.List(
traits.Either(traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float)),
traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float),
traits.List(traits.Float)),
traits.Tuple(traits.Str,
traits.Enum('F'),
traits.List(
traits.Either(traits.Tuple(traits.Str,
traits.Enum(
'T'),
traits.List(
traits.Str),
traits.List(
traits.Float)),
traits.Tuple(
traits.Str,
traits.Enum(
'T'),
traits.List(
traits.Str),
traits.List(
traits.Float),
traits.List(
traits.Float)))))),
desc="List of contrasts with each contrast being a list of the form - \
[('name', 'stat', [condition list], [weight list], [session list])]. if \
session list is None or not provided, all sessions are used. For F \
contrasts, the condition list should contain previously defined \
T-contrasts.")
class Level1DesignOutputSpec(TraitedSpec):
fsf_files = OutputMultiPath(File(exists=True),
desc='FSL feat specification files')
ev_files = OutputMultiPath(traits.List(File(exists=True)),
desc='condition information files')
class Level1Design(BaseInterface):
"""Generate FEAT specific files
Examples
--------
>>> level1design = Level1Design()
>>> level1design.inputs.interscan_interval = 2.5
>>> level1design.inputs.bases = {'dgamma':{'derivs': False}}
>>> level1design.inputs.session_info = 'session_info.npz'
>>> level1design.run() # doctest: +SKIP
"""
input_spec = Level1DesignInputSpec
output_spec = Level1DesignOutputSpec
def _create_ev_file(self, evfname, evinfo):
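        """Write a single EV text file: onset/duration/amplitude triples for a
        condition, or one value per time point for a regressor."""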
f = open(evfname, 'wt')
for i in evinfo:
if len(i) == 3:
f.write('%f %f %f\n' % (i[0], i[1], i[2]))
else:
f.write('%f\n' % i[0])
f.close()
def _create_ev_files(
self, cwd, runinfo, runidx, usetd, contrasts, no_bases,
do_tempfilter):
"""Creates EV files from condition and regressor information.
Parameters:
-----------
runinfo : dict
Generated by `SpecifyModel` and contains information
about events and other regressors.
runidx : int
Index to run number
usetd : int
Whether or not to use temporal derivatives for
conditions
contrasts : list of lists
Information on contrasts to be evaluated
"""
conds = {}
evname = []
ev_hrf = load_template('feat_ev_hrf.tcl')
ev_none = load_template('feat_ev_none.tcl')
ev_ortho = load_template('feat_ev_ortho.tcl')
ev_txt = ''
# generate sections for conditions and other nuisance
# regressors
num_evs = [0, 0]
for field in ['cond', 'regress']:
for i, cond in enumerate(runinfo[field]):
name = cond['name']
evname.append(name)
evfname = os.path.join(cwd, 'ev_%s_%d_%d.txt' % (name, runidx,
len(evname)))
evinfo = []
num_evs[0] += 1
num_evs[1] += 1
if field == 'cond':
for j, onset in enumerate(cond['onset']):
try:
amplitudes = cond['amplitudes']
if len(amplitudes) > 1:
amp = amplitudes[j]
else:
amp = amplitudes[0]
except KeyError:
amp = 1
if len(cond['duration']) > 1:
evinfo.insert(j, [onset, cond['duration'][j], amp])
else:
evinfo.insert(j, [onset, cond['duration'][0], amp])
if no_bases:
ev_txt += ev_none.substitute(ev_num=num_evs[0],
ev_name=name,
tempfilt_yn=do_tempfilter,
cond_file=evfname)
else:
ev_txt += ev_hrf.substitute(ev_num=num_evs[0],
ev_name=name,
tempfilt_yn=do_tempfilter,
temporalderiv=usetd,
cond_file=evfname)
if usetd:
evname.append(name + 'TD')
num_evs[1] += 1
elif field == 'regress':
evinfo = [[j] for j in cond['val']]
ev_txt += ev_none.substitute(ev_num=num_evs[0],
ev_name=name,
tempfilt_yn=do_tempfilter,
cond_file=evfname)
ev_txt += "\n"
conds[name] = evfname
self._create_ev_file(evfname, evinfo)
# add ev orthogonalization
for i in range(1, num_evs[0] + 1):
for j in range(0, num_evs[0] + 1):
ev_txt += ev_ortho.substitute(c0=i, c1=j)
ev_txt += "\n"
# add contrast info to fsf file
if isdefined(contrasts):
contrast_header = load_template('feat_contrast_header.tcl')
contrast_prolog = load_template('feat_contrast_prolog.tcl')
contrast_element = load_template('feat_contrast_element.tcl')
contrast_ftest_element = load_template(
'feat_contrast_ftest_element.tcl')
contrastmask_header = load_template('feat_contrastmask_header.tcl')
contrastmask_footer = load_template('feat_contrastmask_footer.tcl')
contrastmask_element = load_template(
'feat_contrastmask_element.tcl')
# add t/f contrast info
ev_txt += contrast_header.substitute()
con_names = []
for j, con in enumerate(contrasts):
con_names.append(con[0])
con_map = {}
ftest_idx = []
ttest_idx = []
for j, con in enumerate(contrasts):
if con[1] == 'F':
ftest_idx.append(j)
for c in con[2]:
if c[0] not in con_map.keys():
con_map[c[0]] = []
con_map[c[0]].append(j)
else:
ttest_idx.append(j)
for ctype in ['real', 'orig']:
for j, con in enumerate(contrasts):
if con[1] == 'F':
continue
tidx = ttest_idx.index(j) + 1
ev_txt += contrast_prolog.substitute(cnum=tidx,
ctype=ctype,
cname=con[0])
count = 0
for c in range(1, len(evname) + 1):
if evname[c - 1].endswith('TD') and ctype == 'orig':
continue
count = count + 1
if evname[c - 1] in con[2]:
val = con[3][con[2].index(evname[c - 1])]
else:
val = 0.0
ev_txt += contrast_element.substitute(cnum=tidx,
element=count,
ctype=ctype, val=val)
ev_txt += "\n"
if con[0] in con_map.keys():
for fconidx in con_map[con[0]]:
ev_txt += contrast_ftest_element.substitute(
cnum=ftest_idx.index(fconidx) + 1,
element=tidx,
ctype=ctype,
val=1)
ev_txt += "\n"
# add contrast mask info
ev_txt += contrastmask_header.substitute()
for j, _ in enumerate(contrasts):
for k, _ in enumerate(contrasts):
if j != k:
ev_txt += contrastmask_element.substitute(c1=j + 1,
c2=k + 1)
ev_txt += contrastmask_footer.substitute()
return num_evs, ev_txt
def _format_session_info(self, session_info):
if isinstance(session_info, dict):
session_info = [session_info]
return session_info
def _get_func_files(self, session_info):
"""Returns functional files in the order of runs
"""
func_files = []
for i, info in enumerate(session_info):
func_files.insert(i, info['scans'])
return func_files
def _run_interface(self, runtime):
cwd = os.getcwd()
fsf_header = load_template('feat_header_l1.tcl')
fsf_postscript = load_template('feat_nongui.tcl')
prewhiten = 0
if isdefined(self.inputs.model_serial_correlations):
prewhiten = int(self.inputs.model_serial_correlations)
usetd = 0
no_bases = False
basis_key = self.inputs.bases.keys()[0]
if basis_key in ['dgamma', 'gamma']:
usetd = int(self.inputs.bases[basis_key]['derivs'])
if basis_key == 'none':
no_bases = True
session_info = self._format_session_info(self.inputs.session_info)
func_files = self._get_func_files(session_info)
n_tcon = 0
n_fcon = 0
if isdefined(self.inputs.contrasts):
for i, c in enumerate(self.inputs.contrasts):
if c[1] == 'T':
n_tcon += 1
elif c[1] == 'F':
n_fcon += 1
for i, info in enumerate(session_info):
do_tempfilter = 1
if info['hpf'] == np.inf:
do_tempfilter = 0
num_evs, cond_txt = self._create_ev_files(cwd, info, i, usetd,
self.inputs.contrasts,
no_bases, do_tempfilter)
nim = load(func_files[i])
(_, _, _, timepoints) = nim.get_shape()
fsf_txt = fsf_header.substitute(run_num=i,
interscan_interval=self.inputs.interscan_interval,
num_vols=timepoints,
prewhiten=prewhiten,
num_evs=num_evs[0],
num_evs_real=num_evs[1],
num_tcon=n_tcon,
num_fcon=n_fcon,
high_pass_filter_cutoff=info[
'hpf'],
temphp_yn=do_tempfilter,
func_file=func_files[i])
fsf_txt += cond_txt
fsf_txt += fsf_postscript.substitute(overwrite=1)
f = open(os.path.join(cwd, 'run%d.fsf' % i), 'w')
f.write(fsf_txt)
f.close()
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
cwd = os.getcwd()
outputs['fsf_files'] = []
outputs['ev_files'] = []
usetd = 0
basis_key = self.inputs.bases.keys()[0]
if basis_key in ['dgamma', 'gamma']:
usetd = int(self.inputs.bases[basis_key]['derivs'])
for runno, runinfo in enumerate(self._format_session_info(self.inputs.session_info)):
outputs['fsf_files'].append(os.path.join(cwd, 'run%d.fsf' % runno))
outputs['ev_files'].insert(runno, [])
evname = []
for field in ['cond', 'regress']:
for i, cond in enumerate(runinfo[field]):
name = cond['name']
evname.append(name)
evfname = os.path.join(
cwd, 'ev_%s_%d_%d.txt' % (name, runno,
len(evname)))
if field == 'cond':
if usetd:
evname.append(name + 'TD')
outputs['ev_files'][runno].append(
os.path.join(cwd, evfname))
return outputs
class FEATInputSpec(FSLCommandInputSpec):
fsf_file = File(exists=True, mandatory=True, argstr="%s", position=0,
desc="File specifying the feat design spec file")
class FEATOutputSpec(TraitedSpec):
feat_dir = Directory(exists=True)
class FEAT(FSLCommand):
"""Uses FSL feat to calculate first level stats
"""
_cmd = 'feat'
input_spec = FEATInputSpec
output_spec = FEATOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
is_ica = False
outputs['feat_dir']=None
with open(self.inputs.fsf_file, 'rt') as fp:
text = fp.read()
if "set fmri(inmelodic) 1" in text:
is_ica = True
for line in text.split('\n'):
if line.find("set fmri(outputdir)")>-1:
try:
outputdir_spec=line.split('"')[-2]
if os.path.exists(outputdir_spec):
outputs['feat_dir']=outputdir_spec
except:
pass
if not outputs['feat_dir']:
if is_ica:
outputs['feat_dir'] = glob(os.path.join(os.getcwd(), '*ica'))[0]
else:
outputs['feat_dir'] = glob(os.path.join(os.getcwd(), '*feat'))[0]
print 'Outputs from FEATmodel:',outputs
return outputs
class FEATModelInputSpec(FSLCommandInputSpec):
fsf_file = File(exists=True, mandatory=True, argstr="%s", position=0,
desc="File specifying the feat design spec file",
copyfile=False)
ev_files = traits.List(File(exists=True),
mandatory=True, argstr="%s",
desc="Event spec files generated by level1design",
position=1, copyfile=False)
class FEATModelOutpuSpec(TraitedSpec):
design_file = File(
exists=True, desc='Mat file containing ascii matrix for design')
design_image = File(
exists=True, desc='Graphical representation of design matrix')
design_cov = File(
exists=True, desc='Graphical representation of design covariance')
con_file = File(
exists=True, desc='Contrast file containing contrast vectors')
fcon_file = File(desc='Contrast file containing contrast vectors')
class FEATModel(FSLCommand):
"""Uses FSL feat_model to generate design.mat files
"""
_cmd = 'feat_model'
input_spec = FEATModelInputSpec
output_spec = FEATModelOutpuSpec
def _format_arg(self, name, trait_spec, value):
if name == 'fsf_file':
return super(FEATModel, self)._format_arg(name, trait_spec, self._get_design_root(value))
elif name == 'ev_files':
return ''
else:
return super(FEATModel, self)._format_arg(name, trait_spec, value)
def _get_design_root(self, infile):
_, fname = os.path.split(infile)
return fname.split('.')[0]
def _list_outputs(self):
# TODO: figure out file names and get rid off the globs
outputs = self._outputs().get()
root = self._get_design_root(list_to_filename(self.inputs.fsf_file))
design_file = glob(os.path.join(os.getcwd(), '%s*.mat' % root))
assert len(design_file) == 1, 'No mat file generated by FEAT Model'
outputs['design_file'] = design_file[0]
design_image = glob(os.path.join(os.getcwd(), '%s.png' % root))
assert len(
design_image) == 1, 'No design image generated by FEAT Model'
outputs['design_image'] = design_image[0]
design_cov = glob(os.path.join(os.getcwd(), '%s_cov.png' % root))
assert len(
design_cov) == 1, 'No covariance image generated by FEAT Model'
outputs['design_cov'] = design_cov[0]
con_file = glob(os.path.join(os.getcwd(), '%s*.con' % root))
assert len(con_file) == 1, 'No con file generated by FEAT Model'
outputs['con_file'] = con_file[0]
fcon_file = glob(os.path.join(os.getcwd(), '%s*.fts' % root))
if fcon_file:
assert len(fcon_file) == 1, 'No fts file generated by FEAT Model'
outputs['fcon_file'] = fcon_file[0]
return outputs
class FILMGLSInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, mandatory=True, position=-3,
argstr='%s',
desc='input data file')
design_file = File(exists=True, position=-2,
argstr='%s',
desc='design matrix file')
threshold = traits.Range(default=1000., low=0.0, argstr='%f',
position=-1, usedefault=True,
desc='threshold')
smooth_autocorr = traits.Bool(argstr='-sa',
desc='Smooth auto corr estimates')
mask_size = traits.Int(argstr='-ms %d',
desc="susan mask size")
brightness_threshold = traits.Range(low=0, argstr='-epith %d',
desc='susan brightness threshold, otherwise it is estimated')
full_data = traits.Bool(argstr='-v', desc='output full data')
_estimate_xor = ['autocorr_estimate_only', 'fit_armodel', 'tukey_window',
'multitaper_product', 'use_pava', 'autocorr_noestimate']
autocorr_estimate_only = traits.Bool(argstr='-ac',
xor=_estimate_xor,
desc='perform autocorrelation estimatation only')
fit_armodel = traits.Bool(argstr='-ar', xor=_estimate_xor,
desc='fits autoregressive model - default is to use tukey with M=sqrt(numvols)')
tukey_window = traits.Int(argstr='-tukey %d', xor=_estimate_xor,
desc='tukey window size to estimate autocorr')
multitaper_product = traits.Int(argstr='-mt %d', xor=_estimate_xor,
desc='multitapering with slepian tapers and num is the time-bandwidth product')
use_pava = traits.Bool(
argstr='-pava', desc='estimates autocorr using PAVA')
autocorr_noestimate = traits.Bool(argstr='-noest', xor=_estimate_xor,
desc='do not estimate autocorrs')
output_pwdata = traits.Bool(argstr='-output_pwdata',
desc='output prewhitened data and average design matrix')
results_dir = Directory('results', argstr='-rn %s', usedefault=True,
desc='directory to store results in')
class FILMGLSInputSpec505(FSLCommandInputSpec):
in_file = File(exists=True, mandatory=True, position=-3,
argstr='--in=%s', desc='input data file')
design_file = File(exists=True, position=-2,
argstr='--pd=%s', desc='design matrix file')
threshold = traits.Range(default=1000., low=0.0, argstr='--thr=%f',
position=-1, usedefault=True, desc='threshold')
smooth_autocorr = traits.Bool(argstr='--sa',
desc='Smooth auto corr estimates')
mask_size = traits.Int(argstr='--ms=%d', desc="susan mask size")
brightness_threshold = traits.Range(low=0, argstr='--epith=%d',
desc=('susan brightness threshold, '
'otherwise it is estimated'))
full_data = traits.Bool(argstr='-v', desc='output full data')
_estimate_xor = ['autocorr_estimate_only', 'fit_armodel', 'tukey_window',
'multitaper_product', 'use_pava', 'autocorr_noestimate']
autocorr_estimate_only = traits.Bool(argstr='--ac', xor=_estimate_xor,
desc=('perform autocorrelation '
'estimation only'))
fit_armodel = traits.Bool(argstr='--ar', xor=_estimate_xor,
desc=('fits autoregressive model - default is to '
'use tukey with M=sqrt(numvols)'))
tukey_window = traits.Int(argstr='--tukey=%d', xor=_estimate_xor,
desc='tukey window size to estimate autocorr')
multitaper_product = traits.Int(argstr='--mt=%d', xor=_estimate_xor,
desc=('multitapering with slepian tapers '
'and num is the time-bandwidth '
'product'))
use_pava = traits.Bool(argstr='--pava', desc='estimates autocorr using PAVA')
autocorr_noestimate = traits.Bool(argstr='--noest', xor=_estimate_xor,
desc='do not estimate autocorrs')
output_pwdata = traits.Bool(argstr='--outputPWdata',
desc=('output prewhitened data and average '
'design matrix'))
results_dir = Directory('results', argstr='--rn=%s', usedefault=True,
desc='directory to store results in')
class FILMGLSOutputSpec(TraitedSpec):
param_estimates = OutputMultiPath(File(exists=True),
desc='Parameter estimates for each column of the design matrix')
residual4d = File(exists=True,
desc='Model fit residual mean-squared error for each time point')
dof_file = File(exists=True, desc='degrees of freedom')
sigmasquareds = File(
exists=True, desc='summary of residuals, See Woolrich, et. al., 2001')
results_dir = Directory(exists=True,
desc='directory storing model estimation output')
corrections = File(exists=True,
desc='statistical corrections used within FILM modelling')
logfile = File(exists=True,
desc='FILM run logfile')
class FILMGLS(FSLCommand):
"""Use FSL film_gls command to fit a design matrix to voxel timeseries
Examples
--------
Initialize with no options, assigning them when calling run:
>>> from nipype.interfaces import fsl
>>> fgls = fsl.FILMGLS()
>>> res = fgls.run('in_file', 'design_file', 'thresh', rn='stats') #doctest: +SKIP
Assign options through the ``inputs`` attribute:
>>> fgls = fsl.FILMGLS()
>>> fgls.inputs.in_file = 'functional.nii'
>>> fgls.inputs.design_file = 'design.mat'
>>> fgls.inputs.threshold = 10
>>> fgls.inputs.results_dir = 'stats'
>>> res = fgls.run() #doctest: +SKIP
Specify options when creating an instance:
>>> fgls = fsl.FILMGLS(in_file='functional.nii', \
design_file='design.mat', \
threshold=10, results_dir='stats')
>>> res = fgls.run() #doctest: +SKIP
"""
_cmd = 'film_gls'
if Info.version() and LooseVersion(Info.version()) > LooseVersion('5.0.4'):
input_spec = FILMGLSInputSpec505
else:
input_spec = FILMGLSInputSpec
output_spec = FILMGLSOutputSpec
def _get_pe_files(self, cwd):
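        """Read /NumWaves from the design matrix file and return the expected
        parameter-estimate (pe*.nii) file names in ``cwd``."""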
files = None
if isdefined(self.inputs.design_file):
fp = open(self.inputs.design_file, 'rt')
for line in fp.readlines():
if line.startswith('/NumWaves'):
numpes = int(line.split()[-1])
files = []
for i in range(numpes):
files.append(self._gen_fname('pe%d.nii' % (i + 1),
cwd=cwd))
break
fp.close()
return files
def _list_outputs(self):
outputs = self._outputs().get()
cwd = os.getcwd()
results_dir = os.path.join(cwd, self.inputs.results_dir)
outputs['results_dir'] = results_dir
pe_files = self._get_pe_files(results_dir)
if pe_files:
outputs['param_estimates'] = pe_files
outputs['residual4d'] = self._gen_fname('res4d.nii', cwd=results_dir)
outputs['dof_file'] = os.path.join(results_dir, 'dof')
outputs['sigmasquareds'] = self._gen_fname('sigmasquareds.nii',
cwd=results_dir)
outputs['corrections'] = self._gen_fname('corrections.nii',
cwd=results_dir)
outputs['logfile'] = self._gen_fname('logfile',
change_ext=False,
cwd=results_dir)
return outputs
class FEATRegisterInputSpec(BaseInterfaceInputSpec):
feat_dirs = InputMultiPath(
Directory(exists=True), desc="Lower level feat dirs",
mandatory=True)
reg_image = File(
exists=True, desc="image to register to (will be treated as standard)",
mandatory=True)
reg_dof = traits.Int(
12, desc="registration degrees of freedom", usedefault=True)
class FEATRegisterOutputSpec(TraitedSpec):
fsf_file = File(exists=True,
desc="FSL feat specification file")
class FEATRegister(BaseInterface):
"""Register feat directories to a specific standard
"""
input_spec = FEATRegisterInputSpec
output_spec = FEATRegisterOutputSpec
def _run_interface(self, runtime):
fsf_header = load_template('featreg_header.tcl')
fsf_footer = load_template('feat_nongui.tcl')
fsf_dirs = load_template('feat_fe_featdirs.tcl')
num_runs = len(self.inputs.feat_dirs)
fsf_txt = fsf_header.substitute(num_runs=num_runs,
regimage=self.inputs.reg_image,
regdof=self.inputs.reg_dof)
for i, rundir in enumerate(filename_to_list(self.inputs.feat_dirs)):
fsf_txt += fsf_dirs.substitute(runno=i + 1,
rundir=os.path.abspath(rundir))
fsf_txt += fsf_footer.substitute()
f = open(os.path.join(os.getcwd(), 'register.fsf'), 'wt')
f.write(fsf_txt)
f.close()
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['fsf_file'] = os.path.abspath(
os.path.join(os.getcwd(), 'register.fsf'))
return outputs
class FLAMEOInputSpec(FSLCommandInputSpec):
cope_file = File(exists=True, argstr='--copefile=%s', mandatory=True,
desc='cope regressor data file')
var_cope_file = File(exists=True, argstr='--varcopefile=%s',
desc='varcope weightings data file')
dof_var_cope_file = File(exists=True, argstr='--dofvarcopefile=%s',
desc='dof data file for varcope data')
mask_file = File(exists=True, argstr='--maskfile=%s', mandatory=True,
desc='mask file')
design_file = File(exists=True, argstr='--designfile=%s', mandatory=True,
desc='design matrix file')
t_con_file = File(
exists=True, argstr='--tcontrastsfile=%s', mandatory=True,
desc='ascii matrix specifying t-contrasts')
f_con_file = File(exists=True, argstr='--fcontrastsfile=%s',
desc='ascii matrix specifying f-contrasts')
cov_split_file = File(
exists=True, argstr='--covsplitfile=%s', mandatory=True,
desc='ascii matrix specifying the groups the covariance is split into')
run_mode = traits.Enum(
'fe', 'ols', 'flame1', 'flame12', argstr='--runmode=%s',
mandatory=True, desc='inference to perform')
n_jumps = traits.Int(
argstr='--njumps=%d', desc='number of jumps made by mcmc')
burnin = traits.Int(argstr='--burnin=%d',
desc='number of jumps at start of mcmc to be discarded')
sample_every = traits.Int(argstr='--sampleevery=%d',
desc='number of jumps for each sample')
fix_mean = traits.Bool(argstr='--fixmean', desc='fix mean for tfit')
infer_outliers = traits.Bool(argstr='--inferoutliers',
desc='infer outliers - not for fe')
no_pe_outputs = traits.Bool(argstr='--nopeoutput',
desc='do not output pe files')
sigma_dofs = traits.Int(argstr='--sigma_dofs=%d',
desc='sigma (in mm) to use for Gaussian smoothing the DOFs in FLAME 2. Default is 1mm, -1 indicates no smoothing')
outlier_iter = traits.Int(argstr='--ioni=%d',
desc='Number of max iterations to use when inferring outliers. Default is 12.')
log_dir = Directory("stats", argstr='--ld=%s', usedefault=True) # ohinds
# no support for ven, vef
class FLAMEOOutputSpec(TraitedSpec):
pes = OutputMultiPath(File(exists=True),
desc=("Parameter estimates for each column of the "
"design matrix for each voxel"))
res4d = OutputMultiPath(File(exists=True),
desc=("Model fit residual mean-squared error for "
"each time point"))
copes = OutputMultiPath(File(exists=True),
desc="Contrast estimates for each contrast")
var_copes = OutputMultiPath(File(exists=True),
desc="Variance estimates for each contrast")
zstats = OutputMultiPath(File(exists=True),
desc="z-stat file for each contrast")
tstats = OutputMultiPath(File(exists=True),
desc="t-stat file for each contrast")
zfstats = OutputMultiPath(File(exists=True),
desc="z stat file for each f contrast")
fstats = OutputMultiPath(File(exists=True),
desc="f-stat file for each contrast")
mrefvars = OutputMultiPath(File(exists=True),
desc=("mean random effect variances for each "
"contrast"))
tdof = OutputMultiPath(File(exists=True),
desc="temporal dof file for each contrast")
weights = OutputMultiPath(File(exists=True),
desc="weights file for each contrast")
stats_dir = Directory(File(exists=True),
desc="directory storing model estimation output")
class FLAMEO(FSLCommand):
"""Use FSL flameo command to perform higher level model fits
Examples
--------
Initialize FLAMEO with no options, assigning them when calling run:
>>> from nipype.interfaces import fsl
>>> import os
>>> flameo = fsl.FLAMEO(cope_file='cope.nii.gz', \
var_cope_file='varcope.nii.gz', \
cov_split_file='cov_split.mat', \
design_file='design.mat', \
t_con_file='design.con', \
mask_file='mask.nii', \
run_mode='fe')
>>> flameo.cmdline
'flameo --copefile=cope.nii.gz --covsplitfile=cov_split.mat --designfile=design.mat --ld=stats --maskfile=mask.nii --runmode=fe --tcontrastsfile=design.con --varcopefile=varcope.nii.gz'
"""
_cmd = 'flameo'
input_spec = FLAMEOInputSpec
output_spec = FLAMEOOutputSpec
# ohinds: 2010-04-06
def _run_interface(self, runtime):
log_dir = self.inputs.log_dir
cwd = os.getcwd()
if os.access(os.path.join(cwd, log_dir), os.F_OK):
rmtree(os.path.join(cwd, log_dir))
return super(FLAMEO, self)._run_interface(runtime)
# ohinds: 2010-04-06
# made these compatible with flameo
def _list_outputs(self):
outputs = self._outputs().get()
pth = os.path.join(os.getcwd(), self.inputs.log_dir)
pes = human_order_sorted(glob(os.path.join(pth, 'pe[0-9]*.*')))
assert len(pes) >= 1, 'No pe volumes generated by FSL Estimate'
outputs['pes'] = pes
res4d = human_order_sorted(glob(os.path.join(pth, 'res4d.*')))
assert len(res4d) == 1, 'No residual volume generated by FSL Estimate'
outputs['res4d'] = res4d[0]
copes = human_order_sorted(glob(os.path.join(pth, 'cope[0-9]*.*')))
assert len(copes) >= 1, 'No cope volumes generated by FSL CEstimate'
outputs['copes'] = copes
var_copes = human_order_sorted(
glob(os.path.join(pth, 'varcope[0-9]*.*')))
assert len(
var_copes) >= 1, 'No varcope volumes generated by FSL CEstimate'
outputs['var_copes'] = var_copes
zstats = human_order_sorted(glob(os.path.join(pth, 'zstat[0-9]*.*')))
assert len(zstats) >= 1, 'No zstat volumes generated by FSL CEstimate'
outputs['zstats'] = zstats
if isdefined(self.inputs.f_con_file):
zfstats = human_order_sorted(
glob(os.path.join(pth, 'zfstat[0-9]*.*')))
assert len(
zfstats) >= 1, 'No zfstat volumes generated by FSL CEstimate'
outputs['zfstats'] = zfstats
fstats = human_order_sorted(
glob(os.path.join(pth, 'fstat[0-9]*.*')))
assert len(
fstats) >= 1, 'No fstat volumes generated by FSL CEstimate'
outputs['fstats'] = fstats
tstats = human_order_sorted(glob(os.path.join(pth, 'tstat[0-9]*.*')))
assert len(tstats) >= 1, 'No tstat volumes generated by FSL CEstimate'
outputs['tstats'] = tstats
mrefs = human_order_sorted(
glob(os.path.join(pth, 'mean_random_effects_var[0-9]*.*')))
assert len(
mrefs) >= 1, 'No mean random effects volumes generated by FLAMEO'
outputs['mrefvars'] = mrefs
tdof = human_order_sorted(glob(os.path.join(pth, 'tdof_t[0-9]*.*')))
assert len(tdof) >= 1, 'No T dof volumes generated by FLAMEO'
outputs['tdof'] = tdof
weights = human_order_sorted(
glob(os.path.join(pth, 'weights[0-9]*.*')))
assert len(weights) >= 1, 'No weight volumes generated by FLAMEO'
outputs['weights'] = weights
outputs['stats_dir'] = pth
return outputs
class ContrastMgrInputSpec(FSLCommandInputSpec):
tcon_file = File(exists=True, mandatory=True,
argstr='%s', position=-1,
desc='contrast file containing T-contrasts')
fcon_file = File(exists=True, argstr='-f %s',
desc='contrast file containing F-contrasts')
param_estimates = InputMultiPath(File(exists=True),
argstr='', copyfile=False,
mandatory=True,
desc='Parameter estimates for each column of the design matrix')
corrections = File(exists=True, copyfile=False, mandatory=True,
desc='statistical corrections used within FILM modelling')
dof_file = File(exists=True, argstr='', copyfile=False, mandatory=True,
desc='degrees of freedom')
sigmasquareds = File(exists=True, argstr='', position=-2,
copyfile=False, mandatory=True,
desc='summary of residuals, See Woolrich, et. al., 2001')
contrast_num = traits.Range(low=1, argstr='-cope',
desc='contrast number to start labeling copes from')
suffix = traits.Str(argstr='-suffix %s',
desc='suffix to put on the end of the cope filename before the contrast number, default is nothing')
class ContrastMgrOutputSpec(TraitedSpec):
copes = OutputMultiPath(File(exists=True),
desc='Contrast estimates for each contrast')
varcopes = OutputMultiPath(File(exists=True),
desc='Variance estimates for each contrast')
zstats = OutputMultiPath(File(exists=True),
desc='z-stat file for each contrast')
tstats = OutputMultiPath(File(exists=True),
desc='t-stat file for each contrast')
fstats = OutputMultiPath(File(exists=True),
desc='f-stat file for each contrast')
zfstats = OutputMultiPath(File(exists=True),
desc='z-stat file for each F contrast')
neffs = OutputMultiPath(File(exists=True),
desc='neff file ?? for each contrast')
class ContrastMgr(FSLCommand):
"""Use FSL contrast_mgr command to evaluate contrasts
    In interface mode, all required inputs are assumed to be in the same
    location.
"""
_cmd = 'contrast_mgr'
input_spec = ContrastMgrInputSpec
output_spec = ContrastMgrOutputSpec
def _run_interface(self, runtime):
# The returncode is meaningless in ContrastMgr. So check the output
# in stderr and if it's set, then update the returncode
# accordingly.
runtime = super(ContrastMgr, self)._run_interface(runtime)
if runtime.stderr:
self.raise_exception(runtime)
return runtime
def _format_arg(self, name, trait_spec, value):
if name in ['param_estimates', 'corrections', 'dof_file']:
return ''
elif name in ['sigmasquareds']:
path, _ = os.path.split(value)
return path
else:
return super(ContrastMgr, self)._format_arg(name, trait_spec, value)
def _get_design_root(self, infile):
_, fname = os.path.split(infile)
return fname.split('.')[0]
def _get_numcons(self):
numtcons = 0
numfcons = 0
if isdefined(self.inputs.tcon_file):
fp = open(self.inputs.tcon_file, 'rt')
for line in fp.readlines():
if line.startswith('/NumContrasts'):
numtcons = int(line.split()[-1])
break
fp.close()
if isdefined(self.inputs.fcon_file):
fp = open(self.inputs.fcon_file, 'rt')
for line in fp.readlines():
if line.startswith('/NumContrasts'):
numfcons = int(line.split()[-1])
break
fp.close()
return numtcons, numfcons
def _list_outputs(self):
outputs = self._outputs().get()
pth, _ = os.path.split(self.inputs.sigmasquareds)
numtcons, numfcons = self._get_numcons()
base_contrast = 1
if isdefined(self.inputs.contrast_num):
base_contrast = self.inputs.contrast_num
copes = []
varcopes = []
zstats = []
tstats = []
neffs = []
for i in range(numtcons):
copes.append(self._gen_fname('cope%d.nii' % (base_contrast + i),
cwd=pth))
varcopes.append(
self._gen_fname('varcope%d.nii' % (base_contrast + i),
cwd=pth))
zstats.append(self._gen_fname('zstat%d.nii' % (base_contrast + i),
cwd=pth))
tstats.append(self._gen_fname('tstat%d.nii' % (base_contrast + i),
cwd=pth))
neffs.append(self._gen_fname('neff%d.nii' % (base_contrast + i),
cwd=pth))
if copes:
outputs['copes'] = copes
outputs['varcopes'] = varcopes
outputs['zstats'] = zstats
outputs['tstats'] = tstats
outputs['neffs'] = neffs
fstats = []
zfstats = []
for i in range(numfcons):
fstats.append(self._gen_fname('fstat%d.nii' % (base_contrast + i),
cwd=pth))
zfstats.append(
self._gen_fname('zfstat%d.nii' % (base_contrast + i),
cwd=pth))
if fstats:
outputs['fstats'] = fstats
outputs['zfstats'] = zfstats
return outputs
class L2ModelInputSpec(BaseInterfaceInputSpec):
num_copes = traits.Range(low=1, mandatory=True,
desc='number of copes to be combined')
class L2ModelOutputSpec(TraitedSpec):
design_mat = File(exists=True, desc='design matrix file')
design_con = File(exists=True, desc='design contrast file')
design_grp = File(exists=True, desc='design group file')
class L2Model(BaseInterface):
"""Generate subject specific second level model
Examples
--------
>>> from nipype.interfaces.fsl import L2Model
>>> model = L2Model(num_copes=3) # 3 sessions
"""
input_spec = L2ModelInputSpec
output_spec = L2ModelOutputSpec
def _run_interface(self, runtime):
cwd = os.getcwd()
mat_txt = ['/NumWaves 1',
'/NumPoints %d' % self.inputs.num_copes,
'/PPheights %e' % 1,
'',
'/Matrix']
for i in range(self.inputs.num_copes):
mat_txt += ['%e' % 1]
mat_txt = '\n'.join(mat_txt)
con_txt = ['/ContrastName1 group mean',
'/NumWaves 1',
'/NumContrasts 1',
'/PPheights %e' % 1,
'/RequiredEffect 100.0', # XX where does this
# number come from
'',
'/Matrix',
'%e' % 1]
con_txt = '\n'.join(con_txt)
grp_txt = ['/NumWaves 1',
'/NumPoints %d' % self.inputs.num_copes,
'',
'/Matrix']
for i in range(self.inputs.num_copes):
grp_txt += ['1']
grp_txt = '\n'.join(grp_txt)
txt = {'design.mat': mat_txt,
'design.con': con_txt,
'design.grp': grp_txt}
# write design files
for i, name in enumerate(['design.mat', 'design.con', 'design.grp']):
f = open(os.path.join(cwd, name), 'wt')
f.write(txt[name])
f.close()
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
for field in outputs.keys():
outputs[field] = os.path.join(os.getcwd(),
field.replace('_', '.'))
return outputs
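# Illustrative note (editor addition, not part of the original module): for
# L2Model(num_copes=3) the _run_interface method above writes three small text
# files into the working directory. design.mat, for example, would contain:
#
#   /NumWaves 1
#   /NumPoints 3
#   /PPheights 1.000000e+00
#
#   /Matrix
#   1.000000e+00
#   1.000000e+00
#   1.000000e+00
#
# design.con holds the single "group mean" t-contrast and design.grp assigns
# every cope to group 1, matching the templates built in _run_interface.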
class MultipleRegressDesignInputSpec(BaseInterfaceInputSpec):
contrasts = traits.List(
traits.Either(traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float)),
traits.Tuple(traits.Str,
traits.Enum('F'),
traits.List(traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(
traits.Str),
traits.List(
traits.Float)),
))),
mandatory=True,
desc="List of contrasts with each contrast being a list of the form - \
[('name', 'stat', [condition list], [weight list])]. if \
session list is None or not provided, all sessions are used. For F \
contrasts, the condition list should contain previously defined \
T-contrasts without any weight list.")
regressors = traits.Dict(traits.Str, traits.List(traits.Float),
mandatory=True,
desc='dictionary containing named lists of regressors')
groups = traits.List(traits.Int,
desc='list of group identifiers (defaults to single group)')
class MultipleRegressDesignOutputSpec(TraitedSpec):
design_mat = File(exists=True, desc='design matrix file')
design_con = File(exists=True, desc='design t-contrast file')
design_fts = File(exists=True, desc='design f-contrast file')
design_grp = File(exists=True, desc='design group file')
class MultipleRegressDesign(BaseInterface):
"""Generate multiple regression design
.. note::
FSL does not demean columns for higher level analysis.
Please see `FSL documentation <http://www.fmrib.ox.ac.uk/fsl/feat5/detail.html#higher>`_
for more details on model specification for higher level analysis.
Examples
--------
>>> from nipype.interfaces.fsl import MultipleRegressDesign
>>> model = MultipleRegressDesign()
>>> model.inputs.contrasts = [['group mean', 'T',['reg1'],[1]]]
>>> model.inputs.regressors = dict(reg1=[1, 1, 1], reg2=[2.,-4, 3])
>>> model.run() # doctest: +SKIP
"""
input_spec = MultipleRegressDesignInputSpec
output_spec = MultipleRegressDesignOutputSpec
def _run_interface(self, runtime):
cwd = os.getcwd()
regs = sorted(self.inputs.regressors.keys())
nwaves = len(regs)
npoints = len(self.inputs.regressors[regs[0]])
ntcons = sum([1 for con in self.inputs.contrasts if con[1] == 'T'])
nfcons = sum([1 for con in self.inputs.contrasts if con[1] == 'F'])
# write mat file
mat_txt = ['/NumWaves %d' % nwaves,
'/NumPoints %d' % npoints]
ppheights = []
for reg in regs:
maxreg = np.max(self.inputs.regressors[reg])
minreg = np.min(self.inputs.regressors[reg])
if np.sign(maxreg) == np.sign(minreg):
regheight = max([abs(minreg), abs(maxreg)])
else:
regheight = abs(maxreg - minreg)
ppheights.append('%e' % regheight)
mat_txt += ['/PPheights ' + ' '.join(ppheights)]
mat_txt += ['',
'/Matrix']
for cidx in range(npoints):
mat_txt.append(' '.join(
['%e' % self.inputs.regressors[key][cidx] for key in regs]))
mat_txt = '\n'.join(mat_txt) + '\n'
# write t-con file
con_txt = []
counter = 0
tconmap = {}
for conidx, con in enumerate(self.inputs.contrasts):
if con[1] == 'T':
tconmap[conidx] = counter
counter += 1
con_txt += ['/ContrastName%d %s' % (counter, con[0])]
con_txt += ['/NumWaves %d' % nwaves,
'/NumContrasts %d' % ntcons,
'/PPheights %s' % ' '.join(
['%e' % 1 for i in range(counter)]),
'/RequiredEffect %s' % ' '.join(
['%.3f' % 100 for i in range(counter)]),
'',
'/Matrix']
for idx in sorted(tconmap.keys()):
convals = np.zeros((nwaves, 1))
for regidx, reg in enumerate(self.inputs.contrasts[idx][2]):
convals[regs.index(reg)
] = self.inputs.contrasts[idx][3][regidx]
con_txt.append(' '.join(['%e' % val for val in convals]))
con_txt = '\n'.join(con_txt) + '\n'
# write f-con file
fcon_txt = ''
if nfcons:
fcon_txt = ['/NumWaves %d' % ntcons,
'/NumContrasts %d' % nfcons,
'',
'/Matrix']
for conidx, con in enumerate(self.inputs.contrasts):
if con[1] == 'F':
convals = np.zeros((ntcons, 1))
for tcon in con[2]:
convals[tconmap[self.inputs.contrasts.index(tcon)]] = 1
fcon_txt.append(' '.join(['%d' % val for val in convals]))
fcon_txt = '\n'.join(fcon_txt)
fcon_txt += '\n'
# write group file
grp_txt = ['/NumWaves 1',
'/NumPoints %d' % npoints,
'',
'/Matrix']
for i in range(npoints):
if isdefined(self.inputs.groups):
grp_txt += ['%d' % self.inputs.groups[i]]
else:
grp_txt += ['1']
grp_txt = '\n'.join(grp_txt) + '\n'
txt = {'design.mat': mat_txt,
'design.con': con_txt,
'design.fts': fcon_txt,
'design.grp': grp_txt}
# write design files
for key, val in txt.items():
if ('fts' in key) and (nfcons == 0):
continue
filename = key.replace('_', '.')
f = open(os.path.join(cwd, filename), 'wt')
f.write(val)
f.close()
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
nfcons = sum([1 for con in self.inputs.contrasts if con[1] == 'F'])
for field in outputs.keys():
if ('fts' in field) and (nfcons == 0):
continue
outputs[field] = os.path.join(os.getcwd(),
field.replace('_', '.'))
return outputs
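# Illustrative note (editor addition, not part of the original module): the
# /PPheights entry written above follows the peak-to-peak rule in _run_interface.
# For the regressors used in the class docstring example:
#
#   reg1 = [1, 1, 1]    -> max and min share a sign, height = max(|1|, |1|) = 1
#   reg2 = [2., -4, 3]  -> signs differ,             height = |3 - (-4)|     = 7
#
# so the design.mat header would read '/PPheights 1.000000e+00 7.000000e+00'
# (regressor columns are written in sorted key order).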
class SMMInputSpec(FSLCommandInputSpec):
spatial_data_file = File(
exists=True, position=0, argstr='--sdf="%s"', mandatory=True,
desc="statistics spatial map", copyfile=False)
mask = File(exists=True, position=1, argstr='--mask="%s"', mandatory=True,
desc="mask file", copyfile=False)
no_deactivation_class = traits.Bool(position=2, argstr="--zfstatmode",
desc="enforces no deactivation class")
class SMMOutputSpec(TraitedSpec):
null_p_map = File(exists=True)
activation_p_map = File(exists=True)
deactivation_p_map = File(exists=True)
class SMM(FSLCommand):
'''
Spatial Mixture Modelling. For more detail on the spatial mixture modelling see
Mixture Models with Adaptive Spatial Regularisation for Segmentation with an Application to FMRI Data;
Woolrich, M., Behrens, T., Beckmann, C., and Smith, S.; IEEE Trans. Medical Imaging, 24(1):1-11, 2005.
'''
_cmd = 'mm --ld=logdir'
input_spec = SMMInputSpec
output_spec = SMMOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
# TODO get the true logdir from the stdout
outputs['null_p_map'] = self._gen_fname(basename="w1_mean",
cwd="logdir")
outputs['activation_p_map'] = self._gen_fname(
basename="w2_mean", cwd="logdir")
if not isdefined(self.inputs.no_deactivation_class) or not self.inputs.no_deactivation_class:
outputs['deactivation_p_map'] = self._gen_fname(
basename="w3_mean", cwd="logdir")
return outputs
class MELODICInputSpec(FSLCommandInputSpec):
in_files = InputMultiPath(
File(exists=True), argstr="-i %s", mandatory=True, position=0,
desc="input file names (either single file name or a list)")
out_dir = Directory(
argstr="-o %s", desc="output directory name", genfile=True)
mask = File(exists=True, argstr="-m %s",
desc="file name of mask for thresholding")
no_mask = traits.Bool(argstr="--nomask", desc="switch off masking")
update_mask = traits.Bool(
argstr="--update_mask", desc="switch off mask updating")
no_bet = traits.Bool(argstr="--nobet", desc="switch off BET")
bg_threshold = traits.Float(
argstr="--bgthreshold=%f", desc="brain/non-brain threshold used to mask non-brain voxels, as a percentage (only if --nobet selected)")
dim = traits.Int(argstr="-d %d", desc="dimensionality reduction into #num dimensions"
"(default: automatic estimation)")
dim_est = traits.Str(argstr="--dimest=%s", desc="use specific dim. estimation technique:"
" lap, bic, mdl, aic, mean (default: lap)")
sep_whiten = traits.Bool(
argstr="--sep_whiten", desc="switch on separate whitening")
sep_vn = traits.Bool(
argstr="--sep_vn", desc="switch off joined variance normalization")
num_ICs = traits.Int(
argstr="-n %d", desc="number of IC's to extract (for deflation approach)")
approach = traits.Str(argstr="-a %s", desc="approach for decomposition, 2D: defl, symm (default), "
" 3D: tica (default), concat")
non_linearity = traits.Str(
argstr="--nl=%s", desc="nonlinearity: gauss, tanh, pow3, pow4")
var_norm = traits.Bool(
argstr="--vn", desc="switch off variance normalization")
pbsc = traits.Bool(
argstr="--pbsc", desc="switch off conversion to percent BOLD signal change")
cov_weight = traits.Float(argstr="--covarweight=%f", desc="voxel-wise weights for the covariance "
"matrix (e.g. segmentation information)")
epsilon = traits.Float(argstr="--eps=%f", desc="minimum error change")
epsilonS = traits.Float(
argstr="--epsS=%f", desc="minimum error change for rank-1 approximation in TICA")
maxit = traits.Int(argstr="--maxit=%d",
desc="maximum number of iterations before restart")
max_restart = traits.Int(
argstr="--maxrestart=%d", desc="maximum number of restarts")
mm_thresh = traits.Float(
argstr="--mmthresh=%f", desc="threshold for Mixture Model based inference")
no_mm = traits.Bool(
argstr="--no_mm", desc="switch off mixture modelling on IC maps")
ICs = File(exists=True, argstr="--ICs=%s",
desc="filename of the IC components file for mixture modelling")
mix = File(exists=True, argstr="--mix=%s",
desc="mixing matrix for mixture modelling / filtering")
smode = File(exists=True, argstr="--smode=%s",
desc="matrix of session modes for report generation")
rem_cmp = traits.List(
traits.Int, argstr="-f %d", desc="component numbers to remove")
report = traits.Bool(argstr="--report", desc="generate Melodic web report")
bg_image = File(exists=True, argstr="--bgimage=%s", desc="specify background image for report"
" (default: mean image)")
tr_sec = traits.Float(argstr="--tr=%f", desc="TR in seconds")
log_power = traits.Bool(
argstr="--logPower", desc="calculate log of power for frequency spectrum")
t_des = File(exists=True, argstr="--Tdes=%s",
desc="design matrix across time-domain")
t_con = File(exists=True, argstr="--Tcon=%s",
desc="t-contrast matrix across time-domain")
s_des = File(exists=True, argstr="--Sdes=%s",
desc="design matrix across subject-domain")
s_con = File(exists=True, argstr="--Scon=%s",
desc="t-contrast matrix across subject-domain")
out_all = traits.Bool(argstr="--Oall", desc="output everything")
out_unmix = traits.Bool(argstr="--Ounmix", desc="output unmixing matrix")
out_stats = traits.Bool(
argstr="--Ostats", desc="output thresholded maps and probability maps")
out_pca = traits.Bool(argstr="--Opca", desc="output PCA results")
out_white = traits.Bool(
argstr="--Owhite", desc="output whitening/dewhitening matrices")
out_orig = traits.Bool(argstr="--Oorig", desc="output the original ICs")
out_mean = traits.Bool(argstr="--Omean", desc="output mean volume")
report_maps = traits.Str(argstr="--report_maps=%s",
desc="control string for spatial map images (see slicer)")
remove_deriv = traits.Bool(argstr="--remove_deriv", desc="removes every second entry in paradigm"
" file (EV derivatives)")
class MELODICOutputSpec(TraitedSpec):
out_dir = Directory(exists=True)
report_dir = Directory(exists=True)
class MELODIC(FSLCommand):
"""Multivariate Exploratory Linear Optimised Decomposition into Independent Components
Examples
--------
>>> melodic_setup = MELODIC()
>>> melodic_setup.inputs.approach = 'tica'
>>> melodic_setup.inputs.in_files = ['functional.nii', 'functional2.nii', 'functional3.nii']
>>> melodic_setup.inputs.no_bet = True
>>> melodic_setup.inputs.bg_threshold = 10
>>> melodic_setup.inputs.tr_sec = 1.5
>>> melodic_setup.inputs.mm_thresh = 0.5
>>> melodic_setup.inputs.out_stats = True
>>> melodic_setup.inputs.t_des = 'timeDesign.mat'
>>> melodic_setup.inputs.t_con = 'timeDesign.con'
>>> melodic_setup.inputs.s_des = 'subjectDesign.mat'
>>> melodic_setup.inputs.s_con = 'subjectDesign.con'
>>> melodic_setup.inputs.out_dir = 'groupICA.out'
>>> melodic_setup.run() # doctest: +SKIP
"""
input_spec = MELODICInputSpec
output_spec = MELODICOutputSpec
_cmd = 'melodic'
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_dir'] = self.inputs.out_dir
if not isdefined(outputs['out_dir']):
outputs['out_dir'] = self._gen_filename("out_dir")
if isdefined(self.inputs.report) and self.inputs.report:
outputs['report_dir'] = os.path.join(
self._gen_filename("out_dir"), "report")
return outputs
def _gen_filename(self, name):
if name == "out_dir":
return os.getcwd()
class SmoothEstimateInputSpec(FSLCommandInputSpec):
dof = traits.Int(argstr='--dof=%d', mandatory=True,
xor=['zstat_file'],
desc='number of degrees of freedom')
mask_file = File(argstr='--mask=%s',
exists=True, mandatory=True,
desc='brain mask volume')
residual_fit_file = File(argstr='--res=%s',
exists=True, requires=['dof'],
desc='residual-fit image file')
zstat_file = File(argstr='--zstat=%s',
exists=True, xor=['dof'],
desc='zstat image file')
class SmoothEstimateOutputSpec(TraitedSpec):
dlh = traits.Float(desc='smoothness estimate sqrt(det(Lambda))')
volume = traits.Int(desc='number of voxels in mask')
resels = traits.Float(desc='number of resels')
class SmoothEstimate(FSLCommand):
""" Estimates the smoothness of an image
Examples
--------
>>> est = SmoothEstimate()
>>> est.inputs.zstat_file = 'zstat1.nii.gz'
>>> est.inputs.mask_file = 'mask.nii'
>>> est.cmdline
'smoothest --mask=mask.nii --zstat=zstat1.nii.gz'
"""
input_spec = SmoothEstimateInputSpec
output_spec = SmoothEstimateOutputSpec
_cmd = 'smoothest'
def aggregate_outputs(self, runtime=None, needed_outputs=None):
outputs = self._outputs()
stdout = runtime.stdout.split('\n')
outputs.dlh = float(stdout[0].split()[1])
outputs.volume = int(stdout[1].split()[1])
outputs.resels = float(stdout[2].split()[1])
return outputs
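# Illustrative note (editor addition, not part of the original module):
# aggregate_outputs above expects 'smoothest' to print one value per line, a
# label followed by the number, e.g. (exact labels and values depend on the
# FSL version and input data):
#
#   DLH 0.324569
#   VOLUME 226817
#   RESELS 12.513
#
# so dlh, volume and resels are taken from the second whitespace-separated
# token of the first three stdout lines.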
class ClusterInputSpec(FSLCommandInputSpec):
in_file = File(argstr='--in=%s', mandatory=True,
exists=True, desc='input volume')
threshold = traits.Float(argstr='--thresh=%.10f',
mandatory=True,
desc='threshold for input volume')
out_index_file = traits.Either(traits.Bool, File,
argstr='--oindex=%s',
desc='output of cluster index (in size order)', hash_files=False)
out_threshold_file = traits.Either(traits.Bool, File,
argstr='--othresh=%s',
desc='thresholded image', hash_files=False)
out_localmax_txt_file = traits.Either(traits.Bool, File,
argstr='--olmax=%s',
desc='local maxima text file', hash_files=False)
out_localmax_vol_file = traits.Either(traits.Bool, File,
argstr='--olmaxim=%s',
desc='output of local maxima volume', hash_files=False)
out_size_file = traits.Either(traits.Bool, File,
argstr='--osize=%s',
desc='filename for output of size image', hash_files=False)
out_max_file = traits.Either(traits.Bool, File,
argstr='--omax=%s',
desc='filename for output of max image', hash_files=False)
out_mean_file = traits.Either(traits.Bool, File,
argstr='--omean=%s',
desc='filename for output of mean image', hash_files=False)
out_pval_file = traits.Either(traits.Bool, File,
argstr='--opvals=%s',
desc='filename for image output of log pvals', hash_files=False)
pthreshold = traits.Float(argstr='--pthresh=%.10f',
requires=['dlh', 'volume'],
desc='p-threshold for clusters')
peak_distance = traits.Float(argstr='--peakdist=%.10f',
desc='minimum distance between local maxima/minima, in mm (default 0)')
cope_file = traits.File(argstr='--cope=%s',
desc='cope volume')
volume = traits.Int(argstr='--volume=%d',
desc='number of voxels in the mask')
dlh = traits.Float(argstr='--dlh=%.10f',
desc='smoothness estimate = sqrt(det(Lambda))')
    fractional = traits.Bool(argstr='--fractional',
                             desc='interprets the threshold as a fraction of the robust range')
connectivity = traits.Int(argstr='--connectivity=%d',
desc='the connectivity of voxels (default 26)')
    use_mm = traits.Bool(argstr='--mm', desc='use mm, not voxel, coordinates')
    find_min = traits.Bool(argstr='--min', desc='find minima instead of maxima')
    no_table = traits.Bool(
        argstr='--no_table', desc='suppresses printing of the table info')
minclustersize = traits.Bool(argstr='--minclustersize',
desc='prints out minimum significant cluster size')
xfm_file = File(argstr='--xfm=%s',
desc='filename for Linear: input->standard-space transform. Non-linear: input->highres transform')
std_space_file = File(argstr='--stdvol=%s',
desc='filename for standard-space volume')
num_maxima = traits.Int(argstr='--num=%d',
desc='no of local maxima to report')
warpfield_file = File(argstr='--warpvol=%s',
                          desc='file containing warpfield')
class ClusterOutputSpec(TraitedSpec):
index_file = File(desc='output of cluster index (in size order)')
threshold_file = File(desc='thresholded image')
localmax_txt_file = File(desc='local maxima text file')
localmax_vol_file = File(desc='output of local maxima volume')
size_file = File(desc='filename for output of size image')
max_file = File(desc='filename for output of max image')
mean_file = File(desc='filename for output of mean image')
pval_file = File(desc='filename for image output of log pvals')
class Cluster(FSLCommand):
""" Uses FSL cluster to perform clustering on statistical output
Examples
--------
>>> cl = Cluster()
>>> cl.inputs.threshold = 2.3
>>> cl.inputs.in_file = 'zstat1.nii.gz'
>>> cl.inputs.out_localmax_txt_file = 'stats.txt'
>>> cl.cmdline
'cluster --in=zstat1.nii.gz --olmax=stats.txt --thresh=2.3000000000'
"""
input_spec = ClusterInputSpec
output_spec = ClusterOutputSpec
_cmd = 'cluster'
filemap = {'out_index_file': 'index', 'out_threshold_file': 'threshold',
'out_localmax_txt_file': 'localmax.txt',
'out_localmax_vol_file': 'localmax',
'out_size_file': 'size', 'out_max_file': 'max',
'out_mean_file': 'mean', 'out_pval_file': 'pval'}
def _list_outputs(self):
outputs = self.output_spec().get()
for key, suffix in self.filemap.items():
outkey = key[4:]
inval = getattr(self.inputs, key)
if isdefined(inval):
if isinstance(inval, bool):
if inval:
change_ext = True
if suffix.endswith('.txt'):
change_ext = False
outputs[outkey] = self._gen_fname(self.inputs.in_file,
suffix='_' + suffix,
change_ext=change_ext)
else:
outputs[outkey] = os.path.abspath(inval)
return outputs
def _format_arg(self, name, spec, value):
if name in self.filemap.keys():
if isinstance(value, bool):
fname = self._list_outputs()[name[4:]]
else:
fname = value
return spec.argstr % fname
return super(Cluster, self)._format_arg(name, spec, value)
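# Illustrative note (editor addition, not part of the original module): the
# out_* inputs above accept either a filename or True. Passing a filename keeps
# it verbatim, while True derives a name from in_file using the filemap suffix,
# e.g. (hypothetical values):
#
#   Cluster(in_file='zstat1.nii.gz', threshold=2.3, out_index_file=True)
#     -> index_file ~ 'zstat1_index' + output-type extension (via _gen_fname)
#   Cluster(in_file='zstat1.nii.gz', threshold=2.3, out_index_file='clusters.nii.gz')
#     -> index_file = os.path.abspath('clusters.nii.gz')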
class RandomiseInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, desc='4D input file', argstr='-i %s',
position=0, mandatory=True)
base_name = traits.Str(
'tbss_', desc='the rootname that all generated files will have',
argstr='-o "%s"', position=1, usedefault=True)
design_mat = File(
exists=True, desc='design matrix file', argstr='-d %s', position=2)
tcon = File(
exists=True, desc='t contrasts file', argstr='-t %s', position=3)
fcon = File(exists=True, desc='f contrasts file', argstr='-f %s')
mask = File(exists=True, desc='mask image', argstr='-m %s')
x_block_labels = File(
exists=True, desc='exchangeability block labels file', argstr='-e %s')
demean = traits.Bool(
desc='demean data temporally before model fitting', argstr='-D')
one_sample_group_mean = traits.Bool(
desc='perform 1-sample group-mean test instead of generic permutation test',
argstr='-1')
show_total_perms = traits.Bool(
desc='print out how many unique permutations would be generated and exit',
argstr='-q')
show_info_parallel_mode = traits.Bool(
desc='print out information required for parallel mode and exit',
argstr='-Q')
vox_p_values = traits.Bool(
desc='output voxelwise (corrected and uncorrected) p-value images',
argstr='-x')
tfce = traits.Bool(
desc='carry out Threshold-Free Cluster Enhancement', argstr='-T')
tfce2D = traits.Bool(
desc='carry out Threshold-Free Cluster Enhancement with 2D optimisation',
argstr='--T2')
f_only = traits.Bool(desc='calculate f-statistics only', argstr='--f_only')
raw_stats_imgs = traits.Bool(
desc='output raw ( unpermuted ) statistic images', argstr='-R')
p_vec_n_dist_files = traits.Bool(
desc='output permutation vector and null distribution text files',
argstr='-P')
num_perm = traits.Int(
argstr='-n %d', desc='number of permutations (default 5000, set to 0 for exhaustive)')
seed = traits.Int(
argstr='--seed=%d', desc='specific integer seed for random number generator')
var_smooth = traits.Int(
argstr='-v %d', desc='use variance smoothing (std is in mm)')
c_thresh = traits.Float(
argstr='-c %.2f', desc='carry out cluster-based thresholding')
cm_thresh = traits.Float(
argstr='-C %.2f', desc='carry out cluster-mass-based thresholding')
f_c_thresh = traits.Float(
argstr='-F %.2f', desc='carry out f cluster thresholding')
f_cm_thresh = traits.Float(
argstr='-S %.2f', desc='carry out f cluster-mass thresholding')
tfce_H = traits.Float(
argstr='--tfce_H=%.2f', desc='TFCE height parameter (default=2)')
tfce_E = traits.Float(
argstr='--tfce_E=%.2f', desc='TFCE extent parameter (default=0.5)')
tfce_C = traits.Float(
argstr='--tfce_C=%.2f', desc='TFCE connectivity (6 or 26; default=6)')
class RandomiseOutputSpec(TraitedSpec):
tstat_files = traits.List(
File(exists=True),
desc='t contrast raw statistic')
fstat_files = traits.List(
File(exists=True),
desc='f contrast raw statistic')
t_p_files = traits.List(
File(exists=True),
desc='f contrast uncorrected p values files')
f_p_files = traits.List(
File(exists=True),
desc='f contrast uncorrected p values files')
t_corrected_p_files = traits.List(
File(exists=True),
desc='t contrast FWE (Family-wise error) corrected p values files')
f_corrected_p_files = traits.List(
File(exists=True),
desc='f contrast FWE (Family-wise error) corrected p values files')
class Randomise(FSLCommand):
"""XXX UNSTABLE DO NOT USE
FSL Randomise: feeds the 4D projected FA data into GLM
modelling and thresholding
in order to find voxels which correlate with your model
Example
-------
>>> import nipype.interfaces.fsl as fsl
>>> rand = fsl.Randomise(in_file='allFA.nii', mask = 'mask.nii', tcon='design.con', design_mat='design.mat')
>>> rand.cmdline
'randomise -i allFA.nii -o "tbss_" -d design.mat -t design.con -m mask.nii'
"""
_cmd = 'randomise'
input_spec = RandomiseInputSpec
output_spec = RandomiseOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['tstat_files'] = glob(self._gen_fname(
'%s_tstat*.nii' % self.inputs.base_name))
outputs['fstat_files'] = glob(self._gen_fname(
'%s_fstat*.nii' % self.inputs.base_name))
prefix = False
if self.inputs.tfce or self.inputs.tfce2D:
prefix = 'tfce'
elif self.inputs.vox_p_values:
prefix = 'vox'
elif self.inputs.c_thresh or self.inputs.f_c_thresh:
prefix = 'clustere'
elif self.inputs.cm_thresh or self.inputs.f_cm_thresh:
prefix = 'clusterm'
if prefix:
outputs['t_p_files'] = glob(self._gen_fname(
'%s_%s_p_tstat*' % (self.inputs.base_name, prefix)))
outputs['t_corrected_p_files'] = glob(self._gen_fname(
'%s_%s_corrp_tstat*.nii' % (self.inputs.base_name, prefix)))
outputs['f_p_files'] = glob(self._gen_fname(
'%s_%s_p_fstat*.nii' % (self.inputs.base_name, prefix)))
outputs['f_corrected_p_files'] = glob(self._gen_fname(
'%s_%s_corrp_fstat*.nii' % (self.inputs.base_name, prefix)))
return outputs
class GLMInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, argstr='-i %s', mandatory=True, position=1,
desc='input file name (text matrix or 3D/4D image file)')
out_file = File(name_template="%s_glm", argstr='-o %s', position=3,
desc=('filename for GLM parameter estimates'
+ ' (GLM betas)'),
name_source="in_file", keep_extension=True)
design = File(exists=True, argstr='-d %s', mandatory=True, position=2,
desc=('file name of the GLM design matrix (text time'
+ ' courses for temporal regression or an image'
+ ' file for spatial regression)'))
    contrasts = File(exists=True, argstr='-c %s', desc=('matrix of t-statistics'
                                                        + ' contrasts'))
mask = File(exists=True, argstr='-m %s', desc=('mask image file name if'
+ ' input is image'))
dof = traits.Int(argstr='--dof=%d', desc=('set degrees of freedom'
+ ' explicitly'))
des_norm = traits.Bool(argstr='--des_norm', desc=('switch on normalization'
+ ' of the design matrix'
+ ' columns to unit std'
+ ' deviation'))
dat_norm = traits.Bool(argstr='--dat_norm', desc=('switch on normalization'
+ ' of the data time'
+ ' series to unit std'
+ ' deviation'))
var_norm = traits.Bool(argstr='--vn', desc=('perform MELODIC variance-'
+ 'normalisation on data'))
    demean = traits.Bool(argstr='--demean', desc=('switch on demeaning of'
                                                  + ' design and data'))
    out_cope = File(argstr='--out_cope=%s',
                    desc='output file name for COPE (either as txt or image)')
    out_z_name = File(argstr='--out_z=%s',
                      desc='output file name for Z-stats (either as txt or image)')
    out_t_name = File(argstr='--out_t=%s',
                      desc='output file name for t-stats (either as txt or image)')
out_p_name = File(argstr='--out_p=%s',
desc=('output file name for p-values of Z-stats (either as'
+ ' text file or image)'))
out_f_name = File(argstr='--out_f=%s',
desc='output file name for F-value of full model fit')
out_pf_name = File(argstr='--out_pf=%s',
desc='output file name for p-value for full model fit')
out_res_name = File(argstr='--out_res=%s',
desc='output file name for residuals')
out_varcb_name = File(argstr='--out_varcb=%s',
desc='output file name for variance of COPEs')
out_sigsq_name = File(argstr='--out_sigsq=%s',
desc=('output file name for residual noise variance'
+ ' sigma-square'))
out_data_name = File(argstr='--out_data=%s',
desc='output file name for pre-processed data')
out_vnscales_name = File(argstr='--out_vnscales=%s',
desc=('output file name for scaling factors for variance'
+ ' normalisation'))
class GLMOutputSpec(TraitedSpec):
out_file = File(exists=True, desc=('file name of GLM parameters'
' (if generated)'))
out_cope = OutputMultiPath(File(exists=True),
desc=('output file name for COPEs (either as '
'text file or image)'))
    out_z = OutputMultiPath(File(exists=True),
                            desc=('output file name for Z-stats (either as text '
                                  'file or image)'))
out_t = OutputMultiPath(File(exists=True),
desc=('output file name for t-stats (either as '
'text file or image)'))
out_p = OutputMultiPath(File(exists=True),
desc=('output file name for p-values of Z-stats '
'(either as text file or image)'))
out_f = OutputMultiPath(File(exists=True),
desc=('output file name for F-value of full model '
'fit'))
out_pf = OutputMultiPath(File(exists=True),
desc=('output file name for p-value for full '
'model fit'))
out_res = OutputMultiPath(File(exists=True),
desc='output file name for residuals')
out_varcb = OutputMultiPath(File(exists=True),
desc='output file name for variance of COPEs')
out_sigsq = OutputMultiPath(File(exists=True),
desc=('output file name for residual noise '
'variance sigma-square'))
out_data = OutputMultiPath(File(exists=True),
desc='output file for preprocessed data')
out_vnscales = OutputMultiPath(File(exists=True),
desc=('output file name for scaling factors '
'for variance normalisation'))
class GLM(FSLCommand):
"""
FSL GLM:
Example
-------
>>> import nipype.interfaces.fsl as fsl
>>> glm = fsl.GLM(in_file='functional.nii', design='maps.nii', output_type='NIFTI')
>>> glm.cmdline
'fsl_glm -i functional.nii -d maps.nii -o functional_glm.nii'
"""
_cmd = 'fsl_glm'
input_spec = GLMInputSpec
output_spec = GLMOutputSpec
def _list_outputs(self):
outputs = super(GLM, self)._list_outputs()
if isdefined(self.inputs.out_cope):
outputs['out_cope'] = os.path.abspath(self.inputs.out_cope)
if isdefined(self.inputs.out_z_name):
outputs['out_z'] = os.path.abspath(self.inputs.out_z_name)
if isdefined(self.inputs.out_t_name):
outputs['out_t'] = os.path.abspath(self.inputs.out_t_name)
if isdefined(self.inputs.out_p_name):
outputs['out_p'] = os.path.abspath(self.inputs.out_p_name)
if isdefined(self.inputs.out_f_name):
outputs['out_f'] = os.path.abspath(self.inputs.out_f_name)
if isdefined(self.inputs.out_pf_name):
outputs['out_pf'] = os.path.abspath(self.inputs.out_pf_name)
if isdefined(self.inputs.out_res_name):
outputs['out_res'] = os.path.abspath(self.inputs.out_res_name)
if isdefined(self.inputs.out_varcb_name):
outputs['out_varcb'] = os.path.abspath(self.inputs.out_varcb_name)
if isdefined(self.inputs.out_sigsq_name):
outputs['out_sigsq'] = os.path.abspath(self.inputs.out_sigsq_name)
if isdefined(self.inputs.out_data_name):
outputs['out_data'] = os.path.abspath(self.inputs.out_data_name)
if isdefined(self.inputs.out_vnscales_name):
outputs['out_vnscales'] = os.path.abspath(
self.inputs.out_vnscales_name)
return outputs
|
mick-d/nipype_source
|
nipype/interfaces/fsl/model.py
|
Python
|
bsd-3-clause
| 82,941
|
[
"Gaussian"
] |
34ae8bc97a1ec40524bd24ce088108f890e03ce2a939b763d948edf781664377
|
import pytest
from .utils import *
import psi4
from qcengine.testing import using
@pytest.mark.parametrize('engine', [
pytest.param('optking'),
pytest.param('geometric', marks=using('geometric')),
]) # yapf: disable
@pytest.mark.parametrize('inp', [
pytest.param({'name': 'hf', 'options': {'scf_type': 'df'}, 'ref_ene' : -76.027032783717, 'ref_nuc': 9.300794299874}, id='rhf(df)'),
pytest.param({'name': 'hf', 'options': {'scf_type': 'pk'}, 'ref_ene' : -76.027053512764, 'ref_nuc': 9.300838770294}, id='rhf(pk)'),
pytest.param({'name': 'mp2', 'options': {'mp2_type': 'df'}, 'ref_ene' : -76.230938589591, 'ref_nuc': 9.133271168193}, id='mp2(df)'),
pytest.param({'name': 'mp2', 'options': {'mp2_type': 'conv'}, 'ref_ene' : -76.230989373502, 'ref_nuc': 9.133125471291}, id='mp2(conv)'),
pytest.param({'name': 'b3lyp', 'options': {'scf_type': 'df'}, 'ref_ene' : -76.420645414834, 'ref_nuc': 9.090397129492}, id='b3lyp'),
]) # yapf: disable
def test_h2o(inp, engine):
"""Optimization of the square water molecule"""
h2o = psi4.geometry("""
O
H 1 1.0
H 1 1.0 2 90.0
""")
psi4.set_options({'basis': 'cc-pvdz',
'g_convergence': 'gau_tight'
})
psi4.set_options(inp['options'])
e, wfn = psi4.optimize(inp['name'], return_wfn=True, engine=engine)
assert compare_values(inp['ref_ene'], e, 6)
assert compare_values(inp['ref_nuc'], h2o.nuclear_repulsion_energy(), 3)
@using('geometric')
@pytest.mark.parametrize('inp', [
pytest.param({'name': 'hf', 'options': {'scf_type': 'df'}, 'ref_ene' : -76.02079629252714, 'ref_nuc': 9.265341708725257}, id='rhf(df)'),
pytest.param({'name': 'hf', 'options': {'scf_type': 'pk'}, 'ref_ene' : -76.02082389228, 'ref_nuc': 9.26528625744628}, id='rhf(pk)'),
pytest.param({'name': 'mp2', 'options': {'mp2_type': 'df'}, 'ref_ene' : -76.22711819393223, 'ref_nuc': 9.09137805747361}, id='mp2(df)'),
pytest.param({'name': 'mp2', 'options': {'mp2_type': 'conv'}, 'ref_ene' : -76.2271678506303, 'ref_nuc': 9.091178486990861}, id='mp2(conv)'),
pytest.param({'name': 'b3lyp', 'options': {'scf_type': 'df'}, 'ref_ene' : -76.41632755714534, 'ref_nuc': 9.04535641436914}, id='b3lyp'),
]) # yapf: disable
def test_h2o_constrained(inp):
"""Constrained optimization of the square water molecule"""
h2o = psi4.geometry("""
O
H 1 1.0
H 1 1.0 2 90.0
""")
psi4.set_options({'basis': 'cc-pvdz',
'g_convergence': 'gau_tight'
})
psi4.set_options(inp['options'])
# geometric specific options
geometric_keywords = {
'coordsys' : 'tric',
'enforce' : 0.0,
'constraints' : {
'set' : [{'type' : 'angle',
'indices' : [1, 0, 2],
'value' : 90.0 }]
}
}
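    # Illustrative note (editor addition, not part of the original test): the
    # constraint above freezes the H-O-H angle at 90 degrees; 'indices' are
    # zero-based atom indices into the geometry (O is atom 0, the hydrogens are
    # atoms 1 and 2), and the nested dict layout is the keyword format forwarded
    # to the geomeTRIC optimizer.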
e, wfn = psi4.optimize(inp['name'], return_wfn=True, engine='geometric', optimizer_keywords=geometric_keywords)
assert compare_values(inp['ref_ene'], e, 6)
assert compare_values(inp['ref_nuc'], h2o.nuclear_repulsion_energy(), 3)
|
ashutoshvt/psi4
|
tests/pytests/test_geometric.py
|
Python
|
lgpl-3.0
| 3,139
|
[
"Psi4"
] |
8ac63fcbfcb7adbf7e7dbba9c2d4f1f77f6ef412988fc24ba759a9323bdbbccf
|
# Copied from https://github.com/PX4/ecl/commit/264c8c4e8681704e4719d0a03b848df8617c0863
# and modified for ArduPilot
from sympy import *
from code_gen import *
import numpy as np
# q: quaternion describing rotation from frame 1 to frame 2
# returns a rotation matrix derived form q which describes the same
# rotation
def quat2Rot(q):
q0 = q[0]
q1 = q[1]
q2 = q[2]
q3 = q[3]
# This form is the one normally used in flight dynamics and inertial navigation texts, eg
# Aircraft Control and Simulation, Stevens,B.L, Lewis,F.L, Johnson,E.N, Third Edition, eqn 1.8-18
# It does produce second order terms in the covariance prediction that can be problematic
# with single precision processing.
    # It requires the quaternion to be unit length.
# Rot = Matrix([[q0**2 + q1**2 - q2**2 - q3**2, 2*(q1*q2 - q0*q3), 2*(q1*q3 + q0*q2)],
# [2*(q1*q2 + q0*q3), q0**2 - q1**2 + q2**2 - q3**2, 2*(q2*q3 - q0*q1)],
# [2*(q1*q3-q0*q2), 2*(q2*q3 + q0*q1), q0**2 - q1**2 - q2**2 + q3**2]])
# This form removes q1 from the 0,0, q2 from the 1,1 and q3 from the 2,2 entry and results
# in a covariance prediction that is better conditioned.
# It requires the quaternion to be unit length and is mathematically identical
# to the alternate form when q0**2 + q1**2 + q2**2 + q3**2 = 1
# See https://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToMatrix/index.htm
Rot = Matrix([[1 - 2*(q2**2 + q3**2), 2*(q1*q2 - q0*q3) , 2*(q1*q3 + q0*q2) ],
[2*(q1*q2 + q0*q3) , 1 - 2*(q1**2 + q3**2), 2*(q2*q3 - q0*q1) ],
[2*(q1*q3-q0*q2) , 2*(q2*q3 + q0*q1) , 1 - 2*(q1**2 + q2**2)]])
return Rot
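# Illustrative check (editor addition, not part of the original script): for the
# identity quaternion q = [1, 0, 0, 0] the expression above reduces to the 3x3
# identity matrix, and for any unit-norm quaternion it matches the commented-out
# "flight dynamics" form, e.g.
#
#   quat2Rot(Matrix([1, 0, 0, 0])) == eye(3)   # -> True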
def create_cov_matrix(i, j):
if j >= i:
# return Symbol("P(" + str(i) + "," + str(j) + ")", real=True)
# legacy array format
return Symbol("P[" + str(i) + "][" + str(j) + "]", real=True)
else:
return 0
def create_yaw_estimator_cov_matrix():
# define a symbolic covariance matrix
P = Matrix(3,3,create_cov_matrix)
for index in range(3):
for j in range(3):
if index > j:
P[index,j] = P[j,index]
return P
def create_Tbs_matrix(i, j):
# return Symbol("Tbs(" + str(i) + "," + str(j) + ")", real=True)
# legacy array format
return Symbol("Tbs[" + str(i) + "][" + str(j) + "]", real=True)
def quat_mult(p,q):
r = Matrix([p[0] * q[0] - p[1] * q[1] - p[2] * q[2] - p[3] * q[3],
p[0] * q[1] + p[1] * q[0] + p[2] * q[3] - p[3] * q[2],
p[0] * q[2] - p[1] * q[3] + p[2] * q[0] + p[3] * q[1],
p[0] * q[3] + p[1] * q[2] - p[2] * q[1] + p[3] * q[0]])
return r
def create_symmetric_cov_matrix(n):
# define a symbolic covariance matrix
P = Matrix(n,n,create_cov_matrix)
for index in range(n):
for j in range(n):
if index > j:
P[index,j] = P[j,index]
return P
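# Illustrative note (editor addition, not part of the original script): with the
# legacy array naming from create_cov_matrix, create_symmetric_cov_matrix(2)
# returns
#
#   Matrix([[P[0][0], P[0][1]],
#           [P[0][1], P[1][1]]])
#
# i.e. only the upper triangle is given independent symbols and the lower
# triangle is mirrored, which keeps the symbolic covariance matrix symmetric.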
# generate equations for observation vector innovation variances
def generate_observation_vector_innovation_variances(P,state,observation,variance,n_obs):
H = observation.jacobian(state)
innovation_variance = zeros(n_obs,1)
for index in range(n_obs):
H[index,:] = Matrix([observation[index]]).jacobian(state)
innovation_variance[index] = H[index,:] * P * H[index,:].T + Matrix([variance])
IV_simple = cse(innovation_variance, symbols("IV0:1000"), optimizations='basic')
return IV_simple
# generate equations for observation Jacobian and Kalman gain
def generate_observation_equations(P,state,observation,variance,varname="HK"):
H = Matrix([observation]).jacobian(state)
innov_var = H * P * H.T + Matrix([variance])
assert(innov_var.shape[0] == 1)
assert(innov_var.shape[1] == 1)
K = P * H.T / innov_var[0,0]
extension="0:1000"
var_string = varname+extension
HK_simple = cse(Matrix([H.transpose(), K]), symbols(var_string), optimizations='basic')
return HK_simple
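# Illustrative note (editor addition, not part of the original script):
# generate_observation_equations encodes the standard scalar-measurement EKF
# update quantities for an observation h(x) with noise variance R:
#
#   H = dh/dx                (observation Jacobian, 1 x n)
#   S = H * P * H^T + R      (innovation variance, scalar)
#   K = P * H^T / S          (Kalman gain, n x 1)
#
# and returns them after common-subexpression elimination so the generated C++
# can reuse shared terms.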
# generate equations for observation vector Jacobian and Kalman gain
# n_obs is the vector dimension and must be >= 2
def generate_observation_vector_equations(P,state,observation,variance,n_obs):
K = zeros(24,n_obs)
H = observation.jacobian(state)
HK = zeros(n_obs*48,1)
for index in range(n_obs):
H[index,:] = Matrix([observation[index]]).jacobian(state)
innov_var = H[index,:] * P * H[index,:].T + Matrix([variance])
assert(innov_var.shape[0] == 1)
assert(innov_var.shape[1] == 1)
K[:,index] = P * H[index,:].T / innov_var[0,0]
HK[index*48:(index+1)*48,0] = Matrix([H[index,:].transpose(), K[:,index]])
HK_simple = cse(HK, symbols("HK0:1000"), optimizations='basic')
return HK_simple
# write single observation equations to file
def write_equations_to_file(equations,code_generator_id,n_obs):
if (n_obs < 1):
return
if (n_obs == 1):
code_generator_id.print_string("Sub Expressions")
code_generator_id.write_subexpressions(equations[0])
code_generator_id.print_string("Observation Jacobians")
code_generator_id.write_matrix(Matrix(equations[1][0][0:24]), "Hfusion", False)
code_generator_id.print_string("Kalman gains")
code_generator_id.write_matrix(Matrix(equations[1][0][24:]), "Kfusion", False)
else:
code_generator_id.print_string("Sub Expressions")
code_generator_id.write_subexpressions(equations[0])
for axis_index in range(n_obs):
start_index = axis_index*48
code_generator_id.print_string("Observation Jacobians - axis %i" % axis_index)
code_generator_id.write_matrix(Matrix(equations[1][0][start_index:start_index+24]), "Hfusion", False)
code_generator_id.print_string("Kalman gains - axis %i" % axis_index)
code_generator_id.write_matrix(Matrix(equations[1][0][start_index+24:start_index+48]), "Kfusion", False)
return
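# Illustrative note (editor addition, not part of the original script): for vector
# observations the 'equations' argument packs 48 rows per axis, 24 Jacobian terms
# followed by 24 Kalman-gain terms (one per EKF state), which is why the writer
# above slices the result in blocks of 48 and splits each block at element 24.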
# derive equations for sequential fusion of optical flow measurements
def optical_flow_observation(P,state,R_to_body,vx,vy,vz):
flow_code_generator = CodeGenerator("./generated/flow_generated.cpp")
range = symbols("range", real=True) # range from camera focal point to ground along sensor Z axis
obs_var = symbols("R_LOS", real=True) # optical flow line of sight rate measurement noise variance
# Define rotation matrix from body to sensor frame
Tbs = Matrix(3,3,create_Tbs_matrix)
# Calculate earth relative velocity in a non-rotating sensor frame
relVelSensor = Tbs * R_to_body * Matrix([vx,vy,vz])
# Divide by range to get predicted angular LOS rates relative to X and Y
# axes. Note these are rates in a non-rotating sensor frame
losRateSensorX = +relVelSensor[1]/range
losRateSensorY = -relVelSensor[0]/range
# calculate the observation Jacobian and Kalman gains for the X axis
equations = generate_observation_equations(P,state,losRateSensorX,obs_var)
flow_code_generator.print_string("X Axis Equations")
write_equations_to_file(equations,flow_code_generator,1)
# calculate the observation Jacobian and Kalman gains for the Y axis
equations = generate_observation_equations(P,state,losRateSensorY,obs_var)
flow_code_generator.print_string("Y Axis Equations")
write_equations_to_file(equations,flow_code_generator,1)
flow_code_generator.close()
# calculate a combined result for a possible reduction in operations, but will use more stack
observation = Matrix([relVelSensor[1]/range,-relVelSensor[0]/range])
equations = generate_observation_vector_equations(P,state,observation,obs_var,2)
flow_code_generator_alt = CodeGenerator("./generated/flow_generated_alt.cpp")
write_equations_to_file(equations,flow_code_generator_alt,2)
flow_code_generator_alt.close()
return
# Derive equations for sequential fusion of body frame velocity measurements
def body_frame_velocity_observation(P,state,R_to_body,vx,vy,vz):
obs_var = symbols("R_VEL", real=True) # measurement noise variance
# Calculate earth relative velocity in a non-rotating sensor frame
vel_bf = R_to_body * Matrix([vx,vy,vz])
vel_bf_code_generator = CodeGenerator("./generated/vel_bf_generated.cpp")
axes = [0,1,2]
H_obs = vel_bf.jacobian(state) # observation Jacobians
K_gain = zeros(24,3)
for index in axes:
equations = generate_observation_equations(P,state,vel_bf[index],obs_var)
vel_bf_code_generator.print_string("axis %i" % index)
vel_bf_code_generator.write_subexpressions(equations[0])
vel_bf_code_generator.write_matrix(Matrix(equations[1][0][0:24]), "H_VEL", False)
vel_bf_code_generator.write_matrix(Matrix(equations[1][0][24:]), "Kfusion", False)
vel_bf_code_generator.close()
# calculate a combined result for a possible reduction in operations, but will use more stack
equations = generate_observation_vector_equations(P,state,vel_bf,obs_var,3)
vel_bf_code_generator_alt = CodeGenerator("./generated/vel_bf_generated_alt.cpp")
write_equations_to_file(equations,vel_bf_code_generator_alt,3)
vel_bf_code_generator_alt.close()
# derive equations for fusion of dual antenna yaw measurement
def gps_yaw_observation(P,state,R_to_body):
obs_var = symbols("R_YAW", real=True) # measurement noise variance
ant_yaw = symbols("ant_yaw", real=True) # yaw angle of antenna array axis wrt X body axis
# define antenna vector in body frame
ant_vec_bf = Matrix([cos(ant_yaw),sin(ant_yaw),0])
# rotate into earth frame
ant_vec_ef = R_to_body.T * ant_vec_bf
# Calculate the yaw angle from the projection
observation = atan(ant_vec_ef[1]/ant_vec_ef[0])
equations = generate_observation_equations(P,state,observation,obs_var)
gps_yaw_code_generator = CodeGenerator("./generated/gps_yaw_generated.cpp")
write_equations_to_file(equations,gps_yaw_code_generator,1)
gps_yaw_code_generator.close()
return
# derive equations for fusion of declination
def declination_observation(P,state,ix,iy):
obs_var = symbols("R_DECL", real=True) # measurement noise variance
# the predicted measurement is the angle wrt magnetic north of the horizontal
# component of the measured field
observation = atan(iy/ix)
equations = generate_observation_equations(P,state,observation,obs_var)
mag_decl_code_generator = CodeGenerator("./generated/mag_decl_generated.cpp")
write_equations_to_file(equations,mag_decl_code_generator,1)
mag_decl_code_generator.close()
return
# derive equations for fusion of lateral body acceleration (multirotors only)
def body_frame_accel_observation(P,state,R_to_body,vx,vy,vz,wx,wy):
obs_var = symbols("R_ACC", real=True) # measurement noise variance
    Kaccx = symbols("Kaccx", real=True) # linear drag coefficient mapping X body-axis relative velocity to predicted acceleration
    Kaccy = symbols("Kaccy", real=True) # linear drag coefficient mapping Y body-axis relative velocity to predicted acceleration
# use relationship between airspeed along the X and Y body axis and the
# drag to predict the lateral acceleration for a multirotor vehicle type
# where propulsion forces are generated primarily along the Z body axis
vrel = R_to_body*Matrix([vx-wx,vy-wy,vz]) # predicted wind relative velocity
# Use this nonlinear model for the prediction in the implementation only
# It uses a ballistic coefficient for each axis
# accXpred = -0.5*rho*vrel[0]*vrel[0]*BCXinv # predicted acceleration measured along X body axis
# accYpred = -0.5*rho*vrel[1]*vrel[1]*BCYinv # predicted acceleration measured along Y body axis
# Use a simple viscous drag model for the linear estimator equations
    # Use the derivative from speed to acceleration averaged across the
    # speed range. This avoids the generation of a Dirac function in the derivation
# The nonlinear equation will be used to calculate the predicted measurement in implementation
observation = Matrix([-Kaccx*vrel[0],-Kaccy*vrel[1]])
acc_bf_code_generator = CodeGenerator("./generated/acc_bf_generated.cpp")
H = observation.jacobian(state)
K = zeros(24,2)
axes = [0,1]
for index in axes:
equations = generate_observation_equations(P,state,observation[index],obs_var)
acc_bf_code_generator.print_string("Axis %i equations" % index)
write_equations_to_file(equations,acc_bf_code_generator,1)
acc_bf_code_generator.close()
# calculate a combined result for a possible reduction in operations, but will use more stack
equations = generate_observation_vector_equations(P,state,observation,obs_var,2)
acc_bf_code_generator_alt = CodeGenerator("./generated/acc_bf_generated_alt.cpp")
    write_equations_to_file(equations,acc_bf_code_generator_alt,2)
acc_bf_code_generator_alt.close()
return
# yaw fusion
def yaw_observation(P,state,R_to_earth):
yaw_code_generator = CodeGenerator("./generated/yaw_generated.cpp")
# Derive observation Jacobian for fusion of 321 sequence yaw measurement
# Calculate the yaw (first rotation) angle from the 321 rotation sequence
# Provide alternative angle that avoids singularity at +-pi/2 yaw
angMeasA = atan(R_to_earth[1,0]/R_to_earth[0,0])
H_YAW321_A = Matrix([angMeasA]).jacobian(state)
H_YAW321_A_simple = cse(H_YAW321_A, symbols('SA0:200'))
angMeasB = pi/2 - atan(R_to_earth[0,0]/R_to_earth[1,0])
H_YAW321_B = Matrix([angMeasB]).jacobian(state)
H_YAW321_B_simple = cse(H_YAW321_B, symbols('SB0:200'))
yaw_code_generator.print_string("calculate 321 yaw observation matrix - option A")
yaw_code_generator.write_subexpressions(H_YAW321_A_simple[0])
yaw_code_generator.write_matrix(Matrix(H_YAW321_A_simple[1]).T, "H_YAW", False)
yaw_code_generator.print_string("calculate 321 yaw observation matrix - option B")
yaw_code_generator.write_subexpressions(H_YAW321_B_simple[0])
yaw_code_generator.write_matrix(Matrix(H_YAW321_B_simple[1]).T, "H_YAW", False)
# Derive observation Jacobian for fusion of 312 sequence yaw measurement
# Calculate the yaw (first rotation) angle from an Euler 312 sequence
# Provide alternative angle that avoids singularity at +-pi/2 yaw
angMeasA = atan(-R_to_earth[0,1]/R_to_earth[1,1])
H_YAW312_A = Matrix([angMeasA]).jacobian(state)
H_YAW312_A_simple = cse(H_YAW312_A, symbols('SA0:200'))
angMeasB = pi/2 - atan(-R_to_earth[1,1]/R_to_earth[0,1])
H_YAW312_B = Matrix([angMeasB]).jacobian(state)
H_YAW312_B_simple = cse(H_YAW312_B, symbols('SB0:200'))
yaw_code_generator.print_string("calculate 312 yaw observation matrix - option A")
yaw_code_generator.write_subexpressions(H_YAW312_A_simple[0])
yaw_code_generator.write_matrix(Matrix(H_YAW312_A_simple[1]).T, "H_YAW", False)
yaw_code_generator.print_string("calculate 312 yaw observation matrix - option B")
yaw_code_generator.write_subexpressions(H_YAW312_B_simple[0])
yaw_code_generator.write_matrix(Matrix(H_YAW312_B_simple[1]).T, "H_YAW", False)
yaw_code_generator.close()
return
# 3D magnetometer fusion
def mag_observation_variance(P,state,R_to_body,i,ib):
obs_var = symbols("R_MAG", real=True) # magnetometer measurement noise variance
m_mag = R_to_body * i + ib
# separate calculation of innovation variance equations for the y and z axes
m_mag[0]=0
innov_var_equations = generate_observation_vector_innovation_variances(P,state,m_mag,obs_var,3)
mag_innov_var_code_generator = CodeGenerator("./generated/3Dmag_innov_var_generated.cpp")
write_equations_to_file(innov_var_equations,mag_innov_var_code_generator,3)
mag_innov_var_code_generator.close()
return
# 3D magnetometer fusion
def mag_observation(P,state,R_to_body,i,ib):
obs_var = symbols("R_MAG", real=True) # magnetometer measurement noise variance
m_mag = R_to_body * i + ib
# calculate a separate set of equations for each axis
mag_code_generator = CodeGenerator("./generated/3Dmag_generated.cpp")
axes = [0,1,2]
label="HK"
for index in axes:
if (index==0):
label="HKX"
elif (index==1):
label="HKY"
elif (index==2):
label="HKZ"
else:
return
equations = generate_observation_equations(P,state,m_mag[index],obs_var,varname=label)
mag_code_generator.print_string("Axis %i equations" % index)
write_equations_to_file(equations,mag_code_generator,1)
mag_code_generator.close()
# calculate a combined set of equations for a possible reduction in operations, but will use slightly more stack
equations = generate_observation_vector_equations(P,state,m_mag,obs_var,3)
mag_code_generator_alt = CodeGenerator("./generated/3Dmag_generated_alt.cpp")
write_equations_to_file(equations,mag_code_generator_alt,3)
mag_code_generator_alt.close()
return
# airspeed fusion
def tas_observation(P,state,vx,vy,vz,wx,wy):
obs_var = symbols("R_TAS", real=True) # true airspeed measurement noise variance
observation = sqrt((vx-wx)*(vx-wx)+(vy-wy)*(vy-wy)+vz*vz)
equations = generate_observation_equations(P,state,observation,obs_var)
tas_code_generator = CodeGenerator("./generated/tas_generated.cpp")
write_equations_to_file(equations,tas_code_generator,1)
tas_code_generator.close()
return
# sideslip fusion
def beta_observation(P,state,R_to_body,vx,vy,vz,wx,wy):
obs_var = symbols("R_BETA", real=True) # sideslip measurement noise variance
v_rel_ef = Matrix([vx-wx,vy-wy,vz])
v_rel_bf = R_to_body * v_rel_ef
observation = v_rel_bf[1]/v_rel_bf[0]
equations = generate_observation_equations(P,state,observation,obs_var)
beta_code_generator = CodeGenerator("./generated/beta_generated.cpp")
write_equations_to_file(equations,beta_code_generator,1)
beta_code_generator.close()
return
# yaw estimator prediction and observation code
def yaw_estimator():
dt = symbols("dt", real=True) # dt (sec)
psi = symbols("psi", real=True) # yaw angle of body frame wrt earth frame
vn, ve = symbols("vn ve", real=True) # velocity in world frame (north/east) - m/sec
daz = symbols("daz", real=True) # IMU z axis delta angle measurement in body axes - rad
dazVar = symbols("dazVar", real=True) # IMU Z axis delta angle measurement variance (rad^2)
dvx, dvy = symbols("dvx dvy", real=True) # IMU x and y axis delta velocity measurement in body axes - m/sec
dvxVar, dvyVar = symbols("dvxVar dvyVar", real=True) # IMU x and y axis delta velocity measurement variance (m/s)^2
# derive the body to nav direction transformation matrix
Tbn = Matrix([[cos(psi) , -sin(psi)],
[sin(psi) , cos(psi)]])
# attitude update equation
psiNew = psi + daz
# velocity update equations
velNew = Matrix([vn,ve]) + Tbn*Matrix([dvx,dvy])
# Define the state vectors
stateVector = Matrix([vn,ve,psi])
# Define vector of process equations
newStateVector = Matrix([velNew,psiNew])
# Calculate state transition matrix
F = newStateVector.jacobian(stateVector)
# Derive the covariance prediction equations
# Error growth in the inertial solution is assumed to be driven by 'noise' in the delta angles and
# velocities, after bias effects have been removed.
# derive the control(disturbance) influence matrix from IMU noise to state noise
G = newStateVector.jacobian(Matrix([dvx,dvy,daz]))
# derive the state error matrix
distMatrix = Matrix([[dvxVar , 0 , 0],
[0 , dvyVar , 0],
[0 , 0 , dazVar]])
Q = G * distMatrix * G.T
# propagate covariance matrix
P = create_yaw_estimator_cov_matrix()
P_new = F * P * F.T + Q
P_new_simple = cse(P_new, symbols("S0:1000"), optimizations='basic')
yaw_estimator_covariance_generator = CodeGenerator("./generated/yaw_estimator_covariance_prediction_generated.cpp")
yaw_estimator_covariance_generator.print_string("Equations for covariance matrix prediction")
yaw_estimator_covariance_generator.write_subexpressions(P_new_simple[0])
yaw_estimator_covariance_generator.write_matrix(Matrix(P_new_simple[1]), "_ekf_gsf[model_index].P", True)
yaw_estimator_covariance_generator.close()
# derive the covariance update equation for a NE velocity observation
velObsVar = symbols("velObsVar", real=True) # velocity observation variance (m/s)^2
H = Matrix([[1,0,0],
[0,1,0]])
R = Matrix([[velObsVar , 0],
[0 , velObsVar]])
S = H * P * H.T + R
S_det_inv = 1 / S.det()
S_inv = S.inv()
K = (P * H.T) * S_inv
P_new = P - K * S * K.T
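# Hedged note: with the optimal gain K = P*H.T*S^-1, P - K*S*K.T is algebraically the same
# as the familiar (I - K*H)*P update, but the K*S*K.T form keeps the result symmetric by construction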
# optimize code
t, [S_det_inv_s, S_inv_s, K_s, P_new_s] = cse([S_det_inv, S_inv, K, P_new], symbols("t0:1000"), optimizations='basic')
yaw_estimator_observation_generator = CodeGenerator("./generated/yaw_estimator_measurement_update_generated.cpp")
yaw_estimator_observation_generator.print_string("Intermediate variables")
yaw_estimator_observation_generator.write_subexpressions(t)
yaw_estimator_observation_generator.print_string("Equations for NE velocity innovation variance's determinante inverse")
yaw_estimator_observation_generator.write_matrix(Matrix([[S_det_inv_s]]), "_ekf_gsf[model_index].S_det_inverse", False)
yaw_estimator_observation_generator.print_string("Equations for NE velocity innovation variance inverse")
yaw_estimator_observation_generator.write_matrix(Matrix(S_inv_s), "_ekf_gsf[model_index].S_inverse", True)
yaw_estimator_observation_generator.print_string("Equations for NE velocity Kalman gain")
yaw_estimator_observation_generator.write_matrix(Matrix(K_s), "K", False)
yaw_estimator_observation_generator.print_string("Equations for covariance matrix update")
yaw_estimator_observation_generator.write_matrix(Matrix(P_new_s), "_ekf_gsf[model_index].P", True)
yaw_estimator_observation_generator.close()
def quaternion_error_propagation():
# define quaternion state vector
q0, q1, q2, q3 = symbols("q0 q1 q2 q3", real=True)
q = Matrix([q0, q1, q2, q3])
# define truth gravity unit vector in body frame
R_to_earth = quat2Rot(q)
R_to_body = R_to_earth.T
gravity_ef = Matrix([0,0,1])
gravity_bf = R_to_body * gravity_ef
# define perturbations to quaternion state vector q
dq0, dq1, dq2, dq3 = symbols("dq0 dq1 dq2 dq3", real=True)
q_delta = Matrix([dq0, dq1, dq2, dq3])
# apply perturbations
q_perturbed = q + q_delta
# gravity unit vector in body frame after quaternion perturbation
R_to_earth_perturbed = quat2Rot(q_perturbed)
R_to_body_perturbed = R_to_earth_perturbed.T
gravity_bf_perturbed = R_to_body_perturbed * gravity_ef
# calculate the angular difference between the perturbed and unperturbed body frame gravity unit vectors
# assuming small angles
tilt_error_bf = gravity_bf.cross(gravity_bf_perturbed)
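# Hedged note: for a small perturbation the cross product of the two unit gravity vectors has
# magnitude sin(theta) ~= theta and points along the rotation axis, so it approximates the
# tilt error rotation vector expressed in the body frame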
# calculate the derivative of the perturbation rotation vector wrt the quaternion perturbations
J = tilt_error_bf.jacobian(q_delta)
# remove second order terms
# we don't want the error deltas to appear in the final result
J = J.subs([(dq0,0),(dq1,0),(dq2,0),(dq3,0)])
# define covariance matrix for quaternion states
P = create_symmetric_cov_matrix(4)
# discard off diagonals
P_diag = diag(P[0,0],P[1,1],P[2,2],P[3,3])
# rotate quaternion covariances into rotation vector state space
P_rot_vec = J * P_diag * J.transpose()
P_rot_vec_simple = cse(P_rot_vec, symbols("PS0:400"), optimizations='basic')
quat_code_generator = CodeGenerator("./generated/tilt_error_cov_mat_generated.cpp")
quat_code_generator.write_subexpressions(P_rot_vec_simple[0])
quat_code_generator.write_matrix(Matrix(P_rot_vec_simple[1]), "tiltErrCovMat", False, "[", "]")
quat_code_generator.close()
def generate_code():
print('Starting code generation:')
print('Creating symbolic variables ...')
dt = symbols("dt", real=True) # dt
g = symbols("g", real=True) # gravity constant
r_hor_vel = symbols("R_hor_vel", real=True) # horizontal velocity noise variance
r_ver_vel = symbols("R_vert_vel", real=True) # vertical velocity noise variance
r_hor_pos = symbols("R_hor_pos", real=True) # horizontal position noise variance
# inputs, integrated gyro measurements
# delta angle x y z
d_ang_x, d_ang_y, d_ang_z = symbols("dax day daz", real=True) # delta angle x
d_ang = Matrix([d_ang_x, d_ang_y, d_ang_z])
# inputs, integrated accelerometer measurements
# delta velocity x y z
d_v_x, d_v_y, d_v_z = symbols("dvx dvy dvz", real=True)
d_v = Matrix([d_v_x, d_v_y,d_v_z])
u = Matrix([d_ang, d_v])
# input noise
d_ang_x_var, d_ang_y_var, d_ang_z_var = symbols("daxVar dayVar dazVar", real=True)
d_v_x_var, d_v_y_var, d_v_z_var = symbols("dvxVar dvyVar dvzVar", real=True)
var_u = Matrix.diag(d_ang_x_var, d_ang_y_var, d_ang_z_var, d_v_x_var, d_v_y_var, d_v_z_var)
# define state vector
# attitude quaternion
qw, qx, qy, qz = symbols("q0 q1 q2 q3", real=True)
q = Matrix([qw,qx,qy,qz])
R_to_earth = quat2Rot(q)
R_to_body = R_to_earth.T
# velocity in NED local frame (north, east, down)
vx, vy, vz = symbols("vn ve vd", real=True)
v = Matrix([vx,vy,vz])
# position in NED local frame (north, east, down)
px, py, pz = symbols("pn pe pd", real=True)
p = Matrix([px,py,pz])
# delta angle bias x y z
d_ang_bx, d_ang_by, d_ang_bz = symbols("dax_b day_b daz_b", real=True)
d_ang_b = Matrix([d_ang_bx, d_ang_by, d_ang_bz])
d_ang_true = d_ang - d_ang_b
# delta velocity bias x y z
d_vel_bx, d_vel_by, d_vel_bz = symbols("dvx_b dvy_b dvz_b", real=True)
d_vel_b = Matrix([d_vel_bx, d_vel_by, d_vel_bz])
d_vel_true = d_v - d_vel_b
# earth magnetic field vector x y z
ix, iy, iz = symbols("magN magE magD", real=True)
i = Matrix([ix,iy,iz])
# earth magnetic field bias in body frame
ibx, iby, ibz = symbols("ibx iby ibz", real=True)
ib = Matrix([ibx,iby,ibz])
# wind in local NE frame (north, east)
wx, wy = symbols("vwn, vwe", real=True)
w = Matrix([wx,wy])
# state vector at arbitrary time t
state = Matrix([q, v, p, d_ang_b, d_vel_b, i, ib, w])
print('Defining state propagation ...')
# kinematic processes driven by IMU 'control inputs'
q_new = quat_mult(q, Matrix([1, 0.5 * d_ang_true[0], 0.5 * d_ang_true[1], 0.5 * d_ang_true[2]]))
v_new = v + R_to_earth * d_vel_true + Matrix([0,0,g]) * dt
p_new = p + v * dt
# static processes
d_ang_b_new = d_ang_b
d_vel_b_new = d_vel_b
i_new = i
ib_new = ib
w_new = w
# predicted state vector at time t + dt
state_new = Matrix([q_new, v_new, p_new, d_ang_b_new, d_vel_b_new, i_new, ib_new, w_new])
print('Computing state propagation jacobian ...')
A = state_new.jacobian(state)
G = state_new.jacobian(u)
P = create_symmetric_cov_matrix(24)
print('Computing covariance propagation ...')
P_new = A * P * A.T + G * var_u * G.T
for index in range(24):
for j in range(24):
if index > j:
P_new[index,j] = 0
print('Simplifying covariance propagation ...')
P_new_simple = cse(P_new, symbols("PS0:400"), optimizations='basic')
print('Writing covariance propagation to file ...')
cov_code_generator = CodeGenerator("./generated/covariance_generated.cpp")
cov_code_generator.print_string("Equations for covariance matrix prediction, without process noise!")
cov_code_generator.write_subexpressions(P_new_simple[0])
cov_code_generator.write_matrix(Matrix(P_new_simple[1]), "nextP", True, "[", "]")
cov_code_generator.close()
# derive autocode for other methods
print('Computing tilt error covariance matrix ...')
quaternion_error_propagation()
print('Generating heading observation code ...')
yaw_observation(P,state,R_to_earth)
print('Generating gps heading observation code ...')
gps_yaw_observation(P,state,R_to_body)
print('Generating mag observation code ...')
mag_observation_variance(P,state,R_to_body,i,ib)
mag_observation(P,state,R_to_body,i,ib)
print('Generating declination observation code ...')
declination_observation(P,state,ix,iy)
print('Generating airspeed observation code ...')
tas_observation(P,state,vx,vy,vz,wx,wy)
print('Generating sideslip observation code ...')
beta_observation(P,state,R_to_body,vx,vy,vz,wx,wy)
print('Generating optical flow observation code ...')
optical_flow_observation(P,state,R_to_body,vx,vy,vz)
print('Generating body frame velocity observation code ...')
body_frame_velocity_observation(P,state,R_to_body,vx,vy,vz)
print('Generating body frame acceleration observation code ...')
body_frame_accel_observation(P,state,R_to_body,vx,vy,vz,wx,wy)
print('Generating yaw estimator code ...')
yaw_estimator()
print('Code generation finished!')
if __name__ == "__main__":
generate_code()
|
lthall/Leonard_ardupilot
|
libraries/AP_NavEKF3/derivation/main.py
|
Python
|
gpl-3.0
| 29,240
|
[
"DIRAC"
] |
c55fc857465fd4806ab2df2e693339cd48146d482e5138511a7046e1ae75d174
|
"""
======================================
Plotting sensor layouts of MEG systems
======================================
In this example, sensor layouts of different MEG systems
are shown.
"""
# Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
from mayavi import mlab
import mne
from mne.io import read_raw_fif, read_raw_ctf, read_raw_bti, read_raw_kit
from mne.datasets import sample, spm_face
from mne.viz import plot_trans
print(__doc__)
bti_path = op.abspath(op.dirname(mne.__file__)) + '/io/bti/tests/data/'
kit_path = op.abspath(op.dirname(mne.__file__)) + '/io/kit/tests/data/'
raws = dict(
Neuromag=read_raw_fif(sample.data_path() +
'/MEG/sample/sample_audvis_raw.fif'),
CTF_275=read_raw_ctf(spm_face.data_path() +
'/MEG/spm/SPM_CTF_MEG_example_faces1_3D.ds'),
Magnes_3600wh=read_raw_bti(op.join(bti_path, 'test_pdf_linux'),
op.join(bti_path, 'test_config_linux'),
op.join(bti_path, 'test_hs_linux')),
KIT=read_raw_kit(op.join(kit_path, 'test.sqd')),
)
for system, raw in raws.items():
# We don't have coil definitions for KIT refs, so exclude them
ref_meg = False if system == 'KIT' else True
fig = plot_trans(raw.info, trans=None, dig=False, eeg_sensors=False,
meg_sensors=True, coord_frame='meg', ref_meg=ref_meg)
mlab.title(system)
|
jniediek/mne-python
|
examples/visualization/plot_meg_sensors.py
|
Python
|
bsd-3-clause
| 1,463
|
[
"Mayavi"
] |
0072f6f174412a50eebe9bfa512a3f99d1349ed525f9bf64ea3c96c746109c53
|
# IM - Infrastructure Manager
# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Base Class for the Contextualization Agent """
import time
import subprocess
import socket
import logging
import json
import os
import re
import yaml
from multiprocessing import Queue
from IM.SSH import SSH, AuthenticationException
class CtxtAgentBase:
""" Base Class for the Contextualization Agent """
SSH_WAIT_TIMEOUT = 600
PK_FILE = "/tmp/ansible_key"
# This value enables retrying the playbooks to avoid some SSH connectivity problems
# The minimum value is 1. This value will be in the data file generated by
# the ConfManager
PLAYBOOK_RETRIES = 1
INTERNAL_PLAYBOOK_RETRIES = 1
def __init__(self, conf_data_filename):
self.logger = None
self.conf_data_filename = conf_data_filename
def init_logger(self, log_file):
# Root logger: is used by paramiko
logging.basicConfig(filename=log_file,
level=logging.WARNING,
# format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
format='%(message)s',
datefmt='%m-%d-%Y %H:%M:%S')
# ctxt_agent logger
self.logger = logging.getLogger('ctxt_agent')
self.logger.setLevel(logging.DEBUG)
def wait_winrm_access(self, vm, max_wait=None):
"""
Test the WinRM access to the VM
"""
if max_wait is None:
max_wait = CtxtAgentBase.SSH_WAIT_TIMEOUT
delay = 10
wait = 0
last_tested_private = False
while wait < max_wait:
if 'ctxt_ip' in vm:
vm_ip = vm['ctxt_ip']
elif 'private_ip' in vm and not last_tested_private:
# First test the private one
vm_ip = vm['private_ip']
last_tested_private = True
else:
vm_ip = vm['ip']
last_tested_private = False
try:
self.logger.debug("Testing WinRM access to VM: " + vm_ip)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((vm_ip, vm['remote_port']))
except Exception:
self.logger.exception("Error connecting with WinRM with: " + vm_ip)
result = -1
if result == 0:
vm['ctxt_ip'] = vm_ip
return True
else:
wait += delay
time.sleep(delay)
def test_ssh(self, vm, vm_ip, remote_port, quiet, delay=10):
"""
Test the SSH access to the VM
return: a (success, res) tuple where res is 'init', 'new', 'pk_file' or None if it fails
"""
success = False
res = None
if not quiet:
self.logger.debug("Testing SSH access to VM: %s:%s" % (vm_ip, remote_port))
try:
ssh_client = SSH(vm_ip, vm['user'], vm['passwd'], vm['private_key'], remote_port)
success = ssh_client.test_connectivity(delay)
res = 'init'
except AuthenticationException:
try_ansible_key = True
if 'new_passwd' in vm:
try_ansible_key = False
# If the process of changing credentials has finished in the
# VM, we must use the new ones
if not quiet:
self.logger.debug("Error connecting with SSH with initial credentials with: " +
vm_ip + ". Try to use new ones.")
try:
ssh_client = SSH(vm_ip, vm['user'], vm['new_passwd'], vm['private_key'], remote_port)
success = ssh_client.test_connectivity()
res = "new"
except AuthenticationException:
try_ansible_key = True
except Exception:
if not quiet:
self.logger.exception("Error connecting with SSH with: " + vm_ip)
success = False
if try_ansible_key:
# In some very special cases the last two cases fail, so check
# if the ansible key works
if not quiet:
self.logger.debug("Error connecting with SSH with initial credentials with: " +
vm_ip + ". Try to ansible_key.")
try:
ssh_client = SSH(vm_ip, vm['user'], None, CtxtAgentBase.PK_FILE, remote_port)
success = ssh_client.test_connectivity()
res = 'pk_file'
except Exception:
if not quiet:
self.logger.exception("Error connecting with SSH with: " + vm_ip)
success = False
except Exception:
if not quiet:
self.logger.exception("Error connecting with SSH with: " + vm_ip)
success = False
return success, res
def wait_ssh_access(self, vm, delay=10, max_wait=None, quiet=False):
"""
Wait for SSH access to the VM
return: init, new or pk_file or None if it fails
"""
if max_wait is None:
max_wait = CtxtAgentBase.SSH_WAIT_TIMEOUT
wait = 0
success = False
res = None
while wait < max_wait:
if 'ctxt_ip' in vm and 'ctxt_port' in vm:
# These have been previously tested and worked, so use them
vm_ip = vm['ctxt_ip']
remote_port = vm['ctxt_port']
success, res = self.test_ssh(vm, vm['ctxt_ip'], vm['ctxt_port'], quiet)
else:
# First test the private one
if 'private_ip' in vm:
vm_ip = vm['private_ip']
remote_port = vm['remote_port']
success, res = self.test_ssh(vm, vm_ip, remote_port, quiet)
if not success and remote_port != 22:
remote_port = 22
success, res = self.test_ssh(vm, vm_ip, 22, quiet)
# if not use the default one
if not success:
vm_ip = vm['ip']
remote_port = vm['remote_port']
success, res = self.test_ssh(vm, vm_ip, remote_port, quiet)
if not success and remote_port != 22:
remote_port = 22
success, res = self.test_ssh(vm, vm_ip, remote_port, quiet)
# if not use the default one
if not success and 'reverse_port' in vm:
vm_ip = '127.0.0.1'
remote_port = vm['reverse_port']
success, res = self.test_ssh(vm, vm_ip, remote_port, quiet)
wait += delay
if success:
vm['ctxt_ip'] = vm_ip
vm['ctxt_port'] = remote_port
return res
else:
time.sleep(delay)
return None
@staticmethod
def run_command(command, timeout=None, poll_delay=5):
"""
Function to run a command
"""
try:
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
if timeout is not None:
wait = 0
while p.poll() is None and wait < timeout:
time.sleep(poll_delay)
wait += poll_delay
if p.poll() is None:
p.kill()
return "TIMEOUT"
(out, err) = p.communicate()
if p.returncode != 0:
return "ERROR: " + err + out
else:
return out
except Exception as ex:
return "ERROR: Exception msg: " + str(ex)
@staticmethod
def get_ssh(vm, pk_file, changed_pass=None):
"""
Get VM ssh connection
"""
private_key = vm['private_key']
if pk_file:
private_key = pk_file
return SSH(vm['ctxt_ip'], vm['user'], vm['passwd'], private_key, vm['ctxt_port'])
def removeRequiretty(self, vm, pk_file, changed_pass=None):
"""
Remove requiretty option from sudoers
"""
if not vm['master']:
self.logger.info("Removing requiretty to VM: " + vm['ip'])
try:
ssh_client = self.get_ssh(vm, pk_file, changed_pass)
# Activate tty mode to avoid some problems with sudo in RHEL
ssh_client.tty = True
sudo_pass = ""
if ssh_client.password:
sudo_pass = "echo '" + ssh_client.password + "' | "
res = ssh_client.execute_timeout(
sudo_pass + "sudo -S sed -i 's/.*requiretty$/#Defaults requiretty/' /etc/sudoers", 30)
if res is not None:
(stdout, stderr, code) = res
self.logger.debug("OUT: " + stdout + stderr)
return code == 0
else:
self.logger.error("No output.")
return False
except Exception:
self.logger.exception("Error removing requiretty to VM: " + vm['ip'])
return False
else:
return True
def replace_vm_ip(self, vm_data, rep=False):
"""
Update the Ctxt IP with the one that is actually working,
both in the inventory and in the general info file
"""
with open(self.conf_data_filename) as f:
general_conf_data = json.load(f)
for vm in general_conf_data['vms']:
if vm['id'] == vm_data['id']:
vm['ctxt_ip'] = vm_data['ctxt_ip']
vm['ctxt_port'] = vm_data['ctxt_port']
conf_data_filename = self.conf_data_filename
if rep:
conf_data_filename = conf_data_filename + ".rep"
with open(conf_data_filename, 'w+') as f:
json.dump(general_conf_data, f, indent=2)
# Now in the ansible inventory
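# Illustrative example with hypothetical values: a host line such as
#   "10.0.0.5_42 ansible_host=10.0.0.5 ansible_port=22 ..."
# is rewritten below to
#   "10.0.0.5_42 ansible_host=192.168.0.5 ansible_port=2222 ..."
# using the working ctxt_ip/ctxt_port of that VM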
filename = general_conf_data['conf_dir'] + "/hosts"
with open(filename) as f:
inventory_data = ""
for line in f:
if line.startswith("%s_%s " % (vm_data['ip'], vm_data['id'])):
line = re.sub(" ansible_host=%s " % vm_data['ip'],
" ansible_host=%s " % vm_data['ctxt_ip'], line)
line = re.sub(" ansible_ssh_host=%s " % vm_data['ip'],
" ansible_ssh_host=%s " % vm_data['ctxt_ip'], line)
line = re.sub(" ansible_port=%s " % vm_data['remote_port'],
" ansible_port=%s " % vm_data['ctxt_port'], line)
line = re.sub(" ansible_ssh_port=%s " % vm_data['remote_port'],
" ansible_ssh_port=%s " % vm_data['ctxt_port'], line)
inventory_data += line
with open(filename, 'w+') as f:
f.write(inventory_data)
def changeVMCredentials(self, vm, pk_file):
"""
Update VM credentials
"""
if vm['os'] == "windows":
if 'passwd' in vm and vm['passwd'] and 'new_passwd' in vm and vm['new_passwd']:
try:
import winrm
except Exception:
self.logger.exception("Error importing winrm.")
return False
try:
url = "https://" + vm['ip'] + ":5986"
s = winrm.Session(url, auth=(vm['user'], vm['passwd']), server_cert_validation='ignore')
r = s.run_cmd('net', ['user', vm['user'], vm['new_passwd']])
# this part of the code is never reached ...
if r.status_code == 0:
vm['passwd'] = vm['new_passwd']
return True
else:
self.logger.error("Error changing password to Windows VM: " + r.std_out)
return False
except winrm.exceptions.AuthenticationError:
# if the password is correctly changed the command returns this
# error
try:
# let's check that the new password works
s = winrm.Session(url, auth=(vm['user'], vm['new_passwd']), server_cert_validation='ignore')
r = s.run_cmd('echo', ['OK'])
if r.status_code == 0:
vm['passwd'] = vm['new_passwd']
return True
else:
self.logger.error("Error changing password to Windows VM: " + r.std_out)
return False
except Exception:
self.logger.exception("Error changing password to Windows VM: " + vm['ip'] + ".")
return False
except Exception:
self.logger.exception("Error changing password to Windows VM: " + vm['ip'] + ".")
return False
else: # Linux VMs
# Check if we must change user credentials in the VM
if 'passwd' in vm and vm['passwd'] and 'new_passwd' in vm and vm['new_passwd']:
self.logger.info("Changing password to VM: " + vm['ip'])
try:
ssh_client = self.get_ssh(vm, pk_file, False)
sudo_pass = ""
if ssh_client.password:
sudo_pass = "echo '" + ssh_client.password + "' | "
(out, err, code) = ssh_client.execute(sudo_pass + 'sudo -S bash -c \'echo "' +
vm['user'] + ':' + vm['new_passwd'] +
'" | /usr/sbin/chpasswd && echo "OK"\' 2> /dev/null')
except Exception:
self.logger.exception("Error changing password to VM: " + vm['ip'] + ".")
return False
if code == 0:
vm['passwd'] = vm['new_passwd']
return True
else:
self.logger.error("Error changing password to VM: " + vm['ip'] + ". " + out + err)
return False
if 'new_public_key' in vm and vm['new_public_key'] and 'new_private_key' in vm and vm['new_private_key']:
self.logger.info("Changing public key to VM: " + vm['ip'])
try:
ssh_client = self.get_ssh(vm, pk_file, False)
(out, err, code) = ssh_client.execute_timeout('echo ' + vm['new_public_key'] +
' >> .ssh/authorized_keys', 5)
except Exception:
self.logger.exception("Error changing public key to VM: " + vm['ip'] + ".")
return False
if code != 0:
self.logger.error("Error changing public key to VM:: " + vm['ip'] + ". " + out + err)
return False
else:
vm['private_key'] = vm['new_private_key']
return True
return False
@staticmethod
def add_nat_gateway_tasks(playbook):
"""
Add tasks to enable NAT (Tested in GCE instances)
https://cloud.google.com/vpc/docs/special-configurations
"""
play_dir = os.path.dirname(playbook)
play_filename = os.path.basename(playbook)
new_playbook = os.path.join(play_dir, "nat_" + play_filename)
with open(playbook) as f:
yaml_data = yaml.safe_load(f)
task = {"raw": ("sudo sysctl -w net.ipv4.ip_forward=1; "
"sudo iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE; "
"sudo iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE; "
"sudo iptables -t nat -D POSTROUTING -o ens4 -j MASQUERADE; "
"sudo iptables -t nat -A POSTROUTING -o ens4 -j MASQUERADE")}
task["name"] = "Activate NAT Gateway"
task["become"] = "yes"
task["ignore_errors"] = "yes"
yaml_data[0]['tasks'].append(task)
with open(new_playbook, 'w+') as f:
yaml.safe_dump(yaml_data, f)
return new_playbook
def install_ansible_modules(self, general_conf_data, playbook):
new_playbook = playbook
if 'ansible_modules' in general_conf_data and general_conf_data['ansible_modules']:
play_dir = os.path.dirname(playbook)
play_filename = os.path.basename(playbook)
new_playbook = os.path.join(play_dir, "mod_" + play_filename)
with open(playbook) as f:
yaml_data = yaml.safe_load(f)
galaxy_dependencies = []
needs_git = False
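# Illustrative note (hypothetical values): entries in 'ansible_modules' may take these forms,
# which the loop below maps to ansible-galaxy requirements:
#   "namespace.role"                      -> {"src": "namespace.role"}
#   "git+https://host/repo.git|myrole"    -> {"src": "git+https://host/repo.git", "name": "myrole"}
#   "https://host/role.tar.gz,v1.0.0"     -> {"src": "https://host/role.tar.gz", "version": "v1.0.0"}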
for galaxy_name in general_conf_data['ansible_modules']:
if galaxy_name:
self.logger.debug("Install %s with ansible-galaxy.", galaxy_name)
if galaxy_name.startswith("git"):
needs_git = True
parts = galaxy_name.split("|")
if len(parts) > 1:
url = parts[0]
rolename = parts[1]
dep = {"src": url, "name": rolename}
else:
url = rolename = galaxy_name
dep = {"src": url}
parts = url.split(",")
if len(parts) > 1:
url = parts[0]
version = parts[1]
dep = {"src": url, "version": version}
galaxy_dependencies.append(dep)
if needs_git:
task = {"package": "name=git state=present"}
task["name"] = "Install git"
task["become"] = "yes"
yaml_data[0]['tasks'].append(task)
if galaxy_dependencies:
now = str(int(time.time() * 100))
filename = "/tmp/galaxy_roles_%s.yml" % now
yaml_deps = yaml.safe_dump(galaxy_dependencies, default_flow_style=True)
self.logger.debug("Galaxy depencies file: %s" % yaml_deps)
task = {"copy": 'dest=%s content="%s"' % (filename, yaml_deps)}
task["name"] = "Create YAML file to install the roles with ansible-galaxy"
yaml_data[0]['tasks'].append(task)
task = {"command": "ansible-galaxy install -c -r %s" % filename}
task["name"] = "Install galaxy roles"
task["become"] = "yes"
yaml_data[0]['tasks'].append(task)
with open(new_playbook, 'w+') as f:
yaml.safe_dump(yaml_data, f)
return new_playbook
def LaunchAnsiblePlaybook(self, output, remote_dir, playbook_file, vm, threads, inventory_file,
pk_file, retries, change_pass_ok, vault_pass):
self.logger.debug('Call Ansible')
extra_vars = {'IM_HOST': vm['ip'] + "_" + str(vm['id'])}
user = None
if vm['os'] == "windows":
gen_pk_file = None
passwd = vm['passwd']
if 'new_passwd' in vm and vm['new_passwd'] and change_pass_ok:
passwd = vm['new_passwd']
else:
passwd = vm['passwd']
if 'new_passwd' in vm and vm['new_passwd'] and change_pass_ok:
passwd = vm['new_passwd']
if pk_file:
gen_pk_file = pk_file
else:
if vm['private_key'] and not vm['passwd']:
gen_pk_file = "/tmp/pk_" + vm['ip'] + ".pem"
pk_out = open(gen_pk_file, 'w')
pk_out.write(vm['private_key'])
pk_out.close()
os.chmod(gen_pk_file, 0o600)
else:
gen_pk_file = None
# Set local_tmp dir different for any VM
os.environ['DEFAULT_LOCAL_TMP'] = remote_dir + "/.ansible_tmp"
# it must be set before doing the import
from IM.ansible_utils.ansible_launcher import AnsibleThread
result = Queue()
t = AnsibleThread(result, output, playbook_file, threads, gen_pk_file,
passwd, retries, inventory_file, user, vault_pass, extra_vars)
t.start()
return (t, result)
|
indigo-dc/im
|
IM/CtxtAgentBase.py
|
Python
|
gpl-3.0
| 21,525
|
[
"Galaxy"
] |
c61eee070026176f966d0a8f86456eae41e1aa09c7ba58107548ad5308195fcf
|
# coding: utf-8
from __future__ import division, unicode_literals, print_function
"""
This module provides classes to run and analyze boltztrap on pymatgen band
structure objects. Boltztrap is a software interpolating band structures and
computing materials properties from this band structure using Boltzmann
semi-classical transport theory.
Boltztrap has been developed by Georg Madsen.
http://www.icams.de/content/departments/ams/madsen/boltztrap.html
You need version 1.2.3
References are::
Madsen, G. K. H., and Singh, D. J. (2006).
BoltzTraP. A code for calculating band-structure dependent quantities.
Computer Physics Communications, 175, 67-71
"""
__author__ = "Geoffroy Hautier"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Geoffroy Hautier"
__email__ = "geoffroy@uclouvain.be"
__status__ = "Development"
__date__ = "August 23, 2013"
import os
import math
import numpy as np
import tempfile
from pymatgen.core.lattice import Lattice
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.electronic_structure.dos import Dos, Spin, CompleteDos
from pymatgen.electronic_structure.core import Orbital
from pymatgen.electronic_structure.plotter import DosPlotter
from monty.os.path import which
from monty.dev import requires
from monty.json import jsanitize
from pymatgen.core.units import Energy, Length
from pymatgen.core.physical_constants import e, ELECTRON_MASS
import subprocess
try:
import matplotlib.pyplot as plt
except ImportError:
pass
class BoltztrapRunner():
"""
This class is used to run Boltztrap on a band structure object.
Args:
bs:
A band structure object
nelec:
the number of electrons
dos_type:
two options here for the band structure integration: "HISTO"
(histogram) or "TETRA" using the tetrahedon method. TETRA
gives typically better results especially for DOSes but takes
more time
energy_grid:
the energy steps used for the integration, in eV
lpfac:
the number of interpolation points in the real space. By
default 10 gives 10 times more points in the real space than
the number of kpoints given in reciprocal space
type:
type of Boltztrap run. By default "BOLTZ" computes transport coefficients;
"FERMI" can be used instead to compute the Fermi surface or, more precisely, to
get certain bands interpolated
soc:
results from spin-orbit coupling (soc) computations typically give non-polarized (no spin up or down)
results but 1 electron occupations. If the band structure comes from a soc computation, you should set
soc to True (default False)
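Example (illustrative sketch, not part of the original API docs; assumes a band
structure object `bs` and electron count `nelec` are already available):
runner = BoltztrapRunner(bs, nelec, dos_type="HISTO", lpfac=10)
path = runner.run(path_dir="/tmp/bt_run")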
"""
@requires(which('x_trans'),
"BoltztrapRunner requires the executables 'x_trans' to be in "
"the path. Please download the Boltztrap at "
"http://www.icams.de/content/departments/ams/madsen/boltztrap"
".html and follow the instructions in the README to compile "
"Bolztrap accordingly. Then add x_trans to your path")
def __init__(self, bs, nelec, dos_type="HISTO", energy_grid=0.005,
lpfac=10, type="BOLTZ", band_nb=None, tauref=0, tauexp=0, tauen=0, soc=False):
self.lpfac = lpfac
self._bs = bs
self._nelec = nelec
self.dos_type = dos_type
self.energy_grid = energy_grid
self.error = []
self.type = type
self.band_nb = band_nb
self.tauref=tauref
self.tauexp=tauexp
self.tauen=tauen
self.soc=soc
def _make_energy_file(self, file_name):
with open(file_name, 'w') as f:
f.write("test\n")
f.write(str(len(self._bs.kpoints))+"\n")
for i in range(len(self._bs.kpoints)):
tmp_eigs = []
for spin in self._bs._bands:
for j in range(int(math.floor(self._bs._nb_bands * 0.9))):
tmp_eigs.append(Energy(self._bs._bands[spin][j][i] -
self._bs.efermi, "eV").to("Ry"))
tmp_eigs.sort()
f.write("%12.8f %12.8f %12.8f %d\n"
% (self._bs.kpoints[i].frac_coords[0],
self._bs.kpoints[i].frac_coords[1],
self._bs.kpoints[i].frac_coords[2], len(tmp_eigs)))
for j in range(len(tmp_eigs)):
f.write("%18.8f\n" % float(tmp_eigs[j]))
def _make_struc_file(self, file_name):
sym = SpacegroupAnalyzer(self._bs._structure, symprec=0.01)
with open(file_name, 'w') as f:
f.write(self._bs._structure.composition.formula+" " +
str(sym.get_spacegroup_symbol())+"\n")
for i in range(3):
line = ''
for j in range(3):
line += "%12.5f" % (
Length(self._bs._structure.lattice.matrix[i][j],
"ang").to("bohr"))
f.write(line+'\n')
ops = sym.get_symmetry_dataset()['rotations']
f.write(str(len(ops))+"\n")
for c in ops:
f.write('\n'.join([' '.join([str(int(i)) for i in row])
for row in c]))
f.write('\n')
def _make_def_file(self, def_file_name):
with open(def_file_name,'w') as f:
so = ""
if self._bs.is_spin_polarized or self.soc:
so = "so"
f.write("5, 'boltztrap.intrans', 'old', 'formatted',0\n"+
"6,'boltztrap.outputtrans', 'unknown', 'formatted',0\n"+
"20,'boltztrap.struct', 'old', 'formatted',0\n"+
"10,'boltztrap.energy"+so+"', 'old', 'formatted',0\n"+
"48,'boltztrap.engre', 'unknown', 'unformatted',0\n"+
"49,'boltztrap.transdos', 'unknown', 'formatted',0\n"+
"50,'boltztrap.sigxx', 'unknown', 'formatted',0\n"+
"51,'boltztrap.sigxxx', 'unknown', 'formatted',0\n"+
"21,'boltztrap.trace', 'unknown', 'formatted',0\n"+
"22,'boltztrap.condtens', 'unknown', 'formatted',0\n"+
"24,'boltztrap.halltens', 'unknown', 'formatted',0\n"+
"30,'boltztrap_BZ.cube', 'unknown', 'formatted',0\n"+
"35,'boltztrap.banddat', 'unknown', 'formatted',0\n"+
"36,'boltztrap_band.gpl', 'unknown', 'formatted',0\n")
def _make_proj_files(self, file_name, def_file_name):
for o in Orbital.all_orbitals:
for site_nb in range(0, len(self._bs._structure.sites)):
if o in self._bs._projections[Spin.up][0][0]:
with open(file_name+"_"+str(site_nb)+"_"+str(o), 'w') as f:
f.write(self._bs._structure.composition.formula+"\n")
f.write(str(len(self._bs.kpoints))+"\n")
for i in range(len(self._bs.kpoints)):
tmp_proj = []
for spin in self._bs._bands:
for j in range(int(math.floor(self._bs._nb_bands * 0.9))):
tmp_proj.append(self._bs._projections[spin][j][i][o][site_nb])
#TODO deal with the sorting going on at the energy level!!!
f.write("%12.8f %12.8f %12.8f %d\n"
% (self._bs.kpoints[i].frac_coords[0],
self._bs.kpoints[i].frac_coords[1],
self._bs.kpoints[i].frac_coords[2], len(tmp_proj)))
for j in range(len(tmp_proj)):
f.write("%18.8f\n" % float(tmp_proj[j]))
with open(def_file_name,'w') as f:
so = ""
if self._bs.is_spin_polarized:
so = "so"
f.write("5, 'boltztrap.intrans', 'old', 'formatted',0\n"+
"6,'boltztrap.outputtrans', 'unknown', 'formatted',0\n"+
"20,'boltztrap.struct', 'old', 'formatted',0\n"+
"10,'boltztrap.energy"+so+"', 'old', 'formatted',0\n"+
"48,'boltztrap.engre', 'unknown', 'unformatted',0\n"+
"49,'boltztrap.transdos', 'unknown', 'formatted',0\n"+
"50,'boltztrap.sigxx', 'unknown', 'formatted',0\n"+
"51,'boltztrap.sigxxx', 'unknown', 'formatted',0\n"+
"21,'boltztrap.trace', 'unknown', 'formatted',0\n"+
"22,'boltztrap.condtens', 'unknown', 'formatted',0\n"+
"24,'boltztrap.halltens', 'unknown', 'formatted',0\n"+
"30,'boltztrap_BZ.cube', 'unknown', 'formatted',0\n"+
"35,'boltztrap.banddat', 'unknown', 'formatted',0\n"+
"36,'boltztrap_band.gpl', 'unknown', 'formatted',0\n")
i = 1000
for o in Orbital.all_orbitals:
for site_nb in range(0, len(self._bs._structure.sites)):
if o in self._bs._projections[Spin.up][0][0]:
f.write(str(i)+",\'"+file_name+"_"+str(site_nb)+"_"+str(o)
+ "\' \'old\', \'formatted\',0\n")
i += 1
def _make_intrans_file(self, file_name,
doping=[1e15, 1e16, 1e17, 1e18, 1e19, 1e20], type="BOLTZ", band_nb=None,
tauref=0, tauexp=0, tauen=0):
if type == "BOLTZ":
with open(file_name, 'w') as fout:
fout.write("GENE # use generic interface\n")
fout.write("1 0 0 0.0 # iskip (not presently used) idebug setgap shiftgap \n")
fout.write(
"0.0 %f 0.1 %6.1f # Fermilevel (Ry),energygrid,energy span around Fermilevel, "
"number of electrons\n"
% (Energy(self.energy_grid, "eV").to("Ry"), self._nelec))
fout.write("CALC # CALC (calculate expansion coeff), NOCALC read from file\n")
fout.write("%d # lpfac, number of latt-points per k-point\n" % self.lpfac)
fout.write("BOLTZ # run mode (only BOLTZ is supported)\n")
fout.write(".15 # (efcut) energy range of chemical potential\n")
fout.write("1300. 100. # Tmax, temperature grid\n")
fout.write("-1. # energyrange of bands given DOS output sig_xxx and dos_xxx (xxx is band number)\n")
fout.write(self.dos_type+"\n")
fout.write(str(tauref)+" "+str(tauexp)+" "+str(tauen)+" 0 0 0\n")
fout.write(str(2*len(doping))+"\n")
for d in doping:
fout.write(str(d)+"\n")
for d in doping:
fout.write(str(-d)+"\n")
elif type == "FERMI":
with open(file_name, 'w') as fout:
fout.write("GENE # use generic interface\n")
fout.write("1 0 0 0.0 # iskip (not presently used) idebug setgap shiftgap \n")
fout.write(
"0.0 %f 0.1 %6.1f # Fermilevel (Ry),energygrid,energy span around Fermilevel, "
"number of electrons\n"
% (Energy(self.energy_grid, "eV").to("Ry"), self._nelec))
fout.write("CALC # CALC (calculate expansion coeff), NOCALC read from file\n")
fout.write("%d # lpfac, number of latt-points per k-point\n" % self.lpfac)
fout.write("FERMI # run mode (only BOLTZ is supported)\n")
fout.write(str(band_nb+1))
def _make_all_files(self, path):
if self._bs.is_spin_polarized or self.soc:
self._make_energy_file(os.path.join(path, "boltztrap.energyso"))
else:
self._make_energy_file(os.path.join(path, "boltztrap.energy"))
self._make_struc_file(os.path.join(path, "boltztrap.struct"))
self._make_intrans_file(os.path.join(path, "boltztrap.intrans"), type=self.type, band_nb=self.band_nb)
self._make_def_file("BoltzTraP.def")
if len(self._bs._projections) != 0:
self._make_proj_files(os.path.join(path,"boltztrap.proj"), os.path.join(path, "BoltzTraP.def"))
def run(self, prev_sigma=None, path_dir=None, convergence=True):
if self.type == "FERMI":
convergence=False
dir_bz_name = "boltztrap"
path_dir_orig = path_dir
if path_dir is None:
temp_dir = tempfile.mkdtemp()
path_dir_orig = temp_dir
path_dir = os.path.join(temp_dir, dir_bz_name)
else:
path_dir = os.path.join(path_dir_orig, dir_bz_name)
if not os.path.exists(path_dir):
os.mkdir(path_dir)
else:
for c in os.listdir(path_dir):
os.remove(path_dir+"/"+c)
os.chdir(path_dir)
self._make_all_files(path_dir)
if self._bs.is_spin_polarized or self.soc:
p = subprocess.Popen(["x_trans", "BoltzTraP", "-so"],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
else:
p = subprocess.Popen(["x_trans", "BoltzTraP"],
stdout=subprocess.PIPE, stdin=subprocess.PIPE,
stderr=subprocess.PIPE)
p.wait()
for c in p.communicate():
if "STOP error in factorization" in c:
raise BoltztrapError("STOP error in factorization")
with open(os.path.join(path_dir, dir_bz_name+".outputtrans")) as f:
warning = False
for l in f:
if "WARNING" in l:
warning = True
break
if warning:
print("There was a warning! Increase lpfac to " + \
str(self.lpfac * 2))
self.lpfac *= 2
self._make_intrans_file(os.path.join(path_dir,
dir_bz_name + ".intrans"))
if self.lpfac > 100:
raise BoltztrapError("lpfac higher than 100 and still a warning")
self.run(path_dir_orig)
# here we check if the doping levels were well computed
# sometimes boltztrap messes this up because of too small energy grids
analyzer = BoltztrapAnalyzer.from_files(path_dir)
doping_ok = True
for doping in ['n', 'p']:
for c in analyzer.mu_doping[doping]:
if len(analyzer.mu_doping[doping][c]) != len(analyzer.doping[doping]):
doping_ok = False
break
if doping == 'p' and \
sorted(analyzer.mu_doping[doping][c], reverse=True) != analyzer.mu_doping[doping][c]:
doping_ok = False
break
if doping == 'n' and sorted(analyzer.mu_doping[doping][c]) != analyzer.mu_doping[doping][c]:
doping_ok = False
break
if not doping_ok:
self.energy_grid /= 10
print("lowers energy grid to " + str(self.energy_grid))
if self.energy_grid < 0.00005:
raise BoltztrapError("energy grid lower than 0.00005 and still no good doping")
self._make_intrans_file(path_dir + "/" + dir_bz_name + ".intrans")
self.run(prev_sigma=None, path_dir=path_dir_orig)
analyzer = BoltztrapAnalyzer.from_files(path_dir)
#here, we test if a property (eff_mass tensor) converges
if convergence is False:
return path_dir
if prev_sigma is None or \
abs(sum(analyzer.get_eig_average_eff_mass_tensor()['n']) / 3
- prev_sigma)\
/ prev_sigma > 0.05:
if prev_sigma is not None:
print((abs(sum(analyzer.get_eig_average_eff_mass_tensor()['n'])
/ 3 - prev_sigma) / prev_sigma, \
self.lpfac, \
analyzer.get_average_eff_mass_tensor(300, 1e18)))
self.lpfac *= 2
if self.lpfac > 100:
raise BoltztrapError("lpfac higher than 100 and still not converged")
self._make_intrans_file(path_dir + "/" + dir_bz_name + ".intrans")
self.run(
prev_sigma=sum(analyzer.get_eig_average_eff_mass_tensor()
['n']) / 3, path_dir=path_dir_orig)
return path_dir
class BoltztrapError(Exception):
"""
Exception class for boltztrap.
Raised when Boltztrap gives an error
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "BoltztrapError : " + self.msg
class BoltztrapAnalyzer():
"""
Class used to store all the data from a boltztrap run
"""
def __init__(self, gap, mu_steps, cond, seebeck, kappa, hall, doping,
mu_doping, seebeck_doping, cond_doping, kappa_doping,
hall_doping, dos, dos_partial, carrier_conc, vol, warning):
"""
Constructor taking directly all the data generated by Boltztrap. You
probably won't use it directly but will instead use the from_files and
from_dict methods.
Args:
gap: The gap after interpolation in eV
mu_steps: The steps of electron chemical potential (or Fermi
level) in eV.
cond: The electronic conductivity tensor divided by a constant
relaxation time (sigma/tau) at different temperature and
fermi levels.
The format is {temperature: [array of 3x3 tensors at each
fermi level in mu_steps]}. The units are 1/(Ohm*m*s).
seebeck: The Seebeck tensor at different temperatures and fermi
levels. The format is {temperature: [array of 3x3 tensors at
each fermi level in mu_steps]}. The units are V/K
kappa: The electronic thermal conductivity tensor divided by a
constant relaxation time (kappa/tau) at different temperature
and fermi levels. The format is {temperature: [array of 3x3
tensors at each fermi level in mu_steps]}
The units are W/(m*K*s)
hall: The hall tensor at different temperature and fermi levels
The format is {temperature: [array of 27 coefficients list at
each fermi level in mu_steps]}
The units are m^3/C
doping: The different doping levels that have been given to
Boltztrap. The format is {'p':[],'n':[]} with an array of
doping levels. The units are cm^-3
mu_doping: Gives the electron chemical potential (or Fermi level)
for a given set of doping.
Format is {'p': {temperature: [fermi levels]}, 'n': {temperature:
[fermi levels]}}
the fermi level array is ordered according to the doping
levels in doping; units for doping are cm^-3 and for Fermi
level eV
seebeck_doping: The Seebeck tensor at different temperatures and
doping levels. The format is {'p': {temperature: [Seebeck
tensors]}, 'n':{temperature: [Seebeck tensors]}}
The [Seebeck tensors] array is ordered according to the
doping levels in doping; units for doping are cm^-3 and for
Seebeck V/K
cond_doping: The electronic conductivity tensor divided by a
constant relaxation time (sigma/tau) at different
temperatures and doping levels
The format is {'p':{temperature: [conductivity tensors]},
'n':{temperature: [conductivity tensors]}}
The [conductivity tensors] array is ordered according to the
doping levels in doping; units for doping are cm^-3 and for
conductivity 1/(Ohm*m*s)
kappa_doping: The thermal conductivity tensor divided by a constant
relaxation time (kappa/tau) at different temperatures and
doping levels.
The format is {'p':{temperature: [thermal conductivity
tensors]},'n':{temperature: [thermal conductivity tensors]}}
The [thermal conductivity tensors] array is ordered according
to the doping levels in doping; units for doping are cm^-3
and for thermal conductivity W/(m*K*s)
hall_doping: The Hall tensor at different temperatures and doping
levels.
The format is {'p':{temperature: [Hall tensors]},
'n':{temperature: [Hall tensors]}}
The [Hall tensors] array is ordered according to the doping
levels in doping and each Hall tensor is represented by a 27
coefficients list.
The units are m^3/C
carrier_conc: The concentration of carriers in electron (or hole)
per unit cell
dos: The dos computed by Boltztrap given as a pymatgen Dos object
dos_partial: Data for the partial DOS projected on sites and
orbitals
vol: Volume of the unit cell in angstrom cube (A^3)
warning: True if Boltztrap emitted a warning
"""
self.gap = gap
self.mu_steps = mu_steps
self.cond = cond
self.seebeck = seebeck
self.kappa = kappa
self.hall = hall
self.warning = warning
self.doping = doping
self.mu_doping = mu_doping
self.seebeck_doping = seebeck_doping
self.cond_doping = cond_doping
self.kappa_doping = kappa_doping
self.hall_doping = hall_doping
self.carrier_conc = carrier_conc
self.dos = dos
self.vol = vol
self._dos_partial = dos_partial
@staticmethod
def _make_boltztrap_analyzer_from_data(
data_full, data_hall, data_dos, temperature_steps, mu_steps,
efermi, gap, doping, data_doping_full, data_doping_hall, vol,
warning=False):
"""
Make a BoltztrapAnalyzer object from raw data typically parse from
files.
"""
cond = {t: [] for t in temperature_steps}
seebeck = {t: [] for t in temperature_steps}
kappa = {t: [] for t in temperature_steps}
hall = {t: [] for t in temperature_steps}
carrier_conc = {t: [] for t in temperature_steps}
dos_full = {'energy': [], 'density': []}
warning = warning
new_doping = {'p': [], 'n': []}
for d in doping:
if d > 0:
new_doping['p'].append(d)
else:
new_doping['n'].append(-d)
mu_doping = {'p': {t: [] for t in temperature_steps},
'n': {t: [] for t in temperature_steps}}
seebeck_doping = {'p': {t: [] for t in temperature_steps},
'n': {t: [] for t in temperature_steps}}
cond_doping = {'p': {t: [] for t in temperature_steps},
'n': {t: [] for t in temperature_steps}}
kappa_doping = {'p': {t: [] for t in temperature_steps},
'n': {t: [] for t in temperature_steps}}
hall_doping = {'p': {t: [] for t in temperature_steps},
'n': {t: [] for t in temperature_steps}}
for d in data_full:
carrier_conc[d[1]].append(d[2])
tens_cond = [[d[3], d[4], d[5]],
[d[6], d[7], d[8]],
[d[9], d[10], d[11]]]
cond[d[1]].append(tens_cond)
tens_seebeck = [[d[12], d[13], d[14]],
[d[15], d[16], d[17]],
[d[18], d[19], d[20]]]
seebeck[d[1]].append(tens_seebeck)
tens_kappa = [[d[21], d[22], d[23]],
[d[24], d[25], d[26]],
[d[27], d[28], d[29]]]
kappa[d[1]].append(tens_kappa)
for d in data_hall:
hall_tens = d[3:]
hall[d[1]].append(hall_tens)
for d in data_doping_full:
tens_cond = [[d[2], d[3], d[4]],
[d[5], d[6], d[7]],
[d[8], d[9], d[10]]]
tens_seebeck = [[d[11], d[12], d[13]],
[d[14], d[15], d[16]],
[d[17], d[18], d[19]]]
tens_kappa = [[d[20], d[21], d[22]],
[d[23], d[24], d[25]],
[d[26], d[27], d[28]]]
if d[1] < 0:
mu_doping['n'][d[0]].append(Energy(d[-1], "Ry").to("eV"))
cond_doping['n'][d[0]].append(tens_cond)
seebeck_doping['n'][d[0]].append(tens_seebeck)
kappa_doping['n'][d[0]].append(tens_kappa)
else:
mu_doping['p'][d[0]].append(Energy(d[-1], "Ry").to("eV"))
cond_doping['p'][d[0]].append(tens_cond)
seebeck_doping['p'][d[0]].append(tens_seebeck)
kappa_doping['p'][d[0]].append(tens_kappa)
for i in range(len(data_doping_hall)):
hall_tens = [data_hall[i][j] for j in range(3, len(data_hall[i]))]
if data_doping_hall[i][1] < 0:
hall_doping['n'][data_doping_hall[i][0]].append(hall_tens)
else:
hall_doping['p'][data_doping_hall[i][0]].append(hall_tens)
for t in data_dos['total']:
dos_full['energy'].append(t[0])
dos_full['density'].append(t[1])
dos = Dos(efermi, dos_full['energy'], {Spin.up: dos_full['density']})
dos_partial = data_dos['partial']
return BoltztrapAnalyzer(
gap, mu_steps, cond, seebeck, kappa, hall, new_doping, mu_doping,
seebeck_doping, cond_doping, kappa_doping, hall_doping, dos, dos_partial, carrier_conc,
vol, warning)
def get_complete_dos(self, structure):
"""
Gives a CompleteDos object with the DOS from the interpolated projected band structure
Args:
the structure (necessary to identify sites for projection)
Returns:
a CompleteDos object
"""
pdoss = {}
for s in self._dos_partial:
if structure.sites[int(s)] not in pdoss:
pdoss[structure.sites[int(s)]] = {}
for o in self._dos_partial[s]:
if Orbital.from_string(o) not in pdoss[structure.sites[int(s)]]:
pdoss[structure.sites[int(s)]][Orbital.from_string(o)] = {}
pdoss[structure.sites[int(s)]][Orbital.from_string(o)][Spin.up] = self._dos_partial[s][o]
return CompleteDos(structure, total_dos=self.dos, pdoss=pdoss)
def get_mu_bounds(self, temp=300):
return min(self.mu_doping['p'][temp]), max(self.mu_doping['n'][temp])
def get_average_eff_mass_tensor(self, temperature=300, doping=1e18):
"""
Gives the average effective mass tensor at a given temperature and
doping level.
The average effective mass tensor is defined as the integrated
average of the second derivative
This effective mass tensor takes into account:
-non-parabolicity
-multiple extrema
-multiple bands
Args:
temperature: The temperature in K
doping: The doping in cm^-3
Returns:
a dictionary {'p':[[]],'n':[[]]}
The arrays are 3x3 and represent the effective mass tensor
The 'p' links to hole effective mass tensor and 'n' to electron
effective mass tensor.
"""
index = None
import math
results = {'p': [], 'n': []}
for t in ['n', 'p']:
for d in range(len(self.doping[t])):
if math.fabs(self.doping[t][d]-doping) < 0.001:
index = d
results[t] = np.linalg.inv(
self.cond_doping[t][temperature][index]) * doping \
* 10 ** 6 * e ** 2 / ELECTRON_MASS
return results
def get_eig_average_eff_mass_tensor(self, temperature=300, doping=1e18):
"""
Gives the eigenvalues of the average effective mass tensor at a given
temperature and doping level. The average effective mass tensor is
defined as the integrated average of the second derivative
This effective mass tensor takes into account:
-non-parabolicity
-multiple extrema
-multiple bands
Args:
temperature: The temperature in K
doping: The doping in cm^-3
Returns:
a dictionary {'p':[],'n':[]}
The list contains the sorted three eigenvalues of the symmetric
tensor. The 'p' links to hole effective mass tensor and 'n' to
electron effective mass tensor.
"""
return {'p':
sorted(np.linalg.eigh(self.get_average_eff_mass_tensor(
temperature=temperature, doping=doping)['p'])[0]),
'n':
sorted(np.linalg.eigh(self.get_average_eff_mass_tensor(
temperature=temperature, doping=doping)['n'])[0])}
def get_seebeck_eig(self):
"""
Gives the three eigenvalues of the seebeck tensor at different doping levels
Returns:
a dictionary {'p':[],'n':[]}
The list contains the sorted three eigenvalues of the symmetric
Seebeck tensor. The 'p' links to Seebeck at p-type doping and 'n' to
the Seebeck at n-type doping.
"""
full_tensor = self.seebeck_doping
result = {doping: {t: [] for t in self.seebeck_doping[doping]} for doping in self.seebeck_doping}
for doping in full_tensor:
for temp in full_tensor[doping]:
for i in range(len(self.doping[doping])):
result[doping][temp].append(sorted(np.linalg.eigh(full_tensor[doping][temp][i])[0]))
return result
@staticmethod
def from_files(path_dir):
"""
get a BoltztrapAnalyzer object from a set of files
Args:
path_dir: directory where the boltztrap files are
Returns:
a BoltztrapAnalyzer object
"""
t_steps = set()
m_steps = set()
gap = None
doping = []
data_doping_full = []
data_doping_hall = []
with open(os.path.join(path_dir, "boltztrap.condtens"), 'r') as f:
data_full = []
for line in f:
if not line.startswith("#"):
t_steps.add(int(float(line.split()[1])))
m_steps.add(float(line.split()[0]))
data_full.append([float(c) for c in line.split()])
with open(os.path.join(path_dir, "boltztrap.halltens"), 'r') as f:
data_hall = []
for line in f:
if not line.startswith("#"):
data_hall.append([float(c) for c in line.split()])
data_dos = {'total': [], 'partial':{}}
with open(os.path.join(path_dir, "boltztrap.transdos"), 'r') as f:
count_series = 0
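# Descriptive note: boltztrap.transdos may contain several data series separated by lines
# starting with " #"; only the first series (the total DOS) is kept here, and the loop
# breaks once a second series header is seen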
for line in f:
if not line.startswith(" #"):
data_dos['total'].append([Energy(float(line.split()[0]), "Ry").to("eV"),
float(line.split()[1])])
else:
count_series += 1
if count_series>1:
break
#data_dos['total'].append([float(line.split()[0]),
# float(line.split()[1])])
for file_name in os.listdir(path_dir):
if file_name.endswith("transdos") and file_name != 'boltztrap.transdos':
tokens = file_name.split(".")[1].split("_")
with open(os.path.join(path_dir, file_name), 'r') as f:
for line in f:
if not line.startswith(" #"):
if tokens[1] not in data_dos['partial']:
data_dos['partial'][tokens[1]] = {}
if tokens[2] not in data_dos['partial'][tokens[1]]:
data_dos['partial'][tokens[1]][tokens[2]] = []
data_dos['partial'][tokens[1]][tokens[2]].append(float(line.split()[1]))
with open(os.path.join(path_dir, "boltztrap.outputtrans"), 'r') as f:
warning = False
step = 0
for line in f:
if "WARNING" in line:
warning = True
if line.startswith("VBM"):
efermi = Energy(line.split()[1], "Ry").to("eV")
if step == 2:
l_tmp = line.split("-")[1:]
doping.extend([-float(d) for d in l_tmp])
step = 0
if step == 1:
doping.extend([float(d) for d in line.split()])
step = 2
if line.startswith("Doping levels to be output for") or \
line.startswith(" Doping levels to be output for"):
step = 1
if line.startswith("Egap:"):
gap = float(line.split()[1])
if len(doping) != 0:
with open(os.path.join(path_dir, "boltztrap.condtens_fixdoping"), 'r') as f:
for line in f:
if not line.startswith("#") and len(line) > 2:
data_doping_full.append([float(c)
for c in line.split()])
with open(os.path.join(path_dir, "boltztrap.halltens_fixdoping"), 'r') as f:
for line in f:
if not line.startswith("#") and len(line) > 2:
data_doping_hall.append([float(c) for c in line.split()])
with open(os.path.join(path_dir, "boltztrap.struct"), 'r') as f:
tokens = f.readlines()
vol = Lattice([[Length(float(tokens[i].split()[j]), "bohr").to("ang")
for j in range(3)] for i in range(1, 4)]).volume
return BoltztrapAnalyzer._make_boltztrap_analyzer_from_data(
data_full, data_hall, data_dos, sorted([t for t in t_steps]),
sorted([Energy(m, "Ry").to("eV") for m in m_steps]), efermi, Energy(gap, "Ry").to("eV"),
doping, data_doping_full, data_doping_hall, vol, warning)
def as_dict(self):
results = {'gap': self.gap,
'mu_steps': self.mu_steps,
'cond': self.cond,
'seebeck': self.seebeck,
'kappa': self.kappa,
'hall': self.hall,
'warning': self.warning, 'doping': self.doping,
'mu_doping': self.mu_doping,
'seebeck_doping': self.seebeck_doping,
'cond_doping': self.cond_doping,
'kappa_doping': self.kappa_doping,
'hall_doping': self.hall_doping,
'dos': self.dos.as_dict(),
'dos_partial': self._dos_partial,
'carrier_conc': self.carrier_conc,
'vol': self.vol}
return jsanitize(results)
@staticmethod
def from_dict(data):
def _make_float_array(a):
res = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
for i in range(3):
for j in range(3):
res[i][j] = float(a[i][j])
return res
def _make_float_hall(a):
return [i for i in a[:27]]
return BoltztrapAnalyzer(
float(data['gap']), [float(d) for d in data['mu_steps']],
{int(d): [_make_float_array(v) for v in data['cond'][d]]
for d in data['cond']},
{int(d): [_make_float_array(v) for v in data['seebeck'][d]]
for d in data['seebeck']},
{int(d): [_make_float_array(v) for v in data['kappa'][d]]
for d in data['kappa']},
{int(d): [_make_float_hall(v) for v in data['hall'][d]]
for d in data['hall']},
{'p': [float(d) for d in data['doping']['p']],
'n': [float(d) for d in data['doping']['n']]},
{'p': {int(d): [float(v) for v in data['mu_doping']['p'][d]]
for d in data['mu_doping']['p']},
'n': {int(d): [float(v) for v in data['mu_doping']['n'][d]]
for d in data['mu_doping']['n']}},
{'p': {int(d): [_make_float_array(v)
for v in data['seebeck_doping']['p'][d]]
for d in data['seebeck_doping']['p']},
'n': {int(d): [_make_float_array(v)
for v in data['seebeck_doping']['n'][d]]
for d in data['seebeck_doping']['n']}},
{'p': {int(d): [_make_float_array(v)
for v in data['cond_doping']['p'][d]]
for d in data['cond_doping']['p']},
'n': {int(d): [_make_float_array(v)
for v in data['cond_doping']['n'][d]]
for d in data['cond_doping']['n']}},
{'p': {int(d): [_make_float_array(v)
for v in data['kappa_doping']['p'][d]]
for d in data['kappa_doping']['p']},
'n': {int(d): [_make_float_array(v)
for v in data['kappa_doping']['n'][d]]
for d in data['kappa_doping']['n']}},
{'p': {int(d): [_make_float_hall(v)
for v in data['hall_doping']['p'][d]]
for d in data['hall_doping']['p']},
'n': {int(d): [_make_float_hall(v)
for v in data['hall_doping']['n'][d]]
for d in data['hall_doping']['n']}},
Dos.from_dict(data['dos']), data['dos_partial'], data['carrier_conc'],
data['vol'], data['warning'])
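# Minimal round-trip sketch (illustrative, not part of the original module):
# as_dict()/from_dict() above are intended as inverses, so an analyzer can be
# persisted as JSON and rebuilt later. The helper name and file name below are
# arbitrary examples, not pymatgen API.
def _example_analyzer_roundtrip(bza, path="boltztrap_analyzer.json"):
    """Write a BoltztrapAnalyzer to JSON and read it back (sketch only)."""
    import json
    with open(path, "w") as f:
        json.dump(bza.as_dict(), f)
    with open(path) as f:
        return BoltztrapAnalyzer.from_dict(json.load(f))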
class BoltztrapPlotter:
"""
Class containing methods to plot the data from BoltzTraP.
Args:
bz: a BoltztrapAnalyzer object
"""
def __init__(self, bz):
self._bz = bz
def _plot_doping(self, temp):
limit = 2.21e15
plt.axvline(self._bz.mu_doping['n'][temp][1], linewidth=3.0,
linestyle="--")
plt.text(self._bz.mu_doping['n'][temp][1] + 0.01,
limit,
"$n$=10$^{" + str(math.log10(self._bz.doping['n'][1])) + "}$",
color='b')
plt.axvline(self._bz.mu_doping['n'][temp][-1], linewidth=3.0,
linestyle="--")
plt.text(self._bz.mu_doping['n'][temp][-1] + 0.01,
limit,
"$n$=10$^{" + str(math.log10(self._bz.doping['n'][-1]))
+ "}$", color='b')
plt.axvline(self._bz.mu_doping['p'][temp][1], linewidth=3.0,
linestyle="--")
plt.text(self._bz.mu_doping['p'][temp][1] + 0.01,
limit,
"$p$=10$^{" + str(math.log10(self._bz.doping['p'][1])) + "}$",
color='b')
plt.axvline(self._bz.mu_doping['p'][temp][-1], linewidth=3.0,
linestyle="--")
plt.text(self._bz.mu_doping['p'][temp][-1] + 0.01,
limit, "$p$=10$^{" +
str(math.log10(self._bz.doping['p'][-1])) + "}$",
color='b')
def _plot_BG_limits(self):
plt.axvline(0.0, color='k', linewidth=3.0)
plt.axvline(self._bz.gap, color='k', linewidth=3.0)
def plot_seebeck(self, temp=300, xlim=None):
"""
Plot the Seebeck coefficient as a function of the Fermi level
Args:
temp: the temperature in K
xlim: a list of [min, max] Fermi energy in eV; defaults to (-0.5, gap + 0.5)
Returns:
a matplotlib object
"""
plt.plot(self._bz.mu_steps, [np.linalg.eigh(c)[0] * 1e6
for c in self._bz.seebeck[temp]],
linewidth=3.0)
self._plot_BG_limits()
self._plot_doping(temp)
plt.legend(['S$_1$', 'S$_2$', 'S$_3$'])
if xlim is None:
plt.xlim(-0.5, self._bz.gap + 0.5)
else:
plt.xlim(xlim[0], xlim[1])
plt.ylabel("Seebeck \n coefficient ($\mu$V/K)", fontsize=30.0)
plt.xlabel("E-E$_f$ (eV)", fontsize=30)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
return plt
def plot_conductivity(self, temp=300, tau=None, xlim=None):
"""
Plot the conductivity as a function of the Fermi level, on a semi-log scale
Args:
temp: the temperature in K
xlim: a list of [min, max] Fermi energy in eV; defaults to (-0.5, gap + 0.5)
tau: a relaxation time in s; if None (the default), the plot is in
units of relaxation time
Returns:
a matplotlib object
"""
if tau is None:
plt.semilogy(self._bz.mu_steps, [sorted(np.linalg.eigh(c)[0] * 0.01)
for c in self._bz.cond[temp]],
linewidth=3.0)
else:
plt.semilogy(self._bz.mu_steps, [sorted(np.linalg.eigh(c)[0] * 0.01 * tau)
for c in self._bz.cond[temp]],
linewidth=3.0)
self._plot_BG_limits()
self._plot_doping(temp)
plt.legend(['$\sigma_1$', '$\sigma_2$', '$\sigma_3$'])
if xlim is None:
plt.xlim(-0.5, self._bz.gap + 0.5)
else:
plt.xlim(xlim)
if tau is None:
plt.ylim([1e13, 1e20])
else:
plt.ylim([1e13*tau,1e20*tau])
if tau is None:
plt.ylabel("conductivity, $\sigma$/${\\tau}$ (1/($\Omega$ m s))",
fontsize=30.0)
else:
plt.ylabel("conductivity,\n $\sigma$ (1/($\Omega$ m))",
fontsize=30.0)
plt.xlabel("E-E$_f$ (eV)", fontsize=30.0)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
return plt
def plot_dos(self, sigma=0.05):
"""
Plot the density of states
Args:
sigma: a Gaussian smearing width in eV
Returns:
a matplotlib object
"""
plotter = DosPlotter(sigma=sigma)
plotter.add_dos("t", self._bz.dos)
return plotter.get_plot()
def plot_carriers(self, temp=300):
"""
Plot the carrier concentration as a function of the Fermi level
Args:
temp: the temperature in K
Returns:
a matplotlib object
"""
plt.semilogy(self._bz.mu_steps,
abs(self._bz.carrier_conc[temp]/(self._bz.vol*1e-24)),
linewidth=3.0, color='r')
self._plot_BG_limits()
self._plot_doping(temp)
plt.xlim(-0.5, self._bz.gap+0.5)
plt.ylim(1e14,1e22)
plt.ylabel("carrier concentration (cm-3)", fontsize=30.0)
plt.xlabel("E-E$_f$ (eV)", fontsize=30)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
return plt
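# Minimal usage sketch (illustrative, not part of the original module). It
# assumes the directory-parsing routine above is exposed as
# BoltztrapAnalyzer.from_files, and that "boltztrap_run" is a folder holding
# BoltzTraP output files such as boltztrap.outputtrans and boltztrap.struct;
# both names are assumptions for this example.
if __name__ == "__main__":
    bza = BoltztrapAnalyzer.from_files("boltztrap_run")
    plotter = BoltztrapPlotter(bza)
    plotter.plot_seebeck(temp=300).show()
    plotter.plot_conductivity(temp=300, tau=1e-14).show()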
|
Dioptas/pymatgen
|
pymatgen/electronic_structure/boltztrap.py
|
Python
|
mit
| 44,658
|
[
"BoltzTrap",
"pymatgen"
] |
8cf70500387577120e2186821c8d45c72e45630820d0d3fcba7996ddde0095ae
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for methods relating to sending emails."""
from __future__ import annotations
import datetime
import logging
import types
from core import feconf
from core.constants import constants
from core.domain import config_domain
from core.domain import config_services
from core.domain import email_manager
from core.domain import exp_domain
from core.domain import html_cleaner
from core.domain import question_domain
from core.domain import rights_domain
from core.domain import subscription_services
from core.domain import suggestion_services
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
(email_models, suggestion_models) = models.Registry.import_models(
[models.NAMES.email, models.NAMES.suggestion])
class FailedMLTest(test_utils.EmailTestBase):
"""Test that email functionality for sending failed ML Job emails
works.
"""
def setUp(self):
super(FailedMLTest, self).setUp()
self.can_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', True)
self.can_send_feedback_email_ctx = self.swap(
feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', True)
self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
self.login(self.CURRICULUM_ADMIN_EMAIL, is_super_admin=True)
config_property = config_domain.Registry.get_config_property(
'notification_user_ids_for_failed_tasks')
config_property.set_value(
'committer_id', [self.get_user_id_from_email(
self.CURRICULUM_ADMIN_EMAIL)])
def test_send_failed_ml_email(self):
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
# Make sure there are no emails already sent.
messages = self._get_sent_email_messages(feconf.ADMIN_EMAIL_ADDRESS)
self.assertEqual(len(messages), 0)
messages = (
self._get_sent_email_messages(self.CURRICULUM_ADMIN_EMAIL))
self.assertEqual(len(messages), 0)
# Send job failure email with mock Job ID.
email_manager.send_job_failure_email('123ABC')
# Make sure emails are sent.
messages = self._get_sent_email_messages(feconf.ADMIN_EMAIL_ADDRESS)
expected_subject = 'Failed ML Job'
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].subject, expected_subject)
messages = (
self._get_sent_email_messages(self.CURRICULUM_ADMIN_EMAIL))
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].subject, expected_subject)
class EmailToAdminTest(test_utils.EmailTestBase):
"""Test that emails are correctly sent to the admin."""
def test_email_to_admin_is_sent_correctly(self):
dummy_system_name = 'DUMMY_SYSTEM_NAME'
dummy_system_address = 'dummy@system.com'
dummy_admin_address = 'admin@system.com'
send_email_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True)
system_name_ctx = self.swap(
feconf, 'SYSTEM_EMAIL_NAME', dummy_system_name)
system_email_ctx = self.swap(
feconf, 'SYSTEM_EMAIL_ADDRESS', dummy_system_address)
admin_email_ctx = self.swap(
feconf, 'ADMIN_EMAIL_ADDRESS', dummy_admin_address)
with send_email_ctx, system_name_ctx, system_email_ctx, admin_email_ctx:
# Make sure there are no emails already sent.
messages = self._get_sent_email_messages(
feconf.ADMIN_EMAIL_ADDRESS)
self.assertEqual(len(messages), 0)
# Send an email to admin.
email_manager.send_mail_to_admin('Dummy Subject', 'Dummy Body')
# Make sure emails are sent.
messages = self._get_sent_email_messages(
feconf.ADMIN_EMAIL_ADDRESS)
self.assertEqual(len(messages), 1)
self.assertEqual(
messages[0].sender, 'DUMMY_SYSTEM_NAME <dummy@system.com>')
self.assertEqual(messages[0].to, ['admin@system.com'])
self.assertEqual(messages[0].subject, 'Dummy Subject')
self.assertIn('Dummy Body', messages[0].html)
class DummyMailTest(test_utils.EmailTestBase):
"""Test that emails are correctly sent to the testing email id."""
def test_sending_emails(self):
dummy_system_name = 'DUMMY_SYSTEM_NAME'
dummy_system_address = 'dummy@system.com'
dummy_receiver_address = 'admin@system.com'
send_email_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True)
system_name_ctx = self.swap(
feconf, 'SYSTEM_EMAIL_NAME', dummy_system_name)
system_email_ctx = self.swap(
feconf, 'SYSTEM_EMAIL_ADDRESS', dummy_system_address)
admin_email_ctx = self.swap(
feconf, 'ADMIN_EMAIL_ADDRESS', dummy_receiver_address)
with send_email_ctx, system_name_ctx, system_email_ctx, admin_email_ctx:
# Make sure there are no emails already sent.
messages = self._get_sent_email_messages(
dummy_receiver_address)
self.assertEqual(len(messages), 0)
# Send an email.
email_manager.send_dummy_mail_to_admin(dummy_system_name)
# Make sure emails are sent.
messages = self._get_sent_email_messages(dummy_receiver_address)
self.assertEqual(len(messages), 1)
self.assertEqual(
messages[0].sender, 'DUMMY_SYSTEM_NAME <dummy@system.com>')
self.assertEqual(messages[0].to, [dummy_receiver_address])
self.assertEqual(messages[0].subject, 'Test Mail')
self.assertIn(
'This is a test mail from DUMMY_SYSTEM_NAME', messages[0].html)
class EmailRightsTest(test_utils.GenericTestBase):
"""Test that only certain users can send certain types of emails."""
def setUp(self):
super(EmailRightsTest, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
self.moderator_id = self.get_user_id_from_email(self.MODERATOR_EMAIL)
self.set_moderators([self.MODERATOR_USERNAME])
self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)
self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])
def test_sender_id_validation(self):
sender_ids_to_test = [
feconf.SYSTEM_COMMITTER_ID, self.admin_id, self.moderator_id,
self.editor_id]
# These are given in the order of sender_ids_to_test.
expected_validation_results = {
feconf.EMAIL_INTENT_SIGNUP: (True, False, False, False),
feconf.EMAIL_INTENT_DAILY_BATCH: (True, False, False, False),
feconf.EMAIL_INTENT_MARKETING: (True, False, False, False),
feconf.EMAIL_INTENT_UNPUBLISH_EXPLORATION: (
True, False, True, False),
feconf.EMAIL_INTENT_DELETE_EXPLORATION: (
True, False, True, False),
}
for intent, results in expected_validation_results.items():
for ind, sender_id in enumerate(sender_ids_to_test):
if results[ind]:
email_manager.require_sender_id_is_valid(
intent, sender_id)
else:
with self.assertRaisesRegex(
Exception, 'Invalid sender_id'
):
email_manager.require_sender_id_is_valid(
intent, sender_id)
# Also test null and invalid intent strings.
with self.assertRaisesRegex(Exception, 'Invalid email intent string'):
email_manager.require_sender_id_is_valid(
'', feconf.SYSTEM_COMMITTER_ID)
with self.assertRaisesRegex(Exception, 'Invalid email intent string'):
email_manager.require_sender_id_is_valid(
'', self.admin_id)
with self.assertRaisesRegex(Exception, 'Invalid email intent string'):
email_manager.require_sender_id_is_valid(
'invalid_intent', feconf.SYSTEM_COMMITTER_ID)
with self.assertRaisesRegex(Exception, 'Invalid email intent string'):
email_manager.require_sender_id_is_valid(
'invalid_intent', self.admin_id)
class ExplorationMembershipEmailTests(test_utils.EmailTestBase):
"""Tests that sending exploration membership email works as expected."""
EXPLORATION_TITLE = 'Title'
def setUp(self):
super(ExplorationMembershipEmailTests, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
self.new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
self.exploration = self.save_new_default_exploration(
'A', self.editor_id, title=self.EXPLORATION_TITLE)
self.expected_email_subject = (
'%s - invitation to collaborate') % self.EXPLORATION_TITLE
self.can_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', True)
self.can_not_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', False)
self.can_send_editor_role_email_ctx = self.swap(
feconf, 'CAN_SEND_EDITOR_ROLE_EMAILS', True)
self.can_not_send_editor_role_email_ctx = self.swap(
feconf, 'CAN_SEND_EDITOR_ROLE_EMAILS', False)
def test_role_email_is_sent_when_editor_assigns_role(self):
with self.can_send_emails_ctx, self.can_send_editor_role_email_ctx:
self.login(self.EDITOR_EMAIL)
csrf_token = self.get_new_csrf_token()
self.put_json('%s/%s' % (
feconf.EXPLORATION_RIGHTS_PREFIX, self.exploration.id), {
'version': self.exploration.version,
'new_member_username': self.NEW_USER_USERNAME,
'new_member_role': rights_domain.ROLE_EDITOR,
}, csrf_token=csrf_token)
messages = self._get_sent_email_messages(self.NEW_USER_EMAIL)
self.assertEqual(len(messages), 1)
def test_email_is_not_sent_if_recipient_has_declined_such_emails(self):
user_services.update_email_preferences(
self.new_user_id, True, False, False, False)
with self.can_send_emails_ctx, self.can_send_editor_role_email_ctx:
email_manager.send_role_notification_email(
self.editor_id, self.new_user_id, rights_domain.ROLE_OWNER,
self.exploration.id, self.exploration.title)
messages = self._get_sent_email_messages(self.NEW_USER_EMAIL)
self.assertEqual(len(messages), 0)
def test_that_email_not_sent_if_can_send_emails_is_false(self):
with self.can_not_send_emails_ctx:
email_manager.send_role_notification_email(
self.editor_id, self.new_user_id, rights_domain.ROLE_OWNER,
self.exploration.id, self.exploration.title)
messages = self._get_sent_email_messages(self.NEW_USER_EMAIL)
self.assertEqual(len(messages), 0)
def test_that_email_not_sent_if_can_send_editor_role_emails_is_false(self):
with self.can_send_emails_ctx, self.can_not_send_editor_role_email_ctx:
email_manager.send_role_notification_email(
self.editor_id, self.new_user_id,
rights_domain.ROLE_EDITOR, self.exploration.id,
self.exploration.title)
messages = self._get_sent_email_messages(self.NEW_USER_EMAIL)
self.assertEqual(len(messages), 0)
def test_role_emails_sent_are_correct(self):
with self.can_send_emails_ctx, self.can_send_editor_role_email_ctx:
email_manager.send_role_notification_email(
self.editor_id, self.new_user_id, rights_domain.ROLE_VIEWER,
self.exploration.id, self.exploration.title)
messages = self._get_sent_email_messages(self.NEW_USER_EMAIL)
self.assertEqual(len(messages), 1)
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 1)
sent_email_model = all_models[0]
# Check that email details are correct.
self.assertEqual(sent_email_model.recipient_id, self.new_user_id)
self.assertEqual(
sent_email_model.recipient_email, self.NEW_USER_EMAIL)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'%s <%s>' % (
self.EDITOR_USERNAME, feconf.NOREPLY_EMAIL_ADDRESS))
self.assertEqual(
sent_email_model.intent,
feconf.EMAIL_INTENT_EDITOR_ROLE_NOTIFICATION)
self.assertEqual(
sent_email_model.subject, self.expected_email_subject)
def test_correct_rights_are_written_in_manager_role_email_body(self):
expected_email_html_body = (
'Hi newuser,<br>'
'<br>'
'<b>editor</b> has granted you manager rights to their '
'exploration, '
'"<a href="https://www.oppia.org/create/A">Title</a>", '
'on Oppia.org.<br>'
'<br>'
'This allows you to:<br>'
'<ul>'
'<li>Change the exploration permissions</li><br>'
'<li>Edit the exploration</li><br>'
'<li>View and playtest the exploration</li><br>'
'</ul>'
'You can find the exploration '
'<a href="https://www.oppia.org/create/A">here</a>.<br>'
'<br>'
'Thanks, and happy collaborating!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="http://localhost:8181/preferences">Preferences</a> page.')
expected_email_text_body = (
'Hi newuser,\n'
'\n'
'editor has granted you manager rights to their '
'exploration, "Title", on Oppia.org.\n'
'\n'
'This allows you to:\n'
'- Change the exploration permissions\n'
'- Edit the exploration\n'
'- View and playtest the exploration\n'
'You can find the exploration here.\n'
'\n'
'Thanks, and happy collaborating!\n'
'\n'
'Best wishes,\n'
'The Oppia Team\n'
'\n'
'You can change your email preferences via the Preferences page.')
with self.can_send_emails_ctx, self.can_send_editor_role_email_ctx:
# Check that correct email content is sent for Manager.
email_manager.send_role_notification_email(
self.editor_id, self.new_user_id, rights_domain.ROLE_OWNER,
self.exploration.id, self.exploration.title)
messages = self._get_sent_email_messages(self.NEW_USER_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
self.assertEqual(messages[0].body, expected_email_text_body)
def test_correct_rights_are_written_in_editor_role_email_body(self):
expected_email_html_body = (
'Hi newuser,<br>'
'<br>'
'<b>editor</b> has granted you editor rights to their '
'exploration, '
'"<a href="https://www.oppia.org/create/A">Title</a>"'
', on Oppia.org.<br>'
'<br>'
'This allows you to:<br>'
'<ul>'
'<li>Edit the exploration</li><br>'
'<li>View and playtest the exploration</li><br>'
'</ul>'
'You can find the exploration '
'<a href="https://www.oppia.org/create/A">here</a>.<br>'
'<br>'
'Thanks, and happy collaborating!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="http://localhost:8181/preferences">Preferences</a> page.')
expected_email_text_body = (
'Hi newuser,\n'
'\n'
'editor has granted you editor rights to their '
'exploration, "Title", on Oppia.org.\n'
'\n'
'This allows you to:\n'
'- Edit the exploration\n'
'- View and playtest the exploration\n'
'You can find the exploration here.\n'
'\n'
'Thanks, and happy collaborating!\n'
'\n'
'Best wishes,\n'
'The Oppia Team\n'
'\n'
'You can change your email preferences via the Preferences page.')
with self.can_send_emails_ctx, self.can_send_editor_role_email_ctx:
# Check that correct email content is sent for Editor.
email_manager.send_role_notification_email(
self.editor_id, self.new_user_id, rights_domain.ROLE_EDITOR,
self.exploration.id, self.exploration.title)
messages = self._get_sent_email_messages(self.NEW_USER_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
self.assertEqual(messages[0].body, expected_email_text_body)
def test_correct_rights_are_written_in_voice_artist_role_email_body(self):
expected_email_html_body = (
'Hi newuser,<br>'
'<br>'
'<b>editor</b> has granted you voice artist rights to their '
'exploration, '
'"<a href="https://www.oppia.org/create/A">Title</a>"'
', on Oppia.org.<br>'
'<br>'
'This allows you to:<br>'
'<ul>'
'<li>Voiceover the exploration</li><br>'
'<li>View and playtest the exploration</li><br>'
'</ul>'
'You can find the exploration '
'<a href="https://www.oppia.org/create/A">here</a>.<br>'
'<br>'
'Thanks, and happy collaborating!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="http://localhost:8181/preferences">Preferences</a> page.')
expected_email_text_body = (
'Hi newuser,\n'
'\n'
'editor has granted you voice artist rights to their '
'exploration, "Title", on Oppia.org.\n'
'\n'
'This allows you to:\n'
'- Voiceover the exploration\n'
'- View and playtest the exploration\n'
'You can find the exploration here.\n'
'\n'
'Thanks, and happy collaborating!\n'
'\n'
'Best wishes,\n'
'The Oppia Team\n'
'\n'
'You can change your email preferences via the Preferences page.')
with self.can_send_emails_ctx, self.can_send_editor_role_email_ctx:
# Check that correct email content is sent for Voice Artist.
email_manager.send_role_notification_email(
self.editor_id, self.new_user_id,
rights_domain.ROLE_VOICE_ARTIST, self.exploration.id,
self.exploration.title)
messages = self._get_sent_email_messages(self.NEW_USER_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
self.assertEqual(messages[0].body, expected_email_text_body)
def test_correct_rights_are_written_in_playtester_role_email_body(self):
expected_email_html_body = (
'Hi newuser,<br>'
'<br>'
'<b>editor</b> has granted you playtest access to their '
'exploration, '
'"<a href="https://www.oppia.org/create/A">Title</a>"'
', on Oppia.org.<br>'
'<br>'
'This allows you to:<br>'
'<ul>'
'<li>View and playtest the exploration</li><br>'
'</ul>'
'You can find the exploration '
'<a href="https://www.oppia.org/create/A">here</a>.<br>'
'<br>'
'Thanks, and happy collaborating!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="http://localhost:8181/preferences">Preferences</a> page.')
expected_email_text_body = (
'Hi newuser,\n'
'\n'
'editor has granted you playtest access to their '
'exploration, "Title", on Oppia.org.\n'
'\n'
'This allows you to:\n'
'- View and playtest the exploration\n'
'You can find the exploration here.\n'
'\n'
'Thanks, and happy collaborating!\n'
'\n'
'Best wishes,\n'
'The Oppia Team\n'
'\n'
'You can change your email preferences via the Preferences page.')
with self.can_send_emails_ctx, self.can_send_editor_role_email_ctx:
# Check that correct email content is sent for Playtester.
email_manager.send_role_notification_email(
self.editor_id, self.new_user_id, rights_domain.ROLE_VIEWER,
self.exploration.id, self.exploration.title)
messages = self._get_sent_email_messages(self.NEW_USER_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
self.assertEqual(messages[0].body, expected_email_text_body)
def test_correct_undefined_role_raises_an_exception(self):
with self.can_send_emails_ctx, self.can_send_editor_role_email_ctx:
# Check that an exception is raised when an invalid
# role is supplied.
with self.assertRaisesRegex(Exception, 'Invalid role'):
email_manager.send_role_notification_email(
self.editor_id, self.new_user_id, rights_domain.ROLE_NONE,
self.exploration.id, self.exploration.title)
class SignupEmailTests(test_utils.EmailTestBase):
"""Test that signup-email sending functionality works as expected."""
def setUp(self):
super(SignupEmailTests, self).setUp()
self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)
self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])
self.new_footer = (
'Unsubscribe from emails at your '
'<a href="https://www.site.com/prefs">Preferences page</a>.')
self.new_email_content = {
'subject': 'Welcome!',
'html_body': (
'Here is some HTML text.<br>'
'With a <b>bold</b> bit and an <i>italic</i> bit.<br>')
}
self.expected_text_email_content = (
'Hi editor,\n'
'\n'
'Here is some HTML text.\n'
'With a bold bit and an italic bit.\n'
'\n'
'\n'
'Unsubscribe from emails at your Preferences page.')
self.expected_html_email_content = (
'Hi editor,<br>'
'<br>'
'Here is some HTML text.<br>'
'With a <b>bold</b> bit and an <i>italic</i> bit.<br>'
'<br>'
'<br>'
'Unsubscribe from emails at your '
'<a href="https://www.site.com/prefs">Preferences page</a>.')
def test_email_not_sent_if_config_does_not_permit_it(self):
with self.swap(feconf, 'CAN_SEND_EMAILS', False):
config_services.set_property(
self.admin_id, email_manager.EMAIL_FOOTER.name,
self.new_footer)
config_services.set_property(
self.admin_id, email_manager.SIGNUP_EMAIL_CONTENT.name,
self.new_email_content)
self.login(self.EDITOR_EMAIL)
self.get_html_response(feconf.SIGNUP_URL + '?return_url=/')
csrf_token = self.get_new_csrf_token()
self.post_json(
feconf.SIGNUP_DATA_URL, {
'agreed_to_terms': True,
'username': self.EDITOR_USERNAME,
'default_dashboard': constants.DASHBOARD_TYPE_LEARNER
}, csrf_token=csrf_token)
# Check that no email was sent.
messages = self._get_sent_email_messages(self.EDITOR_EMAIL)
self.assertEqual(0, len(messages))
def test_email_not_sent_if_content_config_is_not_modified(self):
can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True)
logged_errors = []
def _log_error_for_tests(error_message):
"""Appends the error message to the logged errors list."""
logged_errors.append(error_message)
log_new_error_counter = test_utils.CallCounter(_log_error_for_tests)
log_new_error_ctx = self.swap(
email_manager, 'log_new_error', log_new_error_counter)
with can_send_emails_ctx, log_new_error_ctx:
self.assertEqual(log_new_error_counter.times_called, 0)
self.login(self.EDITOR_EMAIL)
self.get_html_response(feconf.SIGNUP_URL + '?return_url=/')
csrf_token = self.get_new_csrf_token()
# No user-facing error should surface.
self.post_json(
feconf.SIGNUP_DATA_URL, {
'agreed_to_terms': True,
'username': self.EDITOR_USERNAME,
'default_dashboard': constants.DASHBOARD_TYPE_LEARNER
}, csrf_token=csrf_token)
# However, an error should be recorded in the logs.
self.assertEqual(log_new_error_counter.times_called, 1)
self.assertEqual(
logged_errors[0],
'Please ensure that the value for the admin config property '
'SIGNUP_EMAIL_CONTENT is set, before allowing post-signup '
'emails to be sent.')
# Check that no email was sent.
messages = self._get_sent_email_messages(self.EDITOR_EMAIL)
self.assertEqual(0, len(messages))
def test_email_not_sent_if_content_config_is_partially_modified(self):
can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True)
config_services.set_property(
self.admin_id, email_manager.SIGNUP_EMAIL_CONTENT.name, {
'subject': (
email_manager.SIGNUP_EMAIL_CONTENT.default_value[
'subject']),
'html_body': 'New HTML body.',
})
logged_errors = []
def _log_error_for_tests(error_message):
"""Appends the error message to the logged errors list."""
logged_errors.append(error_message)
log_new_error_counter = test_utils.CallCounter(_log_error_for_tests)
log_new_error_ctx = self.swap(
email_manager, 'log_new_error', log_new_error_counter)
with can_send_emails_ctx, log_new_error_ctx:
self.assertEqual(log_new_error_counter.times_called, 0)
self.login(self.EDITOR_EMAIL)
self.get_html_response(feconf.SIGNUP_URL + '?return_url=/')
csrf_token = self.get_new_csrf_token()
# No user-facing error should surface.
self.post_json(
feconf.SIGNUP_DATA_URL, {
'agreed_to_terms': True,
'username': self.EDITOR_USERNAME,
'default_dashboard': constants.DASHBOARD_TYPE_LEARNER
}, csrf_token=csrf_token)
# However, an error should be recorded in the logs.
self.assertEqual(log_new_error_counter.times_called, 1)
self.assertEqual(
logged_errors[0],
'Please ensure that the value for the admin config property '
'SIGNUP_EMAIL_CONTENT is set, before allowing post-signup '
'emails to be sent.')
# Check that no email was sent.
messages = self._get_sent_email_messages(self.EDITOR_EMAIL)
self.assertEqual(0, len(messages))
def test_email_with_bad_content_is_not_sent(self):
can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True)
config_services.set_property(
self.admin_id, email_manager.SIGNUP_EMAIL_CONTENT.name, {
'subject': 'New email subject',
'html_body': 'New HTML body.<script>alert(3);</script>',
})
logged_errors = []
def _log_error_for_tests(error_message):
"""Appends the error message to the logged errors list."""
logged_errors.append(error_message)
log_new_error_counter = test_utils.CallCounter(_log_error_for_tests)
log_new_error_ctx = self.swap(
email_manager, 'log_new_error', log_new_error_counter)
with can_send_emails_ctx, log_new_error_ctx:
self.assertEqual(log_new_error_counter.times_called, 0)
self.login(self.EDITOR_EMAIL)
self.get_html_response(feconf.SIGNUP_URL + '?return_url=/')
csrf_token = self.get_new_csrf_token()
# No user-facing error should surface.
self.post_json(
feconf.SIGNUP_DATA_URL, {
'agreed_to_terms': True,
'username': self.EDITOR_USERNAME,
'default_dashboard': constants.DASHBOARD_TYPE_LEARNER
}, csrf_token=csrf_token)
# However, an error should be recorded in the logs.
self.assertEqual(log_new_error_counter.times_called, 1)
self.assertTrue(logged_errors[0].startswith(
'Original email HTML body does not match cleaned HTML body'))
# Check that no email was sent.
messages = self._get_sent_email_messages(self.EDITOR_EMAIL)
self.assertEqual(0, len(messages))
def test_contents_of_signup_email_are_correct(self):
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
config_services.set_property(
self.admin_id, email_manager.EMAIL_FOOTER.name,
self.new_footer)
config_services.set_property(
self.admin_id, email_manager.SIGNUP_EMAIL_CONTENT.name,
self.new_email_content)
config_services.set_property(
self.admin_id, email_manager.EMAIL_SENDER_NAME.name,
'Email Sender')
self.login(self.EDITOR_EMAIL)
self.get_html_response(feconf.SIGNUP_URL + '?return_url=/')
csrf_token = self.get_new_csrf_token()
self.post_json(
feconf.SIGNUP_DATA_URL, {
'agreed_to_terms': True,
'username': self.EDITOR_USERNAME,
'default_dashboard': constants.DASHBOARD_TYPE_LEARNER
}, csrf_token=csrf_token)
# Check that an email was sent with the correct content.
messages = self._get_sent_email_messages(self.EDITOR_EMAIL)
self.assertEqual(1, len(messages))
self.assertEqual(
messages[0].sender,
'Email Sender <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(messages[0].to, [self.EDITOR_EMAIL])
self.assertEqual(messages[0].subject, 'Welcome!')
self.assertEqual(messages[0].body, self.expected_text_email_content)
self.assertEqual(messages[0].html, self.expected_html_email_content)
def test_email_only_sent_once_for_repeated_signups_by_same_user(self):
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
config_services.set_property(
self.admin_id, email_manager.EMAIL_FOOTER.name,
self.new_footer)
config_services.set_property(
self.admin_id, email_manager.SIGNUP_EMAIL_CONTENT.name,
self.new_email_content)
self.login(self.EDITOR_EMAIL)
self.get_html_response(feconf.SIGNUP_URL + '?return_url=/')
csrf_token = self.get_new_csrf_token()
self.post_json(
feconf.SIGNUP_DATA_URL, {
'agreed_to_terms': True,
'username': self.EDITOR_USERNAME,
'default_dashboard': constants.DASHBOARD_TYPE_LEARNER
}, csrf_token=csrf_token)
# Check that an email was sent.
messages = self._get_sent_email_messages(self.EDITOR_EMAIL)
self.assertEqual(1, len(messages))
# Send a second POST request.
self.post_json(
feconf.SIGNUP_DATA_URL, {
'agreed_to_terms': True,
'username': self.EDITOR_USERNAME,
'default_dashboard': constants.DASHBOARD_TYPE_LEARNER
}, csrf_token=csrf_token)
# Check that no new email was sent.
messages = self._get_sent_email_messages(self.EDITOR_EMAIL)
self.assertEqual(1, len(messages))
def test_email_only_sent_if_signup_was_successful(self):
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
config_services.set_property(
self.admin_id, email_manager.EMAIL_FOOTER.name,
self.new_footer)
config_services.set_property(
self.admin_id, email_manager.SIGNUP_EMAIL_CONTENT.name,
self.new_email_content)
self.login(self.EDITOR_EMAIL)
self.get_html_response(feconf.SIGNUP_URL + '?return_url=/')
csrf_token = self.get_new_csrf_token()
self.post_json(
feconf.SIGNUP_DATA_URL,
{
'agreed_to_terms': True,
'username': 'BadUsername!!!',
'default_dashboard': constants.DASHBOARD_TYPE_LEARNER
},
csrf_token=csrf_token, expected_status_int=400)
# Check that no email was sent.
messages = self._get_sent_email_messages(self.EDITOR_EMAIL)
self.assertEqual(0, len(messages))
# Redo the signup process with a good username.
self.post_json(
feconf.SIGNUP_DATA_URL, {
'agreed_to_terms': True,
'username': self.EDITOR_USERNAME,
'default_dashboard': constants.DASHBOARD_TYPE_LEARNER
}, csrf_token=csrf_token)
# Check that a new email was sent.
messages = self._get_sent_email_messages(self.EDITOR_EMAIL)
self.assertEqual(1, len(messages))
def test_record_of_sent_email_is_written_to_datastore(self):
with self.swap(feconf, 'CAN_SEND_EMAILS', True):
config_services.set_property(
self.admin_id, email_manager.EMAIL_FOOTER.name,
self.new_footer)
config_services.set_property(
self.admin_id, email_manager.SIGNUP_EMAIL_CONTENT.name,
self.new_email_content)
config_services.set_property(
self.admin_id, email_manager.EMAIL_SENDER_NAME.name,
'Email Sender')
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 0)
self.login(self.EDITOR_EMAIL)
self.get_html_response(feconf.SIGNUP_URL + '?return_url=/')
csrf_token = self.get_new_csrf_token()
self.post_json(
feconf.SIGNUP_DATA_URL, {
'agreed_to_terms': True,
'username': self.EDITOR_USERNAME,
'default_dashboard': constants.DASHBOARD_TYPE_LEARNER
}, csrf_token=csrf_token)
# Check that a new email was sent.
messages = self._get_sent_email_messages(self.EDITOR_EMAIL)
self.assertEqual(1, len(messages))
# Check that the content of this email was recorded in
# SentEmailModel.
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 1)
# Check that the contents of the model are correct.
sent_email_model = all_models[0]
self.assertEqual(
sent_email_model.recipient_id,
self.get_user_id_from_email(self.EDITOR_EMAIL))
self.assertEqual(
sent_email_model.recipient_email, self.EDITOR_EMAIL)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Email Sender <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent, feconf.EMAIL_INTENT_SIGNUP)
self.assertEqual(sent_email_model.subject, 'Welcome!')
self.assertEqual(
sent_email_model.html_body, self.expected_html_email_content)
class DuplicateEmailTests(test_utils.EmailTestBase):
"""Test that duplicate emails are not sent."""
def setUp(self):
super(DuplicateEmailTests, self).setUp()
self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
self.new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
self.admin_id = self.get_user_id_from_email(self.CURRICULUM_ADMIN_EMAIL)
self.set_curriculum_admins([self.CURRICULUM_ADMIN_USERNAME])
self.new_footer = (
'You can change your email preferences via the '
'<a href="http://localhost:8181/preferences">Preferences</a> page.')
self.new_email_subject = 'THIS IS A PLACEHOLDER.'
self.new_email_html_body = 'Hi %s,<br><br>%s<br><br>%s' % (
self.NEW_USER_USERNAME,
'THIS IS A <b>PLACEHOLDER</b> AND SHOULD BE REPLACED.',
self.new_footer)
def _generate_hash_for_tests(
unused_cls, unused_recipient_id, unused_email_subject,
unused_email_body):
"""Returns the generated hash for tests."""
return 'Email Hash'
self.generate_hash_ctx = self.swap(
email_models.SentEmailModel, '_generate_hash',
types.MethodType(
_generate_hash_for_tests, email_models.SentEmailModel))
def test_send_email_does_not_resend_if_same_hash_exists(self):
can_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', True)
duplicate_email_ctx = self.swap(
feconf, 'DUPLICATE_EMAIL_INTERVAL_MINS', 1000)
logged_errors = []
def _log_error_for_tests(error_message):
"""Appends the error message to the logged errors list."""
logged_errors.append(error_message)
log_new_error_counter = test_utils.CallCounter(_log_error_for_tests)
log_new_error_ctx = self.swap(
email_manager, 'log_new_error', log_new_error_counter)
with can_send_emails_ctx, duplicate_email_ctx, log_new_error_ctx:
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 0)
cleaned_html_body = html_cleaner.clean(self.new_email_html_body)
raw_plaintext_body = cleaned_html_body.replace(
'<br/>', '\n').replace('<br>', '\n').replace(
'<li>', '<li>- ').replace('</p><p>', '</p>\n<p>')
cleaned_plaintext_body = html_cleaner.strip_html_tags(
raw_plaintext_body)
email_models.SentEmailModel.create(
self.new_user_id, self.NEW_USER_EMAIL,
feconf.SYSTEM_COMMITTER_ID, feconf.SYSTEM_EMAIL_ADDRESS,
feconf.EMAIL_INTENT_SIGNUP, self.new_email_subject,
cleaned_plaintext_body, datetime.datetime.utcnow())
# Check that the content of this email was recorded in
# SentEmailModel.
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 1)
email_manager.send_post_signup_email(
self.new_user_id, test_for_duplicate_email=True)
# An error should be recorded in the logs.
self.assertEqual(log_new_error_counter.times_called, 1)
self.assertRegex(logged_errors[0], 'Duplicate email')
# Check that a new email was not sent.
messages = self._get_sent_email_messages(self.NEW_USER_EMAIL)
self.assertEqual(0, len(messages))
# Check that the content of this email was not recorded in
# SentEmailModel.
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 1)
def test_send_email_does_not_resend_within_duplicate_interval(self):
can_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', True)
duplicate_email_ctx = self.swap(
feconf, 'DUPLICATE_EMAIL_INTERVAL_MINS', 2)
logged_errors = []
def _log_error_for_tests(error_message):
"""Appends the error message to the logged errors list."""
logged_errors.append(error_message)
log_new_error_counter = test_utils.CallCounter(_log_error_for_tests)
log_new_error_ctx = self.swap(
email_manager, 'log_new_error', log_new_error_counter)
with can_send_emails_ctx, duplicate_email_ctx, log_new_error_ctx:
config_services.set_property(
self.admin_id, email_manager.EMAIL_SENDER_NAME.name,
'Email Sender')
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 0)
email_manager._send_email( # pylint: disable=protected-access
self.new_user_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_SIGNUP, 'Email Subject', 'Email Body',
feconf.SYSTEM_EMAIL_ADDRESS)
# Check that a new email was sent.
messages = self._get_sent_email_messages(self.NEW_USER_EMAIL)
self.assertEqual(1, len(messages))
# Check that the content of this email was recorded in
# SentEmailModel.
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 1)
# No error should be recorded in the logs.
self.assertEqual(log_new_error_counter.times_called, 0)
email_manager._send_email( # pylint: disable=protected-access
self.new_user_id, feconf.SYSTEM_COMMITTER_ID,
feconf.EMAIL_INTENT_SIGNUP, 'Email Subject', 'Email Body',
feconf.SYSTEM_EMAIL_ADDRESS)
# An error should be recorded in the logs.
self.assertEqual(log_new_error_counter.times_called, 1)
self.assertRegex(logged_errors[0], 'Duplicate email')
# Check that a new email was not sent.
messages = self._get_sent_email_messages(self.NEW_USER_EMAIL)
self.assertEqual(1, len(messages))
# Check that the content of this email was not recorded in
# SentEmailModel.
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 1)
def test_sending_email_with_different_recipient_but_same_hash(self):
"""Hash for both messages is same but recipients are different."""
can_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', True)
duplicate_email_ctx = self.swap(
feconf, 'DUPLICATE_EMAIL_INTERVAL_MINS', 2)
with can_send_emails_ctx, duplicate_email_ctx, self.generate_hash_ctx:
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 0)
email_models.SentEmailModel.create(
'recipient_id', self.NEW_USER_EMAIL,
feconf.SYSTEM_COMMITTER_ID, feconf.SYSTEM_EMAIL_ADDRESS,
feconf.EMAIL_INTENT_SIGNUP, self.new_email_subject,
self.new_email_html_body, datetime.datetime.utcnow())
# Check that the content of this email was recorded in
# SentEmailModel.
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 1)
email_manager.send_post_signup_email(
self.new_user_id, test_for_duplicate_email=True)
# Check that a new email was sent.
messages = self._get_sent_email_messages(self.NEW_USER_EMAIL)
self.assertEqual(1, len(messages))
# Check that the content of this email was recorded in
# SentEmailModel.
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 2)
# Check that the contents of the model are correct.
sent_email_model1 = all_models[0]
sent_email_model2 = all_models[1]
self.assertEqual(
sent_email_model1.email_hash, sent_email_model2.email_hash)
self.assertNotEqual(
sent_email_model1.recipient_id, sent_email_model2.recipient_id)
self.assertEqual(
sent_email_model1.subject, sent_email_model2.subject)
self.assertEqual(
sent_email_model1.html_body, sent_email_model2.html_body)
def test_sending_email_with_different_subject_but_same_hash(self):
"""Hash for both messages is same but subjects are different."""
can_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', True)
duplicate_email_ctx = self.swap(
feconf, 'DUPLICATE_EMAIL_INTERVAL_MINS', 2)
with can_send_emails_ctx, duplicate_email_ctx, self.generate_hash_ctx:
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 0)
email_models.SentEmailModel.create(
self.new_user_id, self.NEW_USER_EMAIL,
feconf.SYSTEM_COMMITTER_ID, feconf.SYSTEM_EMAIL_ADDRESS,
feconf.EMAIL_INTENT_SIGNUP, '%s%s' % (
self.new_email_subject, 1), self.new_email_html_body,
datetime.datetime.utcnow())
# Check that the content of this email was recorded in
# SentEmailModel.
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 1)
email_manager.send_post_signup_email(
self.new_user_id, test_for_duplicate_email=True)
# Check that a new email was sent.
messages = self._get_sent_email_messages(self.NEW_USER_EMAIL)
self.assertEqual(1, len(messages))
# Check that the content of this email was recorded in
# SentEmailModel.
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 2)
# Check that the contents of the model are correct.
sent_email_model1 = all_models[0]
sent_email_model2 = all_models[1]
self.assertEqual(
sent_email_model1.email_hash, sent_email_model2.email_hash)
self.assertEqual(
sent_email_model1.recipient_id, sent_email_model2.recipient_id)
self.assertNotEqual(
sent_email_model1.subject, sent_email_model2.subject)
self.assertEqual(
sent_email_model1.html_body, sent_email_model2.html_body)
def test_sending_email_with_different_body_but_same_hash(self):
"""Hash for both messages is same but body is different."""
can_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', True)
duplicate_email_ctx = self.swap(
feconf, 'DUPLICATE_EMAIL_INTERVAL_MINS', 2)
with can_send_emails_ctx, duplicate_email_ctx, self.generate_hash_ctx:
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 0)
email_models.SentEmailModel.create(
self.new_user_id, self.NEW_USER_EMAIL,
feconf.SYSTEM_COMMITTER_ID, feconf.SYSTEM_EMAIL_ADDRESS,
feconf.EMAIL_INTENT_SIGNUP, self.new_email_subject,
'%s%s' % (self.new_email_html_body, 1),
datetime.datetime.utcnow())
# Check that the content of this email was recorded in
# SentEmailModel.
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 1)
email_manager.send_post_signup_email(
self.new_user_id, test_for_duplicate_email=True)
# Check that a new email was sent.
messages = self._get_sent_email_messages(self.NEW_USER_EMAIL)
self.assertEqual(1, len(messages))
# Check that the content of this email was recorded in
# SentEmailModel.
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 2)
# Check that the contents of the model are correct.
sent_email_model1 = all_models[0]
sent_email_model2 = all_models[1]
self.assertEqual(
sent_email_model1.email_hash, sent_email_model2.email_hash)
self.assertEqual(
sent_email_model1.recipient_id, sent_email_model2.recipient_id)
self.assertEqual(
sent_email_model1.subject, sent_email_model2.subject)
self.assertNotEqual(
sent_email_model1.html_body, sent_email_model2.html_body)
def test_duplicate_emails_are_sent_after_some_time_has_elapsed(self):
can_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', True)
duplicate_email_ctx = self.swap(
feconf, 'DUPLICATE_EMAIL_INTERVAL_MINS', 2)
with can_send_emails_ctx, duplicate_email_ctx:
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 0)
email_sent_time = (
datetime.datetime.utcnow() - datetime.timedelta(minutes=4))
email_models.SentEmailModel.create(
self.new_user_id, self.NEW_USER_EMAIL,
feconf.SYSTEM_COMMITTER_ID, feconf.SYSTEM_EMAIL_ADDRESS,
feconf.EMAIL_INTENT_SIGNUP, self.new_email_subject,
self.new_email_html_body, email_sent_time)
# Check that the content of this email was recorded in
# SentEmailModel.
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 1)
email_sent_time = (
datetime.datetime.utcnow() - datetime.timedelta(minutes=2))
email_models.SentEmailModel.create(
self.new_user_id, self.NEW_USER_EMAIL,
feconf.SYSTEM_COMMITTER_ID, feconf.SYSTEM_EMAIL_ADDRESS,
feconf.EMAIL_INTENT_SIGNUP, self.new_email_subject,
self.new_email_html_body, email_sent_time)
# Check that the content of this email was recorded in
# SentEmailModel.
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 2)
email_manager.send_post_signup_email(
self.new_user_id, test_for_duplicate_email=True)
# Check that a new email was sent.
messages = self._get_sent_email_messages(self.NEW_USER_EMAIL)
self.assertEqual(1, len(messages))
# Check that the content of this email was recorded in
# SentEmailModel.
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 3)
# Check that the contents of the model are correct.
sent_email_model1 = all_models[0]
sent_email_model2 = all_models[1]
sent_email_model3 = all_models[2]
self.assertEqual(
sent_email_model1.email_hash, sent_email_model2.email_hash)
self.assertEqual(
sent_email_model1.email_hash, sent_email_model3.email_hash)
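# The tests above exercise the duplicate-email guard. The helper below is only
# a conceptual sketch of that idea (hash the recipient/subject/body and skip a
# send if an identical hash was recorded within the allowed interval); it is
# not the email_manager implementation, and all names here are illustrative.
def _would_be_duplicate_sketch(
        sent_log, recipient_id, email_subject, email_body, interval_mins, now):
    """Returns True if an identical email appears in sent_log within the interval."""
    import datetime
    import hashlib
    email_hash = hashlib.sha256(
        ('%s:%s:%s' % (recipient_id, email_subject, email_body)).encode('utf-8')
    ).hexdigest()
    threshold = now - datetime.timedelta(minutes=interval_mins)
    return any(
        entry['hash'] == email_hash and entry['sent_at'] >= threshold
        for entry in sent_log)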
class FeedbackMessageBatchEmailTests(test_utils.EmailTestBase):
def setUp(self):
super(FeedbackMessageBatchEmailTests, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.exploration = self.save_new_default_exploration(
'A', self.editor_id, title='Title')
self.expected_email_subject = (
'You\'ve received 3 new messages on your explorations')
self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True)
self.can_not_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', False)
self.can_send_feedback_email_ctx = self.swap(
feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', True)
self.can_not_send_feedback_email_ctx = self.swap(
feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', False)
def test_email_not_sent_if_can_send_emails_is_false(self):
feedback_messages = {
self.exploration.id: {
'title': self.exploration.title,
'messages': ['Message 1.1', 'Message 1.2', 'Message 1.3']}
}
with self.can_not_send_emails_ctx:
email_manager.send_feedback_message_email(
self.editor_id, feedback_messages)
# Check that email is not sent.
messages = self._get_sent_email_messages(self.EDITOR_EMAIL)
self.assertEqual(len(messages), 0)
def test_email_not_sent_if_can_send_feedback_message_emails_is_false(self):
feedback_messages = {
self.exploration.id: {
'title': self.exploration.title,
'messages': ['Message 1.1', 'Message 1.2', 'Message 1.3']}
}
with self.can_send_emails_ctx, self.can_not_send_feedback_email_ctx:
email_manager.send_feedback_message_email(
self.editor_id, feedback_messages)
# Check that email is not sent.
messages = self._get_sent_email_messages(self.EDITOR_EMAIL)
self.assertEqual(len(messages), 0)
def test_that_email_not_sent_if_feedback_messages_are_empty(self):
feedback_messages = {}
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
email_manager.send_feedback_message_email(
self.editor_id, feedback_messages)
# Check that email is not sent.
messages = self._get_sent_email_messages(self.EDITOR_EMAIL)
self.assertEqual(len(messages), 0)
def test_correct_email_body_is_sent(self):
expected_email_html_body = (
'Hi editor,<br>'
'<br>'
'You\'ve received 3 new messages on your Oppia explorations:<br>'
'<ul>'
'<li><a href="https://www.oppia.org/create/A#/feedback">Title</a>:'
'<br>'
'<ul><li>Message 1.1<br></li>'
'<li>Message 1.2<br></li>'
'<li>Message 1.3<br></li>'
'</ul></li></ul>'
'You can view and reply to your messages from your '
'<a href="https://www.oppia.org/creator-dashboard">dashboard</a>.'
'<br>'
'<br>Thanks, and happy teaching!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="http://localhost:8181/preferences">Preferences</a> page.')
expected_email_text_body = (
'Hi editor,\n'
'\n'
'You\'ve received 3 new messages on your Oppia explorations:\n'
'- Title:\n'
'- Message 1.1\n'
'- Message 1.2\n'
'- Message 1.3\n'
'You can view and reply to your messages from your dashboard.\n'
'\n'
'Thanks, and happy teaching!\n'
'\n'
'Best wishes,\n'
'The Oppia Team\n'
'\n'
'You can change your email preferences via the Preferences page.')
feedback_messages = {
self.exploration.id: {
'title': self.exploration.title,
'messages': ['Message 1.1', 'Message 1.2', 'Message 1.3']}
}
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
email_manager.send_feedback_message_email(
self.editor_id, feedback_messages)
# Check that email body is correct.
messages = self._get_sent_email_messages(self.EDITOR_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
self.assertEqual(messages[0].body, expected_email_text_body)
# Check that email model is correct.
all_models = email_models.SentEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 1)
sent_email_model = all_models[0]
self.assertEqual(sent_email_model.recipient_id, self.editor_id)
self.assertEqual(
sent_email_model.recipient_email, self.EDITOR_EMAIL)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent,
feconf.EMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION)
self.assertEqual(
sent_email_model.subject, self.expected_email_subject)
class SuggestionEmailTests(test_utils.EmailTestBase):
def setUp(self):
super(SuggestionEmailTests, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
self.new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
self.exploration = self.save_new_default_exploration(
'A', self.editor_id, title='Title')
self.recipient_list = [self.editor_id]
self.can_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', True)
self.can_not_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', False)
self.can_send_feedback_email_ctx = self.swap(
feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', True)
self.can_not_send_feedback_email_ctx = self.swap(
feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', False)
def test_that_email_not_sent_if_can_send_emails_is_false(self):
with self.can_not_send_emails_ctx:
email_manager.send_suggestion_email(
self.exploration.title, self.exploration.id, self.new_user_id,
self.recipient_list)
# Check that email is not sent.
messages = self._get_sent_email_messages(
self.EDITOR_EMAIL)
self.assertEqual(len(messages), 0)
def test_email_not_sent_if_can_send_feedback_message_emails_is_false(self):
with self.can_send_emails_ctx, self.can_not_send_feedback_email_ctx:
email_manager.send_suggestion_email(
self.exploration.title, self.exploration.id, self.new_user_id,
self.recipient_list)
# Check that email is not sent.
messages = self._get_sent_email_messages(
self.EDITOR_EMAIL)
self.assertEqual(len(messages), 0)
def test_that_suggestion_emails_are_correct(self):
expected_email_subject = 'New suggestion for "Title"'
expected_email_html_body = (
'Hi editor,<br>'
'newuser has submitted a new suggestion for your Oppia '
'exploration, '
'<a href="https://www.oppia.org/create/A">"Title"</a>.<br>'
'You can accept or reject this suggestion by visiting the '
'<a href="https://www.oppia.org/create/A#/feedback">'
'feedback page</a> '
'for your exploration.<br>'
'<br>'
'Thanks!<br>'
'- The Oppia Team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="http://localhost:8181/preferences">Preferences</a> page.')
expected_email_text_body = (
'Hi editor,\n'
'newuser has submitted a new suggestion for your Oppia '
'exploration, "Title".\n'
'You can accept or reject this suggestion by visiting the '
'feedback page for your exploration.\n'
'\n'
'Thanks!\n'
'- The Oppia Team\n'
'\n'
'You can change your email preferences via the Preferences page.')
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
email_manager.send_suggestion_email(
self.exploration.title, self.exploration.id, self.new_user_id,
self.recipient_list)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.EDITOR_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
self.assertEqual(messages[0].body, expected_email_text_body)
# Make sure correct email model is stored.
all_models = email_models.SentEmailModel.get_all().fetch()
sent_email_model = all_models[0]
self.assertEqual(sent_email_model.subject, expected_email_subject)
self.assertEqual(sent_email_model.recipient_id, self.editor_id)
self.assertEqual(
sent_email_model.recipient_email, self.EDITOR_EMAIL)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent,
feconf.EMAIL_INTENT_SUGGESTION_NOTIFICATION)
class SubscriptionEmailTests(test_utils.EmailTestBase):
def setUp(self):
super(SubscriptionEmailTests, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
self.new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
self.exploration = self.save_new_default_exploration(
'A', self.editor_id, title='Title')
subscription_services.subscribe_to_creator(
self.new_user_id, self.editor_id)
self.can_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', True)
self.can_not_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', False)
self.can_send_subscription_email_ctx = self.swap(
feconf, 'CAN_SEND_SUBSCRIPTION_EMAILS', True)
self.can_not_send_subscription_email_ctx = self.swap(
feconf, 'CAN_SEND_SUBSCRIPTION_EMAILS', False)
def test_that_email_not_sent_if_can_send_emails_is_false(self):
with self.can_not_send_emails_ctx:
email_manager.send_emails_to_subscribers(
self.editor_id, self.exploration.id, self.exploration.title)
messages = self._get_sent_email_messages(self.NEW_USER_EMAIL)
self.assertEqual(len(messages), 0)
def test_that_email_not_sent_if_can_send_subscription_emails_is_false(self):
with self.can_send_emails_ctx, self.can_not_send_subscription_email_ctx:
email_manager.send_emails_to_subscribers(
self.editor_id, self.exploration.id, self.exploration.title)
messages = self._get_sent_email_messages(self.NEW_USER_EMAIL)
self.assertEqual(len(messages), 0)
def test_that_subscription_emails_are_correct(self):
expected_email_subject = 'editor has published a new exploration!'
expected_email_html_body = (
'Hi newuser,<br>'
'<br>'
'editor has published a new exploration! You can play it here: '
'<a href="https://www.oppia.org/explore/A">Title</a><br>'
'<br>'
'Thanks, and happy learning!<br>'
'<br>'
'Best wishes,<br>'
'- The Oppia Team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="http://localhost:8181/preferences">Preferences</a> page.')
expected_email_text_body = (
'Hi newuser,\n'
'\n'
'editor has published a new exploration! You can play it here: '
'Title\n'
'\n'
'Thanks, and happy learning!\n'
'\n'
'Best wishes,\n'
'- The Oppia Team\n'
'\n'
'You can change your email preferences via the Preferences page.')
with self.can_send_emails_ctx, self.can_send_subscription_email_ctx:
email_manager.send_emails_to_subscribers(
self.editor_id, self.exploration.id, self.exploration.title)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.NEW_USER_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
self.assertEqual(messages[0].body, expected_email_text_body)
# Make sure correct email model is stored.
all_models = email_models.SentEmailModel.get_all().fetch()
sent_email_model = all_models[0]
self.assertEqual(sent_email_model.subject, expected_email_subject)
self.assertEqual(sent_email_model.recipient_id, self.new_user_id)
self.assertEqual(
sent_email_model.recipient_email, self.NEW_USER_EMAIL)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent,
            feconf.EMAIL_INTENT_SUBSCRIPTION_NOTIFICATION)


class FeedbackMessageInstantEmailTests(test_utils.EmailTestBase):
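    """Tests for the instant emails sent when a new message is posted on a
    feedback thread.
    """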
def setUp(self):
super(FeedbackMessageInstantEmailTests, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
self.new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
self.exploration = self.save_new_default_exploration(
'A', self.editor_id, title='Title')
self.recipient_list = [self.editor_id]
self.can_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', True)
self.can_not_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', False)
self.can_send_feedback_email_ctx = self.swap(
feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', True)
self.can_not_send_feedback_email_ctx = self.swap(
feconf, 'CAN_SEND_FEEDBACK_MESSAGE_EMAILS', False)
def test_email_not_sent_if_can_send_emails_is_false(self):
with self.can_not_send_emails_ctx:
email_manager.send_instant_feedback_message_email(
self.new_user_id, self.editor_id, 'editor message',
'New Oppia message in "a subject"', self.exploration.title,
self.exploration.id, 'a subject')
            # Make sure no email is sent.
messages = self._get_sent_email_messages(self.NEW_USER_EMAIL)
self.assertEqual(len(messages), 0)
def test_email_not_sent_if_can_send_feedback_message_emails_is_false(self):
with self.can_send_emails_ctx, self.can_not_send_feedback_email_ctx:
email_manager.send_instant_feedback_message_email(
self.new_user_id, self.editor_id, 'editor message',
'New Oppia message in "a subject"', self.exploration.title,
self.exploration.id, 'a subject')
            # Make sure no email is sent.
messages = self._get_sent_email_messages(self.NEW_USER_EMAIL)
self.assertEqual(len(messages), 0)
def test_that_feedback_message_emails_are_correct(self):
expected_email_subject = 'New Oppia message in "a subject"'
expected_email_html_body = (
'Hi newuser,<br><br>'
'New update to thread "a subject" on '
'<a href="https://www.oppia.org/create/A#/feedback">Title</a>:<br>'
'<ul><li>editor: editor message<br></li></ul>'
'(You received this message because you are a '
'participant in this thread.)<br><br>'
'Best wishes,<br>'
'The Oppia team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="http://localhost:8181/preferences">Preferences</a> page.')
expected_email_text_body = (
'Hi newuser,\n'
'\n'
'New update to thread "a subject" on Title:\n'
'- editor: editor message\n'
'(You received this message because you are a'
' participant in this thread.)\n'
'\n'
'Best wishes,\n'
'The Oppia team\n'
'\n'
'You can change your email preferences via the Preferences page.')
with self.can_send_emails_ctx, self.can_send_feedback_email_ctx:
email_manager.send_instant_feedback_message_email(
self.new_user_id, self.editor_id, 'editor message',
'New Oppia message in "a subject"', self.exploration.title,
self.exploration.id, 'a subject')
# Make sure correct email is sent.
            messages = self._get_sent_email_messages(self.NEW_USER_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
self.assertEqual(messages[0].body, expected_email_text_body)
# Make sure correct email model is stored.
all_models = email_models.SentEmailModel.get_all().fetch()
sent_email_model = all_models[0]
self.assertEqual(sent_email_model.subject, expected_email_subject)
self.assertEqual(sent_email_model.recipient_id, self.new_user_id)
self.assertEqual(
sent_email_model.recipient_email, self.NEW_USER_EMAIL)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent,
            feconf.EMAIL_INTENT_FEEDBACK_MESSAGE_NOTIFICATION)


class FlagExplorationEmailTest(test_utils.EmailTestBase):
"""Test that emails are sent to moderators when explorations are flagged."""
def setUp(self):
super(FlagExplorationEmailTest, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
self.new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
self.moderator_id = self.get_user_id_from_email(self.MODERATOR_EMAIL)
self.moderator2_email = 'moderator2@example.com'
self.moderator2_username = 'moderator2'
self.signup(self.moderator2_email, self.moderator2_username)
self.moderator2_id = self.get_user_id_from_email(self.moderator2_email)
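        # Make both users moderators so that each of them receives the
        # flagged-exploration email.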
self.set_moderators([self.moderator2_username, self.MODERATOR_USERNAME])
self.exploration = self.save_new_default_exploration(
'A', self.editor_id, title='Title')
self.owner_ids = [self.editor_id]
self.report_text = 'AD'
self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True)
self.can_not_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', False)
def test_that_email_not_sent_if_can_send_emails_is_false(self):
with self.can_not_send_emails_ctx:
email_manager.send_flag_exploration_email(
self.exploration.title, self.exploration.id, self.new_user_id,
self.report_text)
            # Make sure no email is sent.
messages = self._get_sent_email_messages(self.MODERATOR_EMAIL)
self.assertEqual(len(messages), 0)
def test_that_flag_exploration_emails_are_correct(self):
expected_email_subject = 'Exploration flagged by user: "Title"'
expected_email_html_body = (
'Hello Moderator,<br>'
'newuser has flagged exploration "Title" on the following '
'grounds: <br>'
'AD .<br>'
'You can modify the exploration by clicking '
'<a href="https://www.oppia.org/create/A">'
'here</a>.<br>'
'<br>'
'Thanks!<br>'
'- The Oppia Team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="http://localhost:8181/preferences">Preferences</a> page.')
expected_email_text_body = (
'Hello Moderator,\n'
'newuser has flagged exploration "Title" on the following '
'grounds: \n'
'AD .\n'
'You can modify the exploration by clicking here.\n'
'\n'
'Thanks!\n'
'- The Oppia Team\n'
'\n'
'You can change your email preferences via the Preferences page.')
with self.can_send_emails_ctx:
email_manager.send_flag_exploration_email(
self.exploration.title, self.exploration.id, self.new_user_id,
self.report_text)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.MODERATOR_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
self.assertEqual(messages[0].body, expected_email_text_body)
# Make sure correct email is sent to multiple moderators.
messages = self._get_sent_email_messages(self.moderator2_email)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
self.assertEqual(messages[0].body, expected_email_text_body)
# Make sure correct email models are stored.
all_models = email_models.SentEmailModel.get_all().fetch()
sent_email_model = next(
m for m in all_models if m.recipient_id == self.moderator_id)
self.assertEqual(sent_email_model.subject, expected_email_subject)
self.assertEqual(
sent_email_model.recipient_email, self.MODERATOR_EMAIL)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent, feconf.EMAIL_INTENT_REPORT_BAD_CONTENT)
sent_email_model = next(
m for m in all_models if m.recipient_id == self.moderator2_id)
self.assertEqual(sent_email_model.subject, expected_email_subject)
self.assertEqual(
sent_email_model.recipient_email, self.moderator2_email)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
            sent_email_model.intent, feconf.EMAIL_INTENT_REPORT_BAD_CONTENT)


class OnboardingReviewerInstantEmailTests(test_utils.EmailTestBase):
"""Test that correct email is sent while onboarding reviewers."""
REVIEWER_USERNAME = 'reviewer'
REVIEWER_EMAIL = 'reviewer@example.com'
def setUp(self):
super(OnboardingReviewerInstantEmailTests, self).setUp()
self.signup(self.REVIEWER_EMAIL, self.REVIEWER_USERNAME)
self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL)
user_services.update_email_preferences(
self.reviewer_id, True, False, False, False)
self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True)
self.can_not_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', False)
def test_that_email_not_sent_if_can_send_emails_is_false(self):
with self.can_not_send_emails_ctx:
email_manager.send_mail_to_onboard_new_reviewers(
self.reviewer_id, 'Algebra')
            # Make sure no email is sent.
messages = self._get_sent_email_messages(self.REVIEWER_EMAIL)
self.assertEqual(len(messages), 0)
def test_that_correct_completion_email_is_sent(self):
expected_email_subject = 'Invitation to review suggestions'
expected_email_html_body = (
'Hi reviewer,<br><br>'
'Thank you for actively contributing high-quality suggestions for '
'Oppia\'s lessons in Algebra, and for helping to make these lessons'
' better for students around the world!<br><br>'
'In recognition of your contributions, we would like to invite you'
' to become one of Oppia\'s reviewers. As a reviewer, you will be '
'able to review suggestions in Algebra, and contribute to helping '
'ensure that any edits made to lessons preserve the lessons\' '
'quality and are beneficial for students.<br><br>'
'If you\'d like to help out as a reviewer, please visit your '
'<a href="https://www.oppia.org/creator-dashboard/">dashboard</a>. '
'and set your review preferences accordingly. Note that, if you '
'accept,you will receive occasional emails inviting you to review '
'incoming suggestions by others.<br><br>'
'Again, thank you for your contributions to the Oppia '
'community!<br>'
'- The Oppia Team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="http://localhost:8181/preferences">Preferences</a> page.')
with self.can_send_emails_ctx:
email_manager.send_mail_to_onboard_new_reviewers(
self.reviewer_id, 'Algebra')
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.REVIEWER_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
all_models = email_models.SentEmailModel.get_all().fetch()
sent_email_model = all_models[0]
self.assertEqual(sent_email_model.subject, expected_email_subject)
self.assertEqual(sent_email_model.recipient_id, self.reviewer_id)
self.assertEqual(
sent_email_model.recipient_email, self.REVIEWER_EMAIL)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
            sent_email_model.intent, feconf.EMAIL_INTENT_ONBOARD_REVIEWER)


class NotifyReviewerInstantEmailTests(test_utils.EmailTestBase):
"""Test that correct email is sent while notifying reviewers."""
REVIEWER_USERNAME = 'reviewer'
REVIEWER_EMAIL = 'reviewer@example.com'
def setUp(self):
super(NotifyReviewerInstantEmailTests, self).setUp()
self.signup(self.REVIEWER_EMAIL, self.REVIEWER_USERNAME)
self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL)
user_services.update_email_preferences(
self.reviewer_id, True, False, False, False)
self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True)
self.can_not_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', False)
def test_that_email_not_sent_if_can_send_emails_is_false(self):
with self.can_not_send_emails_ctx:
email_manager.send_mail_to_notify_users_to_review(
self.reviewer_id, 'Algebra')
            messages = self._get_sent_email_messages(self.REVIEWER_EMAIL)
self.assertEqual(len(messages), 0)
def test_that_correct_completion_email_is_sent(self):
expected_email_subject = 'Notification to review suggestions'
expected_email_html_body = (
'Hi reviewer,<br><br>'
'Just a heads-up that there are new suggestions to '
'review in Algebra, which you are registered as a reviewer for.'
'<br><br>Please take a look at and accept/reject these suggestions '
'at your earliest convenience. You can visit your '
'<a href="https://www.oppia.org/creator-dashboard/">dashboard</a> '
'to view the list of suggestions that need a review.<br><br>'
'Thank you for helping improve Oppia\'s lessons!'
'- The Oppia Team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="http://localhost:8181/preferences">Preferences</a> page.')
with self.can_send_emails_ctx:
email_manager.send_mail_to_notify_users_to_review(
self.reviewer_id, 'Algebra')
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.REVIEWER_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
all_models = email_models.SentEmailModel.get_all().fetch()
sent_email_model = all_models[0]
self.assertEqual(sent_email_model.subject, expected_email_subject)
self.assertEqual(sent_email_model.recipient_id, self.reviewer_id)
self.assertEqual(
sent_email_model.recipient_email, self.REVIEWER_EMAIL)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent,
            feconf.EMAIL_INTENT_REVIEW_CREATOR_DASHBOARD_SUGGESTIONS)


class NotifyContributionDashboardReviewersEmailTests(test_utils.EmailTestBase):
"""Tests the send_mail_to_notify_contributor_dashboard_reviewers method,
which sends an email to reviewers with information regarding the suggestions
that have waited the longest for review.
"""
target_id = 'exp1'
skill_id = 'skill_123456'
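    # Timestamp used as the submission datetime for the suggestions created
    # in these tests.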
mocked_review_submission_datetime = datetime.datetime(2020, 6, 15, 5)
AUTHOR_USERNAME = 'author'
AUTHOR_EMAIL = 'author@example.com'
REVIEWER_1_USERNAME = 'reviewer1'
REVIEWER_1_EMAIL = 'reviewer1@community.org'
REVIEWER_2_USERNAME = 'reviewer2'
REVIEWER_2_EMAIL = 'reviewer2@community.org'
def _create_translation_suggestion_in_lang_with_html_and_datetime(
self, language_code, translation_html, submission_datetime):
"""Creates a translation suggestion in the given language_code with the
given translation html and submission datetime.
"""
add_translation_change_dict = {
'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
'state_name': feconf.DEFAULT_INIT_STATE_NAME,
'content_id': feconf.DEFAULT_NEW_STATE_CONTENT_ID,
'language_code': language_code,
'content_html': feconf.DEFAULT_INIT_STATE_CONTENT_STR,
'translation_html': translation_html,
'data_format': 'html'
}
translation_suggestion = suggestion_services.create_suggestion(
feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT,
feconf.ENTITY_TYPE_EXPLORATION,
self.target_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
self.author_id, add_translation_change_dict,
'test description')
translation_suggestion.last_updated = submission_datetime
return translation_suggestion
def _create_question_suggestion_with_question_html_and_datetime(
self, question_html, submission_datetime):
"""Creates a question suggestion with the given question html and
submission datetime.
"""
with self.swap(
feconf, 'DEFAULT_INIT_STATE_CONTENT_STR', question_html):
add_question_change_dict = {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': constants.DEFAULT_LANGUAGE_CODE,
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': ['skill_1'],
'inapplicable_skill_misconception_ids': ['skillid12345-1']
},
'skill_id': self.skill_id,
'skill_difficulty': 0.3
}
question_suggestion = suggestion_services.create_suggestion(
feconf.SUGGESTION_TYPE_ADD_QUESTION,
feconf.ENTITY_TYPE_SKILL,
self.skill_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
self.author_id, add_question_change_dict,
'test description')
question_suggestion.last_updated = submission_datetime
return question_suggestion
def _create_reviewable_suggestion_email_infos_from_suggestions(
self, suggestions):
"""Creates a list of ReviewableSuggestionEmailInfo objects from
the given suggestions.
"""
return [
(
suggestion_services
.create_reviewable_suggestion_email_info_from_suggestion(
suggestion)
) for suggestion in suggestions
]
def _assert_email_data_stored_in_sent_email_model_is_correct(
self, expected_email_html_body, reviewer_id, reviewer_email):
"""Asserts that the created sent email model from the sent email
contains the right information.
"""
sent_email_models = email_models.SentEmailModel.get_all().filter(
email_models.SentEmailModel.recipient_id == reviewer_id).fetch()
self.assertEqual(len(sent_email_models), 1)
sent_email_model = sent_email_models[0]
self.assertEqual(
sent_email_model.subject,
email_manager
.CONTRIBUTOR_DASHBOARD_REVIEWER_NOTIFICATION_EMAIL_DATA[
'email_subject'])
self.assertEqual(
sent_email_model.recipient_id, reviewer_id)
self.assertEqual(
sent_email_model.recipient_email, reviewer_email)
self.assertEqual(
sent_email_model.html_body, expected_email_html_body)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent,
feconf.EMAIL_INTENT_REVIEW_CONTRIBUTOR_DASHBOARD_SUGGESTIONS)
def _log_error_for_tests(self, error_message):
"""Appends the error message to the logged errors list."""
self.logged_errors.append(error_message)
def _mock_logging_info(self, msg, *args):
"""Mocks logging.info() by appending the log message to the logged info
list.
"""
self.logged_info.append(msg % args)
def setUp(self):
super(NotifyContributionDashboardReviewersEmailTests, self).setUp()
self.signup(self.AUTHOR_EMAIL, self.AUTHOR_USERNAME)
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
self.signup(self.REVIEWER_1_EMAIL, self.REVIEWER_1_USERNAME)
self.reviewer_1_id = self.get_user_id_from_email(self.REVIEWER_1_EMAIL)
user_services.update_email_preferences(
self.reviewer_1_id, True, False, False, False)
self.signup(self.REVIEWER_2_EMAIL, self.REVIEWER_2_USERNAME)
self.reviewer_2_id = self.get_user_id_from_email(self.REVIEWER_2_EMAIL)
user_services.update_email_preferences(
self.reviewer_2_id, True, False, False, False)
self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True)
self.cannot_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', False)
self.logged_errors = []
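        # Wrap the error logger in a call counter so that tests can assert
        # how many errors were logged.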
self.log_new_error_counter = test_utils.CallCounter(
self._log_error_for_tests)
self.log_new_error_ctx = self.swap(
email_manager, 'log_new_error', self.log_new_error_counter)
self.logged_info = []
self.log_new_info_ctx = self.swap(
logging, 'info', self._mock_logging_info)
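        # Create the exploration and skill that the suggestions in these
        # tests target.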
self.save_new_valid_exploration(self.target_id, self.author_id)
self.save_new_skill(self.skill_id, self.author_id)
question_suggestion = (
self._create_question_suggestion_with_question_html_and_datetime(
'<p>What is the meaning of life?</p>',
self.mocked_review_submission_datetime))
self.reviewable_suggestion_email_info = (
suggestion_services
.create_reviewable_suggestion_email_info_from_suggestion(
question_suggestion))
def test_email_not_sent_if_can_send_emails_is_false(self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
with self.cannot_send_emails_ctx, self.log_new_error_ctx:
email_manager.send_mail_to_notify_contributor_dashboard_reviewers(
[self.reviewer_1_id], [[self.reviewable_suggestion_email_info]]
)
messages = self._get_all_sent_email_messages()
self.assertEqual(len(messages), 0)
self.assertEqual(self.log_new_error_counter.times_called, 1)
self.assertEqual(
self.logged_errors[0], 'This app cannot send emails to users.')
def test_email_not_sent_if_reviewer_notifications_is_disabled(self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', False)
with self.can_send_emails_ctx, self.log_new_error_ctx:
email_manager.send_mail_to_notify_contributor_dashboard_reviewers(
[self.reviewer_1_id], [[self.reviewable_suggestion_email_info]]
)
messages = self._get_all_sent_email_messages()
self.assertEqual(len(messages), 0)
self.assertEqual(self.log_new_error_counter.times_called, 1)
self.assertEqual(
self.logged_errors[0],
'The "contributor_dashboard_reviewer_emails_is_enabled" property '
'must be enabled on the admin config page in order to send '
'reviewers the emails.')
def test_email_not_sent_if_reviewer_email_does_not_exist(self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
with self.can_send_emails_ctx, self.log_new_error_ctx:
email_manager.send_mail_to_notify_contributor_dashboard_reviewers(
['reviewer_id_with_no_email'],
[[self.reviewable_suggestion_email_info]]
)
messages = self._get_all_sent_email_messages()
self.assertEqual(len(messages), 0)
self.assertEqual(self.log_new_error_counter.times_called, 1)
self.assertEqual(
self.logged_errors[0],
'There was no email for the given reviewer id: '
'reviewer_id_with_no_email.')
def test_email_not_sent_if_no_reviewers_to_notify(self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
with self.can_send_emails_ctx, self.log_new_error_ctx:
email_manager.send_mail_to_notify_contributor_dashboard_reviewers(
[], [[self.reviewable_suggestion_email_info]]
)
messages = self._get_all_sent_email_messages()
self.assertEqual(len(messages), 0)
self.assertEqual(self.log_new_error_counter.times_called, 1)
self.assertEqual(
self.logged_errors[0],
'No Contributor Dashboard reviewers to notify.')
def test_email_not_sent_if_no_suggestions_to_notify_the_reviewer_about(
self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
with self.can_send_emails_ctx, self.log_new_info_ctx:
email_manager.send_mail_to_notify_contributor_dashboard_reviewers(
[self.reviewer_1_id], [[]]
)
messages = self._get_all_sent_email_messages()
self.assertEqual(len(messages), 0)
self.assertEqual(
self.logged_info[0],
'There were no suggestions to recommend to the reviewer with user '
'id: %s.' % self.reviewer_1_id)
def test_email_sent_to_reviewer_with_question_waiting_a_day_for_review(
self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
question_suggestion = (
self._create_question_suggestion_with_question_html_and_datetime(
'<p>What is the meaning of life?</p>',
self.mocked_review_submission_datetime))
reviewable_suggestion_email_info = (
suggestion_services
.create_reviewable_suggestion_email_info_from_suggestion(
question_suggestion))
review_wait_time = 1
mocked_datetime_for_utcnow = (
reviewable_suggestion_email_info.submission_datetime +
datetime.timedelta(days=review_wait_time))
expected_email_html_body = (
'Hi reviewer1,'
'<br><br>'
'There are new review opportunities that we think you might be '
'interested in on the <a href="%s%s">Contributor Dashboard</a>. '
'Here are some examples of contributions that have been waiting '
'the longest for review:<br><br>'
'<ul>'
'<li>The following question suggestion was submitted for review 1 '
'day ago:'
'<br>What is the meaning of life?</li><br>'
'</ul><br>'
'Please take some time to review any of the above contributions '
'(if they still need a review) or any other contributions on the '
'dashboard. We appreciate your help!'
'<br><br>'
'Thanks again, and happy reviewing!<br>'
'- The Oppia Contributor Dashboard Team'
'<br><br>%s' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
email_manager.EMAIL_FOOTER.value))
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_contributor_dashboard_reviewers(
[self.reviewer_1_id],
[[reviewable_suggestion_email_info]])
)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.REVIEWER_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.reviewer_1_id,
self.REVIEWER_1_EMAIL)
def test_email_sent_to_reviewer_with_question_waiting_days_for_review(
self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
question_suggestion = (
self._create_question_suggestion_with_question_html_and_datetime(
'<p>What is the meaning of life?</p>',
self.mocked_review_submission_datetime))
reviewable_suggestion_email_info = (
suggestion_services
.create_reviewable_suggestion_email_info_from_suggestion(
question_suggestion))
review_wait_time = 5
mocked_datetime_for_utcnow = (
reviewable_suggestion_email_info.submission_datetime +
datetime.timedelta(days=review_wait_time))
expected_email_html_body = (
'Hi reviewer1,'
'<br><br>'
'There are new review opportunities that we think you might be '
'interested in on the <a href="%s%s">Contributor Dashboard</a>. '
'Here are some examples of contributions that have been waiting '
'the longest for review:<br><br>'
'<ul>'
'<li>The following question suggestion was submitted for review 5 '
'days ago:'
'<br>What is the meaning of life?</li><br>'
'</ul><br>'
'Please take some time to review any of the above contributions '
'(if they still need a review) or any other contributions on the '
'dashboard. We appreciate your help!'
'<br><br>'
'Thanks again, and happy reviewing!<br>'
'- The Oppia Contributor Dashboard Team'
'<br><br>%s' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
email_manager.EMAIL_FOOTER.value))
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_contributor_dashboard_reviewers(
[self.reviewer_1_id],
[[reviewable_suggestion_email_info]])
)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.REVIEWER_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.reviewer_1_id,
self.REVIEWER_1_EMAIL)
def test_email_sent_to_reviewer_with_question_waiting_an_hour_for_review(
self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
question_suggestion = (
self._create_question_suggestion_with_question_html_and_datetime(
'<p>What is the meaning of life?</p>',
self.mocked_review_submission_datetime))
reviewable_suggestion_email_info = (
suggestion_services
.create_reviewable_suggestion_email_info_from_suggestion(
question_suggestion))
review_wait_time = 1
mocked_datetime_for_utcnow = (
reviewable_suggestion_email_info.submission_datetime +
datetime.timedelta(hours=review_wait_time))
expected_email_html_body = (
'Hi reviewer1,'
'<br><br>'
'There are new review opportunities that we think you might be '
'interested in on the <a href="%s%s">Contributor Dashboard</a>. '
'Here are some examples of contributions that have been waiting '
'the longest for review:<br><br>'
'<ul>'
'<li>The following question suggestion was submitted for review 1 '
'hour ago:'
'<br>What is the meaning of life?</li><br>'
'</ul><br>'
'Please take some time to review any of the above contributions '
'(if they still need a review) or any other contributions on the '
'dashboard. We appreciate your help!'
'<br><br>'
'Thanks again, and happy reviewing!<br>'
'- The Oppia Contributor Dashboard Team'
'<br><br>%s' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
email_manager.EMAIL_FOOTER.value))
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_contributor_dashboard_reviewers(
[self.reviewer_1_id],
[[reviewable_suggestion_email_info]])
)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.REVIEWER_1_EMAIL)
self.assertEqual(len(messages), 1)
            self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.reviewer_1_id,
self.REVIEWER_1_EMAIL)
def test_email_sent_to_reviewer_with_question_waiting_hours_for_review(
self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
question_suggestion = (
self._create_question_suggestion_with_question_html_and_datetime(
'<p>What is the meaning of life?</p>',
self.mocked_review_submission_datetime))
reviewable_suggestion_email_info = (
suggestion_services
.create_reviewable_suggestion_email_info_from_suggestion(
question_suggestion))
review_wait_time = 5
mocked_datetime_for_utcnow = (
reviewable_suggestion_email_info.submission_datetime +
datetime.timedelta(hours=review_wait_time))
expected_email_html_body = (
'Hi reviewer1,'
'<br><br>'
'There are new review opportunities that we think you might be '
'interested in on the <a href="%s%s">Contributor Dashboard</a>. '
'Here are some examples of contributions that have been waiting '
'the longest for review:<br><br>'
'<ul>'
'<li>The following question suggestion was submitted for review 5 '
'hours ago:'
'<br>What is the meaning of life?</li><br>'
'</ul><br>'
'Please take some time to review any of the above contributions '
'(if they still need a review) or any other contributions on the '
'dashboard. We appreciate your help!'
'<br><br>'
'Thanks again, and happy reviewing!<br>'
'- The Oppia Contributor Dashboard Team'
'<br><br>%s' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
email_manager.EMAIL_FOOTER.value))
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_contributor_dashboard_reviewers(
[self.reviewer_1_id],
[[reviewable_suggestion_email_info]])
)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.REVIEWER_1_EMAIL)
self.assertEqual(len(messages), 1)
            self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.reviewer_1_id,
self.REVIEWER_1_EMAIL)
def test_email_sent_to_reviewer_with_question_waiting_a_minute_for_review(
self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
question_suggestion = (
self._create_question_suggestion_with_question_html_and_datetime(
'<p>What is the meaning of life?</p>',
self.mocked_review_submission_datetime))
reviewable_suggestion_email_info = (
suggestion_services
.create_reviewable_suggestion_email_info_from_suggestion(
question_suggestion))
review_wait_time = 1
mocked_datetime_for_utcnow = (
reviewable_suggestion_email_info.submission_datetime +
datetime.timedelta(minutes=review_wait_time))
expected_email_html_body = (
'Hi reviewer1,'
'<br><br>'
'There are new review opportunities that we think you might be '
'interested in on the <a href="%s%s">Contributor Dashboard</a>. '
'Here are some examples of contributions that have been waiting '
'the longest for review:<br><br>'
'<ul>'
'<li>The following question suggestion was submitted for review 1 '
'minute ago:'
'<br>What is the meaning of life?</li><br>'
'</ul><br>'
'Please take some time to review any of the above contributions '
'(if they still need a review) or any other contributions on the '
'dashboard. We appreciate your help!'
'<br><br>'
'Thanks again, and happy reviewing!<br>'
'- The Oppia Contributor Dashboard Team'
'<br><br>%s' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
email_manager.EMAIL_FOOTER.value))
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_contributor_dashboard_reviewers(
[self.reviewer_1_id],
[[reviewable_suggestion_email_info]])
)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.REVIEWER_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.reviewer_1_id,
self.REVIEWER_1_EMAIL)
def test_email_sent_to_reviewer_with_question_waiting_minutes_for_review(
self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
question_suggestion = (
self._create_question_suggestion_with_question_html_and_datetime(
'<p>What is the meaning of life?</p>',
self.mocked_review_submission_datetime))
reviewable_suggestion_email_info = (
suggestion_services
.create_reviewable_suggestion_email_info_from_suggestion(
question_suggestion))
review_wait_time = 5
mocked_datetime_for_utcnow = (
reviewable_suggestion_email_info.submission_datetime +
datetime.timedelta(minutes=review_wait_time))
expected_email_html_body = (
'Hi reviewer1,'
'<br><br>'
'There are new review opportunities that we think you might be '
'interested in on the <a href="%s%s">Contributor Dashboard</a>. '
'Here are some examples of contributions that have been waiting '
'the longest for review:<br><br>'
'<ul>'
'<li>The following question suggestion was submitted for review 5 '
'minutes ago:'
'<br>What is the meaning of life?</li><br>'
'</ul><br>'
'Please take some time to review any of the above contributions '
'(if they still need a review) or any other contributions on the '
'dashboard. We appreciate your help!'
'<br><br>'
'Thanks again, and happy reviewing!<br>'
'- The Oppia Contributor Dashboard Team'
'<br><br>%s' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
email_manager.EMAIL_FOOTER.value))
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_contributor_dashboard_reviewers(
[self.reviewer_1_id],
[[reviewable_suggestion_email_info]])
)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.REVIEWER_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.reviewer_1_id,
self.REVIEWER_1_EMAIL)
def test_email_sent_to_reviewer_with_question_waiting_seconds_for_review(
self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
question_suggestion = (
self._create_question_suggestion_with_question_html_and_datetime(
'<p>What is the meaning of life?</p>',
self.mocked_review_submission_datetime))
reviewable_suggestion_email_info = (
suggestion_services
.create_reviewable_suggestion_email_info_from_suggestion(
question_suggestion))
review_wait_time = 5
mocked_datetime_for_utcnow = (
reviewable_suggestion_email_info.submission_datetime +
datetime.timedelta(seconds=review_wait_time))
expected_email_html_body = (
'Hi reviewer1,'
'<br><br>'
'There are new review opportunities that we think you might be '
'interested in on the <a href="%s%s">Contributor Dashboard</a>. '
'Here are some examples of contributions that have been waiting '
'the longest for review:<br><br>'
'<ul>'
'<li>The following question suggestion was submitted for review 1 '
'minute ago:'
'<br>What is the meaning of life?</li><br>'
'</ul><br>'
'Please take some time to review any of the above contributions '
'(if they still need a review) or any other contributions on the '
'dashboard. We appreciate your help!'
'<br><br>'
'Thanks again, and happy reviewing!<br>'
'- The Oppia Contributor Dashboard Team'
'<br><br>%s' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
email_manager.EMAIL_FOOTER.value))
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_contributor_dashboard_reviewers(
[self.reviewer_1_id],
[[reviewable_suggestion_email_info]])
)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.REVIEWER_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.reviewer_1_id,
self.REVIEWER_1_EMAIL)
def test_email_sent_to_reviewer_with_multi_questions_waiting_for_a_review(
self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
mocked_datetime_for_utcnow = (
self.mocked_review_submission_datetime + datetime.timedelta(
days=1, hours=1))
# Question suggestion 1 has waited 1 day for review.
question_suggestion_1 = (
self._create_question_suggestion_with_question_html_and_datetime(
'<p>Question 1</p>',
self.mocked_review_submission_datetime + datetime.timedelta(
hours=1)))
# Question suggestion 2 has waited 1 hour for review.
question_suggestion_2 = (
self._create_question_suggestion_with_question_html_and_datetime(
'<p>Question 2</p>',
self.mocked_review_submission_datetime + datetime.timedelta(
days=1)))
reviewable_suggestion_email_infos = (
self._create_reviewable_suggestion_email_infos_from_suggestions(
[question_suggestion_1, question_suggestion_2]))
expected_email_html_body = (
'Hi reviewer1,'
'<br><br>'
'There are new review opportunities that we think you might be '
'interested in on the <a href="%s%s">Contributor Dashboard</a>. '
'Here are some examples of contributions that have been waiting '
'the longest for review:<br><br>'
'<ul>'
'<li>The following question suggestion was submitted for review 1 '
'day ago:'
'<br>Question 1</li><br>'
'<li>The following question suggestion was submitted for review 1 '
'hour ago:'
'<br>Question 2</li><br>'
'</ul><br>'
'Please take some time to review any of the above contributions '
'(if they still need a review) or any other contributions on the '
'dashboard. We appreciate your help!'
'<br><br>'
'Thanks again, and happy reviewing!<br>'
'- The Oppia Contributor Dashboard Team'
'<br><br>%s' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
email_manager.EMAIL_FOOTER.value))
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_contributor_dashboard_reviewers(
[self.reviewer_1_id],
[reviewable_suggestion_email_infos])
)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.REVIEWER_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.reviewer_1_id,
self.REVIEWER_1_EMAIL)
def test_email_sent_to_multi_reviewers_with_multi_question_suggestions(
self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
mocked_datetime_for_utcnow = (
self.mocked_review_submission_datetime + datetime.timedelta(
days=1, hours=1, minutes=1))
# Question suggestion 1 has waited 1 day for review.
question_suggestion_1 = (
self._create_question_suggestion_with_question_html_and_datetime(
'<p>Question 1 for reviewer 1</p>',
self.mocked_review_submission_datetime + datetime.timedelta(
hours=1, minutes=1)))
# Question suggestion 2 has waited 1 hour for review.
question_suggestion_2 = (
self._create_question_suggestion_with_question_html_and_datetime(
'<p>Question 2 for reviewer 1</p>',
self.mocked_review_submission_datetime + datetime.timedelta(
days=1, minutes=1)))
# Question suggestion 3 has waited 1 minute for review.
question_suggestion_3 = (
self._create_question_suggestion_with_question_html_and_datetime(
'<p>Question 1 for reviewer 2</p>',
self.mocked_review_submission_datetime + datetime.timedelta(
days=1, hours=1)))
# Question suggestion 4 has waited 1 minute for review.
question_suggestion_4 = (
self._create_question_suggestion_with_question_html_and_datetime(
'<p>Question 2 for reviewer 2</p>',
self.mocked_review_submission_datetime + datetime.timedelta(
days=1, hours=1)))
reviewer_1_suggestion_email_infos = (
self._create_reviewable_suggestion_email_infos_from_suggestions(
[question_suggestion_1, question_suggestion_2]))
reviewer_2_suggestion_email_infos = (
self._create_reviewable_suggestion_email_infos_from_suggestions(
[question_suggestion_3, question_suggestion_4]))
expected_email_html_body_reviewer_1 = (
'Hi reviewer1,'
'<br><br>'
'There are new review opportunities that we think you might be '
'interested in on the <a href="%s%s">Contributor Dashboard</a>. '
'Here are some examples of contributions that have been waiting '
'the longest for review:<br><br>'
'<ul>'
'<li>The following question suggestion was submitted for review 1 '
'day ago:'
'<br>Question 1 for reviewer 1</li><br>'
'<li>The following question suggestion was submitted for review 1 '
'hour ago:'
'<br>Question 2 for reviewer 1</li><br>'
'</ul><br>'
'Please take some time to review any of the above contributions '
'(if they still need a review) or any other contributions on the '
'dashboard. We appreciate your help!'
'<br><br>'
'Thanks again, and happy reviewing!<br>'
'- The Oppia Contributor Dashboard Team'
'<br><br>%s' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
email_manager.EMAIL_FOOTER.value)
)
expected_email_html_body_reviewer_2 = (
'Hi reviewer2,'
'<br><br>'
'There are new review opportunities that we think you might be '
'interested in on the <a href="%s%s">Contributor Dashboard</a>. '
'Here are some examples of contributions that have been waiting '
'the longest for review:<br><br>'
'<ul>'
'<li>The following question suggestion was submitted for review 1 '
'minute ago:'
'<br>Question 1 for reviewer 2</li><br>'
'<li>The following question suggestion was submitted for review 1 '
'minute ago:'
'<br>Question 2 for reviewer 2</li><br>'
'</ul><br>'
'Please take some time to review any of the above contributions '
'(if they still need a review) or any other contributions on the '
'dashboard. We appreciate your help!'
'<br><br>'
'Thanks again, and happy reviewing!<br>'
'- The Oppia Contributor Dashboard Team'
'<br><br>%s' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
email_manager.EMAIL_FOOTER.value))
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_contributor_dashboard_reviewers(
[self.reviewer_1_id, self.reviewer_2_id],
[
reviewer_1_suggestion_email_infos,
reviewer_2_suggestion_email_infos
])
)
# Make sure correct emails are sent.
messages = self._get_sent_email_messages(self.REVIEWER_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body_reviewer_1)
messages = self._get_sent_email_messages(self.REVIEWER_2_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body_reviewer_2)
# Make sure correct email models are stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body_reviewer_1, self.reviewer_1_id,
self.REVIEWER_1_EMAIL)
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body_reviewer_2, self.reviewer_2_id,
self.REVIEWER_2_EMAIL)
def test_email_sent_to_reviewer_with_translation_waiting_a_day_for_review(
self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
translation_suggestion = (
self._create_translation_suggestion_in_lang_with_html_and_datetime(
'hi', '<p>Sample translation</p>',
self.mocked_review_submission_datetime))
reviewable_suggestion_email_info = (
suggestion_services
.create_reviewable_suggestion_email_info_from_suggestion(
translation_suggestion))
review_wait_time = 1
reviewable_suggestion_email_info.submission_datetime = (
self.mocked_review_submission_datetime)
mocked_datetime_for_utcnow = (
reviewable_suggestion_email_info.submission_datetime +
datetime.timedelta(days=review_wait_time))
expected_email_html_body = (
'Hi reviewer1,'
'<br><br>'
'There are new review opportunities that we think you might be '
'interested in on the <a href="%s%s">Contributor Dashboard</a>. '
'Here are some examples of contributions that have been waiting '
'the longest for review:<br><br>'
'<ul>'
'<li>The following Hindi translation suggestion was submitted for '
'review 1 day ago:'
'<br>Sample translation</li><br>'
'</ul><br>'
'Please take some time to review any of the above contributions '
'(if they still need a review) or any other contributions on the '
'dashboard. We appreciate your help!'
'<br><br>'
'Thanks again, and happy reviewing!<br>'
'- The Oppia Contributor Dashboard Team'
'<br><br>%s' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
email_manager.EMAIL_FOOTER.value))
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_contributor_dashboard_reviewers(
[self.reviewer_1_id],
[[reviewable_suggestion_email_info]])
)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.REVIEWER_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
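        # Make sure correct email model is stored.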
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.reviewer_1_id,
self.REVIEWER_1_EMAIL)
def test_email_sent_to_reviewer_with_translation_waiting_days_for_review(
self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
translation_suggestion = (
self._create_translation_suggestion_in_lang_with_html_and_datetime(
'hi', '<p>Sample translation</p>',
self.mocked_review_submission_datetime))
reviewable_suggestion_email_info = (
suggestion_services
.create_reviewable_suggestion_email_info_from_suggestion(
translation_suggestion))
review_wait_time = 5
mocked_datetime_for_utcnow = (
reviewable_suggestion_email_info.submission_datetime +
datetime.timedelta(days=review_wait_time))
expected_email_html_body = (
'Hi reviewer1,'
'<br><br>'
'There are new review opportunities that we think you might be '
'interested in on the <a href="%s%s">Contributor Dashboard</a>. '
'Here are some examples of contributions that have been waiting '
'the longest for review:<br><br>'
'<ul>'
'<li>The following Hindi translation suggestion was submitted for '
'review 5 days ago:'
'<br>Sample translation</li><br>'
'</ul><br>'
'Please take some time to review any of the above contributions '
'(if they still need a review) or any other contributions on the '
'dashboard. We appreciate your help!'
'<br><br>'
'Thanks again, and happy reviewing!<br>'
'- The Oppia Contributor Dashboard Team'
'<br><br>%s' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
email_manager.EMAIL_FOOTER.value))
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_contributor_dashboard_reviewers(
[self.reviewer_1_id],
[[reviewable_suggestion_email_info]])
)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.REVIEWER_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.reviewer_1_id,
self.REVIEWER_1_EMAIL)
def test_email_sent_to_reviewer_with_translation_waiting_an_hour_for_review(
self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
translation_suggestion = (
self._create_translation_suggestion_in_lang_with_html_and_datetime(
'hi', '<p>Sample translation</p>',
self.mocked_review_submission_datetime))
reviewable_suggestion_email_info = (
suggestion_services
.create_reviewable_suggestion_email_info_from_suggestion(
translation_suggestion))
review_wait_time = 1
reviewable_suggestion_email_info.submission_datetime = (
self.mocked_review_submission_datetime)
mocked_datetime_for_utcnow = (
reviewable_suggestion_email_info.submission_datetime +
datetime.timedelta(hours=review_wait_time))
expected_email_html_body = (
'Hi reviewer1,'
'<br><br>'
'There are new review opportunities that we think you might be '
'interested in on the <a href="%s%s">Contributor Dashboard</a>. '
'Here are some examples of contributions that have been waiting '
'the longest for review:<br><br>'
'<ul>'
'<li>The following Hindi translation suggestion was submitted for '
'review 1 hour ago:'
'<br>Sample translation</li><br>'
'</ul><br>'
'Please take some time to review any of the above contributions '
'(if they still need a review) or any other contributions on the '
'dashboard. We appreciate your help!'
'<br><br>'
'Thanks again, and happy reviewing!<br>'
'- The Oppia Contributor Dashboard Team'
'<br><br>%s' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
email_manager.EMAIL_FOOTER.value))
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_contributor_dashboard_reviewers(
[self.reviewer_1_id],
[[reviewable_suggestion_email_info]])
)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.REVIEWER_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.reviewer_1_id,
self.REVIEWER_1_EMAIL)
def test_email_sent_to_reviewer_with_translation_waiting_hours_for_review(
self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
translation_suggestion = (
self._create_translation_suggestion_in_lang_with_html_and_datetime(
'hi', '<p>Sample translation</p>',
self.mocked_review_submission_datetime))
reviewable_suggestion_email_info = (
suggestion_services
.create_reviewable_suggestion_email_info_from_suggestion(
translation_suggestion))
review_wait_time = 5
reviewable_suggestion_email_info.submission_datetime = (
self.mocked_review_submission_datetime)
mocked_datetime_for_utcnow = (
reviewable_suggestion_email_info.submission_datetime +
datetime.timedelta(hours=review_wait_time))
expected_email_html_body = (
'Hi reviewer1,'
'<br><br>'
'There are new review opportunities that we think you might be '
'interested in on the <a href="%s%s">Contributor Dashboard</a>. '
'Here are some examples of contributions that have been waiting '
'the longest for review:<br><br>'
'<ul>'
'<li>The following Hindi translation suggestion was submitted for '
'review 5 hours ago:'
'<br>Sample translation</li><br>'
'</ul><br>'
'Please take some time to review any of the above contributions '
'(if they still need a review) or any other contributions on the '
'dashboard. We appreciate your help!'
'<br><br>'
'Thanks again, and happy reviewing!<br>'
'- The Oppia Contributor Dashboard Team'
'<br><br>%s' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
email_manager.EMAIL_FOOTER.value))
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_contributor_dashboard_reviewers(
[self.reviewer_1_id],
[[reviewable_suggestion_email_info]])
)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.REVIEWER_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.reviewer_1_id,
self.REVIEWER_1_EMAIL)
def test_email_sent_to_reviewer_with_translation_waiting_a_min_for_review(
self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
translation_suggestion = (
self._create_translation_suggestion_in_lang_with_html_and_datetime(
'hi', '<p>Sample translation</p>',
self.mocked_review_submission_datetime))
reviewable_suggestion_email_info = (
suggestion_services
.create_reviewable_suggestion_email_info_from_suggestion(
translation_suggestion))
review_wait_time = 1
reviewable_suggestion_email_info.submission_datetime = (
self.mocked_review_submission_datetime)
mocked_datetime_for_utcnow = (
reviewable_suggestion_email_info.submission_datetime +
datetime.timedelta(minutes=review_wait_time))
expected_email_html_body = (
'Hi reviewer1,'
'<br><br>'
'There are new review opportunities that we think you might be '
'interested in on the <a href="%s%s">Contributor Dashboard</a>. '
'Here are some examples of contributions that have been waiting '
'the longest for review:<br><br>'
'<ul>'
'<li>The following Hindi translation suggestion was submitted for '
'review 1 minute ago:'
'<br>Sample translation</li><br>'
'</ul><br>'
'Please take some time to review any of the above contributions '
'(if they still need a review) or any other contributions on the '
'dashboard. We appreciate your help!'
'<br><br>'
'Thanks again, and happy reviewing!<br>'
'- The Oppia Contributor Dashboard Team'
'<br><br>%s' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
email_manager.EMAIL_FOOTER.value))
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_contributor_dashboard_reviewers(
[self.reviewer_1_id],
[[reviewable_suggestion_email_info]])
)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.REVIEWER_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.reviewer_1_id,
self.REVIEWER_1_EMAIL)
def test_email_sent_to_reviewer_with_translation_waiting_mins_for_review(
self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
translation_suggestion = (
self._create_translation_suggestion_in_lang_with_html_and_datetime(
'hi', '<p>Sample translation</p>',
self.mocked_review_submission_datetime))
reviewable_suggestion_email_info = (
suggestion_services
.create_reviewable_suggestion_email_info_from_suggestion(
translation_suggestion))
review_wait_time = 5
reviewable_suggestion_email_info.submission_datetime = (
self.mocked_review_submission_datetime)
mocked_datetime_for_utcnow = (
reviewable_suggestion_email_info.submission_datetime +
datetime.timedelta(minutes=review_wait_time))
expected_email_html_body = (
'Hi reviewer1,'
'<br><br>'
'There are new review opportunities that we think you might be '
'interested in on the <a href="%s%s">Contributor Dashboard</a>. '
'Here are some examples of contributions that have been waiting '
'the longest for review:<br><br>'
'<ul>'
'<li>The following Hindi translation suggestion was submitted for '
'review 5 minutes ago:'
'<br>Sample translation</li><br>'
'</ul><br>'
'Please take some time to review any of the above contributions '
'(if they still need a review) or any other contributions on the '
'dashboard. We appreciate your help!'
'<br><br>'
'Thanks again, and happy reviewing!<br>'
'- The Oppia Contributor Dashboard Team'
'<br><br>%s' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
email_manager.EMAIL_FOOTER.value))
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_contributor_dashboard_reviewers(
[self.reviewer_1_id],
[[reviewable_suggestion_email_info]])
)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.REVIEWER_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.reviewer_1_id,
self.REVIEWER_1_EMAIL)
def test_email_sent_to_reviewer_with_translation_waiting_secs_for_review(
self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
translation_suggestion = (
self._create_translation_suggestion_in_lang_with_html_and_datetime(
'hi', '<p>Sample translation</p>',
self.mocked_review_submission_datetime))
reviewable_suggestion_email_info = (
suggestion_services
.create_reviewable_suggestion_email_info_from_suggestion(
translation_suggestion))
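        # The suggestion has only been waiting one second for review, but the
        # email is still expected to report the smallest displayed unit,
        # '1 minute ago'.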
review_wait_time = 1
reviewable_suggestion_email_info.submission_datetime = (
self.mocked_review_submission_datetime)
mocked_datetime_for_utcnow = (
reviewable_suggestion_email_info.submission_datetime +
datetime.timedelta(seconds=review_wait_time))
expected_email_html_body = (
'Hi reviewer1,'
'<br><br>'
'There are new review opportunities that we think you might be '
'interested in on the <a href="%s%s">Contributor Dashboard</a>. '
'Here are some examples of contributions that have been waiting '
'the longest for review:<br><br>'
'<ul>'
'<li>The following Hindi translation suggestion was submitted for '
'review 1 minute ago:'
'<br>Sample translation</li><br>'
'</ul><br>'
'Please take some time to review any of the above contributions '
'(if they still need a review) or any other contributions on the '
'dashboard. We appreciate your help!'
'<br><br>'
'Thanks again, and happy reviewing!<br>'
'- The Oppia Contributor Dashboard Team'
'<br><br>%s' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
email_manager.EMAIL_FOOTER.value))
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_contributor_dashboard_reviewers(
[self.reviewer_1_id],
[[reviewable_suggestion_email_info]])
)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.REVIEWER_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.reviewer_1_id,
self.REVIEWER_1_EMAIL)
def test_email_sent_to_reviewer_with_multi_translation_waiting_for_review(
self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
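        # The mocked current time is 1 day and 1 hour after the base
        # submission datetime, so the two suggestions below have waited
        # 1 day and 1 hour for review, respectively.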
mocked_datetime_for_utcnow = (
self.mocked_review_submission_datetime + datetime.timedelta(
days=1, hours=1))
# Translation suggestion 1 has waited 1 day for review.
translation_suggestion_1 = (
self._create_translation_suggestion_in_lang_with_html_and_datetime(
'en', '<p>Translation 1</p>',
self.mocked_review_submission_datetime + datetime.timedelta(
hours=1)))
# Translation suggestion 2 has waited 1 hour for review.
translation_suggestion_2 = (
self._create_translation_suggestion_in_lang_with_html_and_datetime(
'fr', '<p>Translation 2</p>',
self.mocked_review_submission_datetime + datetime.timedelta(
days=1)))
reviewable_suggestion_email_infos = (
self._create_reviewable_suggestion_email_infos_from_suggestions(
[translation_suggestion_1, translation_suggestion_2]))
expected_email_html_body = (
'Hi reviewer1,'
'<br><br>'
'There are new review opportunities that we think you might be '
'interested in on the <a href="%s%s">Contributor Dashboard</a>. '
'Here are some examples of contributions that have been waiting '
'the longest for review:<br><br>'
'<ul>'
'<li>The following English translation suggestion was submitted '
'for review 1 day ago:'
'<br>Translation 1</li><br>'
'<li>The following French translation suggestion was submitted for '
'review 1 hour ago:'
'<br>Translation 2</li><br>'
'</ul><br>'
'Please take some time to review any of the above contributions '
'(if they still need a review) or any other contributions on the '
'dashboard. We appreciate your help!'
'<br><br>'
'Thanks again, and happy reviewing!<br>'
'- The Oppia Contributor Dashboard Team'
'<br><br>%s' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
email_manager.EMAIL_FOOTER.value))
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_contributor_dashboard_reviewers(
[self.reviewer_1_id],
[reviewable_suggestion_email_infos])
)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.REVIEWER_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.reviewer_1_id,
self.REVIEWER_1_EMAIL)
def test_email_sent_to_multi_reviewers_with_multi_translations_suggestions(
self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
mocked_datetime_for_utcnow = (
self.mocked_review_submission_datetime + datetime.timedelta(
days=1, hours=1, minutes=1))
# Translation suggestion 1 has waited 1 day for review.
translation_suggestion_1 = (
self._create_translation_suggestion_in_lang_with_html_and_datetime(
'en', '<p>Translation 1 for reviewer 1</p>',
self.mocked_review_submission_datetime + datetime.timedelta(
hours=1, minutes=1)))
# Translation suggestion 2 has waited 1 hour for review.
translation_suggestion_2 = (
self._create_translation_suggestion_in_lang_with_html_and_datetime(
'fr', '<p>Translation 2 for reviewer 1</p>',
self.mocked_review_submission_datetime + datetime.timedelta(
days=1, minutes=1)))
# Translation suggestion 3 has waited 1 minute for review.
translation_suggestion_3 = (
self._create_translation_suggestion_in_lang_with_html_and_datetime(
'hi', '<p>Translation 1 for reviewer 2</p>',
self.mocked_review_submission_datetime + datetime.timedelta(
days=1, hours=1)))
# Translation suggestion 4 has waited 1 minute for review.
translation_suggestion_4 = (
self._create_translation_suggestion_in_lang_with_html_and_datetime(
'fr', '<p>Translation 2 for reviewer 2</p>',
self.mocked_review_submission_datetime + datetime.timedelta(
days=1, hours=1)))
reviewer_1_suggestion_email_infos = (
self._create_reviewable_suggestion_email_infos_from_suggestions(
[translation_suggestion_1, translation_suggestion_2]))
reviewer_2_suggestion_email_infos = (
self._create_reviewable_suggestion_email_infos_from_suggestions(
[translation_suggestion_3, translation_suggestion_4]))
expected_email_html_body_reviewer_1 = (
'Hi reviewer1,'
'<br><br>'
'There are new review opportunities that we think you might be '
'interested in on the <a href="%s%s">Contributor Dashboard</a>. '
'Here are some examples of contributions that have been waiting '
'the longest for review:<br><br>'
'<ul>'
'<li>The following English translation suggestion was submitted '
'for review 1 day ago:'
'<br>Translation 1 for reviewer 1</li><br>'
'<li>The following French translation suggestion was submitted for '
'review 1 hour ago:'
'<br>Translation 2 for reviewer 1</li><br>'
'</ul><br>'
'Please take some time to review any of the above contributions '
'(if they still need a review) or any other contributions on the '
'dashboard. We appreciate your help!'
'<br><br>'
'Thanks again, and happy reviewing!<br>'
'- The Oppia Contributor Dashboard Team'
'<br><br>%s' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
email_manager.EMAIL_FOOTER.value)
)
expected_email_html_body_reviewer_2 = (
'Hi reviewer2,'
'<br><br>'
'There are new review opportunities that we think you might be '
'interested in on the <a href="%s%s">Contributor Dashboard</a>. '
'Here are some examples of contributions that have been waiting '
'the longest for review:<br><br>'
'<ul>'
'<li>The following Hindi translation suggestion was submitted for '
'review 1 minute ago:'
'<br>Translation 1 for reviewer 2</li><br>'
'<li>The following French translation suggestion was submitted for '
'review 1 minute ago:'
'<br>Translation 2 for reviewer 2</li><br>'
'</ul><br>'
'Please take some time to review any of the above contributions '
'(if they still need a review) or any other contributions on the '
'dashboard. We appreciate your help!'
'<br><br>'
'Thanks again, and happy reviewing!<br>'
'- The Oppia Contributor Dashboard Team'
'<br><br>%s' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
email_manager.EMAIL_FOOTER.value))
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_contributor_dashboard_reviewers(
[self.reviewer_1_id, self.reviewer_2_id],
[
reviewer_1_suggestion_email_infos,
reviewer_2_suggestion_email_infos
])
)
# Make sure correct emails are sent.
messages = self._get_sent_email_messages(self.REVIEWER_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body_reviewer_1)
messages = self._get_sent_email_messages(self.REVIEWER_2_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body_reviewer_2)
# Make sure correct email models are stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body_reviewer_1, self.reviewer_1_id,
self.REVIEWER_1_EMAIL)
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body_reviewer_2, self.reviewer_2_id,
self.REVIEWER_2_EMAIL)
def test_email_sent_to_multi_reviewers_with_multi_suggestions_waiting(self):
config_services.set_property(
'committer_id',
'contributor_dashboard_reviewer_emails_is_enabled', True)
mocked_datetime_for_utcnow = (
self.mocked_review_submission_datetime + datetime.timedelta(
days=1, hours=1, minutes=1))
# Suggestion 1 has waited 1 day for review.
suggestion_1 = (
self._create_translation_suggestion_in_lang_with_html_and_datetime(
'en', '<p>Translation 1</p>',
self.mocked_review_submission_datetime + datetime.timedelta(
hours=1, minutes=1)))
# Suggestion 2 has waited 1 hour for review.
suggestion_2 = (
self._create_question_suggestion_with_question_html_and_datetime(
'<p>Question 1</p>',
self.mocked_review_submission_datetime + datetime.timedelta(
days=1, minutes=1)))
# Suggestion 3 has waited 1 minute for review.
suggestion_3 = (
self._create_translation_suggestion_in_lang_with_html_and_datetime(
'fr', '<p>Translation 2</p>',
self.mocked_review_submission_datetime + datetime.timedelta(
days=1, hours=1)))
# Suggestion 4 has waited 1 minute for review.
suggestion_4 = (
self._create_question_suggestion_with_question_html_and_datetime(
'<p>Question 2</p>',
self.mocked_review_submission_datetime + datetime.timedelta(
days=1, hours=1)))
reviewer_1_suggestion_email_infos = (
self._create_reviewable_suggestion_email_infos_from_suggestions(
[suggestion_1, suggestion_2]))
reviewer_2_suggestion_email_infos = (
self._create_reviewable_suggestion_email_infos_from_suggestions(
[suggestion_3, suggestion_4]))
expected_email_html_body_reviewer_1 = (
'Hi reviewer1,'
'<br><br>'
'There are new review opportunities that we think you might be '
'interested in on the <a href="%s%s">Contributor Dashboard</a>. '
'Here are some examples of contributions that have been waiting '
'the longest for review:<br><br>'
'<ul>'
'<li>The following English translation suggestion was submitted '
'for review 1 day ago:'
'<br>Translation 1</li><br>'
'<li>The following question suggestion was submitted for '
'review 1 hour ago:'
'<br>Question 1</li><br>'
'</ul><br>'
'Please take some time to review any of the above contributions '
'(if they still need a review) or any other contributions on the '
'dashboard. We appreciate your help!'
'<br><br>'
'Thanks again, and happy reviewing!<br>'
'- The Oppia Contributor Dashboard Team'
'<br><br>%s' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
email_manager.EMAIL_FOOTER.value)
)
expected_email_html_body_reviewer_2 = (
'Hi reviewer2,'
'<br><br>'
'There are new review opportunities that we think you might be '
'interested in on the <a href="%s%s">Contributor Dashboard</a>. '
'Here are some examples of contributions that have been waiting '
'the longest for review:<br><br>'
'<ul>'
'<li>The following French translation suggestion was submitted for '
'review 1 minute ago:'
'<br>Translation 2</li><br>'
'<li>The following question suggestion was submitted for '
'review 1 minute ago:'
'<br>Question 2</li><br>'
'</ul><br>'
'Please take some time to review any of the above contributions '
'(if they still need a review) or any other contributions on the '
'dashboard. We appreciate your help!'
'<br><br>'
'Thanks again, and happy reviewing!<br>'
'- The Oppia Contributor Dashboard Team'
'<br><br>%s' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
email_manager.EMAIL_FOOTER.value))
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_contributor_dashboard_reviewers(
[self.reviewer_1_id, self.reviewer_2_id],
[
reviewer_1_suggestion_email_infos,
reviewer_2_suggestion_email_infos
])
)
# Make sure correct emails are sent.
messages = self._get_sent_email_messages(self.REVIEWER_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body_reviewer_1)
messages = self._get_sent_email_messages(self.REVIEWER_2_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body_reviewer_2)
# Make sure correct email models are stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body_reviewer_1, self.reviewer_1_id,
self.REVIEWER_1_EMAIL)
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body_reviewer_2, self.reviewer_2_id,
self.REVIEWER_2_EMAIL)
class NotifyAdminsSuggestionsWaitingTooLongForReviewEmailTests(
test_utils.EmailTestBase):
"""Tests the send_mail_to_notify_admins_suggestions_waiting_long method,
which sends an email to admins with information regarding the suggestions
that have waited longer than
suggestion_models.SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS days for
review on the Contributor Dashboard.
"""
target_id = 'exp1'
skill_id = 'skill_123456'
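    # Base datetime used as the submission time for the suggestions created
    # in these tests.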
mocked_review_submission_datetime = datetime.datetime(2020, 6, 15, 5)
AUTHOR_USERNAME = 'author'
AUTHOR_EMAIL = 'author@example.com'
CURRICULUM_ADMIN_1_USERNAME = 'user1'
CURRICULUM_ADMIN_1_EMAIL = 'user1@community.org'
CURRICULUM_ADMIN_2_USERNAME = 'user2'
CURRICULUM_ADMIN_2_EMAIL = 'user2@community.org'
def _create_translation_suggestion_in_lang_with_html_and_datetime(
self, language_code, translation_html, submission_datetime):
"""Creates a translation suggestion in the given language_code with the
given translation html and submission datetime.
"""
add_translation_change_dict = {
'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
'state_name': feconf.DEFAULT_INIT_STATE_NAME,
'content_id': feconf.DEFAULT_NEW_STATE_CONTENT_ID,
'language_code': language_code,
'content_html': feconf.DEFAULT_INIT_STATE_CONTENT_STR,
'translation_html': translation_html,
'data_format': 'html'
}
with self.mock_datetime_utcnow(submission_datetime):
translation_suggestion = suggestion_services.create_suggestion(
feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT,
feconf.ENTITY_TYPE_EXPLORATION,
self.target_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
self.author_id, add_translation_change_dict,
'test description')
return translation_suggestion
def _create_question_suggestion_with_question_html_and_datetime(
self, question_html, submission_datetime):
"""Creates a question suggestion with the given question html and
submission datetime.
"""
with self.swap(
feconf, 'DEFAULT_INIT_STATE_CONTENT_STR', question_html):
add_question_change_dict = {
'cmd': (
question_domain
.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION),
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': constants.DEFAULT_LANGUAGE_CODE,
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': ['skill_1'],
'inapplicable_skill_misconception_ids': ['skillid12345-1']
},
'skill_id': self.skill_id,
'skill_difficulty': 0.3
}
with self.mock_datetime_utcnow(submission_datetime):
question_suggestion = suggestion_services.create_suggestion(
feconf.SUGGESTION_TYPE_ADD_QUESTION,
feconf.ENTITY_TYPE_SKILL,
self.skill_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
self.author_id, add_question_change_dict,
'test description')
return question_suggestion
def _create_reviewable_suggestion_email_infos_from_suggestions(
self, suggestions):
"""Creates a list of ReviewableSuggestionEmailInfo objects from
the given suggestions.
"""
return [
(
suggestion_services
.create_reviewable_suggestion_email_info_from_suggestion(
suggestion)
) for suggestion in suggestions
]
def _assert_email_data_stored_in_sent_email_model_is_correct(
self, expected_email_html_body, admin_id, admin_email):
"""Asserts that the created sent email model from the sent email
contains the right information.
"""
sent_email_models = email_models.SentEmailModel.get_all().filter(
email_models.SentEmailModel.recipient_id == admin_id).fetch()
self.assertEqual(len(sent_email_models), 1)
sent_email_model = sent_email_models[0]
self.assertEqual(
sent_email_model.subject, (
email_manager
.ADMIN_NOTIFICATION_FOR_SUGGESTIONS_NEEDING_REVIEW_EMAIL_DATA[
'email_subject']
))
self.assertEqual(
sent_email_model.recipient_id, admin_id)
self.assertEqual(
sent_email_model.recipient_email, admin_email)
self.assertEqual(
sent_email_model.html_body, expected_email_html_body)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent,
feconf.EMAIL_INTENT_ADDRESS_CONTRIBUTOR_DASHBOARD_SUGGESTIONS)
def _log_error_for_tests(self, error_message):
"""Appends the error message to the logged errors list."""
self.logged_errors.append(error_message)
def _mock_logging_info(self, msg, *args):
"""Mocks logging.info() by appending the log message to the logged info
list.
"""
self.logged_info.append(msg % args)
def setUp(self):
super(
NotifyAdminsSuggestionsWaitingTooLongForReviewEmailTests,
self).setUp()
self.signup(self.AUTHOR_EMAIL, self.AUTHOR_USERNAME)
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
self.signup(
self.CURRICULUM_ADMIN_1_EMAIL, self.CURRICULUM_ADMIN_1_USERNAME)
self.admin_1_id = self.get_user_id_from_email(
self.CURRICULUM_ADMIN_1_EMAIL)
self.signup(
self.CURRICULUM_ADMIN_2_EMAIL, self.CURRICULUM_ADMIN_2_USERNAME)
self.admin_2_id = self.get_user_id_from_email(
self.CURRICULUM_ADMIN_2_EMAIL)
self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True)
self.cannot_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', False)
self.logged_errors = []
self.log_new_error_counter = test_utils.CallCounter(
self._log_error_for_tests)
self.log_new_error_ctx = self.swap(
email_manager, 'log_new_error', self.log_new_error_counter)
self.logged_info = []
self.log_new_info_ctx = self.swap(
logging, 'info', self._mock_logging_info)
self.save_new_valid_exploration(self.target_id, self.author_id)
self.save_new_skill(self.skill_id, self.author_id)
question_suggestion = (
self._create_question_suggestion_with_question_html_and_datetime(
'<p>What is the meaning of life?</p>',
self.mocked_review_submission_datetime))
self.reviewable_suggestion_email_info = (
suggestion_services
.create_reviewable_suggestion_email_info_from_suggestion(
question_suggestion))
def test_email_not_sent_if_can_send_emails_is_false(self):
config_services.set_property(
'committer_id',
'notify_admins_suggestions_waiting_too_long_is_enabled', True)
with self.cannot_send_emails_ctx, self.log_new_error_ctx:
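            # Set the review wait time threshold to 0 days so that the
            # suggestion qualifies as waiting too long for review.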
with self.swap(
suggestion_models,
'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 0):
(
email_manager
.send_mail_to_notify_admins_suggestions_waiting_long(
[self.admin_1_id],
[],
[],
[self.reviewable_suggestion_email_info])
)
messages = self._get_all_sent_email_messages()
self.assertEqual(len(messages), 0)
self.assertEqual(self.log_new_error_counter.times_called, 1)
self.assertEqual(
self.logged_errors[0], 'This app cannot send emails to users.')
def test_email_not_sent_if_notifying_admins_about_suggestions_is_disabled(
self):
config_services.set_property(
'committer_id',
'notify_admins_suggestions_waiting_too_long_is_enabled', False)
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.swap(
suggestion_models,
'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 0):
(
email_manager
.send_mail_to_notify_admins_suggestions_waiting_long(
[self.admin_1_id], [], [],
[self.reviewable_suggestion_email_info])
)
messages = self._get_all_sent_email_messages()
self.assertEqual(len(messages), 0)
self.assertEqual(self.log_new_error_counter.times_called, 1)
self.assertEqual(
self.logged_errors[0],
'The "notify_admins_suggestions_waiting_too_long" property '
'must be enabled on the admin config page in order to send '
'admins the emails.')
def test_email_not_sent_if_admin_email_does_not_exist(self):
config_services.set_property(
'committer_id',
'notify_admins_suggestions_waiting_too_long_is_enabled', True)
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.swap(
suggestion_models,
'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 0):
(
email_manager
.send_mail_to_notify_admins_suggestions_waiting_long(
['admin_id_without_email'], [], [],
[self.reviewable_suggestion_email_info])
)
messages = self._get_all_sent_email_messages()
self.assertEqual(len(messages), 0)
self.assertEqual(self.log_new_error_counter.times_called, 1)
self.assertEqual(
self.logged_errors[0],
'There was no email for the given admin id: admin_id_without_email.'
)
def test_email_not_sent_if_no_admins_to_notify(self):
config_services.set_property(
'committer_id',
'notify_admins_suggestions_waiting_too_long_is_enabled', True)
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.swap(
suggestion_models,
'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 0):
(
email_manager
.send_mail_to_notify_admins_suggestions_waiting_long(
[], [], [], [self.reviewable_suggestion_email_info])
)
messages = self._get_all_sent_email_messages()
self.assertEqual(len(messages), 0)
self.assertEqual(self.log_new_error_counter.times_called, 1)
self.assertEqual(
self.logged_errors[0], 'There were no admins to notify.')
def test_email_not_sent_if_no_suggestions_to_notify_the_admin_about(
self):
config_services.set_property(
'committer_id',
'notify_admins_suggestions_waiting_too_long_is_enabled', True)
with self.can_send_emails_ctx, self.log_new_info_ctx:
with self.swap(
suggestion_models,
'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 0):
(
email_manager
.send_mail_to_notify_admins_suggestions_waiting_long(
[self.admin_1_id], [], [], [])
)
messages = self._get_all_sent_email_messages()
self.assertEqual(len(messages), 0)
self.assertEqual(
self.logged_info[0],
'There were no Contributor Dashboard suggestions that were waiting '
'too long for a review.')
def test_email_sent_to_admin_if_question_has_waited_too_long_for_a_review(
self):
config_services.set_property(
'committer_id',
'notify_admins_suggestions_waiting_too_long_is_enabled', True)
question_suggestion = (
self._create_question_suggestion_with_question_html_and_datetime(
'<p>What is the meaning of life?</p>',
self.mocked_review_submission_datetime))
reviewable_suggestion_email_info = (
suggestion_services
.create_reviewable_suggestion_email_info_from_suggestion(
question_suggestion))
review_wait_time = 5
mocked_datetime_for_utcnow = (
reviewable_suggestion_email_info.submission_datetime +
datetime.timedelta(days=review_wait_time))
expected_email_html_body = (
'Hi user1,'
'<br><br>'
'There are suggestions on the '
'<a href="%s%s">Contributor Dashboard</a> that have been waiting '
'for more than 0 days for review. Please take a look at the '
'suggestions mentioned below and help them get reviewed by going '
'to the <a href="%s%s#/roles">admin roles page</a> and either:'
'<br><br><ul>'
'<li>Add more reviewers to the suggestion types that have '
'suggestions waiting too long for a review</li><br>'
'<li>Find the existing reviewers and email reviewers directly '
'about the suggestions waiting for a review</li><br>'
'</ul><br>'
'Here are the suggestions that have been waiting too long for a '
'review:'
'<br><br>'
'<ul>'
'<li>The following question suggestion was submitted for review 5 '
'days ago:'
'<br>What is the meaning of life?</li><br>'
'</ul><br>'
'Thanks so much - we appreciate your help!<br>'
'Best Wishes!<br><br>'
'- The Oppia Contributor Dashboard Team' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
feconf.OPPIA_SITE_URL, feconf.ADMIN_URL)
)
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.swap(
suggestion_models,
'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 0):
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_admins_suggestions_waiting_long(
[self.admin_1_id], [], [],
[reviewable_suggestion_email_info])
)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.CURRICULUM_ADMIN_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.admin_1_id,
self.CURRICULUM_ADMIN_1_EMAIL)
def test_email_sent_to_admin_if_multiple_questions_have_waited_for_review(
self):
config_services.set_property(
'committer_id',
'notify_admins_suggestions_waiting_too_long_is_enabled', True)
mocked_datetime_for_utcnow = (
self.mocked_review_submission_datetime + datetime.timedelta(
days=2, hours=1))
question_suggestion_1 = (
self._create_question_suggestion_with_question_html_and_datetime(
'<p>Question 1</p>',
self.mocked_review_submission_datetime))
        # Question suggestion 2 has waited slightly less time than question
        # suggestion 1 so that the two question suggestions are
        # distinguishable in terms of review wait time.
question_suggestion_2 = (
self._create_question_suggestion_with_question_html_and_datetime(
'<p>Question 2</p>',
self.mocked_review_submission_datetime + datetime.timedelta(
hours=1)))
reviewable_suggestion_email_infos = (
self._create_reviewable_suggestion_email_infos_from_suggestions(
[question_suggestion_1, question_suggestion_2]))
expected_email_html_body = (
'Hi user1,'
'<br><br>'
'There are suggestions on the '
'<a href="%s%s">Contributor Dashboard</a> that have been waiting '
'for more than 0 days for review. Please take a look at the '
'suggestions mentioned below and help them get reviewed by going '
'to the <a href="%s%s#/roles">admin roles page</a> and either:'
'<br><br><ul>'
'<li>Add more reviewers to the suggestion types that have '
'suggestions waiting too long for a review</li><br>'
'<li>Find the existing reviewers and email reviewers directly '
'about the suggestions waiting for a review</li><br>'
'</ul><br>'
'Here are the suggestions that have been waiting too long for a '
'review:'
'<br><br>'
'<ul>'
'<li>The following question suggestion was submitted for review 2 '
'days ago:'
'<br>Question 1</li><br>'
'<li>The following question suggestion was submitted for review 2 '
'days ago:'
'<br>Question 2</li><br>'
'</ul><br>'
'Thanks so much - we appreciate your help!<br>'
'Best Wishes!<br><br>'
'- The Oppia Contributor Dashboard Team' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
feconf.OPPIA_SITE_URL, feconf.ADMIN_URL)
)
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.swap(
suggestion_models,
'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 0):
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_admins_suggestions_waiting_long(
[self.admin_1_id], [], [],
reviewable_suggestion_email_infos)
)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.CURRICULUM_ADMIN_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.admin_1_id,
self.CURRICULUM_ADMIN_1_EMAIL)
def test_email_sent_to_admin_if_translation_has_waited_too_long_for_review(
self):
config_services.set_property(
'committer_id',
'notify_admins_suggestions_waiting_too_long_is_enabled', True)
translation_suggestion = (
self._create_translation_suggestion_in_lang_with_html_and_datetime(
'hi', '<p>Sample translation</p>',
self.mocked_review_submission_datetime))
reviewable_suggestion_email_info = (
suggestion_services
.create_reviewable_suggestion_email_info_from_suggestion(
translation_suggestion))
review_wait_time = 5
mocked_datetime_for_utcnow = (
reviewable_suggestion_email_info.submission_datetime +
datetime.timedelta(days=review_wait_time))
expected_email_html_body = (
'Hi user1,'
'<br><br>'
'There are suggestions on the '
'<a href="%s%s">Contributor Dashboard</a> that have been waiting '
'for more than 0 days for review. Please take a look at the '
'suggestions mentioned below and help them get reviewed by going '
'to the <a href="%s%s#/roles">admin roles page</a> and either:'
'<br><br><ul>'
'<li>Add more reviewers to the suggestion types that have '
'suggestions waiting too long for a review</li><br>'
'<li>Find the existing reviewers and email reviewers directly '
'about the suggestions waiting for a review</li><br>'
'</ul><br>'
'Here are the suggestions that have been waiting too long for a '
'review:'
'<br><br>'
'<ul>'
'<li>The following Hindi translation suggestion was submitted for '
'review 5 days ago:'
'<br>Sample translation</li><br>'
'</ul><br>'
'Thanks so much - we appreciate your help!<br>'
'Best Wishes!<br><br>'
'- The Oppia Contributor Dashboard Team' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
feconf.OPPIA_SITE_URL, feconf.ADMIN_URL)
)
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.swap(
suggestion_models,
'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 0):
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_admins_suggestions_waiting_long(
[self.admin_1_id], [], [],
[reviewable_suggestion_email_info])
)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.CURRICULUM_ADMIN_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.admin_1_id,
self.CURRICULUM_ADMIN_1_EMAIL)
def test_email_sent_to_admin_if_multi_translations_have_waited_for_review(
self):
config_services.set_property(
'committer_id',
'notify_admins_suggestions_waiting_too_long_is_enabled', True)
mocked_datetime_for_utcnow = (
self.mocked_review_submission_datetime + datetime.timedelta(
days=2, hours=1))
translation_suggestion_1 = (
self._create_translation_suggestion_in_lang_with_html_and_datetime(
'en', '<p>Translation 1</p>',
self.mocked_review_submission_datetime))
        # Translation suggestion 2 has waited slightly less time than
        # translation suggestion 1 so that the two translation suggestions
        # are distinguishable in terms of review wait time.
translation_suggestion_2 = (
self._create_translation_suggestion_in_lang_with_html_and_datetime(
'fr', '<p>Translation 2</p>',
self.mocked_review_submission_datetime + datetime.timedelta(
hours=1)))
reviewable_suggestion_email_infos = (
self._create_reviewable_suggestion_email_infos_from_suggestions(
[translation_suggestion_1, translation_suggestion_2]))
expected_email_html_body = (
'Hi user1,'
'<br><br>'
'There are suggestions on the '
'<a href="%s%s">Contributor Dashboard</a> that have been waiting '
'for more than 0 days for review. Please take a look at the '
'suggestions mentioned below and help them get reviewed by going '
'to the <a href="%s%s#/roles">admin roles page</a> and either:'
'<br><br><ul>'
'<li>Add more reviewers to the suggestion types that have '
'suggestions waiting too long for a review</li><br>'
'<li>Find the existing reviewers and email reviewers directly '
'about the suggestions waiting for a review</li><br>'
'</ul><br>'
'Here are the suggestions that have been waiting too long for a '
'review:'
'<br><br>'
'<ul>'
'<li>The following English translation suggestion was submitted '
'for review 2 days ago:'
'<br>Translation 1</li><br>'
'<li>The following French translation suggestion was submitted for '
'review 2 days ago:'
'<br>Translation 2</li><br>'
'</ul><br>'
'Thanks so much - we appreciate your help!<br>'
'Best Wishes!<br><br>'
'- The Oppia Contributor Dashboard Team' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
feconf.OPPIA_SITE_URL, feconf.ADMIN_URL)
)
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.swap(
suggestion_models,
'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 0):
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_admins_suggestions_waiting_long(
[self.admin_1_id], [], [],
reviewable_suggestion_email_infos)
)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.CURRICULUM_ADMIN_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.admin_1_id,
self.CURRICULUM_ADMIN_1_EMAIL)
def test_email_sent_to_admin_if_multi_suggestion_types_waiting_for_review(
self):
config_services.set_property(
'committer_id',
'notify_admins_suggestions_waiting_too_long_is_enabled', True)
mocked_datetime_for_utcnow = (
self.mocked_review_submission_datetime + datetime.timedelta(
days=2, hours=1, minutes=5))
suggestion_1 = (
self._create_translation_suggestion_in_lang_with_html_and_datetime(
'en', '<p>Translation 1</p>',
self.mocked_review_submission_datetime))
        # Suggestion 2 has waited slightly less time than suggestion 1 so that
        # the two suggestions are distinguishable in terms of review wait
        # time.
suggestion_2 = (
self._create_translation_suggestion_in_lang_with_html_and_datetime(
'fr', '<p>Translation 2</p>',
self.mocked_review_submission_datetime + datetime.timedelta(
minutes=5)))
        # Similarly, suggestion 3 has waited less time than both suggestion 1
        # and suggestion 2 so that all three suggestions are distinguishable
        # in terms of review wait time.
suggestion_3 = (
self._create_question_suggestion_with_question_html_and_datetime(
'<p>Question 1</p>',
self.mocked_review_submission_datetime + datetime.timedelta(
hours=1)))
reviewable_suggestion_email_infos = (
self._create_reviewable_suggestion_email_infos_from_suggestions(
[suggestion_1, suggestion_2, suggestion_3]))
expected_email_html_body = (
'Hi user1,'
'<br><br>'
'There are suggestions on the '
'<a href="%s%s">Contributor Dashboard</a> that have been waiting '
'for more than 0 days for review. Please take a look at the '
'suggestions mentioned below and help them get reviewed by going '
'to the <a href="%s%s#/roles">admin roles page</a> and either:'
'<br><br><ul>'
'<li>Add more reviewers to the suggestion types that have '
'suggestions waiting too long for a review</li><br>'
'<li>Find the existing reviewers and email reviewers directly '
'about the suggestions waiting for a review</li><br>'
'</ul><br>'
'Here are the suggestions that have been waiting too long for a '
'review:'
'<br><br>'
'<ul>'
'<li>The following English translation suggestion was submitted '
'for review 2 days ago:'
'<br>Translation 1</li><br>'
'<li>The following French translation suggestion was submitted for '
'review 2 days ago:'
'<br>Translation 2</li><br>'
'</ul><br>'
'Thanks so much - we appreciate your help!<br>'
'Best Wishes!<br><br>'
'- The Oppia Contributor Dashboard Team' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
feconf.OPPIA_SITE_URL, feconf.ADMIN_URL)
)
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.swap(
suggestion_models,
'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 0):
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_admins_suggestions_waiting_long(
[self.admin_1_id], [], [],
reviewable_suggestion_email_infos)
)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.CURRICULUM_ADMIN_1_EMAIL)
messages.sort(key=lambda m: m.html)
self.assertEqual(len(messages), 2)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
sent_email_models = email_models.SentEmailModel.get_all().filter(
email_models.SentEmailModel.recipient_id == self.admin_1_id).fetch()
self.assertEqual(len(sent_email_models), 2)
sent_email_models.sort(key=lambda m: m.html_body)
sent_email_model = sent_email_models[0]
self.assertEqual(
sent_email_model.subject,
email_manager
.ADMIN_NOTIFICATION_FOR_SUGGESTIONS_NEEDING_REVIEW_EMAIL_DATA[
'email_subject'])
self.assertEqual(
sent_email_model.recipient_id, self.admin_1_id)
self.assertEqual(
sent_email_model.recipient_email, self.CURRICULUM_ADMIN_1_EMAIL)
self.assertEqual(
sent_email_model.html_body, expected_email_html_body)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent,
feconf.EMAIL_INTENT_ADDRESS_CONTRIBUTOR_DASHBOARD_SUGGESTIONS)
def test_email_sent_to_multiple_admins(self):
config_services.set_property(
'committer_id',
'notify_admins_suggestions_waiting_too_long_is_enabled', True)
question_suggestion = (
self._create_question_suggestion_with_question_html_and_datetime(
'<p>What is the meaning of life?</p>',
self.mocked_review_submission_datetime))
reviewable_suggestion_email_info = (
suggestion_services
.create_reviewable_suggestion_email_info_from_suggestion(
question_suggestion))
review_wait_time = 5
mocked_datetime_for_utcnow = (
reviewable_suggestion_email_info.submission_datetime +
datetime.timedelta(days=review_wait_time))
expected_email_html_body_admin_1 = (
'Hi user1,'
'<br><br>'
'There are suggestions on the '
'<a href="%s%s">Contributor Dashboard</a> that have been waiting '
'for more than 0 days for review. Please take a look at the '
'suggestions mentioned below and help them get reviewed by going '
'to the <a href="%s%s#/roles">admin roles page</a> and either:'
'<br><br><ul>'
'<li>Add more reviewers to the suggestion types that have '
'suggestions waiting too long for a review</li><br>'
'<li>Find the existing reviewers and email reviewers directly '
'about the suggestions waiting for a review</li><br>'
'</ul><br>'
'Here are the suggestions that have been waiting too long for a '
'review:'
'<br><br>'
'<ul>'
'<li>The following question suggestion was submitted for review 5 '
'days ago:'
'<br>What is the meaning of life?</li><br>'
'</ul><br>'
'Thanks so much - we appreciate your help!<br>'
'Best Wishes!<br><br>'
'- The Oppia Contributor Dashboard Team' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
feconf.OPPIA_SITE_URL, feconf.ADMIN_URL)
)
expected_email_html_body_admin_2 = (
'Hi user2,'
'<br><br>'
'There are suggestions on the '
'<a href="%s%s">Contributor Dashboard</a> that have been waiting '
'for more than 0 days for review. Please take a look at the '
'suggestions mentioned below and help them get reviewed by going '
'to the <a href="%s%s#/roles">admin roles page</a> and either:'
'<br><br><ul>'
'<li>Add more reviewers to the suggestion types that have '
'suggestions waiting too long for a review</li><br>'
'<li>Find the existing reviewers and email reviewers directly '
'about the suggestions waiting for a review</li><br>'
'</ul><br>'
'Here are the suggestions that have been waiting too long for a '
'review:'
'<br><br>'
'<ul>'
'<li>The following question suggestion was submitted for review 5 '
'days ago:'
'<br>What is the meaning of life?</li><br>'
'</ul><br>'
'Thanks so much - we appreciate your help!<br>'
'Best Wishes!<br><br>'
'- The Oppia Contributor Dashboard Team' % (
feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL,
feconf.OPPIA_SITE_URL, feconf.ADMIN_URL))
with self.can_send_emails_ctx, self.log_new_error_ctx:
with self.swap(
suggestion_models,
'SUGGESTION_REVIEW_WAIT_TIME_THRESHOLD_IN_DAYS', 0):
with self.mock_datetime_utcnow(mocked_datetime_for_utcnow):
(
email_manager
.send_mail_to_notify_admins_suggestions_waiting_long(
[self.admin_1_id, self.admin_2_id], [], [],
[reviewable_suggestion_email_info])
)
# Make sure correct emails are sent.
messages = self._get_sent_email_messages(self.CURRICULUM_ADMIN_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body_admin_1)
messages = self._get_sent_email_messages(self.CURRICULUM_ADMIN_2_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body_admin_2)
# Make sure correct email models are stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body_admin_1, self.admin_1_id,
self.CURRICULUM_ADMIN_1_EMAIL)
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body_admin_2, self.admin_2_id,
self.CURRICULUM_ADMIN_2_EMAIL)
class NotifyAdminsContributorDashboardReviewersNeededTests(
test_utils.EmailTestBase):
"""Test emailing admins that Contributor Dashboard reviewers are needed in
specific suggestion types.
"""
CURRICULUM_ADMIN_1_USERNAME = 'user1'
CURRICULUM_ADMIN_1_EMAIL = 'user1@community.org'
CURRICULUM_ADMIN_2_USERNAME = 'user2'
CURRICULUM_ADMIN_2_EMAIL = 'user2@community.org'
AUTHOR_EMAIL = 'author@example.com'
target_id = 'exp1'
skill_id = 'skill_123456'
def _create_translation_suggestion_with_language_code(self, language_code):
"""Creates a translation suggestion in the given language_code."""
add_translation_change_dict = {
'cmd': exp_domain.CMD_ADD_WRITTEN_TRANSLATION,
'state_name': feconf.DEFAULT_INIT_STATE_NAME,
'content_id': feconf.DEFAULT_NEW_STATE_CONTENT_ID,
'language_code': language_code,
'content_html': feconf.DEFAULT_INIT_STATE_CONTENT_STR,
'translation_html': '<p>This is the translated content.</p>',
'data_format': 'html'
}
return suggestion_services.create_suggestion(
feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT,
feconf.ENTITY_TYPE_EXPLORATION,
self.target_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
self.author_id, add_translation_change_dict,
'test description'
)
def _create_question_suggestion(self):
"""Creates a question suggestion."""
add_question_change_dict = {
'cmd': question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION,
'question_dict': {
'question_state_data': self._create_valid_question_data(
'default_state').to_dict(),
'language_code': constants.DEFAULT_LANGUAGE_CODE,
'question_state_data_schema_version': (
feconf.CURRENT_STATE_SCHEMA_VERSION),
'linked_skill_ids': ['skill_1'],
'inapplicable_skill_misconception_ids': ['skillid12345-1']
},
'skill_id': self.skill_id,
'skill_difficulty': 0.3
}
return suggestion_services.create_suggestion(
feconf.SUGGESTION_TYPE_ADD_QUESTION,
feconf.ENTITY_TYPE_SKILL,
self.skill_id, feconf.CURRENT_STATE_SCHEMA_VERSION,
self.author_id, add_question_change_dict,
'test description'
)
def _assert_email_data_stored_in_sent_email_model_is_correct(
self, expected_email_html_body, admin_id, admin_email):
"""Asserts that the sent email model that was created from the email
that was sent contains the right information.
"""
sent_email_models = email_models.SentEmailModel.get_all().filter(
email_models.SentEmailModel.recipient_id == admin_id).fetch()
self.assertEqual(len(sent_email_models), 1)
sent_email_model = sent_email_models[0]
self.assertEqual(
sent_email_model.subject,
email_manager.ADMIN_NOTIFICATION_FOR_REVIEWER_SHORTAGE_EMAIL_DATA[
'email_subject'])
self.assertEqual(
sent_email_model.recipient_id, admin_id)
self.assertEqual(
sent_email_model.recipient_email, admin_email)
self.assertEqual(
sent_email_model.html_body, expected_email_html_body)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent,
feconf.EMAIL_INTENT_ADD_CONTRIBUTOR_DASHBOARD_REVIEWERS)
def _log_error_for_tests(self, error_message):
"""Appends the error message to the logged errors list."""
self.logged_errors.append(error_message)
def _mock_logging_info(self, msg, *args):
"""Mocks logging.info() by appending the log message to the logged info
list.
"""
self.logged_info.append(msg % args)
def setUp(self):
super(
NotifyAdminsContributorDashboardReviewersNeededTests,
self).setUp()
self.signup(self.AUTHOR_EMAIL, 'author')
self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
self.signup(
self.CURRICULUM_ADMIN_1_EMAIL, self.CURRICULUM_ADMIN_1_USERNAME)
self.admin_1_id = self.get_user_id_from_email(
self.CURRICULUM_ADMIN_1_EMAIL)
self.signup(
self.CURRICULUM_ADMIN_2_EMAIL, self.CURRICULUM_ADMIN_2_USERNAME)
self.admin_2_id = self.get_user_id_from_email(
self.CURRICULUM_ADMIN_2_EMAIL)
self.save_new_valid_exploration(self.target_id, self.author_id)
self.save_new_skill(self.skill_id, self.author_id)
self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True)
self.cannot_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', False)
self.logged_errors = []
self.log_new_error_counter = test_utils.CallCounter(
self._log_error_for_tests)
self.log_new_error_ctx = self.swap(
email_manager, 'log_new_error', self.log_new_error_counter)
self.logged_info = []
self.log_new_info_ctx = self.swap(
logging, 'info', self._mock_logging_info)
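        # Default reviewer shortage used by the tests below: question
        # suggestions need more reviewers.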
self.suggestion_types_needing_reviewers = {
feconf.SUGGESTION_TYPE_ADD_QUESTION: {}
}
def test_email_not_sent_if_can_send_emails_is_false(self):
config_services.set_property(
'committer_id',
'enable_admin_notifications_for_reviewer_shortage', True)
with self.cannot_send_emails_ctx, self.log_new_error_ctx:
email_manager.send_mail_to_notify_admins_that_reviewers_are_needed(
[self.admin_1_id], [], [],
self.suggestion_types_needing_reviewers)
messages = self._get_all_sent_email_messages()
self.assertEqual(len(messages), 0)
self.assertEqual(self.log_new_error_counter.times_called, 1)
self.assertEqual(
self.logged_errors[0], 'This app cannot send emails to users.')
def test_email_not_sent_if_notifying_admins_reviewers_needed_is_disabled(
self):
config_services.set_property(
'committer_id',
'enable_admin_notifications_for_reviewer_shortage', False)
with self.can_send_emails_ctx, self.log_new_error_ctx:
email_manager.send_mail_to_notify_admins_that_reviewers_are_needed(
[self.admin_1_id], [], [],
self.suggestion_types_needing_reviewers)
messages = self._get_all_sent_email_messages()
self.assertEqual(len(messages), 0)
self.assertEqual(self.log_new_error_counter.times_called, 1)
self.assertEqual(
self.logged_errors[0],
'The "enable_admin_notifications_for_reviewer_shortage" '
'property must be enabled on the admin config page in order to '
'send admins the emails.')
def test_email_not_sent_if_no_admins_to_notify(self):
config_services.set_property(
'committer_id',
'enable_admin_notifications_for_reviewer_shortage', True)
with self.can_send_emails_ctx, self.log_new_error_ctx:
email_manager.send_mail_to_notify_admins_that_reviewers_are_needed(
[], [], [],
self.suggestion_types_needing_reviewers)
messages = self._get_all_sent_email_messages()
self.assertEqual(len(messages), 0)
self.assertEqual(self.log_new_error_counter.times_called, 1)
self.assertEqual(
self.logged_errors[0], 'There were no admins to notify.')
def test_email_not_sent_if_no_suggestion_types_that_need_reviewers(
self):
config_services.set_property(
'committer_id',
'enable_admin_notifications_for_reviewer_shortage', True)
with self.can_send_emails_ctx, self.log_new_info_ctx:
email_manager.send_mail_to_notify_admins_that_reviewers_are_needed(
[self.admin_1_id], [], [], {})
messages = self._get_all_sent_email_messages()
self.assertEqual(len(messages), 0)
self.assertEqual(
self.logged_info[0],
'There were no suggestion types that needed more reviewers on the '
'Contributor Dashboard.')
def test_email_not_sent_if_admin_email_does_not_exist(self):
config_services.set_property(
'committer_id',
'enable_admin_notifications_for_reviewer_shortage', True)
with self.can_send_emails_ctx, self.log_new_error_ctx:
email_manager.send_mail_to_notify_admins_that_reviewers_are_needed(
['admin_id_without_email'], [], [],
self.suggestion_types_needing_reviewers)
messages = self._get_all_sent_email_messages()
self.assertEqual(len(messages), 0)
self.assertEqual(self.log_new_error_counter.times_called, 1)
self.assertEqual(
self.logged_errors[0],
'There was no email for the given admin id: admin_id_without_email.'
)
def test_email_sent_to_admin_if_question_suggestions_need_reviewers(
self):
config_services.set_property(
'committer_id',
'enable_admin_notifications_for_reviewer_shortage', True)
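        # Since no question reviewers have been added, creating a question
        # suggestion causes question suggestions to be reported as needing
        # more reviewers.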
self._create_question_suggestion()
suggestion_types_needing_reviewers = (
suggestion_services.get_suggestion_types_that_need_reviewers())
self.assertDictEqual(
suggestion_types_needing_reviewers,
{feconf.SUGGESTION_TYPE_ADD_QUESTION: {}})
expected_email_html_body = (
'Hi user1,'
'<br><br>'
'In the <a href="%s%s#/roles">admin roles page,</a> please add '
'reviewers to the Contributor Dashboard community by entering '
'their username(s) and allow reviewing for the suggestion types '
'that need more reviewers bolded below.'
'<br><br>'
'There have been <b>question suggestions</b> created on the '
'<a href="%s%s">Contributor Dashboard page</a> where there are not '
'enough reviewers.'
'<br><br>'
'Thanks so much - we appreciate your help!<br><br>'
'Best Wishes!<br>'
'- The Oppia Contributor Dashboard Team' % (
feconf.OPPIA_SITE_URL, feconf.ADMIN_URL, feconf.OPPIA_SITE_URL,
feconf.CONTRIBUTOR_DASHBOARD_URL)
)
with self.can_send_emails_ctx:
email_manager.send_mail_to_notify_admins_that_reviewers_are_needed(
[self.admin_1_id], [], [],
self.suggestion_types_needing_reviewers)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.CURRICULUM_ADMIN_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.admin_1_id,
self.CURRICULUM_ADMIN_1_EMAIL)
def test_email_sent_to_admins_if_question_suggestions_need_reviewers(
self):
config_services.set_property(
'committer_id',
'enable_admin_notifications_for_reviewer_shortage', True)
self._create_question_suggestion()
suggestion_types_needing_reviewers = (
suggestion_services.get_suggestion_types_that_need_reviewers())
self.assertDictEqual(
suggestion_types_needing_reviewers,
{feconf.SUGGESTION_TYPE_ADD_QUESTION: {}})
expected_email_html_body_for_admin_1 = (
'Hi user1,'
'<br><br>'
'In the <a href="%s%s#/roles">admin roles page,</a> please add '
'reviewers to the Contributor Dashboard community by entering '
'their username(s) and allow reviewing for the suggestion types '
'that need more reviewers bolded below.'
'<br><br>'
'There have been <b>question suggestions</b> created on the '
'<a href="%s%s">Contributor Dashboard page</a> where there are not '
'enough reviewers.'
'<br><br>'
'Thanks so much - we appreciate your help!<br><br>'
'Best Wishes!<br>'
'- The Oppia Contributor Dashboard Team' % (
feconf.OPPIA_SITE_URL, feconf.ADMIN_URL, feconf.OPPIA_SITE_URL,
feconf.CONTRIBUTOR_DASHBOARD_URL)
)
expected_email_html_body_for_admin_2 = (
'Hi user2,'
'<br><br>'
'In the <a href="%s%s#/roles">admin roles page,</a> please add '
'reviewers to the Contributor Dashboard community by entering '
'their username(s) and allow reviewing for the suggestion types '
'that need more reviewers bolded below.'
'<br><br>'
'There have been <b>question suggestions</b> created on the '
'<a href="%s%s">Contributor Dashboard page</a> where there are not '
'enough reviewers.'
'<br><br>'
'Thanks so much - we appreciate your help!<br><br>'
'Best Wishes!<br>'
'- The Oppia Contributor Dashboard Team' % (
feconf.OPPIA_SITE_URL, feconf.ADMIN_URL, feconf.OPPIA_SITE_URL,
feconf.CONTRIBUTOR_DASHBOARD_URL)
)
with self.can_send_emails_ctx:
email_manager.send_mail_to_notify_admins_that_reviewers_are_needed(
[self.admin_1_id, self.admin_2_id], [], [],
suggestion_types_needing_reviewers)
# Make sure correct emails are sent.
messages = self._get_sent_email_messages(self.CURRICULUM_ADMIN_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body_for_admin_1)
messages = self._get_sent_email_messages(self.CURRICULUM_ADMIN_2_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body_for_admin_2)
# Make sure correct email models are stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body_for_admin_1, self.admin_1_id,
self.CURRICULUM_ADMIN_1_EMAIL)
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body_for_admin_2, self.admin_2_id,
self.CURRICULUM_ADMIN_2_EMAIL)
def test_admin_email_sent_if_translations_need_reviewers_for_one_lang(
self):
config_services.set_property(
'committer_id',
'enable_admin_notifications_for_reviewer_shortage', True)
self._create_translation_suggestion_with_language_code('hi')
suggestion_types_needing_reviewers = (
suggestion_services.get_suggestion_types_that_need_reviewers())
self.assertDictEqual(
suggestion_types_needing_reviewers,
{feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT: {'hi'}})
expected_email_html_body = (
'Hi user1,'
'<br><br>'
'In the <a href="%s%s#/roles">admin roles page,</a> please add '
'reviewers to the Contributor Dashboard community by entering '
'their username(s) and allow reviewing for the suggestion types '
'that need more reviewers bolded below.'
'<br><br>'
'There have been <b>Hindi translation suggestions</b> created on '
'the <a href="%s%s">Contributor Dashboard page</a> where there '
'are not enough reviewers.<br><br>'
'Thanks so much - we appreciate your help!<br><br>'
'Best Wishes!<br>'
'- The Oppia Contributor Dashboard Team' % (
feconf.OPPIA_SITE_URL, feconf.ADMIN_URL, feconf.OPPIA_SITE_URL,
feconf.CONTRIBUTOR_DASHBOARD_URL)
)
with self.can_send_emails_ctx:
email_manager.send_mail_to_notify_admins_that_reviewers_are_needed(
[self.admin_1_id], [], [],
suggestion_types_needing_reviewers)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.CURRICULUM_ADMIN_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.admin_1_id,
self.CURRICULUM_ADMIN_1_EMAIL)
def test_admin_emails_sent_if_translations_need_reviewers_for_one_lang(
self):
config_services.set_property(
'committer_id',
'enable_admin_notifications_for_reviewer_shortage', True)
self._create_translation_suggestion_with_language_code('hi')
suggestion_types_needing_reviewers = (
suggestion_services.get_suggestion_types_that_need_reviewers())
self.assertDictEqual(
suggestion_types_needing_reviewers,
{feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT: {'hi'}})
expected_email_html_body_for_admin_1 = (
'Hi user1,'
'<br><br>'
'In the <a href="%s%s#/roles">admin roles page,</a> please add '
'reviewers to the Contributor Dashboard community by entering '
'their username(s) and allow reviewing for the suggestion types '
'that need more reviewers bolded below.'
'<br><br>'
'There have been <b>Hindi translation suggestions</b> created on '
'the <a href="%s%s">Contributor Dashboard page</a> where there '
'are not enough reviewers.<br><br>'
'Thanks so much - we appreciate your help!<br><br>'
'Best Wishes!<br>'
'- The Oppia Contributor Dashboard Team' % (
feconf.OPPIA_SITE_URL, feconf.ADMIN_URL, feconf.OPPIA_SITE_URL,
feconf.CONTRIBUTOR_DASHBOARD_URL)
)
expected_email_html_body_for_admin_2 = (
'Hi user2,'
'<br><br>'
'In the <a href="%s%s#/roles">admin roles page,</a> please add '
'reviewers to the Contributor Dashboard community by entering '
'their username(s) and allow reviewing for the suggestion types '
'that need more reviewers bolded below.'
'<br><br>'
'There have been <b>Hindi translation suggestions</b> created on '
'the <a href="%s%s">Contributor Dashboard page</a> where there '
'are not enough reviewers.<br><br>'
'Thanks so much - we appreciate your help!<br><br>'
'Best Wishes!<br>'
'- The Oppia Contributor Dashboard Team' % (
feconf.OPPIA_SITE_URL, feconf.ADMIN_URL, feconf.OPPIA_SITE_URL,
feconf.CONTRIBUTOR_DASHBOARD_URL)
)
with self.can_send_emails_ctx:
email_manager.send_mail_to_notify_admins_that_reviewers_are_needed(
[self.admin_1_id, self.admin_2_id], [], [],
suggestion_types_needing_reviewers)
# Make sure correct emails are sent.
messages = self._get_sent_email_messages(self.CURRICULUM_ADMIN_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body_for_admin_1)
messages = self._get_sent_email_messages(self.CURRICULUM_ADMIN_2_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body_for_admin_2)
# Make sure correct email models are stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body_for_admin_1, self.admin_1_id,
self.CURRICULUM_ADMIN_1_EMAIL)
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body_for_admin_2, self.admin_2_id,
self.CURRICULUM_ADMIN_2_EMAIL)
def test_admin_email_sent_if_translations_need_reviewers_for_multi_lang(
self):
config_services.set_property(
'committer_id',
'enable_admin_notifications_for_reviewer_shortage', True)
self._create_translation_suggestion_with_language_code('fr')
self._create_translation_suggestion_with_language_code('hi')
suggestion_types_needing_reviewers = (
suggestion_services.get_suggestion_types_that_need_reviewers())
self.assertDictEqual(
suggestion_types_needing_reviewers,
{feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT: {
'fr', 'hi'}})
expected_email_html_body = (
'Hi user1,'
'<br><br>'
'In the <a href="%s%s#/roles">admin roles page,</a> please add '
'reviewers to the Contributor Dashboard community by entering '
'their username(s) and allow reviewing for the suggestion types '
'that need more reviewers bolded below.'
'<br><br>'
'There have been <b>translation suggestions</b> created on the '
'<a href="%s%s">Contributor Dashboard page</a> in languages where '
'there are not enough reviewers. The languages that need more '
'reviewers are:'
'<br><ul>'
'<li><b>French</b></li><br>'
'<li><b>Hindi</b></li><br>'
'</ul><br>'
'Thanks so much - we appreciate your help!<br><br>'
'Best Wishes!<br>'
'- The Oppia Contributor Dashboard Team' % (
feconf.OPPIA_SITE_URL, feconf.ADMIN_URL, feconf.OPPIA_SITE_URL,
feconf.CONTRIBUTOR_DASHBOARD_URL)
)
with self.can_send_emails_ctx:
email_manager.send_mail_to_notify_admins_that_reviewers_are_needed(
[self.admin_1_id], [], [],
suggestion_types_needing_reviewers)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(self.CURRICULUM_ADMIN_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body, self.admin_1_id,
self.CURRICULUM_ADMIN_1_EMAIL)
def test_admin_emails_sent_if_translations_need_reviewers_for_multi_lang(
self):
config_services.set_property(
'committer_id',
'enable_admin_notifications_for_reviewer_shortage', True)
self._create_translation_suggestion_with_language_code('fr')
self._create_translation_suggestion_with_language_code('hi')
suggestion_types_needing_reviewers = (
suggestion_services.get_suggestion_types_that_need_reviewers())
self.assertDictEqual(
suggestion_types_needing_reviewers,
{feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT: {
'fr', 'hi'}})
expected_email_html_body_for_admin_1 = (
'Hi user1,'
'<br><br>'
'In the <a href="%s%s#/roles">admin roles page,</a> please add '
'reviewers to the Contributor Dashboard community by entering '
'their username(s) and allow reviewing for the suggestion types '
'that need more reviewers bolded below.'
'<br><br>'
'There have been <b>translation suggestions</b> created on the '
'<a href="%s%s">Contributor Dashboard page</a> in languages where '
'there are not enough reviewers. The languages that need more '
'reviewers are:'
'<br><ul>'
'<li><b>French</b></li><br>'
'<li><b>Hindi</b></li><br>'
'</ul><br>'
'Thanks so much - we appreciate your help!<br><br>'
'Best Wishes!<br>'
'- The Oppia Contributor Dashboard Team' % (
feconf.OPPIA_SITE_URL, feconf.ADMIN_URL, feconf.OPPIA_SITE_URL,
feconf.CONTRIBUTOR_DASHBOARD_URL)
)
expected_email_html_body_for_admin_2 = (
'Hi user2,'
'<br><br>'
'In the <a href="%s%s#/roles">admin roles page,</a> please add '
'reviewers to the Contributor Dashboard community by entering '
'their username(s) and allow reviewing for the suggestion types '
'that need more reviewers bolded below.'
'<br><br>'
'There have been <b>translation suggestions</b> created on the '
'<a href="%s%s">Contributor Dashboard page</a> in languages where '
'there are not enough reviewers. The languages that need more '
'reviewers are:'
'<br><ul>'
'<li><b>French</b></li><br>'
'<li><b>Hindi</b></li><br>'
'</ul><br>'
'Thanks so much - we appreciate your help!<br><br>'
'Best Wishes!<br>'
'- The Oppia Contributor Dashboard Team' % (
feconf.OPPIA_SITE_URL, feconf.ADMIN_URL, feconf.OPPIA_SITE_URL,
feconf.CONTRIBUTOR_DASHBOARD_URL)
)
with self.can_send_emails_ctx:
email_manager.send_mail_to_notify_admins_that_reviewers_are_needed(
[self.admin_1_id, self.admin_2_id], [], [],
suggestion_types_needing_reviewers)
# Make sure correct emails are sent.
messages = self._get_sent_email_messages(self.CURRICULUM_ADMIN_1_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body_for_admin_1)
messages = self._get_sent_email_messages(self.CURRICULUM_ADMIN_2_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body_for_admin_2)
# Make sure correct email models are stored.
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body_for_admin_1, self.admin_1_id,
self.CURRICULUM_ADMIN_1_EMAIL)
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body_for_admin_2, self.admin_2_id,
self.CURRICULUM_ADMIN_2_EMAIL)
    def test_email_sent_to_admins_if_multi_suggestion_types_needing_reviewers(
self):
config_services.set_property(
'committer_id',
'enable_admin_notifications_for_reviewer_shortage', True)
self._create_translation_suggestion_with_language_code('fr')
self._create_translation_suggestion_with_language_code('hi')
self._create_question_suggestion()
suggestion_types_needing_reviewers = (
suggestion_services.get_suggestion_types_that_need_reviewers())
self.assertDictEqual(
suggestion_types_needing_reviewers,
{
feconf.SUGGESTION_TYPE_TRANSLATE_CONTENT: {
'fr', 'hi'},
feconf.SUGGESTION_TYPE_ADD_QUESTION: {}
})
expected_email_html_body_for_admin_1 = (
'Hi user1,'
'<br><br>'
'In the <a href="%s%s#/roles">admin roles page,</a> please add '
'reviewers to the Contributor Dashboard community by entering '
'their username(s) and allow reviewing for the suggestion types '
'that need more reviewers bolded below.'
'<br><br>'
'There have been <b>translation suggestions</b> created on the '
'<a href="%s%s">Contributor Dashboard page</a> in languages where '
'there are not enough reviewers. The languages that need more '
'reviewers are:'
'<br><ul>'
'<li><b>French</b></li><br>'
'<li><b>Hindi</b></li><br>'
'</ul><br>'
'Thanks so much - we appreciate your help!<br><br>'
'Best Wishes!<br>'
'- The Oppia Contributor Dashboard Team' % (
feconf.OPPIA_SITE_URL, feconf.ADMIN_URL, feconf.OPPIA_SITE_URL,
feconf.CONTRIBUTOR_DASHBOARD_URL)
)
expected_email_html_body_for_admin_2 = (
'Hi user2,'
'<br><br>'
'In the <a href="%s%s#/roles">admin roles page,</a> please add '
'reviewers to the Contributor Dashboard community by entering '
'their username(s) and allow reviewing for the suggestion types '
'that need more reviewers bolded below.'
'<br><br>'
'There have been <b>translation suggestions</b> created on the '
'<a href="%s%s">Contributor Dashboard page</a> in languages where '
'there are not enough reviewers. The languages that need more '
'reviewers are:'
'<br><ul>'
'<li><b>French</b></li><br>'
'<li><b>Hindi</b></li><br>'
'</ul><br>'
'Thanks so much - we appreciate your help!<br><br>'
'Best Wishes!<br>'
'- The Oppia Contributor Dashboard Team' % (
feconf.OPPIA_SITE_URL, feconf.ADMIN_URL, feconf.OPPIA_SITE_URL,
feconf.CONTRIBUTOR_DASHBOARD_URL)
)
with self.can_send_emails_ctx:
email_manager.send_mail_to_notify_admins_that_reviewers_are_needed(
[self.admin_1_id], [self.admin_2_id], [],
suggestion_types_needing_reviewers)
# Make sure correct emails are sent.
messages = self._get_sent_email_messages(self.CURRICULUM_ADMIN_1_EMAIL)
messages.sort(key=lambda m: m.html)
self.assertEqual(len(messages), 2)
self.assertEqual(messages[1].html, expected_email_html_body_for_admin_1)
messages = self._get_sent_email_messages(self.CURRICULUM_ADMIN_2_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body_for_admin_2)
# Make sure correct email models are stored.
sent_email_models = email_models.SentEmailModel.get_all().filter(
email_models.SentEmailModel.recipient_id == self.admin_1_id).fetch()
self.assertEqual(len(sent_email_models), 2)
sent_email_models.sort(key=lambda m: m.html_body)
sent_email_model = sent_email_models[1]
self.assertEqual(
sent_email_model.subject,
email_manager.ADMIN_NOTIFICATION_FOR_REVIEWER_SHORTAGE_EMAIL_DATA[
'email_subject'])
self.assertEqual(
sent_email_model.recipient_id, self.admin_1_id)
self.assertEqual(
sent_email_model.recipient_email, self.CURRICULUM_ADMIN_1_EMAIL)
self.assertEqual(
sent_email_model.html_body, expected_email_html_body_for_admin_1)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent,
feconf.EMAIL_INTENT_ADD_CONTRIBUTOR_DASHBOARD_REVIEWERS)
self._assert_email_data_stored_in_sent_email_model_is_correct(
expected_email_html_body_for_admin_2, self.admin_2_id,
self.CURRICULUM_ADMIN_2_EMAIL)
class QueryStatusNotificationEmailTests(test_utils.EmailTestBase):
"""Test that email is send to submitter when query has completed
or failed.
"""
SUBMITTER_USERNAME = 'submit'
SUBMITTER_EMAIL = 'submit@example.com'
SENDER_USERNAME = 'sender'
SENDER_EMAIL = 'sender@example.com'
RECIPIENT_A_EMAIL = 'a@example.com'
RECIPIENT_A_USERNAME = 'usera'
RECIPIENT_B_EMAIL = 'b@example.com'
RECIPIENT_B_USERNAME = 'userb'
def setUp(self):
super(QueryStatusNotificationEmailTests, self).setUp()
self.signup(self.SUBMITTER_EMAIL, self.SUBMITTER_USERNAME)
self.submitter_id = self.get_user_id_from_email(self.SUBMITTER_EMAIL)
self.signup(self.SENDER_EMAIL, self.SENDER_USERNAME)
self.sender_id = self.get_user_id_from_email(self.SENDER_EMAIL)
self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True)
self.signup(self.RECIPIENT_A_EMAIL, self.RECIPIENT_A_USERNAME)
self.signup(self.RECIPIENT_B_EMAIL, self.RECIPIENT_B_USERNAME)
self.set_curriculum_admins([self.SENDER_USERNAME, ])
self.recipient_a_id = self.get_user_id_from_email(
self.RECIPIENT_A_EMAIL)
self.recipient_b_id = self.get_user_id_from_email(
self.RECIPIENT_B_EMAIL)
self.recipient_ids = [self.recipient_a_id, self.recipient_b_id]
def test_that_correct_completion_email_is_sent(self):
query_id = 'qid'
expected_email_subject = 'Query qid has successfully completed'
expected_email_html_body = (
'Hi submit,<br>'
'Your query with id qid has succesfully completed its '
'execution. Visit the result page '
'<a href="https://www.oppia.org/emaildashboardresult/qid">here</a> '
'to see result of your query.<br><br>'
'Thanks!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="http://localhost:8181/preferences">Preferences</a> page.')
expected_email_text_body = (
'Hi submit,\n'
'Your query with id qid has succesfully completed its '
'execution. Visit the result page here '
'to see result of your query.\n\n'
'Thanks!\n'
'\n'
'Best wishes,\n'
'The Oppia Team\n'
'\n'
'You can change your email preferences via the Preferences page.')
with self.can_send_emails_ctx:
email_manager.send_query_completion_email(
self.submitter_id, query_id)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(
self.SUBMITTER_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
self.assertEqual(messages[0].body, expected_email_text_body)
# Make sure correct email model is stored.
all_models = email_models.SentEmailModel.get_all().fetch()
sent_email_model = all_models[0]
self.assertEqual(
sent_email_model.subject, expected_email_subject)
self.assertEqual(
sent_email_model.recipient_id, self.submitter_id)
self.assertEqual(
sent_email_model.recipient_email, self.SUBMITTER_EMAIL)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent,
feconf.EMAIL_INTENT_QUERY_STATUS_NOTIFICATION)
def test_that_correct_failure_email_is_sent(self):
query_id = 'qid'
query_params = {
'key1': 'val1',
'key2': 'val2'
}
expected_email_subject = 'Query qid has failed'
expected_email_html_body = (
'Hi submit,<br>'
'Your query with id qid has failed due to error '
'during execution. '
'Please check the query parameters and submit query again.<br><br>'
'Thanks!<br>'
'<br>'
'Best wishes,<br>'
'The Oppia Team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="http://localhost:8181/preferences">Preferences</a> page.')
expected_email_text_body = (
'Hi submit,\n'
'Your query with id qid has failed due to error '
'during execution. '
'Please check the query parameters and submit query again.\n\n'
'Thanks!\n'
'\n'
'Best wishes,\n'
'The Oppia Team\n'
'\n'
'You can change your email preferences via the Preferences page.')
expected_admin_email_text_body = (
'(Sent from dev-project-id)\n\n'
'Query job with qid query id has failed in its execution.\n'
'Query parameters:\n\n'
'key1: val1\n'
'key2: val2\n')
with self.can_send_emails_ctx:
email_manager.send_query_failure_email(
self.submitter_id, query_id, query_params)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(
self.SUBMITTER_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
self.assertEqual(messages[0].body, expected_email_text_body)
# Make sure correct email model is stored.
all_models = email_models.SentEmailModel.get_all().fetch()
sent_email_model = all_models[0]
self.assertEqual(
sent_email_model.subject, expected_email_subject)
self.assertEqual(
sent_email_model.recipient_id, self.submitter_id)
self.assertEqual(
sent_email_model.recipient_email, self.SUBMITTER_EMAIL)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent,
feconf.EMAIL_INTENT_QUERY_STATUS_NOTIFICATION)
# Make sure that correct email is sent to admin.
admin_messages = self._get_sent_email_messages(
feconf.ADMIN_EMAIL_ADDRESS)
self.assertEqual(len(admin_messages), 1)
self.assertEqual(
admin_messages[0].body, expected_admin_email_text_body)
def test_send_user_query_email(self):
email_subject = 'Bulk Email User Query Subject'
email_body = 'Bulk Email User Query Body'
email_intent = feconf.BULK_EMAIL_INTENT_CREATE_EXPLORATION
with self.can_send_emails_ctx:
email_manager.send_user_query_email(
self.sender_id, self.recipient_ids,
email_subject,
email_body,
email_intent)
messages_a = self._get_sent_email_messages(
self.RECIPIENT_A_EMAIL)
self.assertEqual(len(messages_a), 1)
messages_b = self._get_sent_email_messages(
self.RECIPIENT_B_EMAIL)
self.assertEqual(len(messages_b), 1)
# Make sure correct email model is stored.
all_models = email_models.BulkEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 1)
sent_email_model = all_models[0]
self.assertEqual(
sent_email_model.subject, email_subject)
self.assertEqual(
sent_email_model.sender_id, self.sender_id)
self.assertEqual(
sent_email_model.sender_email,
'%s <%s>' % (self.SENDER_USERNAME, self.SENDER_EMAIL))
self.assertEqual(
sent_email_model.intent,
email_intent)
class VoiceoverApplicationEmailUnitTest(test_utils.EmailTestBase):
"""Unit test related to voiceover application emails."""
APPLICANT_USERNAME = 'applicant'
APPLICANT_EMAIL = 'applicant@example.com'
def setUp(self):
super(VoiceoverApplicationEmailUnitTest, self).setUp()
self.signup(self.APPLICANT_EMAIL, self.APPLICANT_USERNAME)
self.applicant_id = self.get_user_id_from_email(self.APPLICANT_EMAIL)
user_services.update_email_preferences(
self.applicant_id, True, False, False, False)
self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True)
self.can_not_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', False)
def test_that_email_not_sent_if_can_send_emails_is_false(self):
with self.can_not_send_emails_ctx:
email_manager.send_accepted_voiceover_application_email(
self.applicant_id, 'Lesson to voiceover', 'en')
messages = self._get_sent_email_messages(
self.APPLICANT_EMAIL)
self.assertEqual(len(messages), 0)
def test_that_correct_accepted_voiceover_application_email_is_sent(self):
expected_email_subject = (
'[Accepted] Updates on submitted voiceover application')
expected_email_html_body = (
'Hi applicant,<br><br>'
'Congratulations! Your voiceover application for '
'"Lesson to voiceover" lesson got accepted and you have been '
'assigned with a voice artist role in the lesson. Now you will be '
'able to add voiceovers to the lesson in English '
'language.'
'<br><br>You can check the wiki page to learn'
'<a href="https://github.com/oppia/oppia/wiki/'
'Instructions-for-voice-artists">how to voiceover a lesson</a>'
'<br><br>'
'Thank you for helping improve Oppia\'s lessons!'
'- The Oppia Team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="http://localhost:8181/preferences">Preferences</a> page.')
with self.can_send_emails_ctx:
email_manager.send_accepted_voiceover_application_email(
self.applicant_id, 'Lesson to voiceover', 'en')
# Make sure correct email is sent.
messages = self._get_sent_email_messages(
self.APPLICANT_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
all_models = email_models.SentEmailModel.get_all().fetch()
sent_email_model = all_models[0]
self.assertEqual(
sent_email_model.subject, expected_email_subject)
self.assertEqual(
sent_email_model.recipient_id, self.applicant_id)
self.assertEqual(
sent_email_model.recipient_email, self.APPLICANT_EMAIL)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent,
feconf.EMAIL_INTENT_VOICEOVER_APPLICATION_UPDATES)
def test_that_correct_rejected_voiceover_application_email_is_sent(self):
expected_email_subject = 'Updates on submitted voiceover application'
expected_email_html_body = (
'Hi applicant,<br><br>'
'Your voiceover application for "Lesson to voiceover" lesson in '
'language English got rejected and the reviewer has left a message.'
'<br><br>Review message: A rejection message!<br><br>'
'You can create a new voiceover application through the'
'<a href="https://oppia.org/contributor-dashboard">'
'contributor dashboard</a> page.<br><br>'
'- The Oppia Team<br>'
'<br>'
'You can change your email preferences via the '
'<a href="http://localhost:8181/preferences">Preferences</a> page.')
with self.can_send_emails_ctx:
email_manager.send_rejected_voiceover_application_email(
self.applicant_id, 'Lesson to voiceover', 'en',
'A rejection message!')
# Make sure correct email is sent.
messages = self._get_sent_email_messages(
self.APPLICANT_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
all_models = email_models.SentEmailModel.get_all().fetch()
sent_email_model = all_models[0]
self.assertEqual(
sent_email_model.subject, expected_email_subject)
self.assertEqual(
sent_email_model.recipient_id, self.applicant_id)
self.assertEqual(
sent_email_model.recipient_email, self.APPLICANT_EMAIL)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent,
feconf.EMAIL_INTENT_VOICEOVER_APPLICATION_UPDATES)
def test_can_send_emails_is_false_logs_error(self):
"""When feconf.CAN_SEND_EMAILS is false,
send_rejected_voiceover_application_email(*args) should log an error.
"""
observed_log_messages = []
def _mock_logging_function(msg, *args):
"""Mocks logging.error()."""
observed_log_messages.append(msg % args)
with self.swap(logging, 'error', _mock_logging_function):
email_manager.send_rejected_voiceover_application_email(
self.applicant_id, 'Lesson to voiceover', 'en',
'A rejection message!')
expected_log_message = 'This app cannot send emails to users.'
self.assertEqual(
observed_log_messages, [expected_log_message])
class AccountDeletionEmailUnitTest(test_utils.EmailTestBase):
"""Unit test related to account deletion application emails."""
APPLICANT_USERNAME = 'applicant'
APPLICANT_EMAIL = 'applicant@example.com'
def setUp(self):
super(AccountDeletionEmailUnitTest, self).setUp()
self.signup(self.APPLICANT_EMAIL, self.APPLICANT_USERNAME)
self.applicant_id = self.get_user_id_from_email(self.APPLICANT_EMAIL)
self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True)
self.can_not_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', False)
def test_that_email_not_sent_if_can_send_emails_is_false(self):
with self.can_not_send_emails_ctx:
email_manager.send_account_deleted_email(
self.applicant_id, self.APPLICANT_EMAIL)
messages = self._get_sent_email_messages(
self.APPLICANT_EMAIL)
self.assertEqual(len(messages), 0)
def test_that_correct_account_deleted_email_is_sent(self):
expected_email_subject = 'Account deleted'
expected_email_html_body = (
'Hi applicant@example.com,<br><br>'
'Your account was successfully deleted.<br><br>'
'- The Oppia Team')
with self.can_send_emails_ctx:
email_manager.send_account_deleted_email(
self.applicant_id, self.APPLICANT_EMAIL)
# Make sure correct email is sent.
messages = self._get_sent_email_messages(
self.APPLICANT_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
all_models = email_models.SentEmailModel.get_all().fetch()
sent_email_model = all_models[0]
self.assertEqual(
sent_email_model.subject, expected_email_subject)
self.assertEqual(
sent_email_model.recipient_id, self.applicant_id)
self.assertEqual(
sent_email_model.recipient_email, self.APPLICANT_EMAIL)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent, feconf.EMAIL_INTENT_ACCOUNT_DELETED)
class BulkEmailsTests(test_utils.EmailTestBase):
SENDER_EMAIL = 'sender@example.com'
SENDER_USERNAME = 'sender'
FAKE_SENDER_EMAIL = 'fake@example.com'
FAKE_SENDER_USERNAME = 'fake'
RECIPIENT_A_EMAIL = 'a@example.com'
RECIPIENT_A_USERNAME = 'usera'
RECIPIENT_B_EMAIL = 'b@example.com'
RECIPIENT_B_USERNAME = 'userb'
def setUp(self):
super(BulkEmailsTests, self).setUp()
# SENDER is authorised sender.
# FAKE_SENDER is unauthorised sender.
# A and B are recipients.
self.signup(self.SENDER_EMAIL, self.SENDER_USERNAME)
self.sender_id = self.get_user_id_from_email(self.SENDER_EMAIL)
self.signup(self.FAKE_SENDER_EMAIL, self.FAKE_SENDER_USERNAME)
self.fake_sender_id = self.get_user_id_from_email(
self.FAKE_SENDER_EMAIL)
self.signup(self.RECIPIENT_A_EMAIL, self.RECIPIENT_A_USERNAME)
self.signup(self.RECIPIENT_B_EMAIL, self.RECIPIENT_B_USERNAME)
self.recipient_a_id = self.get_user_id_from_email(
self.RECIPIENT_A_EMAIL)
self.recipient_b_id = self.get_user_id_from_email(
self.RECIPIENT_B_EMAIL)
self.recipient_ids = [self.recipient_a_id, self.recipient_b_id]
self.set_curriculum_admins([self.SENDER_USERNAME])
self.can_send_emails_ctx = self.swap(feconf, 'CAN_SEND_EMAILS', True)
def test_that_correct_email_is_sent(self):
email_subject = 'Dummy subject'
email_html_body = 'Dummy email body.<br>'
email_text_body = 'Dummy email body.\n'
with self.can_send_emails_ctx:
email_manager.send_user_query_email(
self.sender_id, self.recipient_ids, email_subject,
email_html_body, feconf.BULK_EMAIL_INTENT_CREATE_EXPLORATION)
messages_a = self._get_sent_email_messages(
self.RECIPIENT_A_EMAIL)
self.assertEqual(len(messages_a), 1)
self.assertEqual(messages_a[0].html, email_html_body)
self.assertEqual(messages_a[0].body, email_text_body)
messages_b = self._get_sent_email_messages(
self.RECIPIENT_B_EMAIL)
self.assertEqual(len(messages_b), 1)
self.assertEqual(messages_b[0].html, email_html_body)
self.assertEqual(messages_b[0].body, email_text_body)
# Make sure correct email model is stored.
all_models = email_models.BulkEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 1)
sent_email_model = all_models[0]
self.assertEqual(
sent_email_model.subject, email_subject)
self.assertEqual(
sent_email_model.html_body, email_html_body)
self.assertEqual(
sent_email_model.sender_id, self.sender_id)
self.assertEqual(
sent_email_model.sender_email,
'%s <%s>' % (self.SENDER_USERNAME, self.SENDER_EMAIL))
self.assertEqual(
sent_email_model.intent,
feconf.BULK_EMAIL_INTENT_CREATE_EXPLORATION)
def test_email_not_sent_if_original_html_not_matches_cleaned_html(self):
email_subject = 'Dummy Email Subject'
email_html_body = 'Dummy email body.<td>'
with self.can_send_emails_ctx:
email_manager.send_user_query_email(
self.sender_id, self.recipient_ids,
email_subject, email_html_body,
feconf.BULK_EMAIL_INTENT_CREATE_EXPLORATION)
# Check that no email was sent.
messages_a = self._get_sent_email_messages(
self.RECIPIENT_A_EMAIL)
self.assertEqual(len(messages_a), 0)
messages_b = self._get_sent_email_messages(
self.RECIPIENT_B_EMAIL)
self.assertEqual(len(messages_b), 0)
def test_that_exception_is_raised_for_unauthorised_sender(self):
with self.can_send_emails_ctx, (
self.assertRaisesRegex(
Exception, 'Invalid sender_id for email')):
email_manager.send_user_query_email(
self.fake_sender_id, self.recipient_ids, 'email_subject',
'email_html_body', feconf.BULK_EMAIL_INTENT_MARKETING)
messages_a = self._get_sent_email_messages(
self.RECIPIENT_A_EMAIL)
self.assertEqual(len(messages_a), 0)
messages_b = self._get_sent_email_messages(
self.RECIPIENT_B_EMAIL)
self.assertEqual(len(messages_b), 0)
all_models = email_models.BulkEmailModel.get_all().fetch()
self.assertEqual(len(all_models), 0)
def test_that_test_email_is_sent_for_bulk_emails(self):
email_subject = 'Test Subject'
email_body = 'Test Body'
with self.can_send_emails_ctx:
email_manager.send_test_email_for_bulk_emails(
self.sender_id, email_subject, email_body
)
messages = self._get_sent_email_messages(self.SENDER_EMAIL)
self.assertEqual(len(messages), 1)
class EmailPreferencesTests(test_utils.EmailTestBase):
def test_can_users_receive_thread_email(self):
gae_ids = ('someUser1', 'someUser2')
exp_id = 'someExploration'
usernames = ('username1', 'username2')
emails = ('user1@example.com', 'user2@example.com')
user_ids = []
for user_id, username, user_email in zip(gae_ids, usernames, emails):
user_settings = user_services.create_new_user(user_id, user_email)
user_ids.append(user_settings.user_id)
user_services.set_username(user_settings.user_id, username)
# Both users can receive all emails in default setting.
self.assertListEqual(email_manager.can_users_receive_thread_email(
user_ids, exp_id, True), [True, True])
        self.assertListEqual(email_manager.can_users_receive_thread_email(
            user_ids, exp_id, False), [True, True])
        # The first user has muted feedback notifications for this exploration,
        # and should therefore receive only suggestion emails.
user_services.set_email_preferences_for_exploration(
user_ids[0], exp_id, mute_feedback_notifications=True)
self.assertListEqual(email_manager.can_users_receive_thread_email(
user_ids, exp_id, True), [True, True])
self.assertListEqual(email_manager.can_users_receive_thread_email(
user_ids, exp_id, False), [False, True])
        # The second user has muted suggestion notifications for this
        # exploration, and should therefore receive only feedback emails.
user_services.set_email_preferences_for_exploration(
user_ids[1], exp_id, mute_suggestion_notifications=True)
self.assertListEqual(email_manager.can_users_receive_thread_email(
user_ids, exp_id, True), [True, False])
self.assertListEqual(email_manager.can_users_receive_thread_email(
user_ids, exp_id, False), [False, True])
# Both users have disabled all emails globally, therefore they
# should not receive any emails.
for user_id in user_ids:
user_services.update_email_preferences(
user_id, True, True, False, True)
self.assertListEqual(email_manager.can_users_receive_thread_email(
user_ids, exp_id, True), [False, False])
        self.assertListEqual(email_manager.can_users_receive_thread_email(
            user_ids, exp_id, False), [False, False])
# Both users have unmuted feedback/suggestion emails for this
# exploration, but all emails are still disabled globally,
# therefore they should not receive any emails.
user_services.set_email_preferences_for_exploration(
user_ids[0], exp_id, mute_feedback_notifications=False)
user_services.set_email_preferences_for_exploration(
user_ids[1], exp_id, mute_suggestion_notifications=False)
user_services.update_email_preferences(
user_id, True, True, False, True)
self.assertListEqual(email_manager.can_users_receive_thread_email(
user_ids, exp_id, True), [False, False])
        self.assertListEqual(email_manager.can_users_receive_thread_email(
            user_ids, exp_id, False), [False, False])
        # Both users have enabled all emails globally, therefore they should
        # receive all emails.
for user_id in user_ids:
user_services.update_email_preferences(
user_id, True, True, True, True)
self.assertListEqual(email_manager.can_users_receive_thread_email(
user_ids, exp_id, True), [True, True])
        self.assertListEqual(email_manager.can_users_receive_thread_email(
            user_ids, exp_id, False), [True, True])
class ModeratorActionEmailsTests(test_utils.EmailTestBase):
MODERATOR_EMAIL = 'moderator@example.com'
MODERATOR_USERNAME = 'moderator'
RECIPIENT_EMAIL = 'a@example.com'
RECIPIENT_USERNAME = 'usera'
def setUp(self):
super(ModeratorActionEmailsTests, self).setUp()
self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
self.moderator_id = self.get_user_id_from_email(self.MODERATOR_EMAIL)
self.set_moderators([self.MODERATOR_USERNAME])
self.signup(self.RECIPIENT_EMAIL, self.RECIPIENT_USERNAME)
self.recipient_id = self.get_user_id_from_email(
self.RECIPIENT_EMAIL)
self.can_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', True)
self.can_not_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', False)
self.can_send_email_moderator_action_ctx = self.swap(
feconf, 'REQUIRE_EMAIL_ON_MODERATOR_ACTION', True)
def test_exception_raised_if_email_on_moderator_action_is_false(self):
with self.assertRaisesRegex(
Exception,
'For moderator emails to be sent, please ensure that '
'REQUIRE_EMAIL_ON_MODERATOR_ACTION is set to True.'):
email_manager.require_moderator_email_prereqs_are_satisfied()
def test_exception_raised_if_can_send_emails_is_false(self):
with self.can_send_email_moderator_action_ctx, self.assertRaisesRegex(
Exception,
'For moderator emails to be sent, please ensure that '
'CAN_SEND_EMAILS is set to True.'):
email_manager.require_moderator_email_prereqs_are_satisfied()
def test_correct_email_draft_received_on_exploration_unpublish(self):
expected_draft_text_body = (
'I\'m writing to inform you that '
'I have unpublished the above exploration.')
with self.can_send_emails_ctx, self.can_send_email_moderator_action_ctx:
d_text = email_manager.get_moderator_unpublish_exploration_email()
self.assertEqual(d_text, expected_draft_text_body)
def test_blank_draft_received_exploration_unpublish_exception_raised(self):
expected_draft_text_body = ''
with self.can_not_send_emails_ctx:
d_text = email_manager.get_moderator_unpublish_exploration_email()
self.assertEqual(d_text, expected_draft_text_body)
def test_correct_moderator_action_email_sent(self):
email_intent = 'unpublish_exploration'
exploration_title = 'Title'
email_html_body = 'Dummy email body.<br>'
with self.can_send_emails_ctx, (
self.can_send_email_moderator_action_ctx):
email_manager.send_moderator_action_email(
self.moderator_id, self.recipient_id,
email_intent, exploration_title, email_html_body)
messages = self._get_sent_email_messages(
self.RECIPIENT_EMAIL)
self.assertEqual(len(messages), 1)
class ContributionReviewerEmailTest(test_utils.EmailTestBase):
"""Test for assignment and removal of contribution reviewers."""
TRANSLATION_REVIEWER_EMAIL = 'translationreviewer@example.com'
VOICEOVER_REVIEWER_EMAIL = 'voiceoverreviewer@example.com'
QUESTION_REVIEWER_EMAIL = 'questionreviewer@example.com'
def setUp(self):
super(ContributionReviewerEmailTest, self).setUp()
self.signup(self.CURRICULUM_ADMIN_EMAIL, self.CURRICULUM_ADMIN_USERNAME)
self.signup(self.TRANSLATION_REVIEWER_EMAIL, 'translator')
self.signup(self.VOICEOVER_REVIEWER_EMAIL, 'voiceartist')
self.signup(self.QUESTION_REVIEWER_EMAIL, 'question')
self.translation_reviewer_id = self.get_user_id_from_email(
self.TRANSLATION_REVIEWER_EMAIL)
user_services.update_email_preferences(
self.translation_reviewer_id, True, False, False, False)
self.voiceover_reviewer_id = self.get_user_id_from_email(
self.VOICEOVER_REVIEWER_EMAIL)
user_services.update_email_preferences(
self.voiceover_reviewer_id, True, False, False, False)
self.question_reviewer_id = self.get_user_id_from_email(
self.QUESTION_REVIEWER_EMAIL)
user_services.update_email_preferences(
self.question_reviewer_id, True, False, False, False)
self.can_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', True)
self.can_not_send_emails_ctx = self.swap(
feconf, 'CAN_SEND_EMAILS', False)
def test_assign_translation_reviewer_email_for_can_send_emails_is_false(
self):
with self.can_not_send_emails_ctx:
email_manager.send_email_to_new_contribution_reviewer(
self.translation_reviewer_id,
constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_TRANSLATION,
language_code='hi')
messages = self._get_sent_email_messages(
self.TRANSLATION_REVIEWER_EMAIL)
self.assertEqual(len(messages), 0)
def test_assign_translation_reviewer_email_for_invalid_review_category(
self):
with self.assertRaisesRegex(Exception, 'Invalid review_category'):
email_manager.send_email_to_new_contribution_reviewer(
self.translation_reviewer_id, 'invalid_category')
def test_schema_of_new_reviewer_email_data_constant(self):
self.assertEqual(sorted(email_manager.NEW_REVIEWER_EMAIL_DATA.keys()), [
constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_QUESTION,
constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_TRANSLATION,
constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_VOICEOVER])
for category_details in email_manager.NEW_REVIEWER_EMAIL_DATA.values():
self.assertEqual(len(category_details), 4)
self.assertTrue(
'description' in category_details or (
'description_template' in category_details))
self.assertTrue('review_category' in category_details)
self.assertTrue(
'rights_message' in category_details or (
'rights_message_template' in category_details))
self.assertTrue('to_check' in category_details)
def test_send_assigned_translation_reviewer_email(self):
expected_email_subject = (
'You have been invited to review Oppia translations')
expected_email_html_body = (
'Hi translator,<br><br>'
'This is to let you know that the Oppia team has added you as a '
'reviewer for Hindi language translations. This allows you to '
'review translation suggestions made by contributors in the '
'Hindi language.<br><br>'
'You can check the translation suggestions waiting for review in '
'the <a href="https://www.oppia.org/contributor-dashboard">'
'Contributor Dashboard</a>.<br><br>'
'Thanks, and happy contributing!<br><br>'
'Best wishes,<br>'
'The Oppia Community')
with self.can_send_emails_ctx:
email_manager.send_email_to_new_contribution_reviewer(
self.translation_reviewer_id,
constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_TRANSLATION,
language_code='hi')
# Make sure correct email is sent.
messages = self._get_sent_email_messages(
self.TRANSLATION_REVIEWER_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
all_models = email_models.SentEmailModel.get_all().fetch()
sent_email_model = all_models[0]
self.assertEqual(
sent_email_model.subject, expected_email_subject)
self.assertEqual(
sent_email_model.recipient_id, self.translation_reviewer_id)
self.assertEqual(
sent_email_model.recipient_email,
self.TRANSLATION_REVIEWER_EMAIL)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent, feconf.EMAIL_INTENT_ONBOARD_REVIEWER)
def test_send_assigned_voiceover_reviewer_email(self):
expected_email_subject = (
'You have been invited to review Oppia voiceovers')
expected_email_html_body = (
'Hi voiceartist,<br><br>'
'This is to let you know that the Oppia team has added you as a '
'reviewer for Hindi language voiceovers. This allows you to '
'review voiceover applications made by contributors in the '
'Hindi language.<br><br>'
'You can check the voiceover applications waiting for review in '
'the <a href="https://www.oppia.org/contributor-dashboard">'
'Contributor Dashboard</a>.<br><br>'
'Thanks, and happy contributing!<br><br>'
'Best wishes,<br>'
'The Oppia Community')
with self.can_send_emails_ctx:
email_manager.send_email_to_new_contribution_reviewer(
self.voiceover_reviewer_id,
constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_VOICEOVER,
language_code='hi')
# Make sure correct email is sent.
messages = self._get_sent_email_messages(
self.VOICEOVER_REVIEWER_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
all_models = email_models.SentEmailModel.get_all().fetch()
sent_email_model = all_models[0]
self.assertEqual(
sent_email_model.subject, expected_email_subject)
self.assertEqual(
sent_email_model.recipient_id, self.voiceover_reviewer_id)
self.assertEqual(
sent_email_model.recipient_email,
self.VOICEOVER_REVIEWER_EMAIL)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent, feconf.EMAIL_INTENT_ONBOARD_REVIEWER)
def test_send_assigned_question_reviewer_email(self):
expected_email_subject = (
'You have been invited to review Oppia questions')
expected_email_html_body = (
'Hi question,<br><br>'
'This is to let you know that the Oppia team has added you as a '
'reviewer for questions. This allows you to review question '
'suggestions made by contributors.<br><br>'
'You can check the question suggestions waiting for review in the '
'<a href="https://www.oppia.org/contributor-dashboard">'
'Contributor Dashboard</a>.<br><br>'
'Thanks, and happy contributing!<br><br>'
'Best wishes,<br>'
'The Oppia Community')
with self.can_send_emails_ctx:
email_manager.send_email_to_new_contribution_reviewer(
self.question_reviewer_id,
constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_QUESTION,
language_code='hi')
# Make sure correct email is sent.
messages = self._get_sent_email_messages(
self.QUESTION_REVIEWER_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
all_models = email_models.SentEmailModel.get_all().fetch()
sent_email_model = all_models[0]
self.assertEqual(
sent_email_model.subject, expected_email_subject)
self.assertEqual(
sent_email_model.recipient_id, self.question_reviewer_id)
self.assertEqual(
sent_email_model.recipient_email, self.QUESTION_REVIEWER_EMAIL)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent, feconf.EMAIL_INTENT_ONBOARD_REVIEWER)
def test_email_is_not_sent_can_send_emails_is_false(self):
with self.can_not_send_emails_ctx:
email_manager.send_email_to_removed_contribution_reviewer(
self.translation_reviewer_id,
constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_TRANSLATION,
language_code='hi')
messages = self._get_sent_email_messages(
self.TRANSLATION_REVIEWER_EMAIL)
self.assertEqual(len(messages), 0)
def test_remove_translation_reviewer_email_for_invalid_review_category(
self):
with self.assertRaisesRegex(Exception, 'Invalid review_category'):
email_manager.send_email_to_removed_contribution_reviewer(
self.translation_reviewer_id, 'invalid_category')
def test_schema_of_removed_reviewer_email_data_constant(self):
self.assertEqual(
sorted(email_manager.REMOVED_REVIEWER_EMAIL_DATA.keys()), [
constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_QUESTION,
constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_TRANSLATION,
constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_VOICEOVER])
for category_details in (
email_manager.REMOVED_REVIEWER_EMAIL_DATA.values()):
self.assertEqual(len(category_details), 4)
self.assertTrue(
'role_description' in category_details or (
'role_description_template' in category_details))
self.assertTrue('review_category' in category_details)
self.assertTrue(
'rights_message' in category_details or (
'rights_message_template' in category_details))
self.assertTrue('contribution_allowed' in category_details)
def test_send_removed_translation_reviewer_email(self):
expected_email_subject = (
'You have been unassigned as a translation reviewer')
expected_email_html_body = (
'Hi translator,<br><br>'
'The Oppia team has removed you from the translation reviewer role '
'in the Hindi language. You won\'t be able to review translation '
'suggestions made by contributors in the Hindi language any more, '
'but you can still contribute translations through the '
'<a href="https://www.oppia.org/contributor-dashboard">'
'Contributor Dashboard</a>.<br><br>'
'Thanks, and happy contributing!<br><br>'
'Best wishes,<br>'
'The Oppia Community')
with self.can_send_emails_ctx:
email_manager.send_email_to_removed_contribution_reviewer(
self.translation_reviewer_id,
constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_TRANSLATION,
language_code='hi')
# Make sure correct email is sent.
messages = self._get_sent_email_messages(
self.TRANSLATION_REVIEWER_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
all_models = email_models.SentEmailModel.get_all().fetch()
sent_email_model = all_models[0]
self.assertEqual(
sent_email_model.subject, expected_email_subject)
self.assertEqual(
sent_email_model.recipient_id, self.translation_reviewer_id)
self.assertEqual(
sent_email_model.recipient_email,
self.TRANSLATION_REVIEWER_EMAIL)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent, feconf.EMAIL_INTENT_REMOVE_REVIEWER)
def test_send_removed_voiceover_reviewer_email(self):
expected_email_subject = (
'You have been unassigned as a voiceover reviewer')
expected_email_html_body = (
'Hi voiceartist,<br><br>'
'The Oppia team has removed you from the voiceover reviewer role '
'in the Hindi language. You won\'t be able to review voiceover '
'applications made by contributors in the Hindi language any more, '
'but you can still contribute voiceovers through the '
'<a href="https://www.oppia.org/contributor-dashboard">'
'Contributor Dashboard</a>.<br><br>'
'Thanks, and happy contributing!<br><br>'
'Best wishes,<br>'
'The Oppia Community')
with self.can_send_emails_ctx:
email_manager.send_email_to_removed_contribution_reviewer(
self.voiceover_reviewer_id,
constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_VOICEOVER,
language_code='hi')
# Make sure correct email is sent.
messages = self._get_sent_email_messages(
self.VOICEOVER_REVIEWER_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
all_models = email_models.SentEmailModel.get_all().fetch()
sent_email_model = all_models[0]
self.assertEqual(
sent_email_model.subject, expected_email_subject)
self.assertEqual(
sent_email_model.recipient_id, self.voiceover_reviewer_id)
self.assertEqual(
sent_email_model.recipient_email, self.VOICEOVER_REVIEWER_EMAIL)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent, feconf.EMAIL_INTENT_REMOVE_REVIEWER)
def test_send_removed_question_reviewer_email(self):
expected_email_subject = (
'You have been unassigned as a question reviewer')
expected_email_html_body = (
'Hi question,<br><br>'
'The Oppia team has removed you from the question reviewer role. '
'You won\'t be able to review question suggestions made by '
'contributors any more, but you can still contribute questions '
'through the <a href="https://www.oppia.org/contributor-dashboard">'
'Contributor Dashboard</a>.<br><br>'
'Thanks, and happy contributing!<br><br>'
'Best wishes,<br>'
'The Oppia Community')
with self.can_send_emails_ctx:
email_manager.send_email_to_removed_contribution_reviewer(
self.question_reviewer_id,
constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_QUESTION,
language_code='hi')
# Make sure correct email is sent.
messages = self._get_sent_email_messages(
self.QUESTION_REVIEWER_EMAIL)
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].html, expected_email_html_body)
# Make sure correct email model is stored.
all_models = email_models.SentEmailModel.get_all().fetch()
sent_email_model = all_models[0]
self.assertEqual(
sent_email_model.subject, expected_email_subject)
self.assertEqual(
sent_email_model.recipient_id, self.question_reviewer_id)
self.assertEqual(
sent_email_model.recipient_email, self.QUESTION_REVIEWER_EMAIL)
self.assertEqual(
sent_email_model.sender_id, feconf.SYSTEM_COMMITTER_ID)
self.assertEqual(
sent_email_model.sender_email,
'Site Admin <%s>' % feconf.NOREPLY_EMAIL_ADDRESS)
self.assertEqual(
sent_email_model.intent, feconf.EMAIL_INTENT_REMOVE_REVIEWER)
|
brianrodri/oppia
|
core/domain/email_manager_test.py
|
Python
|
apache-2.0
| 274,978
|
[
"VisIt"
] |
87a04676910c1b7beb921e3c0303d03537746a4e57ddc2d9e090aa672a7ab21d
|
# -*- coding: utf-8 -*-
# Copyright 2012 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from .fake_webapp import EXAMPLE_APP
class IsTextPresentTest(object):
def test_is_text_present(self):
"should verify if text is present"
self.assertTrue(self.browser.is_text_present("Example Header"))
def test_is_text_present_and_should_return_false(self):
"should verify if text is present and return false"
self.assertFalse(self.browser.is_text_present("Text that not exist"))
def test_is_text_present_and_should_wait_time(self):
"should verify if text is present and wait for five seconds"
self.browser.find_link_by_text("FOO").click()
self.assertTrue(self.browser.is_text_present("BAR!", wait_time=5))
def test_is_text_not_present(self):
"should verify if text is not present"
self.assertTrue(self.browser.is_text_not_present("Text that not exist"))
def test_is_text_not_present_and_should_return_false(self):
"should verify if text is not prasent and return false"
self.assertFalse(self.browser.is_text_not_present("Example Header"))
def test_is_text_not_present_and_should_wait_time(self):
"should verify if text is not present and wait for five seconds"
self.browser.find_link_by_text("FOO").click()
self.assertTrue(self.browser.is_text_not_present("another text", wait_time=5))
def test_is_text_present_no_body(self):
"should work properly (return false) even if there's no body"
self.browser.visit(EXAMPLE_APP + "no-body")
self.assertFalse(self.browser.is_text_present("No such text"))
def test_is_text_not_present_no_body(self):
"returns true if there's no body"
self.browser.visit(EXAMPLE_APP + "no-body")
self.assertTrue(self.browser.is_text_not_present("No such text"))
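# A minimal, illustrative sketch of how this mixin is typically consumed
# (assumed wiring only; the concrete driver-backed test classes live elsewhere
# in the splinter test suite, and the "chrome" driver name is just an example):
#
#     import unittest
#     from splinter import Browser
#
#     class ChromeIsTextPresentTest(IsTextPresentTest, unittest.TestCase):
#
#         @classmethod
#         def setUpClass(cls):
#             # One shared browser instance for the whole test class.
#             cls.browser = Browser("chrome")
#
#         def setUp(self):
#             # Each test starts from the example app's landing page.
#             self.browser.visit(EXAMPLE_APP)
#
#         @classmethod
#         def tearDownClass(cls):
#             cls.browser.quit()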
|
bmcculley/splinter
|
tests/is_text_present.py
|
Python
|
bsd-3-clause
| 1,967
|
[
"VisIt"
] |
1dca7f9e838e950926469e70a42629feb73846afbe2d4a2235b7fd381ca27cd1
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-05-19 17:56:30
# @Last modified by: Brian Cherinka
# @Last Modified time: 2017-07-06 14:43:00
# from __future__ import print_function, division, absolute_import
# from marvin.tests.api.conftest import ApiPage
# import pytest
#
#
# @pytest.mark.parametrize('page', [('api', 'getSpectrum')], ids=['getspectrum'], indirect=True)
# class TestGetSpectrum(object):
#
# @pytest.mark.parametrize('reqtype', [('get'), ('post')])
# @pytest.mark.parametrize('x, y', [(17, 17)])
# def test_spec_success(self, galaxy, page, params, reqtype, x, y):
# params.update({'name': galaxy.plateifu, 'x': x, 'y': y})
# data = {'flux': [], 'ivar': [], 'mask': [], 'wavelength': [], 'specres': []}
# page.load_page(reqtype, page.url.format(**params), params=params)
# page.assert_success(data, keys=True)
#
# @pytest.mark.parametrize('reqtype', [('get'), ('post')])
# @pytest.mark.parametrize('name, missing, errmsg, x, y',
# [(None, 'release', 'Missing data for required field.', 0, 0),
# ('badname', 'name', 'String does not match expected pattern.', 0, 0),
# ('84', 'name', 'Shorter than minimum length 4.', 0, 0),
# ('8485-1901', 'x', 'Must be between 0 and 100.', -1, 17),
# ('8485-1901', 'y', 'Must be between 0 and 100.', 17, -1)],
# ids=['norelease', 'badname', 'shortname', 'badx', 'bady'])
# def test_spec_failure(self, galaxy, page, reqtype, params, name, missing, errmsg, x, y):
# params.update({'name': name, 'x': x, 'y': y})
# if name is None:
# params.update({'name': galaxy.plateifu})
# page.route_no_valid_params(page.url.format(**params), missing, reqtype=reqtype, errmsg=errmsg)
# else:
# page.route_no_valid_params(page.url.format(**params), missing, reqtype=reqtype, params=params, errmsg=errmsg)
#
#
# @pytest.mark.parametrize('page', [('api', 'getProperties')], ids=['getproperties'], indirect=True)
# class TestGetProperties(object):
#
# @pytest.mark.parametrize('reqtype', [('get'), ('post')])
# @pytest.mark.parametrize('expprop', [('emline_gflux_ha_6564')], ids=['haflux'])
# def test_props_success(self, galaxy, page, params, reqtype, expprop):
# params.update({'name': galaxy.plateifu, 'x': galaxy.dap['x'], 'y': galaxy.dap['y'],
# 'template': galaxy.template.name})
# page.load_page(reqtype, page.url.format(**params), params=params)
# page.assert_success()
# expval = galaxy.dap[galaxy.template.name][expprop]
# props = page.json['data']['properties']
# assert expprop in props
# assert expval == props[expprop]
#
# @pytest.mark.parametrize('reqtype', [('get'), ('post')])
# @pytest.mark.parametrize('name, missing, errmsg, x, y, template',
# [(None, 'release', 'Missing data for required field.', 0, 0, None),
# ('badname', 'name', 'String does not match expected pattern.', 0, 0, None),
# ('84', 'name', 'Shorter than minimum length 4.', 0, 0, 'GAU-MILESHC'),
# ('8485-1901', 'x', 'Must be between 0 and 100.', -1, 17, 'GAU-MILESHC'),
# ('8485-1901', 'y', 'Must be between 0 and 100.', 17, -1, 'GAU-MILESHC'),
# ('8485-1901', 'template', 'Not a valid choice.', 17, 17, 'MILESHC')],
# ids=['norelease', 'badname', 'shortname', 'badx', 'bady', 'badtemplate'])
# def test_props_failure(self, galaxy, page, reqtype, params, name, missing, errmsg, x, y, template):
# params.update({'name': name, 'x': x, 'y': y, 'template': template})
# if name is None:
# params.update({'name': galaxy.plateifu})
# page.route_no_valid_params(page.url.format(**params), missing, reqtype=reqtype, errmsg=errmsg)
# else:
# page.route_no_valid_params(page.url.format(**params), missing, reqtype=reqtype, params=params, errmsg=errmsg)
#
#
# @pytest.mark.parametrize('page', [('api', 'getModels')], ids=['getmodels'], indirect=True)
# class TestGetModels(object):
#
# @pytest.mark.parametrize('reqtype', [('get'), ('post')])
# def test_models_success(self, galaxy, page, params, reqtype):
# if galaxy.release == 'MPL-4':
# pytest.skip('MPL-4 does not have modelcubes')
# params.update({'name': galaxy.plateifu, 'x': galaxy.dap['x'],
# 'y': galaxy.dap['y'], 'template': galaxy.template.name})
# page.load_page(reqtype, page.url.format(**params), params=params)
# data = {'bintype': galaxy.bintype.name, 'template': galaxy.template.name, 'flux_array': [],
# 'flux_mask': [], 'flux_ivar': [], 'model_array': [], 'model_emline': [],
# 'model_emline_base': [], 'model_emline_mask': []}
# page.assert_success(data, keys=True)
# jdata = page.json['data']
# expdata = galaxy.dap[galaxy.template.name]['model']
# mcdata = [jdata['flux_array'][0], jdata['flux_ivar'][0], jdata['flux_mask'][0],
# jdata['model_array'][0], jdata['model_emline'][0], jdata['model_emline_base'][0],
# jdata['model_emline_mask'][0]]
# assert expdata == mcdata
#
# @pytest.mark.parametrize('reqtype', [('get'), ('post')])
# @pytest.mark.parametrize('name, missing, errmsg, x, y, template',
# [(None, 'release', 'Missing data for required field.', 0, 0, None),
# ('badname', 'name', 'String does not match expected pattern.', 0, 0, None),
# ('84', 'name', 'Shorter than minimum length 4.', 0, 0, 'GAU-MILESHC'),
# ('8485-1901', 'x', 'Must be between 0 and 100.', -1, 17, 'GAU-MILESHC'),
# ('8485-1901', 'y', 'Must be between 0 and 100.', 17, -1, 'GAU-MILESHC'),
# ('8485-1901', 'template', 'Not a valid choice.', 17, 17, 'MILESHC')],
# ids=['norelease', 'badname', 'shortname', 'badx', 'bady', 'badtemplate'])
# def test_models_failure(self, galaxy, page, reqtype, params, name, missing, errmsg, x, y, template):
# params.update({'name': name, 'x': x, 'y': y, 'template': template})
# if name is None:
# params.update({'name': galaxy.plateifu})
# page.route_no_valid_params(page.url.format(**params), missing, reqtype=reqtype, errmsg=errmsg)
# else:
# page.route_no_valid_params(page.url.format(**params), missing, reqtype=reqtype, params=params, errmsg=errmsg)
|
albireox/marvin
|
python/marvin/tests/api/test_spaxel.py
|
Python
|
bsd-3-clause
| 6,974
|
[
"Brian",
"Galaxy"
] |
4d3bd7ddac67e7f246afffd940edea3ac9d3d9373827a9e65e0984fd4ae7d11d
|
# f90wrap: F90 to Python interface generator with derived type support
#
# Copyright James Kermode 2011-2018
#
# This file is part of f90wrap
# For the latest version see github.com/jameskermode/f90wrap
#
# f90wrap is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# f90wrap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with f90wrap. If not, see <http://www.gnu.org/licenses/>.
#
# If you would like to license the source code under different terms,
# please contact James Kermode, james.kermode@gmail.com
import logging
import os
import warnings
import numpy as np
from f90wrap import codegen as cg
from f90wrap import fortran as ft
from f90wrap.six import string_types # Python 2/3 compatibility library
from f90wrap.transform import ArrayDimensionConverter
log = logging.getLogger(__name__)
class F90WrapperGenerator(ft.FortranVisitor, cg.CodeGenerator):
"""
Creates the Fortran90 code necessary to wrap a given Fortran parse tree
suitable for input to `f2py`.
Each node of the tree (Module, Subroutine etc.) is wrapped according to the
rules in this class when visited (using `F90WrapperGenerator.visit()`).
Each module's wrapper is written to a separate file, with top-level
procedures written to another separate file. Derived-types and arrays (both
    of normal types and derived types) are specially treated. For each, a number
of subroutines allowing the getting/setting of items, and retrieval of array
length are written. Furthermore, derived-types are treated as opaque
references to enable wrapping with `f2py`.
Parameters
----------
prefix : `str`
A string with which to prefix module, subroutine and type names.
sizeof_fortran_t : `int`
The size, in bytes, of a pointer to a fortran derived type ??
string_lengths : `dict`
This is never used...
abort_func : `str`
Name of a Fortran function to be invoked when a fatal error occurs
kind_map : `dict`
Dictionary mapping Fortran types and kinds to C-types
types: `dict`
Dictionary mapping type names to Fortran modules where they are defined
"""
def __init__(self, prefix, sizeof_fortran_t, string_lengths, abort_func,
kind_map, types, default_to_inout, max_length=None):
if max_length is None:
max_length = 120
cg.CodeGenerator.__init__(self, indent=' ' * 4,
max_length=max_length,
continuation='&',
comment='!')
ft.FortranVisitor.__init__(self)
self.prefix = prefix
self.sizeof_fortran_t = sizeof_fortran_t
self.string_lengths = string_lengths
self.abort_func = abort_func
self.kind_map = kind_map
self.types = types
self.default_to_inout = default_to_inout
def visit_Root(self, node):
"""
Write a wrapper for top-level procedures.
"""
# clean up any previous wrapper files
top_level_wrapper_file = '%s%s.f90' % (self.prefix, 'toplevel')
f90_wrapper_files = (['%s%s.f90' % (self.prefix,
os.path.splitext(os.path.basename(mod.filename))[0])
for mod in node.modules] +
[top_level_wrapper_file])
for f90_wrapper_file in f90_wrapper_files:
if os.path.exists(f90_wrapper_file):
os.unlink(f90_wrapper_file)
self.code = []
self.generic_visit(node)
if len(self.code) > 0:
f90_wrapper_file = open(top_level_wrapper_file, 'w')
f90_wrapper_file.write(str(self))
f90_wrapper_file.close()
def visit_Module(self, node):
"""
Wrap modules. Each Fortran module generates one wrapper source file.
Subroutines and elements within each module are properly wrapped.
"""
log.info('F90WrapperGenerator visiting module %s' % node.name)
self.code = []
self.write('! Module %s defined in file %s' % (node.name, node.filename))
self.write()
self.generic_visit(node)
for el in node.elements:
dims = list(filter(lambda x: x.startswith('dimension'), el.attributes))
if len(dims) == 0: # proper scalar type (normal or derived)
self._write_scalar_wrappers(node, el, self.sizeof_fortran_t)
elif el.type.startswith('type'): # array of derived types
self._write_dt_array_wrapper(node, el, dims[0], self.sizeof_fortran_t)
else:
if 'parameter' not in el.attributes:
self._write_sc_array_wrapper(node, el, dims[0], self.sizeof_fortran_t)
self.write('! End of module %s defined in file %s' % (node.name, node.filename))
self.write()
if len(self.code) > 0:
f90_wrapper_name = '%s%s.f90' % (self.prefix, os.path.splitext(os.path.basename(node.filename))[0])
if os.path.exists(f90_wrapper_name):
warnings.warn('Source file %s contains code for more than one module!' % node.filename)
f90_wrapper_file = open(f90_wrapper_name, 'a')
f90_wrapper_file.write(str(self))
f90_wrapper_file.close()
self.code = []
def write_uses_lines(self, node, extra_uses_dict=None):
"""
Write "uses mod, only: sub" lines to the code.
Parameters
----------
node : Node of parse tree
"""
all_uses = {}
node_uses = []
if hasattr(node, 'uses'):
for use in node.uses:
if isinstance(use, string_types):
node_uses.append((use, None))
else:
node_uses.append(use)
if extra_uses_dict is not None:
for (mod, only) in extra_uses_dict.items():
node_uses.append((mod, only))
if node_uses:
for (mod, only) in node_uses:
if mod in all_uses:
if only is None:
continue
for symbol in only:
if all_uses[mod] is None:
all_uses[mod] = []
if symbol not in all_uses[mod]:
all_uses[mod] += [symbol]
elif only is not None:
all_uses[mod] = list(only)
else:
all_uses[mod] = None
for mod, only in all_uses.items():
if only is not None:
                self.write('use %s, only: %s' % (mod, ', '.join(set(only)))) # YANN: "set" to avoid redundancy
else:
self.write('use %s' % mod)
def write_super_type_lines(self, ty):
self.write('type ' + ty.name)
self.indent()
for el in ty.elements:
self.write(el.type + ''.join(', ' + attr for attr in el.attributes) + ' :: ' + el.name)
self.dedent()
self.write('end type ' + ty.name)
self.write()
def write_type_lines(self, tname, recursive=False):
"""
Write a pointer type for a given type name
Parameters
----------
tname : `str`
Should be the name of a derived type in the wrapped code.
recursive : `boolean`
Adjusts array pointer for recursive derived type array
"""
tname = ft.strip_type(tname)
if not recursive:
self.write("""type %(typename)s_ptr_type
type(%(typename)s), pointer :: p => NULL()
end type %(typename)s_ptr_type""" % {'typename': tname})
else:
self.write("""type %(typename)s_rec_ptr_type
type(%(typename)s), pointer :: p => NULL()
end type %(typename)s_rec_ptr_type""" % {'typename': tname})
def write_arg_decl_lines(self, node):
"""
Write argument declaration lines to the code
Takes care of argument attributes, and opaque references for derived
types, as well as f2py-specific lines.
"""
for arg in node.arguments:
if 'callback' in arg.attributes:
return 'external ' + arg.name
attributes = [attr for attr in arg.attributes if attr in ('optional', 'pointer', 'intent(in)',
'intent(out)', 'intent(inout)') or
attr.startswith('dimension')]
arg_dict = {'arg_type': arg.type,
'type_name': arg.type.startswith('type') and arg.type[5:-1] or None,
'arg_name': arg.name} # self.prefix+arg.name}
if arg.name in node.transfer_in or arg.name in node.transfer_out:
self.write('type(%(type_name)s_ptr_type) :: %(arg_name)s_ptr' % arg_dict)
arg_dict['arg_type'] = arg.wrapper_type
attributes.append('dimension(%d)' % arg.wrapper_dim)
arg_dict['arg_attribs'] = ', '.join(attributes)
arg_dict['comma'] = len(attributes) != 0 and ', ' or ''
#character array definition
#https://github.com/numpy/numpy/issues/18684
if arg.type == 'character(*)' :
arg_dict['arg_type'] = 'character*(*)'
self.write('%(arg_type)s%(comma)s%(arg_attribs)s :: %(arg_name)s' % arg_dict)
if hasattr(arg, 'f2py_line'):
self.write(arg.f2py_line)
elif self.default_to_inout and all('intent' not in attr for attr in arg.attributes):
# No f2py instruction and no explicit intent : force f2py to make the argument intent(inout)
                # This is put as an option to preserve backwards compatibility
self.write('!f2py intent(inout) ' + arg.name)
def write_transfer_in_lines(self, node):
"""
Write transfer of opaque references.
"""
for arg in node.arguments:
arg_dict = {'arg_name': arg.name, # self.prefix+arg.name,
'arg_type': arg.type}
if arg.name in node.transfer_in:
if 'optional' in arg.attributes:
self.write("if (present(%(arg_name)s)) then" % arg_dict)
self.indent()
self.write('%(arg_name)s_ptr = transfer(%(arg_name)s, %(arg_name)s_ptr)' % arg_dict)
if 'optional' in arg.attributes:
self.dedent()
self.write('else')
self.indent()
self.write('%(arg_name)s_ptr%%p => null()' % arg_dict)
self.dedent()
self.write('end if')
def write_init_lines(self, node):
"""
Write special user-provided init lines to a node.
"""
for alloc in node.allocate:
self.write('allocate(%s_ptr%%p)' % alloc) # (self.prefix, alloc))
for arg in node.arguments:
if not hasattr(arg, 'init_lines'):
continue
exe_optional, exe = arg.init_lines
D = {'OLD_ARG': arg.name,
'ARG': arg.name, # self.prefix+arg.name,
'PTR': arg.name + '_ptr%p'}
if 'optional' in arg.attributes:
self.write(exe_optional % D)
else:
self.write(exe % D)
def write_call_lines(self, node, func_name):
"""
Write line that calls a single wrapped Fortran routine
"""
if 'skip_call' in node.attributes:
return
orig_node = node
arg_node = node
if hasattr(node, 'orig_node'):
orig_node = node.orig_node
            arg_node = orig_node # get argument list from original node
def dummy_arg_name(arg):
return arg.orig_name
def actual_arg_name(arg):
name = arg.name
if ((hasattr(node, 'transfer_in') and arg.name in node.transfer_in) or
(hasattr(node, 'transfer_out') and arg.name in node.transfer_out)):
name += '_ptr%p'
if 'super-type' in arg.doc:
name += '%items'
return name
if node.mod_name is not None:
# use keyword arguments if subroutine is in a module and we have an explicit interface
arg_names = ['%s=%s' % (dummy_arg_name(arg), actual_arg_name(arg))
for arg in arg_node.arguments
if 'intent(hide)' not in arg.attributes]
else:
arg_names = [actual_arg_name(arg) for arg in arg_node.arguments
if 'intent(hide)' not in arg.attributes]
if isinstance(orig_node, ft.Function):
self.write('%(ret_val)s = %(func_name)s(%(arg_names)s)' %
{'ret_val': actual_arg_name(orig_node.ret_val),
'func_name': func_name,
'arg_names': ', '.join(arg_names)})
else:
if func_name == 'assignment(=)':
if len(arg_names) != 2:
raise RuntimeError("assignment(=) interface with len(arg_names) != 2")
arg_names = [arg_name.split('=')[1] for arg_name in arg_names]
self.write('%(lhs)s = %(rhs)s' %
{'lhs': arg_names[0],
'rhs': arg_names[1]})
else:
self.write('call %(sub_name)s(%(arg_names)s)' %
{'sub_name': func_name,
'arg_names': ', '.join(arg_names)})
def write_transfer_out_lines(self, node):
"""
Write transfer from opaque reference.
"""
for arg in node.arguments:
if arg.name in node.transfer_out:
self.write('%(arg_name)s = transfer(%(arg_name)s_ptr, %(arg_name)s)' %
{'arg_name': arg.name})
def write_finalise_lines(self, node):
"""
Deallocate the opaque reference to clean up.
"""
for dealloc in node.deallocate:
self.write('deallocate(%s_ptr%%p)' % dealloc) # (self.prefix, dealloc))
def visit_Procedure(self, node):
"""
Write wrapper code necessary for a Fortran subroutine or function
"""
call_name = node.name
if hasattr(node, 'call_name'):
call_name = node.call_name
log.info(
'F90WrapperGenerator visiting routine %s call_name %s mod_name %r' % (node.name, call_name, node.mod_name))
self.write("subroutine %(sub_name)s%(arg_names)s" %
{'sub_name': self.prefix + node.name,
'arg_names': '(' + ', '.join([arg.name for arg in node.arguments]) + ')'
if node.arguments else ''})
self.indent()
self.write_uses_lines(node)
self.write("implicit none")
if node.mod_name is None:
self.write('external %s' % call_name)
if hasattr(node, 'orig_node') and isinstance(node.orig_node, ft.Function):
self.write('%s %s' % (node.orig_node.ret_val.type, node.name))
self.write()
for tname in node.types:
if tname in self.types and 'super-type' in self.types[tname].doc:
self.write_super_type_lines(self.types[tname])
self.write_type_lines(tname)
self.write_arg_decl_lines(node)
self.write_transfer_in_lines(node)
self.write_init_lines(node)
self.write_call_lines(node, call_name)
self.write_transfer_out_lines(node)
self.write_finalise_lines(node)
self.dedent()
self.write("end subroutine %(sub_name)s" % {'sub_name': self.prefix + node.name})
self.write()
return self.generic_visit(node)
def visit_Type(self, node):
"""
Properly wraps derived types, including derived-type arrays.
"""
log.info('F90WrapperGenerator visiting type %s' % node.name)
for el in node.elements:
dims = list(filter(lambda x: x.startswith('dimension'), el.attributes))
if len(dims) == 0: # proper scalar type (normal or derived)
self._write_scalar_wrappers(node, el, self.sizeof_fortran_t)
elif el.type.startswith('type'): # array of derived types
self._write_dt_array_wrapper(node, el, dims[0], self.sizeof_fortran_t)
else:
self._write_sc_array_wrapper(node, el, dims[0], self.sizeof_fortran_t)
return self.generic_visit(node)
def _write_sc_array_wrapper(self, t, el, dims, sizeof_fortran_t):
"""
Write wrapper for arrays of intrinsic types
Parameters
----------
t : `fortran.Type` node
Derived-type node of the parse tree.
el : `fortran.Element` node
            An element of a module or derived type which is an array of an intrinsic type
dims : `tuple` of `int`s
The dimensions of the element
        sizeof_fortran_t : `int`
The size, in bytes, of a pointer to a fortran derived type ??
"""
if isinstance(t, ft.Type):
this = 'this, '
else:
this = 'dummy_this, '
self.write('subroutine %s%s__array__%s(%snd, dtype, dshape, dloc)' % (self.prefix, t.name, el.orig_name, this))
self.indent()
if isinstance(t, ft.Module):
self.write_uses_lines(t, {t.name: ['%s_%s => %s' % (t.name, el.name, el.name)]})
else:
self.write_uses_lines(t)
self.write('implicit none')
if isinstance(t, ft.Type):
self.write_type_lines(t.name)
self.write('integer, intent(in) :: this(%d)' % sizeof_fortran_t)
self.write('type(%s_ptr_type) :: this_ptr' % t.name)
else:
self.write('integer, intent(in) :: dummy_this(%d)' % sizeof_fortran_t)
self.write('integer, intent(out) :: nd')
self.write('integer, intent(out) :: dtype')
try:
rank = len(ArrayDimensionConverter.split_dimensions(dims))
if el.type.startswith('character'):
rank += 1
except ValueError:
rank = 1
self.write('integer, dimension(10), intent(out) :: dshape')
self.write('integer*%d, intent(out) :: dloc' % np.dtype('O').itemsize)
self.write()
self.write('nd = %d' % rank)
self.write('dtype = %s' % ft.fortran_array_type(el.type, self.kind_map))
if isinstance(t, ft.Type):
self.write('this_ptr = transfer(this, this_ptr)')
array_name = 'this_ptr%%p%%%s' % el.orig_name
else:
array_name = '%s_%s' % (t.name, el.orig_name)
if 'allocatable' in el.attributes:
self.write('if (allocated(%s)) then' % array_name)
self.indent()
if el.type.startswith('character'):
first = ','.join(['1' for i in range(rank - 1)])
self.write('dshape(1:%d) = (/len(%s(%s)), shape(%s)/)' % (rank, array_name, first, array_name))
else:
self.write('dshape(1:%d) = shape(%s)' % (rank, array_name))
self.write('dloc = loc(%s)' % array_name)
if 'allocatable' in el.attributes:
self.dedent()
self.write('else')
self.indent()
self.write('dloc = 0')
self.dedent()
self.write('end if')
self.dedent()
self.write('end subroutine %s%s__array__%s' % (self.prefix, t.name, el.orig_name))
self.write()
def _write_dt_array_wrapper(self, t, element, dims,
sizeof_fortran_t):
"""
Write fortran get/set/len routines for a (1-dimensional) derived-type array.
Parameters
----------
t : `fortran.Type` node
Derived-type node of the parse tree.
el : `fortran.Element` node
An element of a module which is derived-type array
dims : `tuple` of `int`s
The dimensions of the element
        sizeof_fortran_t : `int`
The size, in bytes, of a pointer to a fortran derived type ??
"""
if element.type.startswith('type') and len(ArrayDimensionConverter.split_dimensions(dims)) != 1:
return
self._write_array_getset_item(t, element, sizeof_fortran_t, 'get')
self._write_array_getset_item(t, element, sizeof_fortran_t, 'set')
self._write_array_len(t, element, sizeof_fortran_t)
def _write_scalar_wrappers(self, t, element, sizeof_fortran_t):
"""
Write fortran get/set routines for scalar derived-types
Parameters
----------
t : `fortran.Type` node
Derived-type node of the parse tree.
el : `fortran.Element` node
            A scalar element of a module or derived type
        sizeof_fortran_t : `int`
The size, in bytes, of a pointer to a fortran derived type ??
"""
self._write_scalar_wrapper(t, element, sizeof_fortran_t, "get")
if 'parameter' not in element.attributes:
self._write_scalar_wrapper(t, element, sizeof_fortran_t, "set")
def _write_array_getset_item(self, t, el, sizeof_fortran_t, getset):
"""
Write a subroutine to get/set items in a derived-type array.
Parameters
----------
t : `fortran.Type` node
Derived-type node of the parse tree.
el : `fortran.Element` node
An element of a module which is derived-type array
        sizeof_fortran_t : `int`
The size, in bytes, of a pointer to a fortran derived type ??
getset : `str` {``"get"``,``"set"``}
String indicating whether to write a get routine, or a set routine.
"""
        # The getset flag and the inout intent simply switch between a get and a set routine.
inout = "in"
if getset == "get":
inout = "out"
if isinstance(t, ft.Type):
this = self.prefix + 'this'
else:
this = 'dummy_this'
safe_i = self.prefix + 'i' # YANN: i could be in the "uses" clauses
# TODO: check if el.orig_name would be needed here instead of el.name
self.write('subroutine %s%s__array_%sitem__%s(%s, %s, %s)' % (self.prefix, t.name,
getset, el.name,
this,
safe_i,
el.name + 'item'))
self.indent()
self.write()
extra_uses = {}
if isinstance(t, ft.Module):
extra_uses[t.name] = ['%s_%s => %s' % (t.name, el.name, el.name)]
elif isinstance(t, ft.Type):
if 'super-type' in t.doc:
# YANN: propagate parameter uses
for use in t.uses:
if use[0] in extra_uses and use[1][0] not in extra_uses[use[0]]:
extra_uses[use[0]].append(use[1][0])
else:
extra_uses[use[0]] = [use[1][0]]
else:
extra_uses[t.mod_name] = [t.name]
mod = self.types[el.type].mod_name
el_tname = ft.strip_type(el.type)
if mod in extra_uses:
extra_uses[mod].append(el_tname)
else:
extra_uses[mod] = [el_tname]
self.write_uses_lines(el, extra_uses)
self.write('implicit none')
self.write()
if 'super-type' in t.doc:
self.write_super_type_lines(t)
# Check if the type has recursive definition:
same_type = (ft.strip_type(t.name) == ft.strip_type(el.type))
if isinstance(t, ft.Type):
self.write_type_lines(t.name)
self.write_type_lines(el.type,same_type)
self.write('integer, intent(in) :: %s(%d)' % (this, sizeof_fortran_t))
if isinstance(t, ft.Type):
self.write('type(%s_ptr_type) :: this_ptr' % t.name)
array_name = 'this_ptr%%p%%%s' % el.name
else:
array_name = '%s_%s' % (t.name, el.name)
self.write('integer, intent(in) :: %s' % (safe_i))
self.write('integer, intent(%s) :: %s(%d)' % (inout, el.name + 'item', sizeof_fortran_t))
if not same_type:
self.write('type(%s_ptr_type) :: %s_ptr' % (ft.strip_type(el.type), el.name))
else:
self.write('type(%s_rec_ptr_type) :: %s_ptr' % (ft.strip_type(el.type),el.name))
self.write()
if isinstance(t, ft.Type):
self.write('this_ptr = transfer(%s, this_ptr)' % (this))
if 'allocatable' in el.attributes:
self.write('if (allocated(%s)) then' % array_name)
self.indent()
self.write('if (%s < 1 .or. %s > size(%s)) then' % (safe_i, safe_i, array_name))
self.indent()
self.write('call %s("array index out of range")' % self.abort_func)
self.dedent()
self.write('else')
self.indent()
if getset == "get":
self.write('%s_ptr%%p => %s(%s)' % (el.name, array_name, safe_i))
self.write('%s = transfer(%s_ptr,%s)' % (el.name + 'item', el.name, el.name + 'item'))
else:
self.write('%s_ptr = transfer(%s,%s_ptr)' % (el.name, el.name + 'item', el.name))
self.write('%s(%s) = %s_ptr%%p' % (array_name, safe_i, el.name))
self.dedent()
self.write('endif')
if 'allocatable' in el.attributes:
self.dedent()
self.write('else')
self.indent()
self.write('call %s("derived type array not allocated")' % self.abort_func)
self.dedent()
self.write('end if')
self.dedent()
self.write('end subroutine %s%s__array_%sitem__%s' % (self.prefix, t.name,
getset, el.name))
self.write()
def _write_array_len(self, t, el, sizeof_fortran_t):
"""
Write a subroutine which returns the length of a derived-type array
Parameters
----------
t : `fortran.Type` node or `fortran.Module` node
Node of the parse tree which contains this derived-type as an element
el : `fortran.Element` node
An element of a module which is derived-type array
        sizeof_fortran_t : `int`
The size, in bytes, of a pointer to a fortran derived type ??
"""
if isinstance(t, ft.Type):
this = self.prefix + 'this'
else:
this = 'dummy_this'
safe_n = self.prefix + 'n' # YANN: "n" could be in the "uses"
self.write('subroutine %s%s__array_len__%s(%s, %s)' % (self.prefix, t.name, el.name, this, safe_n))
self.indent()
self.write()
extra_uses = {}
if isinstance(t, ft.Module):
extra_uses[t.name] = ['%s_%s => %s' % (t.name, el.name, el.name)]
elif isinstance(t, ft.Type):
if 'super-type' in t.doc:
# YANN: propagate parameter uses
for use in t.uses:
if use[0] in extra_uses and use[1][0] not in extra_uses[use[0]]:
extra_uses[use[0]].append(use[1][0])
else:
extra_uses[use[0]] = [use[1][0]]
else:
extra_uses[self.types[t.name].mod_name] = [t.name]
mod = self.types[el.type].mod_name
el_tname = ft.strip_type(el.type)
if mod in extra_uses:
extra_uses[mod].append(el_tname)
else:
extra_uses[mod] = [el_tname]
self.write_uses_lines(el, extra_uses)
self.write('implicit none')
self.write()
if 'super-type' in t.doc:
self.write_super_type_lines(t)
# Check if the type has recursive definition:
same_type = (ft.strip_type(t.name) == ft.strip_type(el.type))
if isinstance(t, ft.Type):
self.write_type_lines(t.name)
self.write_type_lines(el.type,same_type)
self.write('integer, intent(out) :: %s' % (safe_n))
self.write('integer, intent(in) :: %s(%d)' % (this, sizeof_fortran_t))
if isinstance(t, ft.Type):
self.write('type(%s_ptr_type) :: this_ptr' % t.name)
self.write()
self.write('this_ptr = transfer(%s, this_ptr)' % (this))
array_name = 'this_ptr%%p%%%s' % el.name
else:
array_name = '%s_%s' % (t.name, el.name)
if 'allocatable' in el.attributes:
self.write('if (allocated(%s)) then' % array_name)
self.indent()
self.write('%s = size(%s)' % (safe_n, array_name))
if 'allocatable' in el.attributes:
self.dedent()
self.write('else')
self.indent()
self.write('%s = 0' % (safe_n))
self.dedent()
self.write('end if')
self.dedent()
self.write('end subroutine %s%s__array_len__%s' % (self.prefix, t.name, el.name))
self.write()
def _write_scalar_wrapper(self, t, el, sizeof_fortran_t, getset):
"""
Write get/set routines for scalar elements of derived-types and modules
Parameters
----------
t : `fortran.Type` node or `fortran.Module` node
Node of the parse tree which contains this derived-type as an element
el : `fortran.Element` node
            A scalar element of a module or derived type
        sizeof_fortran_t : `int`
The size, in bytes, of a pointer to a fortran derived type ??
getset : `str` {``"get"``,``"set"``}
String indicating whether to write a get routine, or a set routine.
"""
log.debug('writing %s wrapper for %s.%s' % (getset, t.name, el.name))
        # The getset flag and the inout intent simply switch between a get and a set routine.
inout = "in"
if getset == "get":
inout = "out"
if isinstance(t, ft.Type):
this = 'this, '
elif isinstance(t, ft.Module):
this = ''
else:
raise ValueError("Don't know how to write scalar wrappers for %s type %s"(t, type(t)))
# Get appropriate use statements
extra_uses = {}
if isinstance(t, ft.Module):
extra_uses[t.name] = ['%s_%s => %s' % (t.name, el.orig_name, el.orig_name)]
elif isinstance(t, ft.Type):
extra_uses[self.types[t.name].mod_name] = [t.name]
# Check if the type has recursive definition:
same_type = (ft.strip_type(t.name) == ft.strip_type(el.type))
if el.type.startswith('type') and not same_type:
mod = self.types[el.type].mod_name
el_tname = ft.strip_type(el.type)
if mod in extra_uses:
extra_uses[mod].append(el_tname)
else:
extra_uses[mod] = [el_tname]
# Prepend prefix to element name
# -- Since some cases require a safer localvar name, we always transform it
localvar = self.prefix + el.orig_name
self.write('subroutine %s%s__%s__%s(%s%s)' % (self.prefix, t.name,
getset, el.orig_name, this, localvar))
self.indent()
self.write_uses_lines(el, extra_uses)
self.write('implicit none')
if isinstance(t, ft.Type):
self.write_type_lines(t.name)
if el.type.startswith('type') and not (el.type == 'type(' + t.name + ')'):
self.write_type_lines(el.type)
if isinstance(t, ft.Type):
self.write('integer, intent(in) :: this(%d)' % sizeof_fortran_t)
self.write('type(%s_ptr_type) :: this_ptr' % t.name)
# Return/set by value
attributes = [attr for attr in el.attributes if attr not in
['pointer', 'allocatable', 'public', 'parameter', 'save']]
if el.type.startswith('type'):
# For derived types elements, treat as opaque reference
self.write('integer, intent(%s) :: %s(%d)' % (inout, localvar, sizeof_fortran_t))
self.write('type(%s_ptr_type) :: %s_ptr' % (ft.strip_type(el.type), el.orig_name))
self.write()
if isinstance(t, ft.Type):
self.write('this_ptr = transfer(this, this_ptr)')
if getset == "get":
if isinstance(t, ft.Type):
self.write('%s_ptr%%p => this_ptr%%p%%%s' % (el.orig_name, el.orig_name))
else:
self.write('%s_ptr%%p => %s_%s' % (el.orig_name, t.name, el.orig_name))
self.write('%s = transfer(%s_ptr,%s)' % (localvar, el.orig_name, localvar))
else:
self.write('%s_ptr = transfer(%s,%s_ptr)' % (el.orig_name,
localvar,
el.orig_name))
if isinstance(t, ft.Type):
self.write('this_ptr%%p%%%s = %s_ptr%%p' % (el.orig_name, el.orig_name))
else:
self.write('%s_%s = %s_ptr%%p' % (t.name, el.orig_name, el.orig_name))
else:
if attributes != []:
self.write('%s, %s, intent(%s) :: %s' % (el.type,
','.join(attributes),
inout, localvar))
else:
self.write('%s, intent(%s) :: %s' % (el.type, inout, localvar))
self.write()
if isinstance(t, ft.Type):
self.write('this_ptr = transfer(this, this_ptr)')
if getset == "get":
if isinstance(t, ft.Type):
self.write('%s = this_ptr%%p%%%s' % (localvar, el.orig_name))
else:
self.write('%s = %s_%s' % (localvar, t.name, el.orig_name))
else:
if isinstance(t, ft.Type):
self.write('this_ptr%%p%%%s = %s' % (el.orig_name, localvar))
else:
self.write('%s_%s = %s' % (t.name, el.orig_name, localvar))
self.dedent()
self.write('end subroutine %s%s__%s__%s' % (self.prefix, t.name, getset,
el.orig_name))
self.write()
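# --- Hedged usage sketch (added for illustration; not part of f90wrap) ---
# The class docstring above says each parse-tree node is wrapped when visited
# via F90WrapperGenerator.visit().  The sketch below shows how the generator
# might be driven, assuming `tree` is a parse tree produced elsewhere by
# f90wrap's parser; the prefix, abort function name and the empty
# kind_map/types dictionaries are placeholder values, and sizeof_fortran_t is
# normally computed at runtime rather than hard-coded.
def _example_generate_wrappers(tree, sizeof_fortran_t=12):
    gen = F90WrapperGenerator(prefix='f90wrap_',
                              sizeof_fortran_t=sizeof_fortran_t,
                              string_lengths={},
                              abort_func='f90wrap_abort',
                              kind_map={},
                              types={},
                              default_to_inout=False)
    gen.visit(tree)  # writes f90wrap_<module>.f90 files plus a toplevel file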
|
jameskermode/f90wrap
|
f90wrap/f90wrapgen.py
|
Python
|
lgpl-3.0
| 35,177
|
[
"VisIt"
] |
bf16460a958f238e9588ade2f997a17111b3c4144c69633fea11421cd181d493
|
#!/usr/bin/env python
# Copyright (c) 2012 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
#
# This python code is used to migrate checkpoints that were created in one
# version of the simulator to a newer version. As features are added or bugs
# are fixed, some of the state that needs to be checkpointed can change. If
# you have many historic checkpoints that you use, manually editing them to
# fix them is both time consuming and error-prone.
# This script provides a way to migrate checkpoints to the newer repository in
# a programmatic way. It can be imported into another script or used on the
# command line. From the command line the script will either migrate every
# checkpoint it finds recursively (-r option) or a single checkpoint. When a
# change is made to the gem5 repository that breaks previous checkpoints, a
# from_N() method should be implemented here and the gem5CheckpointVersion
# variable in src/sim/serialize.hh should be incremented. For each version
# between the checkpoint's current version and the new version, the from_N()
# method will be run, passing in a ConfigParser object which contains the open
# file. As these operations can be ISA-specific, the method can verify the ISA
# and use regexes to find the correct sections that need to be updated.
import ConfigParser
import sys, os
import os.path as osp
# An example of a translator
def from_0(cpt):
if cpt.get('root','isa') == 'arm':
for sec in cpt.sections():
import re
# Search for all the execution contexts
if re.search('.*sys.*\.cpu.*\.x.\..*', sec):
# Update each one
mr = cpt.get(sec, 'miscRegs').split()
#mr.insert(21,0)
#mr.insert(26,0)
cpt.set(sec, 'miscRegs', ' '.join(str(x) for x in mr))
# The backing store supporting the memories in the system has changed
# in that it is now stored globally per address range. As a result the
# actual storage is separate from the memory controllers themselves.
def from_1(cpt):
for sec in cpt.sections():
import re
# Search for a physical memory
if re.search('.*sys.*\.physmem$', sec):
# Add the number of stores attribute to the global physmem
cpt.set(sec, 'nbr_of_stores', '1')
# Get the filename and size as this is moving to the
# specific backing store
mem_filename = cpt.get(sec, 'filename')
mem_size = cpt.get(sec, '_size')
cpt.remove_option(sec, 'filename')
cpt.remove_option(sec, '_size')
# Get the name so that we can create the new section
system_name = str(sec).split('.')[0]
section_name = system_name + '.physmem.store0'
cpt.add_section(section_name)
cpt.set(section_name, 'store_id', '0')
cpt.set(section_name, 'range_size', mem_size)
cpt.set(section_name, 'filename', mem_filename)
elif re.search('.*sys.*\.\w*mem$', sec):
# Due to the lack of information about a start address,
# this migration only works if there is a single memory in
# the system, thus starting at 0
raise ValueError("more than one memory detected (" + sec + ")")
def from_2(cpt):
for sec in cpt.sections():
import re
# Search for a CPUs
if re.search('.*sys.*cpu', sec):
try:
junk = cpt.get(sec, 'instCnt')
cpt.set(sec, '_pid', '0')
except ConfigParser.NoOptionError:
pass
# The ISA is now a separate SimObject, which means that we serialize
# it in a separate section instead of as a part of the ThreadContext.
def from_3(cpt):
isa = cpt.get('root','isa')
isa_fields = {
"alpha" : ( "fpcr", "uniq", "lock_flag", "lock_addr", "ipr" ),
"arm" : ( "miscRegs" ),
"sparc" : ( "asi", "tick", "fprs", "gsr", "softint", "tick_cmpr",
"stick", "stick_cmpr", "tpc", "tnpc", "tstate", "tt",
"tba", "pstate", "tl", "pil", "cwp", "gl", "hpstate",
"htstate", "hintp", "htba", "hstick_cmpr",
"strandStatusReg", "fsr", "priContext", "secContext",
"partId", "lsuCtrlReg", "scratchPad",
"cpu_mondo_head", "cpu_mondo_tail",
"dev_mondo_head", "dev_mondo_tail",
"res_error_head", "res_error_tail",
"nres_error_head", "nres_error_tail",
"tick_intr_sched",
"cpu", "tc_num", "tick_cmp", "stick_cmp", "hstick_cmp"),
"x86" : ( "regVal" ),
}
isa_fields = isa_fields.get(isa, [])
isa_sections = []
for sec in cpt.sections():
import re
re_cpu_match = re.match('^(.*sys.*\.cpu[^.]*)\.xc\.(.+)$', sec)
# Search for all the execution contexts
if not re_cpu_match:
continue
if re_cpu_match.group(2) != "0":
# This shouldn't happen as we didn't support checkpointing
# of in-order and O3 CPUs.
raise ValueError("Don't know how to migrate multi-threaded CPUs "
"from version 1")
isa_section = []
        # Collect every option of this execution context that belongs to the ISA
        for (key, value) in cpt.items(sec, raw=True):
            if key in isa_fields:
                isa_section.append((key, value))
name = "%s.isa" % re_cpu_match.group(1)
isa_sections.append((name, isa_section))
for (key, value) in isa_section:
cpt.remove_option(sec, key)
for (sec, options) in isa_sections:
# Some intermediate versions of gem5 have empty ISA sections
# (after we made the ISA a SimObject, but before we started to
# serialize into a separate ISA section).
if not cpt.has_section(sec):
cpt.add_section(sec)
else:
if cpt.items(sec):
raise ValueError("Unexpected populated ISA section in old "
"checkpoint")
for (key, value) in options:
cpt.set(sec, key, value)
# Version 5 of the checkpoint format removes the MISCREG_CPSR_MODE
# register from the ARM register file.
def from_4(cpt):
if cpt.get('root','isa') == 'arm':
for sec in cpt.sections():
import re
# Search for all ISA sections
if re.search('.*sys.*\.cpu.*\.isa', sec):
mr = cpt.get(sec, 'miscRegs').split()
# Remove MISCREG_CPSR_MODE
del mr[137]
cpt.set(sec, 'miscRegs', ' '.join(str(x) for x in mr))
migrations = []
migrations.append(from_0)
migrations.append(from_1)
migrations.append(from_2)
migrations.append(from_3)
migrations.append(from_4)
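# --- Hedged illustration (not a real gem5 migration) ---
# The comment block at the top of this file says every incompatible change
# gets a new from_N() function appended to `migrations`.  The function below
# only sketches that pattern; the section regex and option name are made up
# for illustration, and it is deliberately NOT appended to the list.
def _example_future_migration(cpt):
    for sec in cpt.sections():
        import re
        # Purely illustrative: touch a hypothetical per-CPU ISA section
        if re.search('.*sys.*\.cpu.*\.isa$', sec):
            cpt.set(sec, 'exampleOption', '0')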
verbose_print = False
def verboseprint(*args):
if not verbose_print:
return
for arg in args:
print arg,
print
def process_file(path, **kwargs):
if not osp.isfile(path):
import errno
        raise IOError(errno.ENOENT, "No such file", path)
verboseprint("Processing file %s...." % path)
if kwargs.get('backup', True):
import shutil
shutil.copyfile(path, path + '.bak')
cpt = ConfigParser.SafeConfigParser()
    # gem5 is case sensitive with parameters
cpt.optionxform = str
# Read the current data
cpt_file = file(path, 'r')
cpt.readfp(cpt_file)
cpt_file.close()
# Make sure we know what we're starting from
if not cpt.has_option('root','cpt_ver'):
raise LookupError("cannot determine version of checkpoint")
cpt_ver = cpt.getint('root','cpt_ver')
    # If the checkpoint version is newer than the latest migration we know
    # about, this upgrade script is out of date and needs to be updated
if cpt_ver > len(migrations):
raise ValueError("upgrade script is too old and needs updating")
verboseprint("\t...file is at version %#x" % cpt_ver)
if cpt_ver == len(migrations):
verboseprint("\t...nothing to do")
return
# Walk through every function from now until the end fixing the checkpoint
for v in xrange(cpt_ver,len(migrations)):
verboseprint("\t...migrating to version %#x" % (v + 1))
migrations[v](cpt)
cpt.set('root','cpt_ver', str(v + 1))
# Write the old data back
verboseprint("\t...completed")
cpt.write(file(path, 'w'))
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser("usage: %prog [options] <filename or directory>")
parser.add_option("-r", "--recurse", action="store_true",
help="Recurse through all subdirectories modifying "\
"each checkpoint that is found")
parser.add_option("-N", "--no-backup", action="store_false",
dest="backup", default=True,
help="Do no backup each checkpoint before modifying it")
parser.add_option("-v", "--verbose", action="store_true",
help="Print out debugging information as")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("You must specify a checkpoint file to modify or a "\
"directory of checkpoints to recursively update")
verbose_print = options.verbose
# Deal with shell variables and ~
path = osp.expandvars(osp.expanduser(args[0]))
# Process a single file if we have it
if osp.isfile(path):
process_file(path, **vars(options))
# Process an entire directory
elif osp.isdir(path):
cpt_file = osp.join(path, 'm5.cpt')
if options.recurse:
            # Visit every file and see if it matches
for root,dirs,files in os.walk(path):
for name in files:
if name == 'm5.cpt':
process_file(osp.join(root,name), **vars(options))
for dir in dirs:
pass
# Maybe someone passed a cpt.XXXXXXX directory and not m5.cpt
elif osp.isfile(cpt_file):
process_file(cpt_file, **vars(options))
else:
print "Error: checkpoint file not found at in %s " % path,
print "and recurse not specified"
sys.exit(1)
sys.exit(0)
|
tiagormk/gem5-hmp
|
util/cpt_upgrader.py
|
Python
|
bsd-3-clause
| 12,383
|
[
"VisIt"
] |
12ba8a12b1027acbbd5f2c58d29f380bb92626852fb153739af966149eb9d990
|
#!/usr/bin/env python
"""
This file tests vtk.util.vtkImageExportToArray and
vtk.util.vtkImageImportFromArray. It tests the code by first
exporting a PNG image to a Numeric array, then converting the array
back to an image and comparing that image to the original. It does
this for all PNG images in a particular directory.
The test naturally requires Numeric Python to be installed:
http://numpy.sf.net
"""
# This test requires Numeric.
import sys
try:
import numpy.core.numeric as numeric
except ImportError:
print "WARNING: This test requires Numeric Python: http://numpy.sf.net"
sys.exit(0)
import os
import glob
import vtk
from vtk.test import Testing
from vtk.util.vtkImageExportToArray import vtkImageExportToArray
from vtk.util.vtkImageImportFromArray import vtkImageImportFromArray
class TestNumericArrayImageData(Testing.vtkTest):
def testImportExport(self):
"Testing if images can be imported to and from numeric arrays."
imp = vtkImageImportFromArray()
exp = vtkImageExportToArray()
idiff = vtk.vtkImageDifference()
img_dir = Testing.getAbsImagePath("")
for i in glob.glob(os.path.join(img_dir, "*.png")):
# Putting the reader outside the loop causes bad problems.
reader = vtk.vtkPNGReader()
reader.SetFileName(i)
reader.Update()
# convert the image to a Numeric Array and convert it back
# to an image data.
exp.SetInputConnection(reader.GetOutputPort())
imp.SetArray(exp.GetArray())
# ensure there is no difference between orig image and the
# one we converted and un-converted.
idiff.SetInputConnection(imp.GetOutputPort())
idiff.SetImage(reader.GetOutput())
idiff.Update()
err = idiff.GetThresholdedError()
msg = "Test failed on image %s, with threshold "\
"error: %d"%(i, err)
self.assertEqual(err, 0.0, msg)
if __name__ == "__main__":
Testing.main([(TestNumericArrayImageData, 'test')])
|
ashray/VTK-EVM
|
Common/DataModel/Testing/Python/TestNumericArrayImageData.py
|
Python
|
bsd-3-clause
| 2,101
|
[
"VTK"
] |
bf5c12b1b3358a5c0c641c293da10619b64191d5f536a4b5629d41ecaf9a7fbb
|
import ast
import os
from jaspyx.ast_util import ast_load, ast_store, ast_call
from jaspyx.visitor import BaseVisitor
class Import(BaseVisitor):
import_path = ['.']
def load_module(self, pieces):
module_name = '.'.join(pieces)
if module_name in self.registry:
return
if len(pieces) > 1:
parent = self.registry['.'.join(pieces[:-1])]
import_path = [os.path.split(parent.path)[0]]
else:
import_path = self.import_path
for path in import_path:
module_path = os.path.join(path, pieces[-1], '__init__.jpx')
if os.path.exists(module_path):
break
module_path = os.path.join(path, pieces[-1]) + '.jpx'
if os.path.isfile(module_path):
break
else:
raise ImportError('module %s not found' % module_name)
c = ast.parse(open(module_path).read(), module_path)
self.registry[module_name] = v = self.__class__(module_path, self.registry, indent=self.default_indent)
v.import_path = self.import_path
v.visit(c)
def init_module(self, module_path):
for i in range(len(module_path)):
self.load_module(module_path[:i + 1])
return ast_call(
ast_call(ast_load('JS'), ast.Str('__import__')),
ast_call(ast_load('JS'), ast.Str('__module__')),
ast.Str('.'.join(module_path))
)
def visit_Import(self, node):
for name in node.names:
module_path = name.name.split('.')
import_module = self.init_module(module_path)
if not name.asname:
self.visit(ast.Expr(import_module))
self.visit(
ast.Assign(
[ast_store(module_path[0])],
self.init_module(module_path[:1])
)
)
else:
self.visit(
ast.Assign(
[ast_store(name.asname)],
import_module
)
)
def visit_ImportFrom(self, node):
if node.level:
raise NotImplementedError('Relative imports are not supported')
module_path = node.module.split('.')
import_module = self.init_module(module_path)
if len(node.names) > 1 or node.names[0].name == '*':
self.visit(ast.Assign(
[ast_store('$t1')],
import_module
))
import_from = ast_load('$t1')
else:
import_from = import_module
if node.names[0].name == '*':
name = node.names[0]
if name.name == '*':
if self.stack[-1].scope.prefix != ['__module__']:
raise NotImplementedError('from x import * only implemented at module level')
self.visit(ast.For(
ast_store('$t2'),
import_from,
[
ast.Assign(
[
ast.Subscript(
ast_call(ast_load('JS'), ast.Str('__module__')),
ast.Index(ast_load('$t2')),
ast.Load()
)
],
ast.Subscript(
import_from,
ast.Index(ast_load('$t2')),
ast.Load(),
)
),
],
[]
))
else:
for name in node.names:
asname = name.asname if name.asname else name.name
self.visit(
ast.Assign(
[ast_store(asname)],
ast.Attribute(
import_from,
name.name,
ast.Load()
)
)
)
|
iksteen/jaspyx
|
jaspyx/visitor/import_.py
|
Python
|
mit
| 4,183
|
[
"VisIt"
] |
69764f6a67434c065fdbc60678944556943fadf769b134689b23bb9dc400f480
|
#!/usr/bin/env python
#----------------------------------------------------------------------------
# Name: test7.py
# Purpose: A minimal wxPython test program
#
# Author: Robin Dunn
#
# Created: A long time ago, in a galaxy far, far away...
# Copyright: (c) 1998 by Total Control Software
# Licence: wxWidgets license
#----------------------------------------------------------------------------
# NOTE: this sample requires wxPython 2.6 or newer
# import the wxPython GUI package
import wx
# Create a new frame class, derived from the wxPython Frame.
class MyFrame(wx.Frame):
def __init__(self, parent, id, title):
# First, call the base class' __init__ method to create the frame
wx.Frame.__init__(self, parent, id, title)
# Associate some events with methods of this class
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_MOVE, self.OnMove)
# Add a panel and some controls to display the size and position
panel = wx.Panel(self, -1)
label1 = wx.StaticText(panel, -1, "Size:")
label2 = wx.StaticText(panel, -1, "Pos:")
self.sizeCtrl = wx.TextCtrl(panel, -1, "", style=wx.TE_READONLY)
self.posCtrl = wx.TextCtrl(panel, -1, "", style=wx.TE_READONLY)
self.panel = panel
# Use some sizers for layout of the widgets
sizer = wx.FlexGridSizer(2, 2, 5, 5)
sizer.Add(label1)
sizer.Add(self.sizeCtrl)
sizer.Add(label2)
sizer.Add(self.posCtrl)
border = wx.BoxSizer()
border.Add(sizer, 0, wx.ALL, 15)
panel.SetSizerAndFit(border)
self.Fit()
# This method is called by the System when the window is resized,
# because of the association above.
def OnSize(self, event):
size = event.GetSize()
self.sizeCtrl.SetValue("%s, %s" % (size.width, size.height))
# tell the event system to continue looking for an event handler,
# so the default handler will get called.
event.Skip()
# This method is called by the System when the window is moved,
# because of the association above.
def OnMove(self, event):
pos = event.GetPosition()
self.posCtrl.SetValue("%s, %s" % (pos.x, pos.y))
# Every wxWidgets application must have a class derived from wx.App
class MyApp(wx.App):
# wxWindows calls this method to initialize the application
def OnInit(self):
# Create an instance of our customized Frame class
frame = MyFrame(None, -1, "This is a test")
frame.Show(True)
# Tell wxWindows that this is our main window
self.SetTopWindow(frame)
# Return a success flag
return True
app = MyApp(0) # Create an instance of the application class
app.MainLoop() # Tell it to start processing events
|
cestaberous/lbgprg
|
py/gui/0_information/wxPythonTest.py
|
Python
|
gpl-2.0
| 2,859
|
[
"Galaxy"
] |
a365e87a571929754c75e84d1a2e670ddc73fdb43b3bf7588db7d40bf2c8f74f
|
# -*- coding:utf-8 -*-
from brian import Equations
from brian.units import *
from brian.stdunits import *
from model import PARAMETERS as ps
# If the model parameters have been initialized
if ps != None:
N_subpop = ps['Common']['N_subpop']
PSSYN = ps['Synapse']
V_E = PSSYN['V_E']
V_act_E = PSSYN['V_act_E']
g_E = PSSYN['g_E']
sigma_E = PSSYN['sigma_E']
alpha_E = PSSYN['alpha_E']
beta_E = PSSYN['beta_E']
V_I = PSSYN['V_I']
V_act_I = PSSYN['V_act_I']
g_I = PSSYN['g_I']
sigma_I = PSSYN['sigma_I']
alpha_I = PSSYN['alpha_I']
beta_I = PSSYN['beta_I']
class Synapse:
"""A Synapse is a connection between a mitral cell and a granule cell.
Synapses are modeled with a set of ordinary differential equations (ODE).
There are two kind of synapses: inhibitory and excitatory. Each kind of
synapse has its set of ODE, but they share common features.
Attributes
----------
is_exc : bool
True if the synapse is excitatory, False if inhibitory
is_inhib : bool
True if the synapse is inhibitory, False if excitatory
eqs_model : brian.Equations
Set of equations that define the model
"""
def __init__(self, synapse_type):
"""Initialize a Synapse.
At this point, the equation model is empty. The user must define the
equation model after the initialization by using the
:meth:`set_eqs_model`.
"""
if synapse_type[:3] == 'exc':
self.is_exc = True
elif synapse_type[:3] == 'inh':
self.is_exc = False
self.is_inhib = not self.is_exc
self.eqs_model = Equations()
def get_eqs_model(self):
"""Get the model of equations of this synapse"""
return self.eqs_model
def set_eqs_model(self, eqs=None):
"""Sets the model of equations.
        A standard model (according to the synapse type) is used if no model
        is specified with :attr:`eqs`.
Parameters
----------
eqs : brian.Equations
Set of equations that define the model
Notes
-----
This method must be called after creating a :class:`Synapse`, otherwise
the equation model is empty.
"""
if eqs:
self.eqs_model = eqs
else:
if self.is_inhib:
self.eqs_model = Equations('''
I_syn = g_E * s_syn * (V_D - V_E) : amp*meter**-2
ds/dt = alpha_I * (1 - s) * T - beta_I * s : 1
T = 1/(1 + exp(-1*(V_D - V_act_I)/sigma_I)) : 1
s_syn : 1
s_syn_self : 1
''')
else:
self.eqs_model = Equations('''
I_syn = g_I * s_syn * (V - V_I) : amp*meter**-2
ds/dt = alpha_E * (1 - s) * T - beta_E * s : 1
T : 1
s_syn : 1
''')
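# --- Hedged usage sketch (illustration only; not part of this module) ---
# How a Synapse might be built with an explicitly supplied equation model, so
# that it also works when the module-level parameters above have not been
# initialized.  The single decay equation used here is arbitrary.
def _example_build_synapse():
    syn = Synapse('excitatory')
    syn.set_eqs_model(Equations('ds/dt = -s/(10*ms) : 1'))
    return syn.get_eqs_model()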
|
neuro-lyon/multiglom-model
|
src/model/synapse.py
|
Python
|
mit
| 3,066
|
[
"Brian"
] |
c303f64dad6308c9531eb7e44f3084838a3c4f90a47b32166d7326b0aa29861e
|
# Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""Implement MD Integrator."""
import itertools
from hoomd.md import _md
from hoomd.data.parameterdicts import ParameterDict
from hoomd.data.typeconverter import OnlyFrom, OnlyTypes
from hoomd.integrate import BaseIntegrator
from hoomd.data import syncedlist
from hoomd.md.methods import Method
from hoomd.md.force import Force
from hoomd.md.constrain import Constraint, Rigid
def _preprocess_aniso(value):
if value is True:
return "true"
elif value is False:
return "false"
else:
return value
def _set_synced_list(old_list, new_list):
old_list.clear()
old_list.extend(new_list)
class _DynamicIntegrator(BaseIntegrator):
def __init__(self, forces, constraints, methods, rigid):
forces = [] if forces is None else forces
constraints = [] if constraints is None else constraints
methods = [] if methods is None else methods
self._forces = syncedlist.SyncedList(
Force, syncedlist._PartialGetAttr('_cpp_obj'), iterable=forces)
self._constraints = syncedlist.SyncedList(
OnlyTypes(Constraint, disallow_types=(Rigid,)),
syncedlist._PartialGetAttr('_cpp_obj'),
iterable=constraints)
self._methods = syncedlist.SyncedList(
Method, syncedlist._PartialGetAttr('_cpp_obj'), iterable=methods)
param_dict = ParameterDict(rigid=OnlyTypes(Rigid, allow_none=True))
if rigid is not None and rigid._added:
raise ValueError("Rigid object can only belong to one integrator.")
param_dict["rigid"] = rigid
self._param_dict.update(param_dict)
def _attach(self):
self.forces._sync(self._simulation, self._cpp_obj.forces)
self.constraints._sync(self._simulation, self._cpp_obj.constraints)
self.methods._sync(self._simulation, self._cpp_obj.methods)
super()._attach()
if self.rigid is not None:
self.rigid._attach()
self._cpp_obj.rigid = self.rigid._cpp_obj
def _detach(self):
self._forces._unsync()
self._methods._unsync()
self._constraints._unsync()
if self.rigid is not None:
self.rigid._detach()
super()._detach()
def _remove(self):
if self.rigid is not None:
self.rigid._remove()
super()._remove()
def _add(self, simulation):
super()._add(simulation)
if self.rigid is not None:
self.rigid._add(simulation)
@property
def forces(self):
return self._forces
@forces.setter
def forces(self, value):
_set_synced_list(self._forces, value)
@property
def constraints(self):
return self._constraints
@constraints.setter
def constraints(self, value):
_set_synced_list(self._constraints, value)
@property
def methods(self):
return self._methods
@methods.setter
def methods(self, value):
_set_synced_list(self._methods, value)
@property
def _children(self):
children = list(self.forces)
children.extend(self.constraints)
children.extend(self.methods)
for child in itertools.chain(self.forces, self.constraints,
self.methods):
children.extend(child._children)
return children
def _getattr_param(self, attr):
if attr == "rigid":
return self._param_dict["rigid"]
return super()._getattr_param(attr)
def _setattr_param(self, attr, value):
if attr == "rigid":
self._set_rigid(value)
return
super()._setattr_param(attr, value)
def _set_rigid(self, new_rigid):
"""Handles the adding and detaching of potential Rigid objects."""
# this generally only happens when attaching and we can ignore it since
# we attach the rigid body in _attach.
if new_rigid is self.rigid:
return
old_rigid = self.rigid
if new_rigid is not None and new_rigid._added:
raise ValueError("Cannot add Rigid object to multiple integrators.")
if old_rigid is not None:
if self._attached:
old_rigid._detach()
if self._added:
old_rigid._remove()
if new_rigid is None:
self._param_dict["rigid"] = None
if self._attached:
self._cpp_obj.rigid = None
return
if self._added:
new_rigid._add(self._simulation)
if self._attached:
self.rigid._attach()
self._cpp_obj.rigid = new_rigid._cpp_obj
self._param_dict["rigid"] = new_rigid
class Integrator(_DynamicIntegrator):
"""Enables a variety of standard integration methods.
Args:
dt (float): Integrator time step size :math:`[\\mathrm{time}]`.
methods (Sequence[hoomd.md.methods.Method]): Sequence of integration
methods. Each integration method can be applied to only a specific
subset of particles. The intersection of the subsets must be null.
The default value of ``None`` initializes an empty list.
forces (Sequence[hoomd.md.force.Force]): Sequence of forces applied to
the particles in the system. All the forces are summed together.
The default value of ``None`` initializes an empty list.
aniso (str or bool): Whether to integrate rotational degrees of freedom
            (bool), default 'auto' (autodetect if there is an anisotropic factor
from any defined active or constraint forces).
constraints (Sequence[hoomd.md.constrain.Constraint]): Sequence of
constraint forces applied to the particles in the system.
The default value of ``None`` initializes an empty list. Rigid body
objects (i.e. `hoomd.md.constrain.Rigid`) are not allowed in the
list.
rigid (hoomd.md.constrain.Rigid): A rigid bodies object defining the
rigid bodies in the simulation.
Classes of the following modules can be used as elements in `methods`:
- `hoomd.md.methods`
- `hoomd.md.methods.rattle`
The classes of following modules can be used as elements in `forces`
- `hoomd.md.angle`
- `hoomd.md.bond`
- `hoomd.md.charge`
- `hoomd.md.dihedral`
- `hoomd.md.external.field`
- `hoomd.md.force`
- `hoomd.md.improper`
- `hoomd.md.pair`
- `hoomd.md.wall`
- `hoomd.md.special_pair`
The classes of the following module can be used as elements in `constraints`
- `hoomd.md.constrain`
Examples::
nlist = hoomd.md.nlist.Cell()
lj = hoomd.md.pair.LJ(nlist=nlist)
lj.params.default = dict(epsilon=1.0, sigma=1.0)
lj.r_cut[('A', 'A')] = 2**(1/6)
nve = hoomd.md.methods.NVE(filter=hoomd.filter.All())
integrator = hoomd.md.Integrator(dt=0.001, methods=[nve], forces=[lj])
sim.operations.integrator = integrator
Attributes:
dt (float): Integrator time step size :math:`[\\mathrm{time}]`.
methods (list[hoomd.md.methods.Method]): List of integration methods.
Each integration method can be applied to only a specific subset of
particles.
forces (list[hoomd.md.force.Force]): List of forces applied to
the particles in the system. All the forces are summed together.
aniso (str): Whether rotational degrees of freedom are integrated.
constraints (list[hoomd.md.constrain.Constraint]): List of
constraint forces applied to the particles in the system.
rigid (hoomd.md.constrain.Rigid): The rigid body definition for the
simulation associated with the integrator.
"""
def __init__(self,
dt,
aniso='auto',
forces=None,
constraints=None,
methods=None,
rigid=None):
super().__init__(forces, constraints, methods, rigid)
self._param_dict.update(
ParameterDict(dt=float(dt),
aniso=OnlyFrom(['true', 'false', 'auto'],
preprocess=_preprocess_aniso),
_defaults={"aniso": "auto"}))
if aniso is not None:
self.aniso = aniso
def _attach(self):
# initialize the reflected c++ class
self._cpp_obj = _md.IntegratorTwoStep(
self._simulation.state._cpp_sys_def, self.dt)
# Call attach from DynamicIntegrator which attaches forces,
# constraint_forces, and methods, and calls super()._attach() itself.
super()._attach()
|
joaander/hoomd-blue
|
hoomd/md/integrate.py
|
Python
|
bsd-3-clause
| 8,904
|
[
"HOOMD-blue"
] |
4de1a222d150fcafd74ac5d84a31112444736cc3d6098d95e90a7fdec2c4cff9
|
""" Collection of user jobs for testing purposes
"""
# pylint: disable=invalid-name
from __future__ import print_function
import os
from DIRAC import rootPath
from DIRAC.Interfaces.API.Job import Job
from DIRAC.Interfaces.API.Dirac import Dirac
from DIRAC.tests.Utilities.utils import find_all
# parameters
# Common functions
def getJob(jobClass=None):
if not jobClass:
jobClass = Job
oJob = jobClass()
return oJob
def getDIRAC(diracClass=None):
if not diracClass:
diracClass = Dirac
oDirac = diracClass()
return oDirac
def baseToAllJobs(jName, jobClass=None):
print("**********************************************************************************************************")
print("\n Submitting job ", jName)
J = getJob(jobClass)
J.setName(jName)
J.setCPUTime(17800)
return J
def endOfAllJobs(J):
result = getDIRAC().submitJob(J)
print("Job submission result:", result)
if result['OK']:
print("Submitted with job ID:", result['Value'])
return result
# List of jobs
def helloWorld():
""" simple hello world job
"""
J = baseToAllJobs('helloWorld')
try:
J.setInputSandbox([find_all('exe-script.py', rootPath, 'DIRAC/tests/Workflow')[0]])
except IndexError: # we are in Jenkins
J.setInputSandbox([find_all('exe-script.py', os.environ['WORKSPACE'], 'DIRAC/tests/Workflow')[0]])
J.setExecutable("exe-script.py", "", "helloWorld.log")
return endOfAllJobs(J)
def mpJob():
""" simple hello world job, with MultiProcessor tag
"""
J = baseToAllJobs('mpJob')
try:
J.setInputSandbox([find_all('mpTest.py', rootPath, 'DIRAC/tests/Utilities')[0]] +
[find_all('testMpJob.sh', rootPath, 'DIRAC/tests/Utilities')[0]])
except IndexError: # we are in Jenkins
J.setInputSandbox([find_all('mpTest.py', os.environ['WORKSPACE'], 'DIRAC/tests/Utilities')[0]] +
[find_all('testMpJob.sh', os.environ['WORKSPACE'], 'DIRAC/tests/Utilities')[0]])
J.setExecutable('testMpJob.sh mpTest.py')
J.setTag('MultiProcessor')
return endOfAllJobs(J)
def parametricJob():
""" Creates a parametric job with 3 subjobs which are simple hello world jobs
"""
J = baseToAllJobs('helloWorld')
try:
J.setInputSandbox([find_all('exe-script.py', rootPath, 'DIRAC/tests/Workflow')[0]])
except IndexError: # we are in Jenkins
J.setInputSandbox([find_all('exe-script.py', os.environ['WORKSPACE'], 'DIRAC/tests/Workflow')[0]])
J.setParameterSequence("args", ['one', 'two', 'three'])
J.setParameterSequence("iargs", [1, 2, 3])
J.setExecutable("exe-script.py", arguments=": testing %(args)s %(iargs)s", logFile='helloWorld_%n.log')
return endOfAllJobs(J)
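# --- Hedged usage sketch (illustration only; not part of the DIRAC test
# suite): the job helpers above are normally invoked by a separate test
# driver, but could be called directly once a valid DIRAC environment and
# proxy are available.
def _example_submit_all():
    for job in (helloWorld, mpJob, parametricJob):
        print("Submission result:", job())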
|
petricm/DIRAC
|
tests/Utilities/testJobDefinitions.py
|
Python
|
gpl-3.0
| 2,690
|
[
"DIRAC"
] |
906afb7b99460bd9d68077d27f25ea3c8dfea77acc3647268becdbfac208babb
|
# Copyright 2009 by Cymon J. Cox. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Command line wrapper for the multiple alignment program DIALIGN2-2.
"""
from __future__ import print_function
from Bio.Application import _Option, _Argument, _Switch, AbstractCommandline
class DialignCommandline(AbstractCommandline):
"""Command line wrapper for the multiple alignment program DIALIGN2-2.
http://bibiserv.techfak.uni-bielefeld.de/dialign/welcome.html
Example:
--------
    To align a FASTA file (unaligned.fasta) with the output files named
    aligned.*, including a FASTA output file (aligned.fa), use:
>>> from Bio.Align.Applications import DialignCommandline
>>> dialign_cline = DialignCommandline(input="unaligned.fasta",
... fn="aligned", fa=True)
>>> print(dialign_cline)
dialign2-2 -fa -fn aligned unaligned.fasta
You would typically run the command line with dialign_cline() or via
the Python subprocess module, as described in the Biopython tutorial.
Citation:
---------
B. Morgenstern (2004). DIALIGN: Multiple DNA and Protein Sequence
Alignment at BiBiServ. Nucleic Acids Research 32, W33-W36.
Last checked against version: 2.2
"""
def __init__(self, cmd="dialign2-2", **kwargs):
self.program_name = cmd
self.parameters = \
[
_Switch(["-afc", "afc"],
"Creates additional output file '*.afc' "
"containing data of all fragments considered "
"for alignment WARNING: this file can be HUGE !"),
_Switch(["-afc_v", "afc_v"],
"Like '-afc' but verbose: fragments are explicitly "
"printed. WARNING: this file can be EVEN BIGGER !"),
_Switch(["-anc", "anc"],
"Anchored alignment. Requires a file <seq_file>.anc "
"containing anchor points."),
_Switch(["-cs", "cs"],
"If segments are translated, not only the `Watson "
"strand' but also the `Crick strand' is looked at."),
_Switch(["-cw", "cw"],
"Additional output file in CLUSTAL W format."),
_Switch(["-ds", "ds"],
"`dna alignment speed up' - non-translated nucleic acid "
"fragments are taken into account only if they start "
"with at least two matches. Speeds up DNA alignment at "
"the expense of sensitivity."),
_Switch(["-fa", "fa"],
"Additional output file in FASTA format."),
_Switch(["-ff", "ff"],
"Creates file *.frg containing information about all "
"fragments that are part of the respective optimal "
"pairwise alignmnets plus information about "
"consistency in the multiple alignment"),
_Option(["-fn", "fn"],
"Output files are named <out_file>.<extension>.",
equate=False),
_Switch(["-fop", "fop"],
"Creates file *.fop containing coordinates of all "
"fragments that are part of the respective pairwise alignments."),
_Switch(["-fsm", "fsm"],
"Creates file *.fsm containing coordinates of all "
"fragments that are part of the final alignment"),
_Switch(["-iw", "iw"],
"Overlap weights switched off (by default, overlap "
"weights are used if up to 35 sequences are aligned). "
"This option speeds up the alignment but may lead "
"to reduced alignment quality."),
_Switch(["-lgs", "lgs"],
"`long genomic sequences' - combines the following "
"options: -ma, -thr 2, -lmax 30, -smin 8, -nta, -ff, "
"-fop, -ff, -cs, -ds, -pst "),
_Switch(["-lgs_t", "lgs_t"],
"Like '-lgs' but with all segment pairs assessed "
"at the peptide level (rather than 'mixed alignments' "
"as with the '-lgs' option). Therefore faster than "
"-lgs but not very sensitive for non-coding regions."),
_Option(["-lmax", "lmax"],
"Maximum fragment length = x (default: x = 40 or "
"x = 120 for `translated' fragments). Shorter x "
"speeds up the program but may affect alignment quality.",
checker_function=lambda x: isinstance(x, int),
equate=False),
_Switch(["-lo", "lo"],
"(Long Output) Additional file *.log with information "
"about fragments selected for pairwise alignment and "
"about consistency in multi-alignment procedure."),
_Switch(["-ma", "ma"],
"`mixed alignments' consisting of P-fragments and "
"N-fragments if nucleic acid sequences are aligned."),
_Switch(["-mask", "mask"],
"Residues not belonging to selected fragments are "
"replaced by `*' characters in output alignment "
"(rather than being printed in lower-case characters)"),
_Switch(["-mat", "mat"],
"Creates file *mat with substitution counts derived "
"from the fragments that have been selected for alignment."),
_Switch(["-mat_thr", "mat_thr"],
"Like '-mat' but only fragments with weight score "
"> t are considered"),
_Switch(["-max_link", "max_link"],
"'maximum linkage' clustering used to construct "
"sequence tree (instead of UPGMA)."),
_Switch(["-min_link", "min_link"],
"'minimum linkage' clustering used."),
_Option(["-mot", "mot"],
"'motif' option.",
equate=False),
_Switch(["-msf", "msf"],
"Separate output file in MSF format."),
_Switch(["-n", "n"],
"Input sequences are nucleic acid sequences. "
"No translation of fragments."),
_Switch(["-nt", "nt"],
"Input sequences are nucleic acid sequences and "
"`nucleic acid segments' are translated to `peptide "
"segments'."),
_Switch(["-nta", "nta"],
"`no textual alignment' - textual alignment suppressed. "
"This option makes sense if other output files are of "
"interest -- e.g. the fragment files created with -ff, "
"-fop, -fsm or -lo."),
_Switch(["-o", "o"],
"Fast version, resulting alignments may be slightly "
"different."),
_Switch(["-ow", "ow"],
"Overlap weights enforced (By default, overlap weights "
"are used only if up to 35 sequences are aligned since "
"calculating overlap weights is time consuming)."),
_Switch(["-pst", "pst"],
"'print status'. Creates and updates a file *.sta with "
"information about the current status of the program "
"run. This option is recommended if large data sets "
"are aligned since it allows the user to estimate the "
"remaining running time."),
_Switch(["-smin", "smin"],
"Minimum similarity value for first residue pair "
"(or codon pair) in fragments. Speeds up protein "
"alignment or alignment of translated DNA fragments "
"at the expense of sensitivity."),
_Option(["-stars", "stars"],
"Maximum number of `*' characters indicating degree "
"of local similarity among sequences. By default, no "
"stars are used but numbers between 0 and 9, instead.",
checker_function=lambda x: x in range(0, 10),
equate=False),
_Switch(["-stdo", "stdo"],
"Results written to standard output."),
_Switch(["-ta", "ta"],
"Standard textual alignment printed (overrides "
"suppression of textual alignments in special "
"options, e.g. -lgs)"),
_Option(["-thr", "thr"],
"Threshold T = x.",
checker_function=lambda x: isinstance(x, int),
equate=False),
_Switch(["-xfr", "xfr"],
"'exclude fragments' - list of fragments can be "
"specified that are NOT considered for pairwise alignment"),
_Argument(["input"],
"Input file name. Must be FASTA format",
filename=True,
is_required=True),
]
AbstractCommandline.__init__(self, cmd, **kwargs)
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
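# A minimal sketch of driving the wrapper through the subprocess module rather than
# calling the command line object directly (the FASTA file name is illustrative):
#
#     import subprocess
#     cline = DialignCommandline(input="unaligned.fasta", fn="aligned", fa=True)
#     subprocess.run(str(cline), shell=True, check=True)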
|
zjuchenyuan/BioWeb
|
Lib/Bio/Align/Applications/_Dialign.py
|
Python
|
mit
| 9,585
|
[
"Biopython"
] |
7b156a3ba5634b5c08bfdf3899a6667783bf6c316b9d02d1d0b094cf5984d1a2
|
########################################################################
# $HeadURL $
# File: OperationHandlerBaseTests.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/03/25 08:09:08
########################################################################
""" :mod: OperationHandlerBaseTests
===============================
.. module: OperationHandlerBaseTests
:synopsis: unittests for OperationHandlerBase
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
unittests for OperationHandlerBase
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id $"
# #
# @file OperationHandlerBaseTests.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/03/25 08:09:21
# @brief Definition of OperationHandlerBaseTests class.
# # imports
import unittest
from DIRAC.RequestManagementSystem.private.OperationHandlerBase import OperationHandlerBase
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
########################################################################
class OperationHandlerBaseTests( unittest.TestCase ):
"""
.. class:: OperationHandlerBaseTests
"""
def setUp( self ):
""" test set up """
self.req = Request()
self.req.RequestName = "testRequest"
self.op = Operation( {"Type" : "ForwardDISET", "Arguments" : "foobar" } )
self.req.addOperation( self.op )
self.baseOp = OperationHandlerBase()
def tearDown( self ):
""" test tear down """
del self.baseOp
del self.op
del self.req
def testOperationHandlerBase( self ):
""" base op test """
self.baseOp.setOperation( self.op )
# # log is there
self.assertEqual( "log" in dir( self.baseOp ), True, "log missing" )
# # operation is there
self.assertEqual( "operation" in dir( self.baseOp ), True, "operation is missing" )
# # request is there
self.assertEqual( "request" in dir( self.baseOp ), True, "request is missing" )
# # __call__ not implemented
self.assertRaises( NotImplementedError, self.baseOp )
# # replica manager
self.assertEqual( isinstance( self.baseOp.dm, DataManager ), True, "DataManager is missing" )
# # tests execution
if __name__ == "__main__":
testLoader = unittest.TestLoader()
OperationHandlerBaseTests = testLoader.loadTestsFromTestCase( OperationHandlerBaseTests )
suite = unittest.TestSuite( [ OperationHandlerBaseTests ] )
unittest.TextTestRunner( verbosity = 3 ).run( suite )
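# A rough sketch of a concrete handler, for orientation only (the class name and body
# below are illustrative, not part of DIRAC): subclasses implement __call__ and return
# the usual S_OK/S_ERROR structure.
#
#     class MyHandler( OperationHandlerBase ):
#         def __call__( self ):
#             self.log.info( "processing %s" % self.operation.Type )
#             return S_OK()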
|
yujikato/DIRAC
|
src/DIRAC/RequestManagementSystem/Service/test/OperationHandlerBaseTests.py
|
Python
|
gpl-3.0
| 2,636
|
[
"DIRAC"
] |
b74e9f7d844a1459dae8f3202492f4a0e57678201026e5cbeb95b9bdc246b0d6
|
#!/usr/bin/python
import os
import shutil
from testutil import run_gasoline, test_result
import pynbody as pyn
import numpy as np
import matplotlib.pyplot as plt
import pynbody.plot.sph as p_sph
#Parameters for the test
testdir='sedov'
files = ["data/sedov.std", "data/sedov.param"]
exe = "./ChaNGa"
testname = "Sedov Blast"
def test_radius():
description = """
    This test ensures that the blastwave is growing at the
    correct rate (that the shock radius at times 10, 20 and 30 is correct). The
    peak-density radii must match the theoretical values to within 10% on average to pass."""
success = False
rshock=[0.8995, 1.1864,1.3958]
threshold = 0.1
time_10 = pyn.load(testdir+'/sedov.000010')
time_20 = pyn.load(testdir+'/sedov.000020')
time_30 = pyn.load(testdir+'/sedov.000030')
fig = plt.figure()
plt.plot(time_10['r'], time_10['rho'], ',r', figure=fig)
plt.plot(time_20['r'], time_20['rho'], ',g', figure=fig)
plt.plot(time_30['r'], time_30['rho'], ',b', figure=fig)
peak_10 = time_10['r'][np.where(time_10['rho'] == np.max(time_10['rho']))]
peak_20 = time_20['r'][np.where(time_20['rho'] == np.max(time_20['rho']))]
peak_30 = time_30['r'][np.where(time_30['rho'] == np.max(time_30['rho']))]
value = 1.0-np.mean((peak_10/rshock[0], peak_20/rshock[1], peak_30/rshock[2]))
if np.abs(value) < threshold:
success = True
plt.title('PASS', color='green')
else:
plt.title('FAIL', color='red')
for i in rshock:
plt.axvline(i, color='black', linewidth=2)
plt.xlabel("Radius $(kpc)$")
plt.ylabel("Density $(M_\odot/kpc^3)$")
plt.xlim((0,2))
return (fig, success, value, threshold, description)
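# Worked example of the radius criterion above with made-up peak positions: if the
# peaks sit at radii 0.95, 1.20 and 1.40, then
#   value = 1 - mean(0.95/0.8995, 1.20/1.1864, 1.40/1.3958) ~ -0.024
# and abs(value) < 0.1, so the test would pass.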
def test_entropy():
description = """
    This test ensures that the average entropy of the inner 8 particles is within
    10% of the theoretical value at the final step."""
success = False
entropy = 5.13e6
threshold = 0.1
time_30 = pyn.load(testdir+'/sedov.000030')
fig = plt.figure()
plt.semilogy(time_30['r'], np.power(time_30['temp'], 1.5)/time_30['rho'], '.k')
plt.semilogy([0,0.1], [5.13e6, 5.13e6], 'r-')
center_idx = np.argsort(time_30['r'])[:8]
value = 1-entropy/np.mean(np.power(time_30['temp'][center_idx], 1.5)/time_30['rho'][center_idx])
if np.abs(value) < threshold:
success = True
plt.title('PASS', color='green')
else:
plt.title('FAIL', color='red')
plt.xlabel("Radius $(kpc)$")
plt.ylabel("$A(S) K^{1.5} cc/g$")
plt.xlim((0,1))
plt.ylim((6e3,6e7))
return (fig, success, value, threshold, description)
def run():
run = run_gasoline(testdir, files, 'sedov.param', exe)
results = test_result(run, testname)
if run == 0:
results.add_result(*test_radius())
results.add_result(*test_entropy())
return results
if __name__ == '__main__':
results = run()
if results.run:
for i in range(len(results.success)):
print "TEST:"
print results.description[i]
print "Pass\tValue\tThreshold\t"
print "%r\t%e\t%e\t" % (results.success[i], results.value[i], results.threshold[i])
results.plots[i].savefig(testname+str(i)+".png")
|
ibackus/compare-changa-builds
|
tests/sedov/sedovblast_ch.py
|
Python
|
mit
| 3,236
|
[
"BLAST"
] |
91d72749f93d6eb004be54d1a899173c75674defb4e279aa41df95fec52f3a33
|
from gpaw.xc.gllb.contribution import Contribution
#from gpaw.xc_functional import XCRadialGrid, XCFunctional, XC3DGrid
#from gpaw.xc_correction import A_Liy, weights
from gpaw.sphere.lebedev import weight_n
from gpaw.utilities import pack
from gpaw.xc.gllb import safe_sqr
from math import sqrt, pi
from gpaw.mpi import world
import numpy as np
class C_Response(Contribution):
def __init__(self, nlfunc, weight, coefficients):
Contribution.__init__(self, nlfunc, weight)
self.coefficients = coefficients
def get_name(self):
return "RESPONSE"
def get_desc(self):
return ""
# Initialize Response functional
def initialize_1d(self):
self.ae = self.nlfunc.ae
    # Calculate the GLLB potential and energy in 1D
def add_xc_potential_and_energy_1d(self, v_g):
w_i = self.coefficients.get_coefficients_1d()
u2_j = safe_sqr(self.ae.u_j)
v_g += self.weight * np.dot(w_i, u2_j) / (np.dot(self.ae.f_j, u2_j) +1e-10)
return 0.0 # Response part does not contribute to energy
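    # In formula form, add_xc_potential_and_energy_1d adds the GLLB response term
    #   v_resp(r) = weight * sum_i w_i |u_i(r)|^2 / (sum_i f_i |u_i(r)|^2 + 1e-10),
    # where w_i are the response coefficients and f_i the occupations; the small
    # constant only guards against division by zero.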
def initialize(self):
self.gd = self.nlfunc.gd
self.finegd = self.nlfunc.finegd
self.wfs = self.nlfunc.wfs
self.kpt_u = self.wfs.kpt_u
self.setups = self.wfs.setups
self.density = self.nlfunc.density
self.symmetry = self.wfs.symmetry
self.nspins = self.nlfunc.nspins
self.occupations = self.nlfunc.occupations
self.nvalence = self.nlfunc.nvalence
self.kpt_comm = self.wfs.kpt_comm
self.band_comm = self.wfs.band_comm
self.grid_comm = self.gd.comm
self.vt_sg = self.finegd.empty(self.nlfunc.nspins)
self.vt_sG = self.gd.empty(self.nlfunc.nspins)
self.nt_sG = self.gd.empty(self.nlfunc.nspins)
self.Dresp_asp = None
self.D_asp = None
# The response discontinuity is stored here
self.Dxc_vt_sG = None
self.Dxc_Dresp_asp = {}
self.Dxc_D_asp = {}
def update_potentials(self, nt_sg):
nspins = len(nt_sg)
w_kn = self.coefficients.get_coefficients_by_kpt(self.kpt_u, nspins=nspins)
f_kn = [ kpt.f_n for kpt in self.kpt_u ]
#if w_kn is None:
# # LDA Response, before eigenvalues are available
# self.vt_sg[:] = 0.0 # 3*pi**2*np.array(nt_sg))**(1.0/3.0)/(2*pi)
# print self.vt_sg
if w_kn is not None:
self.vt_sG[:] = 0.0
self.nt_sG[:] = 0.0
for kpt, w_n in zip(self.kpt_u, w_kn):
self.wfs.add_to_density_from_k_point_with_occupation(self.vt_sG, kpt, w_n)
self.wfs.add_to_density_from_k_point(self.nt_sG, kpt)
self.band_comm.sum(self.nt_sG)
self.kpt_comm.sum(self.nt_sG)
self.band_comm.sum(self.vt_sG)
self.kpt_comm.sum(self.vt_sG)
if self.wfs.symmetry:
for nt_G, vt_G in zip(self.nt_sG, self.vt_sG):
self.symmetry.symmetrize(nt_G, self.gd)
self.symmetry.symmetrize(vt_G, self.gd)
self.wfs.calculate_atomic_density_matrices_with_occupation(
self.Dresp_asp, w_kn)
self.wfs.calculate_atomic_density_matrices_with_occupation(
self.D_asp, f_kn)
self.vt_sG /= self.nt_sG +1e-10
for s in range(nspins):
self.density.interpolator.apply(self.vt_sG[s], self.vt_sg[s])
def calculate_spinpaired(self, e_g, n_g, v_g):
self.update_potentials([n_g])
v_g[:] += self.weight * self.vt_sg[0]
return 0.0
def calculate_spinpolarized(self, e_g, na_g, va_g, nb_g, vb_g):
self.update_potentials([na_g, nb_g])
va_g[:] += self.weight * self.vt_sg[0]
vb_g[:] += self.weight * self.vt_sg[1]
return 0.0
def calculate_energy_and_derivatives(self, setup, D_sp, H_sp, a):
# Get the XC-correction instance
c = setup.xc_correction
ncresp_g = setup.extra_xc_data['core_response'] / self.nspins
for D_p, dEdD_p, Dresp_p in zip(D_sp, H_sp, self.Dresp_asp.get(a)):
D_Lq = np.dot(c.B_pqL.T, D_p)
n_Lg = np.dot(D_Lq, c.n_qg) # Construct density
n_Lg[0] += c.nc_g * sqrt(4 * pi) / self.nspins
nt_Lg = np.dot(D_Lq, c.nt_qg) # Construct smooth density (without smooth core)
Dresp_Lq = np.dot(c.B_pqL.T, Dresp_p)
nresp_Lg = np.dot(Dresp_Lq, c.n_qg) # Construct 'response density'
nrespt_Lg = np.dot(Dresp_Lq, c.nt_qg) # Construct smooth 'response density' (w/o smooth core)
for w, Y_L in zip(weight_n, c.Y_nL):
nt_g = np.dot(Y_L, nt_Lg)
nrespt_g = np.dot(Y_L, nrespt_Lg)
x_g = nrespt_g / (nt_g + 1e-10)
dEdD_p -= self.weight * w * np.dot(np.dot(c.B_pqL, Y_L),
np.dot(c.nt_qg, x_g * c.rgd.dv_g))
n_g = np.dot(Y_L, n_Lg)
nresp_g = np.dot(Y_L, nresp_Lg)
x_g = (nresp_g+ncresp_g) / (n_g + 1e-10)
dEdD_p += self.weight * w * np.dot(np.dot(c.B_pqL, Y_L),
np.dot(c.n_qg, x_g * c.rgd.dv_g))
return 0.0
def integrate_sphere(self, a, Dresp_sp, D_sp, Dwf_p):
c = self.nlfunc.setups[a].xc_correction
Dresp_p, D_p = Dresp_sp[0], D_sp[0]
D_Lq = np.dot(c.B_pqL.T, D_p)
n_Lg = np.dot(D_Lq, c.n_qg) # Construct density
n_Lg[0] += c.nc_g * sqrt(4 * pi)
nt_Lg = np.dot(D_Lq, c.nt_qg) # Construct smooth density (without smooth core)
Dresp_Lq = np.dot(c.B_pqL.T, Dresp_p) # Construct response
nresp_Lg = np.dot(Dresp_Lq, c.n_qg) # Construct 'response density'
nrespt_Lg = np.dot(Dresp_Lq, c.nt_qg) # Construct smooth 'response density' (w/o smooth core)
Dwf_Lq = np.dot(c.B_pqL.T, Dwf_p) # Construct lumo wf
nwf_Lg = np.dot(Dwf_Lq, c.n_qg)
nwft_Lg = np.dot(Dwf_Lq, c.nt_qg)
E = 0.0
for w, Y_L in zip(weight_n, c.Y_nL):
v = np.dot(Y_L, nwft_Lg) * np.dot(Y_L, nrespt_Lg) / (np.dot(Y_L, nt_Lg) + 1e-10)
E -= self.weight * w * np.dot(v, c.rgd.dv_g)
v = np.dot(Y_L, nwf_Lg) * np.dot(Y_L, nresp_Lg) / (np.dot(Y_L, n_Lg) + 1e-10)
E += self.weight * w * np.dot(v, c.rgd.dv_g)
return E
def add_smooth_xc_potential_and_energy_1d(self, vt_g):
w_ln = self.coefficients.get_coefficients_1d(smooth=True)
v_g = np.zeros(self.ae.N)
n_g = np.zeros(self.ae.N)
for w_n, f_n, u_n in zip(w_ln, self.ae.f_ln, self.ae.s_ln): # For each angular momentum
u2_n = safe_sqr(u_n)
v_g += np.dot(w_n, u2_n)
n_g += np.dot(f_n, u2_n)
vt_g += self.weight * v_g / (n_g + 1e-10)
return 0.0 # Response part does not contribute to energy
def calculate_delta_xc(self, homolumo = None):
        if homolumo is None:
            # Calculate band gap
            print "Warning: Calculating KS-gap directly from the k-points can be inaccurate."
#homolumo = self.occupations.get_homo_lumo(self.wfs)
#homo, lumo = homolumo
#Ksgap = lumo-homo
#print "Using KS-gap of ", Ksgap
for a in self.density.D_asp:
ni = self.setups[a].ni
self.Dxc_Dresp_asp[a] = np.zeros((self.nlfunc.nspins, ni * (ni + 1) // 2))
self.Dxc_D_asp[a] = np.zeros((self.nlfunc.nspins, ni * (ni + 1) // 2))
# Calculate new response potential with LUMO reference
w_kn = self.coefficients.get_coefficients_by_kpt(self.kpt_u, lumo_perturbation=True,
homolumo=homolumo,
nspins=self.nspins)
f_kn = [ kpt.f_n for kpt in self.kpt_u ]
vt_sG = self.gd.zeros(self.nlfunc.nspins)
nt_sG = self.gd.zeros(self.nlfunc.nspins)
for kpt, w_n in zip(self.kpt_u, w_kn):
self.wfs.add_to_density_from_k_point_with_occupation(vt_sG, kpt, w_n)
self.wfs.add_to_density_from_k_point(nt_sG, kpt)
self.band_comm.sum(nt_sG)
self.kpt_comm.sum(nt_sG)
self.band_comm.sum(vt_sG)
self.kpt_comm.sum(vt_sG)
if self.wfs.symmetry:
for nt_G, vt_G in zip(nt_sG, vt_sG):
self.symmetry.symmetrize(nt_G, self.gd)
self.symmetry.symmetrize(vt_G, self.gd)
vt_sG /= nt_sG + 1e-10
self.Dxc_vt_sG = vt_sG.copy()
self.wfs.calculate_atomic_density_matrices_with_occupation(
self.Dxc_Dresp_asp, w_kn)
self.wfs.calculate_atomic_density_matrices_with_occupation(
self.Dxc_D_asp, f_kn)
def calculate_delta_xc_perturbation(self):
homo, lumo = self.occupations.get_homo_lumo(self.wfs)
Ksgap = lumo - homo
# Calculate average of lumo reference response potential
method1_dxc = np.average(self.Dxc_vt_sG[0])
nt_G = self.gd.empty()
ne = self.nvalence # Number of electrons
assert self.nspins == 1
lumo_n = ne // 2
eps_u =[]
eps_un = np.zeros((len(self.kpt_u),len(self.kpt_u[0].psit_nG)))
for u, kpt in enumerate(self.kpt_u):
#print "K-Point index: ",u
for n in range(len(kpt.psit_nG)):
nt_G[:] = 0.0
self.wfs.add_orbital_density(nt_G, kpt, n)
E = 0.0
for a in self.density.D_asp:
D_sp = self.Dxc_D_asp[a]
Dresp_sp = self.Dxc_Dresp_asp[a]
P_ni = kpt.P_ani[a]
Dwf_p = pack(np.outer(P_ni[n].T.conj(), P_ni[n]).real)
E += self.integrate_sphere(a, Dresp_sp, D_sp, Dwf_p)
#print "Atom corrections", E*27.21
E = self.grid_comm.sum(E)
E += self.gd.integrate(nt_G*self.Dxc_vt_sG[0])
E += kpt.eps_n[lumo_n]
#print "Old eigenvalue", kpt.eps_n[lumo_n]*27.21, " New eigenvalue ", E*27.21, " DXC", E*27.21-kpt.eps_n[lumo_n]*27.21
eps_un[u][n] = E
method2_lumo = min([ eps_n[lumo_n] for eps_n in eps_un])
method2_lumo = -self.kpt_comm.max(-method2_lumo)
method2_dxc = method2_lumo-lumo
Ha = 27.2116
Ksgap *= Ha
method1_dxc *= Ha
method2_dxc *= Ha
        if world.rank != 0:
return (Ksgap, method2_dxc)
print
print "\Delta XC calulation"
print "-----------------------------------------------"
print "| Method | KS-Gap | \Delta XC | QP-Gap |"
print "-----------------------------------------------"
print "| Averaging | %7.2f | %9.2f | %7.2f |" % (Ksgap, method1_dxc, Ksgap+method1_dxc)
print "| Lumo pert. | %7.2f | %9.2f | %7.2f |" % (Ksgap, method2_dxc, Ksgap+method2_dxc)
print "-----------------------------------------------"
print
return (Ksgap, method2_dxc)
def initialize_from_atomic_orbitals(self, basis_functions):
        # Initialize 'response-density' and density matrices
self.Dresp_asp = {}
self.D_asp = {}
for a in self.density.D_asp.keys():
ni = self.setups[a].ni
self.Dresp_asp[a] = np.zeros((self.nlfunc.nspins, ni * (ni + 1) // 2))
self.D_asp[a] = np.zeros((self.nlfunc.nspins, ni * (ni + 1) // 2))
f_sM = np.empty((self.nspins, basis_functions.Mmax))
self.D_asp = {}
f_asi = {}
w_asi = {}
for a in basis_functions.atom_indices:
w_j = self.setups[a].extra_xc_data['w_j']
# Basis function coefficients based of response weights
w_si = self.setups[a].calculate_initial_occupation_numbers(
0, False, charge=0, nspins=self.nspins, f_j=w_j)
# Basis function coefficients based on density
f_si = self.setups[a].calculate_initial_occupation_numbers(
0, False, charge=0, nspins=self.nspins)
if a in basis_functions.my_atom_indices:
self.Dresp_asp[a] = self.setups[a].initialize_density_matrix(w_si)
self.D_asp[a] = self.setups[a].initialize_density_matrix(f_si)
f_asi[a] = f_si
w_asi[a] = w_si
self.nt_sG.fill(0.0)
basis_functions.add_to_density(self.nt_sG, f_asi)
self.vt_sG.fill(0.0)
basis_functions.add_to_density(self.vt_sG, w_asi)
# Update vt_sG to correspond atomic response potential. This will be
# used until occupations and eigenvalues are available.
self.vt_sG /= self.nt_sG + 1e-10
def add_extra_setup_data(self, dict):
ae = self.ae
njcore = ae.njcore
w_ln = self.coefficients.get_coefficients_1d(smooth=True)
w_j = []
for w_n in w_ln:
for w in w_n:
w_j.append(w)
dict['w_j'] = w_j
w_j = self.coefficients.get_coefficients_1d()
x_g = np.dot(w_j[:njcore], safe_sqr(ae.u_j[:njcore]))
x_g[1:] /= ae.r[1:]**2 * 4*np.pi
x_g[0] = x_g[1]
dict['core_response'] = x_g
# For debugging purposes
w_j = self.coefficients.get_coefficients_1d()
u2_j = safe_sqr(self.ae.u_j)
v_g = self.weight * np.dot(w_j, u2_j) / (np.dot(self.ae.f_j, u2_j) +1e-10)
v_g[0] = v_g[1]
dict['all_electron_response'] = v_g
# Calculate Hardness of spherical atom, for debugging purposes
l = [ np.where(f<1e-3, e, 1000) for f,e in zip(self.ae.f_j, self.ae.e_j)]
h = [ np.where(f>1e-3, e, -1000) for f,e in zip(self.ae.f_j, self.ae.e_j)]
lumo_e = min(l)
homo_e = max(h)
        if lumo_e < 999: # If there is an unoccupied orbital
w_j = self.coefficients.get_coefficients_1d(lumo_perturbation = True)
v_g = self.weight * np.dot(w_j, u2_j) / (np.dot(self.ae.f_j, u2_j) +1e-10)
e2 = [ e+np.dot(u2*v_g, self.ae.dr) for u2,e in zip(u2_j, self.ae.e_j) ]
lumo_2 = min([ np.where(f<1e-3, e, 1000) for f,e in zip(self.ae.f_j, e2)])
print "New lumo eigenvalue:", lumo_2 * 27.2107
self.hardness = lumo_2 - homo_e
print "Hardness predicted: %10.3f eV" % (self.hardness * 27.2107)
def write(self, w, natoms):
"""Writes response specific data to disc.
During the writing process, the DeltaXC is calculated (if not yet calculated)
"""
        if self.Dxc_vt_sG is None:
self.calculate_delta_xc()
wfs = self.wfs
world = wfs.world
domain_comm = wfs.gd.comm
kpt_comm = wfs.kpt_comm
band_comm = wfs.band_comm
master = (world.rank == 0)
nadm = 0
for setup in wfs.setups:
ni = setup.ni
nadm += ni * (ni + 1) // 2
        # Not yet tested for parallelization
#assert world.size == 1
# Write the pseudodensity on the coarse grid:
if master:
w.add('GLLBPseudoResponsePotential',
('nspins', 'ngptsx', 'ngptsy', 'ngptsz'), dtype=float)
if kpt_comm.rank == 0:
for s in range(wfs.nspins):
vt_sG = wfs.gd.collect(self.vt_sG[s])
if master:
w.fill(vt_sG)
if master:
w.add('GLLBDxcPseudoResponsePotential',
('nspins', 'ngptsx', 'ngptsy', 'ngptsz'), dtype=float)
if kpt_comm.rank == 0:
for s in range(wfs.nspins):
vt_sG = wfs.gd.collect(self.Dxc_vt_sG[s])
if master:
w.fill(vt_sG)
print "Integration over vt_sG", domain_comm.sum(np.sum(self.vt_sG.ravel()))
print "Integration over Dxc_vt_sG", domain_comm.sum(np.sum(self.Dxc_vt_sG.ravel()))
if master:
all_D_sp = np.empty((wfs.nspins, nadm))
all_Dresp_sp = np.empty((wfs.nspins, nadm))
all_Dxc_D_sp = np.empty((wfs.nspins, nadm))
all_Dxc_Dresp_sp = np.empty((wfs.nspins, nadm))
p1 = 0
for a in range(natoms):
ni = wfs.setups[a].ni
nii = ni * (ni + 1) // 2
if a in self.D_asp:
D_sp = self.D_asp[a]
Dresp_sp = self.Dresp_asp[a]
Dxc_D_sp = self.Dxc_D_asp[a]
Dxc_Dresp_sp = self.Dxc_Dresp_asp[a]
else:
D_sp = np.empty((wfs.nspins, nii))
domain_comm.receive(D_sp, wfs.rank_a[a], 27)
Dresp_sp = np.empty((wfs.nspins, nii))
domain_comm.receive(Dresp_sp, wfs.rank_a[a], 271)
Dxc_D_sp = np.empty((wfs.nspins, nii))
domain_comm.receive(Dxc_D_sp, wfs.rank_a[a], 28)
Dxc_Dresp_sp = np.empty((wfs.nspins, nii))
domain_comm.receive(Dxc_Dresp_sp, wfs.rank_a[a], 272)
p2 = p1 + nii
all_D_sp[:, p1:p2] = D_sp
all_Dresp_sp[:, p1:p2] = Dresp_sp
all_Dxc_D_sp[:, p1:p2] = Dxc_D_sp
all_Dxc_Dresp_sp[:, p1:p2] = Dxc_Dresp_sp
p1 = p2
assert p2 == nadm
w.add('GLLBAtomicDensityMatrices', ('nspins', 'nadm'), all_D_sp)
w.add('GLLBAtomicResponseMatrices', ('nspins', 'nadm'), all_Dresp_sp)
w.add('GLLBDxcAtomicDensityMatrices', ('nspins', 'nadm'), all_Dxc_D_sp)
w.add('GLLBDxcAtomicResponseMatrices', ('nspins', 'nadm'), all_Dxc_Dresp_sp)
elif kpt_comm.rank == 0 and band_comm.rank == 0:
for a in range(natoms):
if a in self.density.D_asp:
domain_comm.send(self.D_asp[a], 0, 27)
domain_comm.send(self.Dresp_asp[a], 0, 271)
domain_comm.send(self.Dxc_D_asp[a], 0, 28)
domain_comm.send(self.Dxc_Dresp_asp[a], 0, 272)
def read(self, r):
wfs = self.wfs
world = wfs.world
domain_comm = wfs.gd.comm
kpt_comm = wfs.kpt_comm
band_comm = wfs.band_comm
self.vt_sG = wfs.gd.empty(wfs.nspins)
self.Dxc_vt_sG = wfs.gd.empty(wfs.nspins)
print "Reading vt_sG"
for s in range(wfs.nspins):
self.gd.distribute(r.get('GLLBPseudoResponsePotential', s),
self.vt_sG[s])
print "Reading Dxc_vt_sG"
for s in range(wfs.nspins):
self.gd.distribute(r.get('GLLBDxcPseudoResponsePotential', s),
self.Dxc_vt_sG[s])
print "Integration over vt_sG", domain_comm.sum(np.sum(self.vt_sG.ravel()))
print "Integration over Dxc_vt_sG", domain_comm.sum(np.sum(self.Dxc_vt_sG.ravel()))
# Read atomic density matrices and non-local part of hamiltonian:
D_sp = r.get('GLLBAtomicDensityMatrices')
Dresp_sp = r.get('GLLBAtomicResponseMatrices')
Dxc_D_sp = r.get('GLLBDxcAtomicDensityMatrices')
Dxc_Dresp_sp = r.get('GLLBDxcAtomicResponseMatrices')
self.D_asp = {}
self.Dresp_asp = {}
self.Dxc_D_asp = {}
self.Dxc_Dresp_asp = {}
p1 = 0
for a, setup in enumerate(wfs.setups):
ni = setup.ni
p2 = p1 + ni * (ni + 1) // 2
            # NOTE: Distributes the matrices to more processors than necessary
self.D_asp[a] = D_sp[:, p1:p2].copy()
self.Dresp_asp[a] = Dresp_sp[:, p1:p2].copy()
self.Dxc_D_asp[a] = Dxc_D_sp[:, p1:p2].copy()
self.Dxc_Dresp_asp[a] = Dxc_Dresp_sp[:, p1:p2].copy()
print "Proc", world.rank, " reading atom ", a
p1 = p2
if __name__ == "__main__":
from gpaw.xc_functional import XCFunctional
xc = XCFunctional('LDA')
dx = 1e-3
Ntot = 100000
x = np.array(range(1,Ntot+1))*dx
kf = (2*x)**(1./2)
n_g = kf**3 / (3*pi**2)
v_g = np.zeros(Ntot)
e_g = np.zeros(Ntot)
xc.calculate_spinpaired(e_g, n_g, v_g)
vresp = v_g - 2*e_g / n_g
f = open('response.dat','w')
for xx, v in zip(x, vresp):
print >>f, xx, v
f.close()
|
ajylee/gpaw-rtxs
|
gpaw/xc/gllb/c_response.py
|
Python
|
gpl-3.0
| 20,645
|
[
"GPAW"
] |
714155190386b5d05ecd4649d954c7e3fb977a46ccdf05a832bc74beea867999
|
#!/usr/bin/env python3
import os,sys,string,subprocess,signal,shutil,argparse
from subprocess import PIPE, run
#psutil
mcdir = sys.path[0]
#1 READ/PARSE COMMAND LINE ARGUMENTS
#1.1 PROCESS COMMAND LINE ARGUMENTS
parser = argparse.ArgumentParser(description="MetaCompass metagenome assembler version 2.0.0 by Victoria Cepeda (vcepeda@cs.umd.edu)")
#parser = argparse.ArgumentParser(description='Options')
group1 = parser.add_argument_group('required')
#group1.add_argument("-s",'--snakefile', help='metacompass rules file',default="",nargs='?',required=1,type=str)
#group1.add_argument("-S",'--Samples', help='Provide file with fq reads (1 file per line)',default="", nargs='?',required=0,type=str)
group1.add_argument("-1",'--forward', help='comma separated list of forward paired-end fastq or fastq.gz',default="", nargs='?',required=0,type=str)
group1.add_argument("-2",'--reverse', help='comma separated list of reverse paired-end fastq or fastq.gz',default="", nargs='?',required=0,type=str)
group1.add_argument("-U",'--unpaired', help='comma separated list of unpaired fastq or fastq.gz',default="", nargs='?',required=0,type=str)
group1.add_argument("-o",'--outdir', help='output directory',default="metacompass_assembly", nargs='?',required=0,type=str)
group1.add_argument("-y",'--memory', help='memory',default=8, nargs='?',required=0,type=int)
group1.add_argument("-t",'--threads', type=int,help='num threads',default=1, nargs='?')
group5 = parser.add_argument_group("reference selection")
group5.add_argument("-r",'--ref', help='reference genomes',default="NA",nargs='?')
group5.add_argument("-s",'--refsel', help='reference selection [tax/all]',default="tax",nargs='?')
group5.add_argument("-p",'--pickref', help='depth or breadth',default="breadth",nargs='?')
group5.add_argument("-l",'--readlen', help='max read length (needed by kmer-mask)',default="100",nargs='?',type=int)
group6 = parser.add_argument_group("assembly")
#group6 = parser.add_mutually_exclusive_group()
group6.add_argument("-m",'--mincov', help='min coverage to assemble',default="1",nargs='?',type=int)
group6.add_argument("-g",'--minctglen', help='min contig length',default="1",nargs='?',type=int)
group6.add_argument('--tracks', help='run pilon with --tracks option',default=False, required=0,action='store_true')
group2 = parser.add_argument_group('output')
group2.add_argument("-b",'--clobber', help='clobber output directory (if exists?)',default=False,required=0,action='store_true')
group2.add_argument("-k",'--keepoutput', help='keep all output generated (default is to delete all but final fasta files)',default=False,required=0,action='store_true')
#group3 = parser.add_argument_group('performance')
#group3.add_argument("-t",'--threads', type=int,help='num threads',default=1, nargs='?')
group4 = parser.add_argument_group('snakemake')
group4.add_argument("-c",'--config', help='config (json) file, set read length etc',default="",nargs='?',required=0,type=str)
group4.add_argument('--Force', help='force snakemake to rerun',default=False,required=0,action='store_true')
group4.add_argument('--unlock',help='unlock snakemake locks',default=False, required=0,action='store_true')
group4.add_argument('--nolock', help='remove stale locks',default=False,required=0,action='store_true')
group4.add_argument('--verbose', help='verbose',default=False,required=0,action='store_true')
group4.add_argument('--reason', help='reason',default=False,required=0,action='store_true')
#--dryrun, -n :Do not execute anything.
group4.add_argument('--dryrun', help='dryrun',default=False,required=0,action='store_true')
#-cores
args = parser.parse_args()
#1.2 PROCESS COMMAND LINE ARGUMENTS
minctglen = args.minctglen
mincov = args.mincov
readlen=args.readlen
refsel=args.refsel
threads = args.threads
memory = args.memory
ref = args.ref
keepoutput = args.keepoutput
tracks=args.tracks
#snakefile = args.snakefile
config = args.config
#samples = args.Samples.replace(" ","")
unpaired = args.unpaired
fpaired = args.forward
rpaired = args.reverse
outdir = args.outdir
pickref = args.pickref
clobber = args.clobber
unlock = args.unlock
nolock = args.nolock
force = args.Force
verbose = args.verbose
reason = args.reason
dryrun = args.dryrun
if not (args.unpaired or (args.forward and args.reverse)):
parser.error('\nRequired: \
{-1 FORWARD -2 REVERSE [-U UNPAIRED ]| -U UNPAIRED} [-o OUTDIR] -y MEMORY -t THREADS')
if not (args.ref or (args.forward and args.reverse)):
parser.error('\nRequired: \
{-1 FORWARD -2 REVERSE [-U UNPAIRED ]| -U UNPAIRED} [-o OUTDIR] -y MEMORY -t THREADS')
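#Typical invocations of this script (file names below are only examples):
# paired-end reads with automatic reference selection:
#   ./go_metacompass2.py -1 r1.fastq -2 r2.fastq -o asm_out -t 8 -y 16
# unpaired reads against a user-supplied reference set:
#   ./go_metacompass2.py -U reads.fastq -r refs.fasta -o asm_out -t 8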
#4. Check for existence of output directory:
if os.path.exists(outdir):
if not clobber and not force:#test this
if os.path.exists("%s/run.ok"%(outdir)):
print("ERROR: Output dir (%s) exists and contains a previous, successful run. Please specify alternate output directory or force run with --force"%(outdir))
sys.exit(1)
elif os.path.exists("%s/run.fail"%(outdir)):
print("ERROR: Output dir (%s) exists and contains a previous, failed run. Please specify alternate output directory or force run with --force"%(outdir))
sys.exit(1)
else:
print("ERROR: Output dir (%s) exists. Please specify alternate output directory or run with --clobber or --unlock"%(outdir))#force
sys.exit(1)
elif force:#test this
os.system("rm -rf %s/*"%(outdir))
os.system("mkdir %s"%(outdir))
else:
os.makedirs(outdir)
optsfile="%s/opts.txt"%(outdir)
outfile = open(optsfile,'a')
print ( "minctglen: %s" % (minctglen), file= outfile)
print ( "mincov: %s" % (mincov), file= outfile)
print ( "readlen: %s" % (readlen), file= outfile)
print ( "refsel: %s" % (refsel), file= outfile)
print ( "threads: %s" % (threads), file= outfile)
print ( "memory: %s" % (memory), file= outfile)
print ( "ref: %s" % (ref), file= outfile)
print ( "keepoutput: %s" % (keepoutput), file= outfile)
print ( "tracks: %s" % (tracks), file= outfile)
print ( "unpaired: %s" % (unpaired), file= outfile)
print ( "fpaired: %s" % (fpaired), file= outfile)
print ( "rpaired: %s" % (rpaired), file= outfile)
print ( "outdir: %s" % (outdir), file= outfile)
print ( "pickref: %s" % (pickref), file= outfile)
print ( "clobber: %s" % (clobber), file= outfile)
print ( "unlock: %s" % (unlock), file= outfile)
print ( "nolock: %s" % (nolock), file= outfile)
print ( "force: %s" % (force), file= outfile)
print ( "verbose: %s" % (verbose), file= outfile)
print ( "reason: %s" % (reason), file= outfile)
print ( "dryrun: %s" % (dryrun), file= outfile)
#cmd="snakemake--prioritize join_contigs "
cmd="snakemake --printshellcmds "
#--printshellcmds Print out the shell commands that will be executed.
if verbose:
cmd += " --verbose"
if unlock:
cmd += " --unlock"
if nolock:
cmd += " --nolock"
if force:
cmd += " --Force"
if reason:
cmd += " --reason"
if dryrun:
cmd += " --dryrun"
#1.3 Checking config file
print("confirming config file exists...", file= outfile)
if config == "":
config = "%s/snakemake/config.json"%(mcdir)
if not os.path.exists(config):
print("ERROR: configfile %s not found!"%(config))
sys.exit(1)
print ("config: %s" % (config), file= outfile)
#1.4 Checking reference genomes
#fix
if ref != "NA":
print("confirming file containing reference genomes exists...",file=outfile)
if not os.path.exists(ref):
print("ERROR: reference genome file %s not found!"%(ref))
sys.exit(1)
else:
# os.system("cp %s %s/%s"%(ref,outdir,ref.split(os.sep)[-1]))
print("Reference genome file: %s" % (ref), file= outfile)
#2. Check for dependencies:
print("checking for assembly dependencies (Snakemake,Bowtie2,Samtools)",file=outfile)
#2.1 Check for assembly dependencies:
command = ['which', 'snakemake']
result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True)
print("Snakemake",file=outfile)
print(result.stdout,file=outfile)
if result.returncode != 0:
print("Snakemake not found")
sys.exit(1)
command = ['which', 'bowtie2']
result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True)
print("Bowtie2:",file=outfile)
print(result.stdout,file=outfile)
if result.returncode != 0:
print("Bowtie2 not found. Bowtie2 v>=2.2.9 required")
sys.exit(1)
#else:
#version=str(os.system("bowtie2 --version|head -n1|cut -f3 -d ' '|cut -f1-3 -d '.'|head -n1"))
#version=version.split("\n")[0]
#required=str("2.2.9")
#print ("version %s required %s" % (version, required))
#if version < required:#"2.2.9":
# print("Bowtie2 2.2.9 or higher required")
# sys.exit(1)
command = ['which', 'samtools']
result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True)
print("Samtools:",file=outfile)
print(result.stdout,file=outfile)
if result.returncode != 0:
print("Samtools not found")
sys.exit(1)
#2.2 Check for reference selection dependencies:
print("checking for reference selection dependencies (Blast, kmer-mask, mash)",file=outfile)
if ref == "NA":
command = ['which', 'blastn']
result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True)
print("BLAST:",file=outfile)
print(result.stdout,file=outfile)
if result.returncode != 0:
print("Blast not found")
sys.exit(1)
command = ['which', 'kmer-mask']
result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True)
print("Kmer-mask:",file=outfile)
print(result.stdout,file=outfile)
if result.returncode != 0:
print("Kmer-mask not found")
sys.exit(1)
command = ['which', 'mash']
result = run(command, stdout=PIPE, stderr=PIPE, universal_newlines=True)
print("Mash:",file=outfile)
print(result.stdout,file=outfile)
if result.returncode != 0:
print("Mash not found")
sys.exit(1)
allsamples=[]
if fpaired != "":
for file in fpaired.split(","):
if not os.path.exists(file):
print("ERROR: could not locate file %s"%(file))
sys.exit()
else:
allsamples.append(file)
if rpaired != "":
for file in rpaired.split(","):
if not os.path.exists(file):
print("ERROR: could not locate file %s"%(file))
sys.exit()
else:
allsamples.append(file)
paired=""
if fpaired != "" and rpaired != "":
paired="yes"
#-U single end
if unpaired != "":
for file in unpaired.split(","):
if not os.path.exists(file):
print("ERROR: could not locate file %s"%(file))
sys.exit(1)
else:
allsamples.append(file)
##########################################################################
#reference_selection vs assembly only
cmd += " --cores %d -a --configfile %s --config outdir=%s pickref=%s mcdir=%s length=%d mincov=%d minlen=%d nthreads=%d memory=%d refsel=%s tracks=%s"%(threads,config,outdir,pickref,mcdir,readlen,mincov,minctglen,threads,memory,refsel,tracks)
cmd += " reads="
print("ALL READS found:",file=outfile)
for fqfile in allsamples:
cmd += str(fqfile)+","
print("%s"%(fqfile),file=outfile)
cmd = cmd[:-1]#remove last comma
if fpaired != "" and rpaired !="":
cmd += " r1=%s r2=%s"%(fpaired,rpaired)
if unpaired != "":
cmd += " ru=%s"%(unpaired)
if ref != "NA":
print("REFERENCE genome file provided. Reference Selection step will be skipped.")
cmd += " reference=%s" %(ref)
if unpaired != "" and paired =="":
cmd += " --snakefile %s/snakemake/metacompass.ref.unpaired.py"%(mcdir)
elif paired != "" and unpaired =="":
cmd += " --snakefile %s/snakemake/metacompass.ref.paired.py"%(mcdir)
elif paired != "" and unpaired !="":
cmd += " --snakefile %s/snakemake/metacompass.ref.py"%(mcdir)
#elif samples =="":
# cmd += " --snakefile %s/snakemake/metacompass.ref.py"%(mcdir)
else:
cmd += " reference=reference_selection/mc.refseq.fna"
if unpaired != "" and paired =="":
cmd += " --snakefile %s/snakemake/metacompass.unpaired.py"%(mcdir)
elif paired != "" and unpaired =="":
cmd += " --snakefile %s/snakemake/metacompass.paired.py"%(mcdir)
elif paired != "" and unpaired !="":
cmd += " --snakefile %s/snakemake/metacompass.py"%(mcdir)
#print("Snakemake command:")
#print("%s\n"%(cmd))
print("Snakemake command:",file=outfile)
print("%s\n"%(cmd),file=outfile)
#RUN SNAKEMAKE!!
try:
ret = subprocess.Popen(cmd,shell=True)
ret.communicate()
except KeyboardInterrupt:
# print('Interrupted')
# print("ERROR: snakemake command failed; exiting..")
# os.system("touch %s/run.fail"%(outdir))
try:
sys.exit(1)
except SystemExit:
# print("ERROR: snakemake command failed; exiting..")
# os.system("touch %s/run.fail"%(outdir))
os._exit(1)
#os.killpg(ret.pid,signal.SIGKILL)
#os.killpg(ret.pid,signal.SIGKILL)
#print("ERROR: SIGKILL")
#sys.exit(1)
except:
ret.returncode = 1
if ret.returncode != 0:
print("ERROR: snakemake command failed; exiting..")
os.system("touch %s/run.fail"%(outdir))
try:
sys.exit(1)
except SystemExit:
#print("ERROR: snakemake command failed; exiting..")
#os.system("touch %s/run.fail"%(outdir))
os._exit(1)
else:
if dryrun:
sys.exit(0)
#5 CLEANING output files
if os.path.exists("%s/assembly/metacompass.genomes_coverage.txt"%(outdir)):
os.system("mv %s/assembly/metacompass.genomes_coverage.txt %s/metacompass_output/"%(outdir,outdir))
if os.path.exists("%s/assembly/metacompass.assembled.fna"%(outdir)):
os.system("mv %s/assembly/metacompass.assembled.fna %s/metacompass_output/metacompass.references.fna"%(outdir,outdir))
print("Cleaning up files..")
if os.path.exists("%s/intermediate_files"%(outdir)):
os.system("rm -rf %s/intermediate_files"%(outdir))
#reference_Selection
if os.path.exists("%s/reference_selection"%(outdir)):
os.system("rm %s/reference_selection/*.fastq "%(outdir))
os.system("rm %s/reference_selection/mc.blastn* "%(outdir))
os.system("rm %s/reference_selection/contigs_clusters "%(outdir))
os.system("rm %s/reference_selection/*msh* "%(outdir))
os.system("mv %s/reference_selection/*.log %s/logs "%(outdir,outdir))
#assembly
if os.path.exists("%s/assembly/mc.index.1.bt2"%(outdir)):
os.system("rm %s/assembly/mc.index* "%(outdir))
if os.path.exists("%s/assembly/mc.sam"%(outdir)):
os.system("mv %s/assembly/mc.sam %s/mapped_reads/"%(outdir,outdir))
#error_correction
if os.path.exists("%s/error_correction/mc.index.1.bt2"%(outdir)):
os.system("rm %s/error_correction/mc.index* "%(outdir))
if os.path.exists("%s/error_correction/mc.sam"%(outdir)):
os.system("rm %s/error_correction/mc.sam "%(outdir))
if os.path.exists("%s/error_correction/mc.sam.bam"%(outdir)):
os.system("rm %s/error_correction/mc.sam.bam "%(outdir))
if os.path.exists("%s/error_correction/mc_unpaired.sam"%(outdir)):
os.system("rm %s/error_correction/mc_unpaired.sam "%(outdir))
if os.path.exists("%s/error_correction/mc_unpaired.sam.bam"%(outdir)):
os.system("rm %s/error_correction/mc_unpaired.sam.bam "%(outdir))
if os.path.exists("%s/error_correction/mc.sam.unmapped.1.fq"%(outdir)):
os.system("mv %s/error_correction/mc.sam.unmapped.1.fq %s/unmapped_reads/"%(outdir,outdir))
if os.path.exists("%s/error_correction/mc.sam.unmapped.2.fq"%(outdir)):
os.system("mv %s/error_correction/mc.sam.unmapped.2.fq %s/unmapped_reads/"%(outdir,outdir))
if os.path.exists("%s/error_correction/mc.sam.unmapped.u.fq"%(outdir)):
os.system("mv %s/error_correction/mc.sam.unmapped.u.fq %s/unmapped_reads/"%(outdir,outdir))
#simplified output
if not keepoutput:
os.system("rm -rf %s/intermediate_files"%(outdir))
os.system("rm -rf %s/error_correction"%(outdir))
if os.path.exists("%s/reference_selection"%(outdir)):
os.system("rm -rf %s/reference_selection"%(outdir))
os.system("rm -rf %s/assembly"%(outdir))
if os.path.exists("%s/unmapped_reads"%(outdir)):
os.system("rm -rf %s/unmapped_reads"%(outdir))
if os.path.exists("%s/mapped_reads"%(outdir)):
os.system("rm -rf %s/mapped_reads"%(outdir))
|
marbl/MetaCompass
|
go_metacompass2.py
|
Python
|
artistic-2.0
| 16,636
|
[
"BLAST"
] |
67e46cf523b8939960ceb6a3cfe7d4bbb673d0dd6b6daf0dffff28ceae41d8e9
|
# -*- coding: utf-8 -*-
import contextlib
import pyjade
def process_param(key, value, terse=False):
if terse:
if (key == value) or (value is True):
return key
if isinstance(value, basestring):
value = value.decode('utf8')
return '''%s="%s"''' % (key, value)
TYPE_CODE = {
'if': lambda v: bool(v),
'unless': lambda v: not bool(v),
'elsif': lambda v: bool(v),
'else': lambda v: True}
@contextlib.contextmanager
def local_context_manager(compiler, local_context):
old_local_context = compiler.local_context
new_local_context = dict(compiler.local_context)
new_local_context.update(local_context)
compiler.local_context = new_local_context
yield
compiler.local_context = old_local_context
class HTMLCompiler(pyjade.compiler.Compiler):
global_context = dict()
local_context = dict()
mixins = dict()
def _do_eval(self, value):
if isinstance(value, basestring):
value = value.encode('utf-8')
try:
value = eval(value, self.global_context, self.local_context)
except:
return ''
return value
def _get_value(self, attr):
value = attr['val']
if attr['static']:
return attr['val']
if isinstance(value, basestring):
return self._do_eval(value)
else:
return attr['name']
def _make_mixin(self, mixin):
arg_names = [arg.strip() for arg in mixin.args.split(",")]
def _mixin(self, args):
if args:
arg_values = self._do_eval(args)
else:
arg_values = []
local_context = dict(zip(arg_names, arg_values))
with local_context_manager(self, local_context):
self.visitBlock(mixin.block)
return _mixin
def interpolate(self,text):
return self._interpolate(text, lambda x: str(self._do_eval(x)))
def visitInclude(self, node):
raise pyjade.exceptions.CurrentlyNotSupported()
def visitExtends(self, node):
raise pyjade.exceptions.CurrentlyNotSupported()
def visitMixin(self, mixin):
if mixin.block:
self.mixins[mixin.name] = self._make_mixin(mixin)
else:
self.mixins[mixin.name](self, mixin.args)
def visitAssignment(self, assignment):
self.global_context[assignment.name] = eval(assignment.val)
def visitConditional(self, conditional):
if not conditional.sentence:
value = False
else:
value = self._do_eval(conditional.sentence)
if TYPE_CODE[conditional.type](value):
self.visit(conditional.block)
elif conditional.next:
for item in conditional.next:
self.visitConditional(item)
def visitCode(self, code):
if code.buffer:
val = code.val.lstrip()
val = self._do_eval(val)
if code.escape:
                val = str(val).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
self.buf.append(val)
if code.block:
self.visit(code.block)
def visitEach(self, each):
obj = self._do_eval(each.obj)
for item in obj:
local_context = dict()
if len(each.keys) > 1:
for (key, value) in zip(each.keys, item):
local_context[key] = value
else:
local_context[each.keys[0]] = item
with local_context_manager(self, local_context):
self.visit(each.block)
def attributes(self, attrs):
return " ".join(['''%s="%s"''' % (k,v) for (k,v) in attrs.items()])
def visitDynamicAttributes(self, attrs):
classes = []
params = []
for attr in attrs:
if attr['name'] == 'class':
value = self._get_value(attr)
if isinstance(value, list):
classes.extend(value)
else:
classes.append(value)
else:
value = self._get_value(attr)
if (value is not None) and (value is not False):
params.append((attr['name'], value))
if classes:
classes = [unicode(c) for c in classes]
params.append(('class', " ".join(classes)))
if params:
self.buf.append(" "+" ".join([process_param(k, v, self.terse) for (k,v) in params]))
def visitAttributes(self,attrs):
temp_attrs = []
for attr in attrs:
if attr['static']:
if temp_attrs:
self.visitDynamicAttributes(temp_attrs)
temp_attrs = []
self.buf.append(' %s=%s'%(attr['name'],attr['val']))
else:
temp_attrs.append(attr)
if temp_attrs: self.visitDynamicAttributes(temp_attrs)
def process_jade(src):
parser = pyjade.parser.Parser(src)
block = parser.parse()
compiler = HTMLCompiler(block, pretty=True)
return compiler.compile()
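# A minimal usage sketch (the template string is illustrative):
#
#     html = process_jade("h1 Hello")
#     # -> an "<h1>Hello</h1>" fragment, with whitespace added by pretty-printing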
|
xlk521/cloudguantou
|
pyjade/ext/html.py
|
Python
|
bsd-3-clause
| 5,096
|
[
"VisIt"
] |
8e93d6941d6805481e50dd00126f0e183728e974ffc69ad528b18a4eac58d105
|
"""
Regression tests for Studio's Setting page.
"""
from bok_choy.web_app_test import WebAppTest
from edxapp_acceptance.pages.studio.users import UsersPageMixin
from edxapp_acceptance.pages.studio.settings_advanced import (
AdvancedSettingsPage
)
from edxapp_acceptance.pages.studio.settings_group_configurations import (
GroupConfigurationsPage
)
from regression.tests.studio.studio_base_test import StudioBaseTestClass
from regression.pages.studio.login_studio import StudioLogin
from regression.pages.studio.settings_studio import SettingsPageExtended
from regression.tests.helpers import LoginHelper, get_course_info
from regression.pages.studio.grading_studio import GradingPageExtended
from regression.pages.studio.utils import (
get_text
)
class ScheduleAndDetailsTest(StudioBaseTestClass):
"""
Tests for Studio's Setting page.
"""
def setUp(self):
super(ScheduleAndDetailsTest, self).setUp()
self.login_page = StudioLogin(self.browser)
LoginHelper.login(self.login_page)
self.course_info = get_course_info()
self.settings_page = SettingsPageExtended(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.settings_page.visit()
def test_file_format(self):
"""
Scenario: Upload course image of a wrong format.
Given that I am on the Settings page of the course.
And I click on 'Upload course card image'
And I upload the image with wrong format.
Then I should see upload error
And path in course image input box should not have
changed.
"""
# Get current course image.
current_file = self.settings_page.get_element(
'.wrapper-input input'
).get_attribute('value')
# Upload the image with wrong format.
self.settings_page.upload_course_image('README.rst')
# Assert that error is shown
self.settings_page.wait_for_element_visibility(
'.message.message-status.error.is-shown',
'Error is shown'
)
self.assertEqual(
get_text(
self.settings_page,
'.message.message-status.error.is-shown'
),
'Only JPEG or PNG files can be uploaded. '
'Please select a file ending in .jpeg or .png to upload.'
)
self.settings_page.cancel_upload()
# Assert that file path is unchanged.
self.assertEqual(
current_file,
self.settings_page.get_element(
'.wrapper-input input'
).get_attribute('value')
)
def test_cancel_upload(self):
"""
Scenario: Upload a new course card image but cancel afterwards.
Given that I am on the Settings page of the course.
And I click on 'Upload course card image'
And I upload the image
And I cancel the image upload.
And path in course image input box should not have
changed.
"""
# Get current course image.
current_file = self.settings_page.get_element(
'.wrapper-input input'
).get_attribute('value')
self.settings_page.visit()
# Upload the image.
self.settings_page.upload_course_image('Image.png')
# Cancel the upload
self.settings_page.cancel_upload()
# Course card image should be the same as before.
self.assertEqual(
current_file,
self.settings_page.get_element(
'.wrapper-input input'
).get_attribute('value'))
class ScheduleAndDetailsLinks(WebAppTest):
"""
Tests for Studio's Setting page links.
"""
def setUp(self):
super(ScheduleAndDetailsLinks, self).setUp()
self.login_page = StudioLogin(self.browser)
LoginHelper.login(self.login_page)
self.course_info = get_course_info()
self.settings_page = SettingsPageExtended(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.settings_page.visit()
def test_other_grading_link(self):
"""
Verifies that user can click and navigate to Grading
"""
name = 'Grading'
grading_page = GradingPageExtended(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run'])
self.settings_page.click_other_settings_links(name)
grading_page.wait_for_page()
def test_other_course_team_link(self):
"""
Verifies that user can click and navigate to Course Team
"""
name = 'Course Team'
course_team_page = UsersPageMixin(self.browser)
self.settings_page.click_other_settings_links(name)
course_team_page.wait_for_page()
def test_other_group_configuration_link(self):
"""
Verifies that user can click and navigate to Group Configuration
"""
name = 'Group Configurations'
group_configuration = GroupConfigurationsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run'])
self.settings_page.click_other_settings_links(name)
group_configuration.wait_for_page()
def test_other_advanced_settings_link(self):
"""
Verifies that user can click and navigate to Advanced Settings
"""
name = 'Advanced Settings'
advanced_settings = AdvancedSettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run'])
self.settings_page.click_other_settings_links(name)
advanced_settings.wait_for_page()
|
raeeschachar/edx-e2e-mirror
|
regression/tests/studio/test_schedule_and_details_settings.py
|
Python
|
agpl-3.0
| 5,952
|
[
"VisIt"
] |
4733a8a4e03e4ea19894c216b98d4af4244c315e62f621ff70169f7a26197edf
|
#!/usr/bin/env python
#
# texttable - module for creating simple ASCII tables
# Copyright (C) 2003-2011 Gerome Fournier <jef(at)foutaise.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""module for creating simple ASCII tables
Example:
table = Texttable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([ ["Name", "Age", "Nickname"],
["Mr\\nXavier\\nHuon", 32, "Xav'"],
["Mr\\nBaptiste\\nClement", 1, "Baby"] ])
print table.draw() + "\\n"
table = Texttable()
table.set_deco(Texttable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([["text", "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
print table.draw()
Result:
+----------+-----+----------+
| Name | Age | Nickname |
+==========+=====+==========+
| Mr | | |
| Xavier | 32 | |
| Huon | | Xav' |
+----------+-----+----------+
| Mr | | |
| Baptiste | 1 | |
| Clement | | Baby |
+----------+-----+----------+
text float exp int auto
===========================================
abcd 67.000 6.540e+02 89 128.001
efgh 67.543 6.540e-01 90 1.280e+22
ijkl 0.000 5.000e-78 89 0.000
mnop 0.023 5.000e+78 92 1.280e+22
"""
__all__ = ["Texttable", "ArraySizeError"]
__author__ = 'Gerome Fournier <jef(at)foutaise.org>'
__license__ = 'GPL'
__version__ = '0.8.1'
__credits__ = """\
Jeff Kowalczyk:
- textwrap improved import
- comment concerning header output
Anonymous:
- add_rows method, for adding rows in one go
Sergey Simonenko:
- redefined len() function to deal with non-ASCII characters
Roger Lew:
- columns datatype specifications
Brian Peterson:
- better handling of unicode errors
"""
import math
import sys
import string
from functools import reduce
try:
if sys.version >= '2.3':
import textwrap
elif sys.version >= '2.2':
from optparse import textwrap
else:
from optik import textwrap
except ImportError:
sys.stderr.write("Can't import textwrap module!\n")
raise
def len(iterable):
"""Redefining len here so it will be able to work with non-ASCII characters
"""
if not isinstance(iterable, str):
return iterable.__len__()
try:
return len(str(iterable, 'utf'))
except:
return iterable.__len__()
class ArraySizeError(Exception):
"""Exception raised when specified rows don't fit the required size
"""
def __init__(self, msg):
self.msg = msg
Exception.__init__(self, msg, '')
def __str__(self):
return self.msg
class bcolors:
PURPLE = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
ENDC = '\033[0m'
WHITE = ''
def bcolors_public_props():
return (name for name in dir(bcolors) if not name.startswith('_'))
def get_color_string(type, string):
end = bcolors.ENDC
if type == bcolors.WHITE:
end = ''
return '%s%s%s' % (type, string, end)
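# For example:
#   get_color_string(bcolors.GREEN, "ok")  -> '\033[92mok\033[0m'
#   get_color_string(bcolors.WHITE, "ok")  -> 'ok' (WHITE adds no escape codes)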
class Texttable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
def __init__(self, max_width=80):
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
if max_width <= 0:
max_width = False
self._max_width = max_width
self._precision = 3
self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
Texttable.HEADER
self.set_chars(['-', '|', '+', '='])
self.reset()
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
def set_deco(self, deco):
"""Set the table decoration
        - 'deco' can be a combination of:
Texttable.BORDER: Border around the table
Texttable.HEADER: Horizontal line below the header
Texttable.HLINES: Horizontal lines between rows
Texttable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
Texttable.BORDER | Texttable.HEADER
"""
self._deco = deco
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either "a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
raise ValueError('width must be an integer greater than or equal to 0')
self._precision = width
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(str, array))
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i,x in enumerate(array):
cells.append(self._str(i,x))
self._rows.append(cells)
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a bi-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on bi-dimensional arrays, to keep the code
# usable under Python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, '__next__'):
self.header(next(rows))
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
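# e.g. table.add_rows([["name", "age"], ["Alice", "30"]]) uses the first row as the header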
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1]
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
if isinstance(x, str):
return x
if x is None:
return str(x)
if isinstance(x, bytes):
# decode instead of str() to avoid the "b'...'" repr of bytes objects
return x.decode('utf-8')
return str(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements" \
% self._row_size)
def _has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & Texttable.VLINES > 0
def _has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & Texttable.HLINES > 0
def _has_border(self):
"""Return a boolean, if border is required or not
"""
return self._deco & Texttable.BORDER > 0
def _has_header(self):
"""Return a boolean, if header line is required or not
"""
return self._deco & Texttable.HEADER > 0
def _hline_header(self):
"""Print header's horizontal line
"""
return self._build_hline(True)
def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string
def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
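# e.g. widths [3, 2] yield "----+---" here when vlines are drawn, "--------" otherwise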
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such as newlines and tabs
"""
for attr in bcolors_public_props():
cell = cell.replace(getattr(bcolors, attr), '').replace(bcolors.ENDC,'')
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
for attr in bcolors_public_props():
part = part.replace(getattr(bcolors, attr), '')
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has already been specified, return immediately. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
items = len(maxi)
length = reduce(lambda x,y: x+y, maxi)
if self._max_width and length + items * 3 + 1 > self._max_width:
max_lengths = maxi
maxi = [(self._max_width - items * 3 -1) // items \
for n in range(items)]
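# e.g. with max_width=80 and 5 columns, each column starts from (80 - 5*3 - 1) // 5 = 12 characters before redistribution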
# free space to distribute
free = 0
# how many columns are oversized
oversized = 0
# reduce size of columns that need less space and calculate how
# much space is freed
for col, max_len in enumerate(max_lengths):
current_length = maxi[col]
# column needs less space, adjust and
# update free space
if current_length > max_len:
free += current_length - max_len
maxi[col] = max_len
# column needs more space, count it
elif max_len > current_length:
oversized += 1
# as long as free space is available, distribute it
while free > 0:
# available free space for each oversized column
free_part = int(math.ceil(float(free) / float(oversized)))
for col, max_len in enumerate(max_lengths):
current_length = maxi[col]
# column needs more space
if current_length < max_len:
# how much space is needed
needed = max_len - current_length
# enough free space for column
if needed <= free_part:
maxi[col] = max_len
free -= needed
oversized -= 1
# still oversized after re-sizing
else:
maxi[col] = maxi[col] + free_part
free -= free_part
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
for i in range(len(line[0])):
if self._has_border():
out += "%s " % self._char_vert
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
lost_color = bcolors.WHITE
original_cell = cell_line
for attr in bcolors_public_props():
cell_line = cell_line.replace(
getattr(bcolors, attr), '').replace(bcolors.ENDC,''
)
if cell_line.replace(bcolors.ENDC,'') != original_cell.replace(
bcolors.ENDC,'') and attr != 'ENDC':
if not lost_color:
lost_color = attr
fill = width - len(cell_line)
try:
cell_line = get_color_string(
getattr(bcolors, lost_color),cell_line
)
except AttributeError:
pass
if isheader:
align = "c"
if align == "r":
out += "%s " % (fill * space + cell_line)
elif align == "c":
out += "%s " % (fill//2 * space + cell_line \
+ (fill//2 + fill%2) * space)
else:
out += "%s " % (cell_line + fill * space)
if length < len(line):
out += "%s " % [space, self._char_vert][self._has_vlines()]
out += "%s\n" % ['', self._char_vert][self._has_border()]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
original_cell = cell
lost_color = bcolors.WHITE
for attr in bcolors_public_props():
cell = cell.replace(
getattr(bcolors, attr), '').replace(bcolors.ENDC,'')
if cell.replace(bcolors.ENDC,'') != original_cell.replace(
bcolors.ENDC,'') and attr != 'ENDC':
if not lost_color:
lost_color = attr
for c in cell.split('\n'):
if type(c) is not str:
try:
c = str(c, 'utf')
except UnicodeDecodeError as strerror:
sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (c, strerror))
c = str(c, 'utf', 'replace')
try:
array.extend(
[get_color_string(
getattr(bcolors, lost_color),x
) for x in textwrap.wrap(c, width)
]
)
except AttributeError:
array.extend(textwrap.wrap(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * (missing // 2)
cell.extend([""] * (missing // 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
if __name__ == '__main__':
table = Texttable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([ [get_color_string(bcolors.GREEN, "Name Of Person"), "Age", "Nickname"],
["Mr\nXavier\nHuon", 32, "Xav'"],
[get_color_string(bcolors.BLUE,"Mr\nBaptiste\nClement"), 1, get_color_string(bcolors.RED,"Baby")] ])
print(table.draw() + "\n")
table = Texttable()
table.set_deco(Texttable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([['text', "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
print(table.draw())
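# A minimal extra sketch: only the outer border and the header separator are drawn.
table = Texttable()
table.set_deco(Texttable.BORDER | Texttable.HEADER)
table.set_cols_align(["l", "r"])
table.add_rows([["key", "value"],
["pi", "3.14159"],
["answer", "42"]])
print(table.draw())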
|
ClusterHQ/dvol
|
dvol_python/texttable.py
|
Python
|
apache-2.0
| 21,967
|
[
"Brian"
] |
91c39a332a7bfebf7db0bb4fc41686fe9a270d774cc14a8950b1e52a2ce7311f
|
from __future__ import print_function
# module cubicSpline
''' k = curvatures(xData,yData).
Returns the curvatures of cubic spline at its knots.
y = evalSpline(xData,yData,k,x).
Evaluates cubic spline at x. The curvatures k can be
computed with the function 'curvatures'.
From:
http://www.dur.ac.uk/physics.astrolab/py_source/kiusalaas/v1_with_numpy/cubicSpline.py
Example
from numpy import *
import cubicSpline
x=arange(10)
y=sin(x)
xx=arange(9)+0.5
k=cubicSpline.curvatures(x,y)
yy=cubicSpline.evalSpline(x,y,k,xx)
yy
'''
import numpy as np
import LUdecomp3
from numpy import logical_and, asarray,zeros_like,floor,append
from numpy.core.umath import sqrt, exp, greater, less, cos, add, sin, \
less_equal, greater_equal
import spl_int
def spl_c(x,y):
'''
spline curvature coefficients
input: x,y data knots
returns: tuple (datax, datay, curvature coefficients at knots)
'''
n = np.size(x)
if np.size(y)==n:
return spl_int.spl_int(x,y,n)
def spl_ev(xx,xyk):
'''
evaluates spline at xx
'''
#import pdb; pdb.set_trace()
#x,y,k=xyk
#return spl_int.spl_ev(x,y,k,np.size(x),xx,np.size(xx))
return spl_int.spl_ev(xyk[0],xyk[1],xyk[2],np.size(xyk[0]),xx,np.size(xx))
def spl_cf(x,y):
'''Fast creation of a cubic spline.
Endpoints: see the notes in spl_evf.
Returns
-------
xyk : tuple (x, a, b, k, d) with the knots and the coefficients related to y, y', y'' and y'''
'''
n = np.size(x)
if np.size(y)==n:
return spl_int.spl_intf(x, y, n)
def spl_evf(xx, xyk, der=0):
"""
der : derivative
Example
-------
>>> x = np.arange(9)
>>> y = x**2
>>> xyk = spl_cf(x, y)
>>> spl_evf(x, xyk)
Notes
-----
y(x) = a + bx + cx**2 + dx**3
y'(x) = b + 2cx + 3dx**2
y''(x) = 2c + 6dx
y'''(x) = 6d
Endpoint:
y_n(1) = a + b + c + d = y_n
b_n(1) = b + 2c + 3d
k_n(1) = 0
d(1) = 6d
"""
xx = np.array(xx)
x, a, b, k, d = xyk
b_n = b[-1] + k[-2] + 3*d[-1] # dummy endpoints
d_n = 0
if der==0:
# y(x) = a + bx + cx**2 + dx**3
pass
elif der==1:
# y'(x) = b + 2cx + 3dx**2
a, b, k, d = np.append(b, b_n), k[:-1], 3*np.append(d,d_n), 0*d
elif der==2:
# y''(x) = 2c + 6dx = k + 6dx
# c = k/2
a, b, k, d = k, 6*d, 0*k, 0*d
elif der==3:
a, b, k, d = 6*np.append(d, d_n), 0*b, 0*k, 0*d
else:
raise Exception('Derivative %s not implemented.'%der)
return spl_int.spl_evf(x, a, b, k, d, np.size(xyk[0]), xx, np.size(xx))
def spl_eq_c(x,y):
'''
spline curvature coefficients
input: x,y data knots
returns: tuple (datax, datay, curvature coefficients at knots)
'''
n = np.size(x)
if np.size(y)==n:
return spl_int.spl_eq_int(x,y,n)
def spl_eq_ev(xx,xyk):
'''
evaluates spline at xx
xyk tuple with data knots and curvature from spl_c
'''
return spl_int.spl_eq_ev(xyk[0],xyk[1],xyk[2],np.size(xyk[0]),xx,np.size(xx))
def curva(x,y):
# faster
# Compute the hi and bi
# h,b = ( x[i+1]-xi,y[i+1]-y[i] for i,xi in enumerate(x[:-1]))
x = x[1:] - x[:-1] # =h =dx
y = (y[1:] - y[:-1]) / x # =b =dy
# Gaussian Elimination
#u[1:],v[1:] = (u for a,b in zip(u,v) )
u = 2*(x[:-1] + x[1:]) # l #c
v = 6*(y[1:] - y[:-1]) # alpha #d
#for i,h in enumerate(x[1:-1]):
#u[i+1] -= h**2/u[i]
#v[i+1] -= h*v[i]/u[i]
u[1:] -= x[1:-1]**2/u[:-1]
v[1:] -= x[1:-1]*v[:-1]/u[:-1]
# Back-substitution
y[:-1] = v/u
y[-1] = 0
#for i in range(len(y[1:])):
#y[-i-2] -= (x[-i-1]*y[-i-1]/u[-i-1])
y[-2::-1] = y[-2::-1]-(x[:0:-1]*y[:0:-1]/u[::-1])
#print y[1:0]
return append(0,y)
def curva_slow(x,y):
# Compute the hi and bi
# h,b = ( x[i+1]-xi,y[i+1]-y[i] for i,xi in enumerate(x[:-1]))
x = x[1:] - x[:-1] # =h =dx
y = (y[1:] - y[:-1]) / x # =b =dy
# Gaussian Elimination
#u[1:],v[1:] = (u for a,b in zip(u,v) )
#x = 2*(x[:-1] + x[1:]) # u = 2*(h[:-1] + h[1:]) = 2* dh =2 ddx
#y = 6*(y[:-1] - y[1:]) # v = 6*(b[:-1] - b[1:]) ~ ddy
#u[1]=2*(x[0]+x[1])
#v[1]=2*(y[1]-y[0])
u = 2*(x[:-1] + x[1:]) # l #c
v = 6*(y[1:] - y[:-1]) # alpha #d
for i,h in enumerate(x[1:-1]):
# if i==0: print 'slo_here'
u[i+1] = u[i+1] - h**2/u[i]
v[i+1] -= h*v[i]/u[i]
# Back-substitution
y[:-1] = v/u
y[-1] = 0
for i in range(len(y[1:])):
y[-i-2] -= (x[-i-1]*y[-i-1]/u[-i-1])
return append(0,y)
def curvatures(xData,yData):
n = len(xData) - 1
c = np.zeros((n),dtype=float)
d = np.ones((n+1),dtype=float)
e = np.zeros((n),dtype=float)
k = np.zeros((n+1),dtype=float)
c[0:n-1] = xData[0:n-1] - xData[1:n]
d[1:n] = 2.0*(xData[0:n-1] - xData[2:n+1])
e[1:n] = xData[1:n] - xData[2:n+1]
k[1:n] = 6.0*(yData[0:n-1] - yData[1:n]) /c[0:n-1] \
-6.0*(yData[1:n] - yData[2:n+1]) /e[1:n]
LUdecomp3.LUdecomp3(c,d,e)
LUdecomp3.LUsolve3(c,d,e,k)
return k
def curvatures_org(xData,yData):
n = len(xData) - 1
c = np.zeros((n),dtype=float)
d = np.ones((n+1),dtype=float)
e = np.zeros((n),dtype=float)
k = np.zeros((n+1),dtype=float)
c[0:n-1] = xData[0:n-1] - xData[1:n]
d[1:n] = 2.0*(xData[0:n-1] - xData[2:n+1])
e[1:n] = xData[1:n] - xData[2:n+1]
k[1:n] =6.0*(yData[0:n-1] - yData[1:n]) \
/(xData[0:n-1] - xData[1:n]) \
-6.0*(yData[1:n] - yData[2:n+1]) \
/(xData[1:n] - xData[2:n+1])
LUdecomp3.LUdecomp3(c,d,e)
LUdecomp3.LUsolve3(c,d,e,k)
return k
#def evalSpline_new(xData,yData,k,xx):
#global m,iLeft,iRight
#iLeft = 0
#iRight = len(xData)- 1
#m=-1
#xn=xData[1:]
#d=xn-xData[:-1]
#def do(x):
#global m,iLeft,iRight
#m+=1
#h = d[i]
#A = (x - xn[i])/h
#B = (x - xData[i])/h
#return ((A**3 - A)*k[i] - (B**3 - B)*k[i+1])/6.0*h*h \
#+ (yData[i]*A - yData[i+1]*B)
#for x in xx;while x >= xData[iLeft] and iLeft<iRight: iLeft += 1
#i=iLeft-1]
#return map( lambda x: do(x), xx)
def evalSpline_for(xData,yData,k,xx):
# very slow
h = xData[1]-xData[0]
n=np.arange(len(xData)-1)
for i,x in enumerate(xx):
a = (x - (i+1))/h
b = a+1
#print i
xx[i]=((a-a**3)*k[i] - (b-b**3)*k[i+1])/6.0*h*h \
- (yData[i]*a - yData[i+1]*b)
return xx
def evalSpline_vec(xData,yData,k,xx):
h = xData[1]-xData[0]
n=np.arange(len(xx))
AA = (xx - (n+1))/h
BB = AA+1
return ((AA-AA**3)*k[:-1] - (BB-BB**3)*k[1:])/6.0*h*h \
- (yData[:-1]*AA - yData[1:]*BB)
def evalSpline_gen(xData,yData,k,xx):
# generator expression
h = xData[1]-xData[0]
n=np.arange(len(xx))
AA = (xx - (n+1))/h
BB = AA+1
return (((AA[i]-AA[i]**3)*k[i] - (BB[i]-BB[i]**3)*k[i+1])*h*h/6.0 \
- (yData[i]*AA[i] - yData[i+1]*BB[i]) for i in np.arange(len(xx)))
def evalSpline(xData,yData,k,xx):
# generator expression
# S(x)=a_i+b_i(x-x_i)+c_i(x-x_i)^2+d_i(x-x_i)^3
# a_i=y_i
# b_i=-h_i/6*z_(i+1)-h_i/3*z_i+ (y_(i+1)-y_i)/h_i
# c_i=z_i/2
# d_i= (z_(i+1)-z_i)/6/h_i
# x=x-x_i=x-i = > S=a+x*(b+x*(c+x*d))
h = xData[1]-xData[0]
n=np.arange(len(xData)-1)
AA = (xx - (n+1))/h
BB = AA+1
return ( yData[i]+x*( -k[i+1]/6. - k[i]/3 + (yData[i+1]-yData[i]) + x*(k[i]/2 +x *(k[i+1]-k[i])/6)) for i,x in enumerate(xx-np.arange(len(xx))) )
#def evalSpline(xData,yData,k,xx):
## generator expression
#h = xData[1]-xData[0]
#n=np.arange(len(xData)-1)
#AA = (xx - (n+1))/h
#BB = AA+1
#return (((a-a**3)*k0 - (b-b**3)*k1)/6.0*h*h \
#- (y0*a - y1*b) for a,b,k0,k1,y,y1 in zip(AA,BB,k[:-1],k[1:],yData[:.1],yData[1:]))
def evalSpline_old2(xData,yData,k,xx):
y = np.empty_like(xx)
iLeft = 0
iRight = len(xData)- 1
m=-1
for x in xx:
m+=1
while x >= xData[iLeft] and iLeft<iRight: iLeft += 1
i=iLeft-1
h = xData[i] - xData[i+1]
A = (x - xData[i+1])/h
B = (x - xData[i])/h
y[m]= ((A**3 - A)*k[i] - (B**3 - B)*k[i+1])/6.0*h*h \
+ (yData[i]*A - yData[i+1]*B)
return y
def evalSpline_old(xData,yData,k,xx):
def findSegment(xData,x,i):
iLeft = i
iRight = len(xData)- 1
while 1: #Bisection
if (iRight-iLeft) <= 1: return iLeft
i = (iLeft + iRight)//2
if x < xData[i]: iRight = i
else: iLeft = i
yy = []
i = 0
for x in xx:
i = findSegment(xData,x,i)
h = xData[i] - xData[i+1]
y = ((x - xData[i+1])**3/h - (x - xData[i+1])*h)*k[i]/6.0 \
- ((x - xData[i])**3/h - (x - xData[i])*h)*k[i+1]/6.0 \
+ (yData[i]*(x - xData[i+1]) \
- yData[i+1]*(x - xData[i]))/h
yy.append(y)
if i<10: print(i,y,x, k[i],x - xData[i])
return np.array(yy)
def cubic(x):
"""A cubic B-spline.
This is a special case of `bspline`, and equivalent to ``bspline(x, 3)``.
"""
ax = abs(asarray(x))
res = zeros_like(ax)
cond1 = less(ax, 1)
if cond1.any():
ax1 = ax[cond1]
res[cond1] = 2.0 / 3 - 1.0 / 2 * ax1 ** 2 * (2 - ax1)
cond2 = ~cond1 & less(ax, 2)
if cond2.any():
ax2 = ax[cond2]
res[cond2] = 1.0 / 6 * (2 - ax2) ** 3
return res
def csp_eval(cj, newx, dx=1.0, x0=0):
"""Evaluate a spline at the new set of points.
`dx` is the old sample-spacing while `x0` was the old origin. In
other-words the old-sample points (knot-points) for which the `cj`
represent spline coefficients were at equally-spaced points of:
oldx = x0 + j*dx j=0...N-1, with N=len(cj)
Edges are handled using mirror-symmetric boundary conditions.
"""
newx = (asarray(newx) - x0) / float(dx)
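# e.g. with dx=0.5 and x0=1.0, a query point at 2.0 maps to knot coordinate (2.0 - 1.0) / 0.5 = 2.0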
res = zeros_like(newx)
if res.size == 0:
return res
N = len(cj)
cond1 = newx < 0
cond2 = newx > (N - 1)
cond3 = ~(cond1 | cond2)
# handle general mirror-symmetry
res[cond1] = csp_eval(cj, -newx[cond1])
res[cond2] = csp_eval(cj, 2 * (N - 1) - newx[cond2])
newx = newx[cond3]
if newx.size == 0:
return res
result = zeros_like(newx)
jlower = floor(newx - 2).astype(int) + 1
for i in range(4):
thisj = jlower + i
indj = thisj.clip(0, N - 1) # handle edge cases
result += cj[indj] * cubic(newx - thisj)
res[cond3] = result
return res
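# Usage sketch for the pure-numpy helpers above (illustrative only, mirrors the
# example in the module docstring):
#   x = np.arange(10.); y = np.sin(x)
#   k = curva(x, y)                       # knot curvatures
#   yy = evalSpline_old2(x, y, k, np.arange(9) + 0.5)
# curva_slow(x, y) is the loop-based reference and should return the same curvatures.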
|
mzechmeister/serval
|
src/cubicSpline.py
|
Python
|
mit
| 10,659
|
[
"Gaussian"
] |
c72890e6cb0dd9a2f6944d61209e351ff2962350ea8acdd36e5d1ad2efc13c81
|
# -*- coding: utf-8 -*-
# ***********************************************************************
# Copyright (C) 2016 - 2017 Oscar Gerardo Lazo Arjona *
# <oscar.lazoarjona@physics.ox.ac.uk> *
# ***********************************************************************
r"""This is a simple example of usage of the ORCA memory with the
default settings.
"""
from __future__ import print_function
from time import time
from quantum_memories import hyperfine_orca
from quantum_memories.misc import set_parameters_ladder, efficiencies
import numpy as np
# [6.34, 8.77, 14.50, 19.37]
# [6.50, 9.85, 14.58, 19.92]
cus_params = {"Temperature": 273.15+90,
"alpha_rw": np.sqrt(20.0), "Nz": 50,
"element": "Rb", "isotope": 85}
params = set_parameters_ladder(cus_params)
print params["isotope"], params["element"]
print params["gamma21"]/2/3.141592*1e-6
print params["gamma32"]/2/3.141592*1e-6
if __name__ == '__main__':
name = "test_rb"
# Benchmark with plotting.
t0 = time()
# hyperfine_orca.solve(params, plots=True, name=name)
t, Z, vZ, rho, E01 = hyperfine_orca.solve(params, plots=True, name=name)
tsolve = time()-t0
t0 = time()
eff_in, eff_out, eff = efficiencies(t, E01, params,
plots=True, name=name, rabi=False)
teff = time()-t0
nfun = 0
print "Including plotting times:"
print "The solve function took", tsolve, "s."
print "The efficiencies function took", teff, "s."
print "The efficiencies were:", eff_in, eff_out, eff
# Benchmark without plotting.
t0 = time()
t, Z, vZ, rho, Om1 = hyperfine_orca.solve(params, plots=False, name=name)
tsolve = time()-t0
t0 = time()
eff_in, eff_out, eff = efficiencies(t, Om1, params,
plots=False, name=name, rabi=False)
teff = time()-t0
nfun = 0
print()
print("Excluding plotting times:")
print("The solve function took", tsolve, "s.")
print("The efficiencies function took", teff, "s.")
print("The efficiencies were:", eff_in, eff_out, eff)
|
oscarlazoarjona/quantum_memories
|
examples/hyperfine_orca/simple_example_rb.py
|
Python
|
gpl-3.0
| 2,109
|
[
"ORCA"
] |
4a18849a5097406e352cd63ef5da19ed7835855b1f0eb1b8ff3e51fbd458b1af
|
# -*- coding: utf-8 -*-
from abc import ABC, abstractmethod
import argparse
from collections import defaultdict
import inspect
import logging
import progressbar
from typing import Any, DefaultDict, Dict, List, Set, Type
from catmaid.apps import get_system_user
from catmaid.control.annotationadmin import copy_annotations
from catmaid.control.edge import rebuild_edge_tables, rebuild_edges_selectively
import catmaid.models
from catmaid.models import (Class, ClassClass, ClassInstance,
ClassInstanceClassInstance, Project, Relation, User, Treenode,
Connector, Concept, SkeletonSummary)
from catmaid.util import str2bool
from django.apps import apps
from django.core import serializers
from django.core.management.base import BaseCommand, CommandError
from django.db import connection, transaction
from .common import set_log_level
logger = logging.getLogger(__name__)
# Dependency based order of central models
ordered_save_tasks = [Project, User, Class, Relation, ClassClass,
ClassInstance, ClassInstanceClassInstance, Treenode, Connector]
def ask_a_b(a, b, title):
"""Return true if a, False if b.
"""
def ask():
selection = input(title + " ").strip()
if selection == a:
return True
if selection == b:
return False
return None
while True:
d = ask()
if d is not None:
return d
print(f"Please answer only '{a}' or '{b}'")
def ask_yes_no(title):
"""Return true if yes, False if no.
"""
return ask_a_b('y', 'n', title)
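# e.g. ask_yes_no("Skip these users? [y/n]") keeps prompting until exactly 'y' or 'n' is entered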
def ask_for_user(title):
""" Return a valid user object.
"""
def ask():
print("User selection:")
users = User.objects.all()
for n,u in enumerate(users):
print("%s: %s (ID %s)" % (n, u, u.id))
print(title)
selection = input("Please enter the number of the user wanted: ")
try:
return users[int(selection)]
except (ValueError, IndexError):
return None
while True:
u = ask()
if u:
return u
class AbstractImporter(ABC):
def __init__(self, source, target, user, options):
self.source = source
self.target = target
self.options = options
self.user = user
super().__init__()
@abstractmethod
def import_data(self):
pass
class FileImporter(AbstractImporter):
def __init__(self, source, target, user, options):
super().__init__(source, target, user, options)
self.create_unknown_users = options['create_unknown_users']
self.auto_name_unknown_users = options['auto_name_unknown_users']
self.next_auto_name_id = 1
self.user_map = dict(User.objects.all().values_list('username', 'id'))
self.user_id_map = dict((v,k) for k,v in self.user_map.items())
self.preserve_ids = options['preserve_ids']
self.format = 'json'
# Map user IDs to newly created users
self.created_unknown_users = dict()
def map_or_create_users(self, obj, import_users, replacement_users,
mapped_user_ids, mapped_user_target_ids, created_users):
"""Update user information of a CATMAID model object. The parameters
<mapped_user_ids>, <mapped_user_target_ids> and <created_users> are output
parameters and are expected to have the types set, set and dict.
"""
map_users = self.options['map_users']
map_user_ids = self.options['map_user_ids']
# Try to look at every user reference field in CATMAID.
for ref in ('user', 'reviewer', 'editor'):
id_ref = ref + "_id"
obj_username = None
if hasattr(obj, id_ref):
obj_user_ref_id = getattr(obj, id_ref)
import_user = import_users.get(obj_user_ref_id)
existing_user_id = None
existing_user_same_id = self.user_id_map.get(obj_user_ref_id)
# If user data is imported, <imported_user> will be available
# and matching with existing users is done by user name. If
# there is no user data for this user in the imported data,
# mapping can optionally be done by ID or new users are
# created. If explicit mappings are asked for, they will
# override all other switches.
if import_user:
import_user = import_user.object
replacement_name = replacement_users.get(import_user.username)
obj_username = replacement_name or import_user.username
existing_user_id = self.user_map.get(obj_username)
# Map users if usernames match
if existing_user_id is not None:
# If a user with this username exists already, update
# the user reference the existing user if --map-users is
# set or a replacement is explicitly asked for. If no
# existing user is available, use imported user, if
# available. Otherwise complain.
if map_users or replacement_name:
setattr(obj, id_ref, existing_user_id)
mapped_user_ids.add(obj_user_ref_id)
mapped_user_target_ids.add(existing_user_id)
elif import_user:
raise CommandError(f"Referenced user \"{obj_username}\" " +
"exists both in database and in import data. If the " +
"existing user should be used, please use the " +
"--map-users option to map all users or "
"--username-mapping=\"import-user=target-user\" " +
"for individual users")
else:
raise CommandError(f"Referenced user \"{obj_username}\"" +
"exists in database, but not in import data. If the " +
" existing user should be used, please use the " +
"--map-users option")
elif import_user:
if import_user.id in self.user_id_map:
import_user.id = None
import_user.save()
else:
import_user.is_active = False
created_users[obj_username] = import_user
setattr(obj, ref, import_user)
elif self.create_unknown_users:
user = created_users.get(obj_username)
if not user:
logger.info("Created new inactive user: " + obj_username)
user = User.objects.create(username=obj_username)
user.is_active = False
user.save()
created_users[obj_username] = user
setattr(obj, ref, user)
else:
raise CommandError(f"User \"{obj_username}\" is not " +
"found in existing data or import data. Please use " +
"--user or --create-unknown-users")
elif map_user_ids and existing_user_same_id is not None:
mapped_user_ids.add(obj_user_ref_id)
mapped_user_target_ids.add(obj_user_ref_id)
elif self.create_unknown_users:
user = self.created_unknown_users.get(obj_user_ref_id)
if not user:
if self.auto_name_unknown_users:
logger.info("Creating new inactive user for imported " +
f"user ID {obj_user_ref_id}. No name information was " +
"available and CATMAID will generate a name.")
else:
logger.info("Creating new inactive user for imported " +
f"user ID {obj_user_ref_id}. No name information was " +
"available, please enter a new username.")
while True:
if self.auto_name_unknown_users:
new_username = f"User {self.next_auto_name_id}"
self.next_auto_name_id += 1
if not self.user_map.get(new_username):
break
else:
new_username = input("New username: ").strip()
if not new_username:
logger.info("Please enter a valid username")
elif self.user_map.get(new_username):
logger.info(f"The username '{new_username}' " +
"exists already, choose a different one")
else:
break
user = User.objects.create(username=new_username)
user.is_active = False
user.save()
created_users[new_username] = user
self.created_unknown_users[obj_user_ref_id] = user
setattr(obj, ref, user)
else:
raise CommandError(f'Could not find referenced user "{obj_user_ref_id}" ' +
'in imported data. Try using the --map-users option to map all users ' +
'or --username-mapping="import-user=target-user" for individual ' +
'users. Additionally --create-unknown-users can be used to create ' +
'missing users, optionally naming them automatically using ' +
'--auto-name-unknown-users.')
def reset_ids(self, target_classes, import_objects,
import_objects_by_type_and_id, existing_classes,
map_treenodes=True, save=True):
"""Reset the ID of each import object to None so that a new object will
be created when the object is saved. At the same time an index is
created that allows per-type lookups of foreign key fields
"""
logger.info("Building foreign key update index")
# Build index for foreign key fields in models. For each type, map
# each foreign key name to a model class.
fk_index:DefaultDict[Any, Dict] = defaultdict(dict)
for c in target_classes:
class_index = fk_index[c]
foreign_key_fields = [
f for f in c._meta.get_fields()
if f.is_relation
and f.many_to_one # ForeignKey instances
# if field.get_internal_type() == 'ForeignKey':
and f.related_model in target_classes
]
for field in foreign_key_fields:
# Get the database column name for this field
class_index[field.attname] = field.related_model
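# e.g. fk_index[Treenode] ends up mapping column names such as "parent_id"
# to the model class the foreign key points to (here Treenode itself).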
logger.info("Updating foreign keys to imported objects with new IDs")
all_classes:Dict = dict()
all_classes.update(existing_classes)
updated_fk_ids = 0
unchanged_fk_ids = 0
explicitly_created_summaries = 0
other_tasks = set(import_objects.keys()) - set(ordered_save_tasks)
# Iterate objects to import and respect dependency order
for object_type in ordered_save_tasks + list(other_tasks):
objects = import_objects.get(object_type)
if not objects:
# No objects of this object type are imported
continue
fk_fields = fk_index[object_type]
# No need to do the rest if there are no foreign keys to change to begin
# with.
if len(fk_fields) == 0:
continue
imported_parent_nodes = []
bar_prefix = f"- {object_type.__name__}: "
for deserialized_object in progressbar.progressbar(objects,
max_value=len(objects), redirect_stdout=True,
prefix=bar_prefix):
obj = deserialized_object.object
obj_type = type(obj)
for fk_field, fk_type in fk_fields.items():
# Get import object with the former ID referenced in
# this field.
current_ref = getattr(obj, fk_field)
# Only attempt a mapping if the foreign key isn't NULL
if current_ref:
# Get updated model objects of the referenced type
imported_objects_by_id = import_objects_by_type_and_id[fk_type]
ref_obj = imported_objects_by_id.get(current_ref)
if ref_obj:
# Update foreign key reference to ID of newly saved
# object. Only for treenodes this is expected to result
# in not yet available data
if object_type == Treenode and fk_type == Treenode:
imported_parent_nodes.append((obj, current_ref))
elif ref_obj.id is None:
raise ValueError(f"The referenced {fk_type} object '{ref_obj}' with import ID {current_ref} wasn't stored yet")
setattr(obj, fk_field, ref_obj.id)
updated_fk_ids += 1
else:
unchanged_fk_ids += 1
# Save objects if they should either be imported or have changed
# foreign key fields
if save and (updated_fk_ids or obj.id is None):
obj.save()
# Treenodes are special, because they can reference themselves. They
# need therefore a second iteration of reference updates after all
# treenodes have been saved and new IDs are available.
if map_treenodes and object_type == Treenode:
logger.info('Mapping parent IDs of treenodes to imported data')
imported_objects_by_id = import_objects_by_type_and_id[Treenode]
for obj, parent_id in progressbar.progressbar(imported_parent_nodes,
max_value=len(imported_parent_nodes),
redirect_stdout=True, prefix="- Mapping parent treenodes: "):
new_parent = imported_objects_by_id.get(parent_id)
if not new_parent:
raise ValueError(f"Could not find imported treenode {parent_id}")
obj.parent_id = new_parent.id
if save:
obj.save()
# Update list of known classes after new classes have been saved
if object_type == Class:
for deserialized_object in objects:
obj = deserialized_object.object
all_classes[obj.class_name] = obj.id
# If skeleton class instances are created, make sure the skeleton
# summary table entries for the respective skeletons are there.
# Otherwise the ON CONFLICT clauses of the summary updates can
# be called multiple times. The alternative is to disable the
# trigger during import.
pre_create_summaries = False
if object_type == ClassInstance and pre_create_summaries:
last_editor = get_system_user()
skeleton_class_id = all_classes.get('skeleton')
for deserialized_object in objects:
obj = deserialized_object.object
if obj.class_column_id == skeleton_class_id:
r = SkeletonSummary.objects.get_or_create(project=self.target,
skeleton_id=obj.id, defaults={'last_editor': last_editor})
explicitly_created_summaries += 1
logger.info("".join([f"{updated_fk_ids} foreign key references updated, {unchanged_fk_ids} did not ",
f"require change, {explicitly_created_summaries} skeleton summaries were created"]))
def override_fields(self, obj):
# Override project to match target project
if hasattr(obj, 'project_id'):
obj.project = self.target
# Override all user references with pre-defined user
if self.user:
if hasattr(obj, 'user_id'):
obj.user = self.user
if hasattr(obj, 'reviewer_id'):
obj.reviewer = self.user
if hasattr(obj, 'editor_id'):
obj.editor = self.user
@transaction.atomic
def import_data(self):
""" Imports data from a file and overrides its properties, if wanted.
This method also deactivates auto commit (if it is activated)
temporarily.
"""
cursor = connection.cursor()
# Defer all constraint checks
cursor.execute('SET CONSTRAINTS ALL DEFERRED')
# Drop summary table trigger to make insertion faster
cursor.execute("""
DROP TRIGGER on_edit_treenode_update_summary_and_edges ON treenode;
DROP TRIGGER on_insert_treenode_update_summary_and_edges ON treenode;
DROP TRIGGER on_delete_treenode_update_summary_and_edges ON treenode;
""")
# Get all existing users so that we can map them based on their username.
mapped_user_ids:Set = set()
mapped_user_target_ids:Set = set()
# Map data types to lists of object of the respective type
import_data:DefaultDict[Any, List] = defaultdict(list)
n_objects = 0
# Read the file and sort by type
logger.info(f"Loading data from {self.source}")
with open(self.source, "r") as data:
loaded_data = serializers.deserialize(self.format, data)
for deserialized_object in progressbar.progressbar(loaded_data,
max_value=progressbar.UnknownLength, redirect_stdout=True):
obj = deserialized_object.object
import_data[type(obj)].append(deserialized_object)
n_objects += 1
if n_objects == 0:
raise CommandError("Nothing to import, no importable data found")
created_users:Dict = dict()
if User in import_data:
import_users = dict((u.object.id, u) for u in import_data[User])
logger.info(f"Found {len(import_users)} referenceable users in import data")
else:
import_users = dict()
logger.info("Found no referenceable users in import data")
username_mapping = {}
for m in self.options['username_mapping'] or []:
username_mapping[m[0]] = m[1]
logger.info(f'Mapping import user "{m[0]}" to target user "{m[1]}"')
# Get CATMAID model classes, which are the ones we want to allow
# optional modification of user, project and ID fields.
app = apps.get_app_config('catmaid')
user_updatable_classes = set(app.get_models())
logger.info(f"Adjusting {n_objects} import objects to target database")
# Needed for name uniqueness of classes, class_instances and relations
existing_classes = dict(Class.objects.filter(project_id=self.target.id) \
.values_list('class_name', 'id'))
existing_relations = dict(Relation.objects.filter(project_id=self.target.id) \
.values_list('relation_name', 'id'))
existing_class_instances = dict(ClassInstance.objects.filter(project_id=self.target.id) \
.values_list('name', 'id'))
existing_concept_ids = set(Concept.objects.all().values_list('id', flat=True))
# Find classes for neurons and skeletons in import data
if Class in import_data:
allowed_duplicate_classes = tuple(c.object.id
for c in import_data[Class]
if c.object.class_name in ('neuron', 'skeleton'))
else:
allowed_duplicate_classes = tuple()
n_reused = 0
n_moved = 0
append_only = not self.preserve_ids
need_separate_import = []
objects_to_save:DefaultDict[Any, List] = defaultdict(list)
import_objects_by_type_and_id:DefaultDict[Any, Dict] = defaultdict(dict)
for object_type, import_objects in import_data.items():
# Allow user reference updates in CATMAID objects
if object_type not in user_updatable_classes:
need_separate_import.append(object_type)
continue
# In append-only mode, store import IDs and link them to the
# respective objects. This is needed to update foreign keys to this
# ID when it is replaced with a new ID.
objects_by_id = import_objects_by_type_and_id[object_type]
is_class = object_type == Class
is_relation = object_type == Relation
is_class_instance = object_type == ClassInstance
# CATMAID model objects are inspected for user fields
for deserialized_object in import_objects:
obj = deserialized_object.object
# Semantic data like classes and class instances are expected to
# be unique with respect to their names. Existing objects with
# the same ID will get a new ID even if --preserve-ids is set.
existing_obj_id = None
concept_id_exists = obj.id in existing_concept_ids
if is_class:
existing_obj_id = existing_classes.get(obj.class_name)
if is_relation:
existing_obj_id = existing_relations.get(obj.relation_name)
if is_class_instance:
existing_obj_id = existing_class_instances.get(obj.name)
# Neurons (class instances of class "neuron" and "skeleton")
# are a special case. There can be multiple neurons with
# the same name, something that is not allowed in other
# cases. In this particular case, however, class instance
# reuse is not wanted.
if existing_obj_id and obj.class_column_id in allowed_duplicate_classes:
existing_obj_id = None
concept_id_exists = False
if existing_obj_id is not None:
# Add mapping so that existing references to it can be
# updated. The object itself is not marked for saving,
# because it exists already.
current_id = obj.id
objects_by_id[current_id] = obj
obj.id = existing_obj_id
n_reused += 1
continue
# If there is already a known object with the ID of the object
# we are importing at the moment and the current model is a
# class, relation or class_instance, then the imported object
# will get a new ID, even with --preserve-ids set. We reuse
# these types.
if concept_id_exists:
current_id = obj.id
objects_by_id[current_id] = obj
obj.id = None
n_moved += 1
# Replace existing data if requested
self.override_fields(obj)
# Map users based on username, optionally create unmapped users.
self.map_or_create_users(obj, import_users, username_mapping,
mapped_user_ids, mapped_user_target_ids, created_users)
# Remove pre-defined ID and keep track of updated IDs in
# append-only mode (default).
if append_only:
current_id = obj.id
objects_by_id[current_id] = obj
# By setting id to None, Django will create a new object and
# set the new ID.
obj.id = None
# Remember for saving
objects_to_save[object_type].append(deserialized_object)
if len(created_users) > 0:
logger.info("Created {} new users: {}".format(len(created_users),
", ".join(sorted([u.username for u in created_users.values()]))))
else:
logger.info("No unmapped users imported")
# Finally save all objects. Make sure they are saved in order:
logger.info(f"Storing {n_objects - n_reused} database objects including {n_moved} moved objects, reusing additional {n_reused} existing objects")
# In append-only mode, the foreign keys to objects with changed IDs have
# to be updated. In preserve-ids mode only IDs to classes and relations
# will be updated. Saving model objects after an update of referenced
# keys is only needed in append-only mode.
self.reset_ids(user_updatable_classes, objects_to_save,
import_objects_by_type_and_id, existing_classes)
other_tasks = set(objects_to_save.keys()) - set(ordered_save_tasks)
for object_type in ordered_save_tasks + list(other_tasks):
objects = objects_to_save.get(object_type)
if objects:
logger.info("- Importing objects of type " + object_type.__name__)
for deserialized_object in progressbar.progressbar(objects,
max_value=len(objects), redirect_stdout=True):
deserialized_object.save()
logger.info("- Importing all other objects")
for other_model in progressbar.progressbar(need_separate_import,
max_value=len(need_separate_import), redirect_stdout=True):
other_objects = import_data[other_model]
if other_model == User:
# If user model objects are imported and users were mapped, ask
# user if already mapped users should be skipped during import.
# We don't need to take care of newly created users, because they are
# only created if no model is found. Therefore all other model
# objects can be imported.
if mapped_user_target_ids:
mapped_usernames = set(self.user_id_map.get(u) for u in mapped_user_target_ids)
import_usernames = set(import_users.keys())
not_imported_usernames = import_usernames - mapped_usernames
already_imported_usernames = import_usernames - not_imported_usernames
if already_imported_usernames:
print("The following usernames are mapped to " +
"existing users, but the import data " +
"also contains objects for these users: " +
", ".join(already_imported_usernames))
ignore_users = ask_yes_no("Skip those users in input "
"data and don't import them? [y/n]")
if ignore_users:
logger.info("Won't import mapped users: " +
", ".join(already_imported_usernames))
other_objects = [u for u in other_objects \
if u.object.username not in already_imported_usernames]
else:
logger.info("Will import all listed users in import data")
for deserialized_object in other_objects:
if deserialized_object.object.username in created_users.keys():
deserialized_object.save()
# Reset counters to current maximum IDs
cursor.execute('''
SELECT setval('concept_id_seq', coalesce(max("id"), 1), max("id") IS NOT null)
FROM concept;
SELECT setval('location_id_seq', coalesce(max("id"), 1), max("id") IS NOT null)
FROM location;
SELECT setval('auth_user_id_seq', coalesce(max("id"), 1), max("id") IS NOT null)
FROM auth_user;
''')
cursor.execute("""
CREATE TRIGGER on_insert_treenode_update_summary_and_edges
AFTER INSERT ON treenode
REFERENCING NEW TABLE as inserted_treenode
FOR EACH STATEMENT EXECUTE PROCEDURE on_insert_treenode_update_summary_and_edges();
CREATE TRIGGER on_edit_treenode_update_summary_and_edges
AFTER UPDATE ON treenode
REFERENCING NEW TABLE as new_treenode OLD TABLE as old_treenode
FOR EACH STATEMENT EXECUTE PROCEDURE on_edit_treenode_update_summary_and_edges();
CREATE TRIGGER on_delete_treenode_update_summary_and_edges
AFTER DELETE ON treenode
REFERENCING OLD TABLE as deleted_treenode
FOR EACH STATEMENT EXECUTE PROCEDURE on_delete_treenode_update_summary_and_edges();
""")
n_imported_treenodes = len(import_objects_by_type_and_id.get(Treenode, []))
n_imported_connectors = len(import_objects_by_type_and_id.get(Connector, []))
if self.options.get('update_project_materializations'):
if n_imported_treenodes or n_imported_connectors:
logger.info(f"Updating edge tables for project {self.target.id}")
rebuild_edge_tables(project_ids=[self.target.id], log=lambda msg: logger.info(msg))
else:
logger.info("No edge table update needed")
if n_imported_treenodes:
logger.info("Updated skeleton summary tables")
cursor.execute("""
DELETE FROM catmaid_skeleton_summary;
SELECT refresh_skeleton_summary_table();
""")
logger.info('Recreating skeleton summary table')
cursor.execute("""
TRUNCATE catmaid_skeleton_summary;
SELECT refresh_skeleton_summary_table();
""")
else:
logger.info("No skeleton summary update needed")
else:
logger.info("Finding imported skeleton IDs and connector IDs")
connector_ids:List = []
connectors = objects_to_save.get(Connector)
if connectors:
connector_ids.extend(i.object.id for i in connectors)
# Find all skeleton classes both in imported data and existing data.
skeleton_classes = set()
classes = objects_to_save.get(Class)
if classes:
for deserialized_object in classes:
c = deserialized_object.object
if c.class_name == 'skeleton':
skeleton_classes.add(c.id)
cursor.execute("""
SELECT id FROM class WHERE class_name = 'skeleton'
""")
for row in cursor.fetchall():
skeleton_classes.add(row[0])
skeleton_ids = []
class_instances = objects_to_save.get(ClassInstance)
if class_instances:
for deserialized_object in class_instances:
ci = deserialized_object.object
# Check if the class reference is a "skeleton" class
if ci.class_column_id in skeleton_classes:
skeleton_ids.append(ci.id)
if skeleton_ids or connector_ids:
logger.info(f"Updating edge tables for {len(skeleton_ids)} skeleton(s) " + \
f"and {len(connector_ids)} connector(s)")
rebuild_edges_selectively(skeleton_ids, connector_ids, log=lambda msg: logger.info(msg))
else:
logger.info("No materialization to update: no skeleton IDs or " \
"connector IDs found in imported data")
# Skeleton summary
if skeleton_ids:
logger.info('Recreating skeleton summary table entries for imported skeletons')
cursor.execute("""
SELECT refresh_skeleton_summary_table_selectively(%(skeleton_ids)s);
""", {
'skeleton_ids': skeleton_ids,
})
else:
logger.info('No skeleton summary table update needed')
class InternalImporter(AbstractImporter):
def import_data(self):
# Process with import
o = self.options
copy_annotations(self.source.id, self.target.id,
o['import_treenodes'], o['import_connectors'],
o['import_annotations'], o['import_tags'])
def str2tuple(s):
"""Convert a string of the form a=b into a tuple (a,b).
"""
parts = s.split('=')
if len(parts) != 2:
raise argparse.ArgumentTypeError("Argument \"%s\" is not of form import-username=target-username" % (s))
return (parts[0].strip(), parts[1].strip())
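# e.g. str2tuple("importuser=targetuser") returns ("importuser", "targetuser")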
class Command(BaseCommand):
help = "Import new or existing data into an existing CATMAID project"
def add_arguments(self, parser):
parser.add_argument('--source', dest='source', default=None,
help='The ID of the source project or the path to a file to import')
parser.add_argument('--target', dest='target', default=None,
help='The ID of the target project')
parser.add_argument('--user', dest='user', default=None,
help='The ID of the owner of all created objects')
parser.add_argument('--treenodes', dest='import_treenodes',
type=str2bool, nargs='?', const=True, default=True,
help='Import treenodes from source')
parser.add_argument('--connectors', dest='import_connectors',
type=str2bool, nargs='?', const=True, default=True,
help='Import connectors from source')
parser.add_argument('--annotations', dest='import_annotations',
type=str2bool, nargs='?', const=True, default=True,
help='Import annotations from source')
parser.add_argument('--tags', dest='import_tags',
type=str2bool, nargs='?', const=True, default=True,
help='Import tags from source')
parser.add_argument('--volumes', dest='import_volumes',
type=str2bool, nargs='?', const=True, default=True,
help='Import volumes from source')
parser.add_argument('--map-users', dest='map_users', default=True,
const=True, type=lambda x: (str(x).lower() == 'true'), nargs='?',
help='Use existing user if username matches')
parser.add_argument('--map-user-ids', dest='map_user_ids', default=False,
const=True, type=lambda x: (str(x).lower() == 'true'), nargs='?',
help='Use existing user if user ID matches as a last option before new users would be created')
parser.add_argument('--username-mapping', dest='username_mapping', default=[],
type=str2tuple, action='append',
help='Map an import username to a target instance username. Maps referenced users regardless of --map-users. The expected format is "import-user=existing-user", e.g. --username-mapping="AnonymousUser=AnonymousUser".')
parser.add_argument('--create-unknown-users', dest='create_unknown_users', default=True,
action='store_true', help='Create new inactive users for unmapped or unknown users referenced in import data.')
parser.add_argument('--auto-name-unknown-users', dest='auto_name_unknown_users', default=False,
action='store_true', help='If enabled, newly created unknown users will be named "User <n>" where <n> is an increasing number. Requires --create-unknown-users')
parser.add_argument('--preserve-ids', dest='preserve_ids', default=False,
action='store_true', help='Use IDs provided in import data. Warning: this can cause changes in existing data.')
parser.add_argument('--no-analyze', dest='analyze_db', default=True,
action='store_false', help="Don't run ANALYZE to update database statistics after the import.")
parser.add_argument('--update-project-materializations', dest='update_project_materializations', default=False,
action='store_true', help='Whether all materializations (edges, summary) of the current project should be updated or only the ones of imported skeletons.')
def ask_for_project(self, title):
""" Return a valid project object.
"""
def ask():
print("Please enter 'n' or the number of the desired %s project:" % title)
print("n: Create new project")
projects = Project.objects.all()
for n,p in enumerate(projects):
print("%s: %s (ID %s)" % (n, p, p.id))
selection = input("Selection: ").strip()
try:
if selection == 'n':
new_project_name = input("Name of new project: ").strip()
return Project.objects.create(title=new_project_name)
return projects[int(selection)]
except (ValueError, IndexError):
return None
while True:
p = ask()
if p:
return p
def handle(self, *args, **options):
set_log_level(logger, options.get('verbosity', 1))
if options['map_users'] and options['user']:
raise CommandError("Can't override users and map users at the " +
"same time, use --user or --map-users.")
# Give some information about the import
will_import = []
wont_import = []
for t in ('treenodes', 'connectors', 'annotations', 'tags'):
if options.get('import_' + t):
will_import.append(t)
else:
wont_import.append(t)
if will_import:
logger.info("Will import: " + ", ".join(will_import))
else:
logger.info("Nothing selected for import")
return
if wont_import:
logger.info("Won't import: " + ", ".join(wont_import))
# Read source and target
if options['source']:
try:
source = Project.objects.get(pk=int(options['source']))
logger.info("Using internal importer")
Importer: Type[AbstractImporter] = InternalImporter
except ValueError:
source = options['source']
logger.info("Using file importer")
Importer = FileImporter
else:
source = self.ask_for_project('source')
if not options['target']:
target = self.ask_for_project('target')
else:
target = Project.objects.get(pk=options['target'])
override_user = None
if options['user']:
override_user = User.objects.get(pk=options['user'])
logger.info(f'All imported objects will be owned by user "{override_user.username}"')
else:
if options['map_users']:
logger.info("Users referenced in import will be mapped to "
"existing users if the username matches")
if options['map_user_ids']:
logger.info("Users referenced only as ID in import will be "
"mapped to existing users with matching IDs.")
if options['create_unknown_users']:
logger.info("Unknown users will be created")
if not options['map_users'] and not options['create_unknown_users'] \
and not options['map_user_ids']:
override_user = ask_for_user("All imported objects need a user "
"and no mapping or creation option was provided. Please "
"select a user that should take ownership of all "
"imported objects. Alternatively, use the --map-users "
"option to map imported users to existing users based "
"on their username.")
importer = Importer(source, target, override_user, options)
importer.import_data()
if options['analyze_db']:
cursor = connection.cursor()
logger.info("Updating database statistics")
cursor.execute("ANALYZE")
logger.info("Finished import into project with ID %s" % importer.target.id)
|
tomka/CATMAID
|
django/applications/catmaid/management/commands/catmaid_import_data.py
|
Python
|
gpl-3.0
| 41,280
|
[
"NEURON"
] |
ab02b49861ad86337a80358d9fbea19c24e687e687edfa26a9b59360a0751281
|
# This is to be used in the CloudServer deployment,
# it currently has all the urls for lems_ui, cenet, results_viewer, rtw_ui
from django.conf.urls import patterns, include, url
import lems_ui.views
import cenet.views
import results_viewer.views
import rtw_ui.views
import worm_conf.views
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'djlems.views.home', name='home'),
# url(r'^djlems/', include('djlems.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^$', lems_ui.views.home),
url(r'^home/', lems_ui.views.home),
url(r'^load_file/', lems_ui.views.load_file),
url(r'^neuron_model/$', lems_ui.views.comp_type_view),
url(r'^neuron_model/(?P<model_name>[\w|\W]+)/$', lems_ui.views.comp_type_view, name='neuron_model_by_name'),
url(r'^neuron_model_delete/(?P<model_name>[\w|\W]+)/$', lems_ui.views.delete_neuron_model),
url(r'^syn_model/$', lems_ui.views.synapse_model),
url(r'^syn_model/(?P<model_name>[\w|\W]+)/$', lems_ui.views.synapse_model),
url(r'^hier_details/$', lems_ui.views.hier_details),
url(r'^type_hierarchy/', lems_ui.views.type_hierarchy),
url(r'^get_descendants/', lems_ui.views.get_descendants),
url(r'^get_descendantscomponents/', lems_ui.views.get_descendantscomponents),
url(r'^component_hierarchy/', lems_ui.views.component_hierarchy),
url(r'^element_list/', lems_ui.views.element_list),
url(r'^element_list_custom/', lems_ui.views.element_list_custom),
url(r'^comp_details/', lems_ui.views.comp_details),
url(r'^save_model/', lems_ui.views.save_model),
url(r'^get_model/', lems_ui.views.get_model),
url(r'^model_to_lems_xml/', lems_ui.views.model_to_lems_xml),
url(r'^save_component_to_lems_xml/', lems_ui.views.save_component_to_lems_xml),
url(r'^model_list/', lems_ui.views.model_list),
url(r'^sched_job/', lems_ui.views.sched_job),
url(r'^save_comp/$', lems_ui.views.save_comp),
url(r'^new_comp/$', lems_ui.views.new_comp),
url(r'^new_comp/(?P<parent_name>[\w|\W]+)/$', lems_ui.views.new_comp),
## cenet urls
## TODO: these should be moved to a separate urls file
url(r'^cenet/net_to_lems_xml/', cenet.views.net_to_lems_xml),
url(r'^network_model_delete/(?P<net_name>[\w|\W]+)/$', cenet.views.delete_network_model),
url(r'^cenet/$', cenet.views.config_net),
url(r'^cenet/(?P<net_name>[\w|\W]+)/$', cenet.views.config_net , name = 'worm_conf_details'),
url(r'^save_net/', cenet.views.save_net),
url(r'^syn_conns/$', cenet.views.syn_conns),
url(r'^syn_conns_all/$', cenet.views.syn_conns_all),
url(r'^syn_conns/(?P<neuron>\w+)/', cenet.views.syn_conns),
url(r'^getConnectionMap/(?P<neuron>\w+)/', cenet.views.getConnectionMap),
url(r'^dashboard/', cenet.views.dashboard),
#results viewer urls
## TODO: these should be moved to a separate urls file
url(r'^results_viewer/', results_viewer.views.results_viewer, name='result_viewer'),
url(r'^results_viewer_ind/', results_viewer.views.results_viewer_ind),
#rtw viewer
url(r'^rtw_ui/dashboard/', rtw_ui.views.view_RTW_dashboard),
url(r'^rtw_ui/(?P<rtw_id>[\w|\W ]+)/', rtw_ui.views.rtw_ui, name='rtw_by_name'),
url(r'^rtw_ui/', rtw_ui.views.rtw_ui),
url(r'^rtw_save_conf/', rtw_ui.views.save_conf),
url(r'^rtw_get_net/(?P<net_id>\w+)/', rtw_ui.views.get_net),
url(r'^rtw_get_net/', rtw_ui.views.get_net),
url(r'^delete_rtw_model/(?P<model_name>[\w|\W]+)/$', rtw_ui.views.delete_rtw_model),
url(r'^worm_conf_per_user/', worm_conf.views.worm_conf_per_user, name='worm_conf_per_user'),
url(r'^worm_conf_get_details/(?P<worm_conf>\w+)/', worm_conf.views.worm_conf_get_details),
url(r'^worm_conf_get_intialisation/(?P<worm_conf>\w+)/', worm_conf.views.worm_conf_get_intialisation),
url(r'^worm_conf_get_bitfile/(?P<worm_conf>\w+)/(?P<neuron_id>\w+)/', worm_conf.views.worm_conf_get_bitfile),
url(r'^worm_conf_get_muscle_bitfile/(?P<worm_conf>\w+)/(?P<neuron_id>\w+)/', worm_conf.views.worm_conf_get_muscle_bitfile),
url(r'^worm_conf_get_metadatafile/(?P<worm_conf>\w+)/(?P<neuron_id>\w+)/', worm_conf.views.worm_conf_get_metadatafile),
url(r'^worm_conf_get_dmdfile/(?P<worm_conf>\w+)/(?P<dmd_id>\w+)/', worm_conf.views.worm_conf_get_dmdfile),
# Used to initialise neuron list and connectome in db
#url(r'^djlems/load_neurons/', cenet.views.load_neurons),
#url(r'^djlems/load_syns/', cenet.views.load_syns),
)
|
Si-elegans/Web-based_GUI_Tools
|
lems_ui/urls.py
|
Python
|
apache-2.0
| 4,731
|
[
"NEURON"
] |
0c577d7fce145fe76018bdc82965f3301ec0275cac70346c9fc634d3b05a5289
|
'''
Brian-specific extension to the Sphinx documentation generation system.
'''
|
brian-team/brian2genn
|
brian2genn/sphinxext/__init__.py
|
Python
|
gpl-2.0
| 80
|
[
"Brian"
] |
821822cc97555e34c39e32fd42e38a1715182e28c1b09f1fc71d5f46969adef8
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""DirectRunner, executing on the local machine.
The DirectRunner is a runner implementation that executes the entire
graph of transformations belonging to a pipeline on the local machine.
"""
from __future__ import absolute_import
import collections
import logging
from apache_beam import typehints
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.runners.direct.bundle_factory import BundleFactory
from apache_beam.runners.runner import PipelineResult
from apache_beam.runners.runner import PipelineRunner
from apache_beam.runners.runner import PipelineState
from apache_beam.runners.runner import PValueCache
from apache_beam.transforms.core import _GroupAlsoByWindow
from apache_beam.options.pipeline_options import DirectOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.value_provider import RuntimeValueProvider
__all__ = ['DirectRunner']
# Type variables.
K = typehints.TypeVariable('K')
V = typehints.TypeVariable('V')
@typehints.with_input_types(typehints.KV[K, typehints.Iterable[V]])
@typehints.with_output_types(typehints.KV[K, typehints.Iterable[V]])
class _StreamingGroupAlsoByWindow(_GroupAlsoByWindow):
"""Streaming GroupAlsoByWindow placeholder for overriding in DirectRunner."""
pass
class DirectRunner(PipelineRunner):
"""Executes a single pipeline on the local machine."""
# A list of PTransformOverride objects to be applied before running a pipeline
# using DirectRunner.
# Currently this only works for overrides where the input and output types do
# not change.
# For internal SDK use only. This should not be updated by Beam pipeline
# authors.
_PTRANSFORM_OVERRIDES = []
def __init__(self):
self._cache = None
def apply_CombinePerKey(self, transform, pcoll):
# TODO: Move imports to top. The Pipeline <-> Runner dependency causes problems
# with resolving imports when they are at the top.
# pylint: disable=wrong-import-position
from apache_beam.runners.direct.helper_transforms import LiftedCombinePerKey
try:
return pcoll | LiftedCombinePerKey(
transform.fn, transform.args, transform.kwargs)
except NotImplementedError:
return transform.expand(pcoll)
def apply__GroupAlsoByWindow(self, transform, pcoll):
if (transform.__class__ == _GroupAlsoByWindow and
pcoll.pipeline._options.view_as(StandardOptions).streaming):
# Use specialized streaming implementation, if requested.
raise NotImplementedError(
'Streaming support is not yet available on the DirectRunner.')
# TODO(ccy): enable when streaming implementation is plumbed through.
# type_hints = transform.get_type_hints()
# return pcoll | (_StreamingGroupAlsoByWindow(transform.windowing)
# .with_input_types(*type_hints.input_types[0])
# .with_output_types(*type_hints.output_types[0]))
return transform.expand(pcoll)
def run(self, pipeline):
"""Execute the entire pipeline and returns an DirectPipelineResult."""
# Performing configured PTransform overrides.
pipeline.replace_all(DirectRunner._PTRANSFORM_OVERRIDES)
# TODO: Move imports to top. The Pipeline <-> Runner dependency causes problems
# with resolving imports when they are at the top.
# pylint: disable=wrong-import-position
from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import \
ConsumerTrackingPipelineVisitor
from apache_beam.runners.direct.evaluation_context import EvaluationContext
from apache_beam.runners.direct.executor import Executor
from apache_beam.runners.direct.transform_evaluator import \
TransformEvaluatorRegistry
MetricsEnvironment.set_metrics_supported(True)
logging.info('Running pipeline with DirectRunner.')
self.consumer_tracking_visitor = ConsumerTrackingPipelineVisitor()
pipeline.visit(self.consumer_tracking_visitor)
evaluation_context = EvaluationContext(
pipeline._options,
BundleFactory(stacked=pipeline._options.view_as(DirectOptions)
.direct_runner_use_stacked_bundle),
self.consumer_tracking_visitor.root_transforms,
self.consumer_tracking_visitor.value_to_consumers,
self.consumer_tracking_visitor.step_names,
self.consumer_tracking_visitor.views)
evaluation_context.use_pvalue_cache(self._cache)
executor = Executor(self.consumer_tracking_visitor.value_to_consumers,
TransformEvaluatorRegistry(evaluation_context),
evaluation_context)
# DirectRunner does not support injecting
# PipelineOptions values at runtime
RuntimeValueProvider.set_runtime_options({})
# Start the executor. This is a non-blocking call; it will start the
# execution in background threads and return.
executor.start(self.consumer_tracking_visitor.root_transforms)
result = DirectPipelineResult(executor, evaluation_context)
if self._cache:
# We are running in eager mode, block until the pipeline execution
# completes in order to have full results in the cache.
result.wait_until_finish()
self._cache.finalize()
return result
@property
def cache(self):
if not self._cache:
self._cache = BufferingInMemoryCache()
return self._cache.pvalue_cache
class BufferingInMemoryCache(object):
"""PValueCache wrapper for buffering bundles until a PValue is fully computed.
BufferingInMemoryCache keeps an in-memory cache of
(applied_ptransform, tag) tuples. It accepts appending to existing cache
entries until it is finalized. finalize() will make all the existing cached
entries visible to the underlying PValueCache in their entirety, clear the
in-memory cache and stop accepting new cache entries.
"""
def __init__(self):
self._cache = collections.defaultdict(list)
self._pvalue_cache = PValueCache()
self._finalized = False
@property
def pvalue_cache(self):
return self._pvalue_cache
def append(self, applied_ptransform, tag, elements):
assert not self._finalized
assert elements is not None
self._cache[(applied_ptransform, tag)].extend(elements)
def finalize(self):
"""Make buffered cache elements visible to the underlying PValueCache."""
assert not self._finalized
for key, value in self._cache.iteritems():
applied_ptransform, tag = key
self._pvalue_cache.cache_output(applied_ptransform, tag, value)
self._cache = None
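# Illustrative usage sketch, not part of the original module: the cache is
# filled per (applied_ptransform, tag) key and flushed exactly once. The names
# `applied_ptransform` and `elements` below are placeholders for the caller's
# objects.
#
#   buffering_cache = BufferingInMemoryCache()
#   buffering_cache.append(applied_ptransform, None, elements)
#   buffering_cache.finalize()  # pushes buffered bundles into .pvalue_cache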
class DirectPipelineResult(PipelineResult):
"""A DirectPipelineResult provides access to info about a pipeline."""
def __init__(self, executor, evaluation_context):
super(DirectPipelineResult, self).__init__(PipelineState.RUNNING)
self._executor = executor
self._evaluation_context = evaluation_context
def _is_in_terminal_state(self):
return self._state is not PipelineState.RUNNING
def wait_until_finish(self, duration=None):
if not self._is_in_terminal_state():
if duration:
raise NotImplementedError(
'DirectRunner does not support duration argument.')
try:
self._executor.await_completion()
self._state = PipelineState.DONE
except: # pylint: disable=broad-except
self._state = PipelineState.FAILED
raise
return self._state
def aggregated_values(self, aggregator_or_name):
return self._evaluation_context.get_aggregator_values(aggregator_or_name)
def metrics(self):
return self._evaluation_context.metrics()
class EagerRunner(DirectRunner):
is_eager = True
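# Illustrative usage sketch, not part of the original module: how a trivial
# pipeline could be run locally with this runner. The apache_beam import and
# the runner flag below are assumptions about the surrounding SDK install.
if __name__ == '__main__':
  import apache_beam as beam
  from apache_beam.options.pipeline_options import PipelineOptions

  # Build a tiny pipeline and execute it with the DirectRunner defined above.
  p = beam.Pipeline(options=PipelineOptions(['--runner=DirectRunner']))
  squares = p | beam.Create([1, 2, 3]) | beam.Map(lambda x: x * x)
  p.run().wait_until_finish()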
|
wtanaka/beam
|
sdks/python/apache_beam/runners/direct/direct_runner.py
|
Python
|
apache-2.0
| 8,397
|
[
"VisIt"
] |
2ef580275b46892efa2d2cb53abc15786de976374d90bba06e6e8623c7d20746
|
import json
import logging
from difflib import ndiff
import waffle
from django import forms
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.forms.widgets import CheckboxSelectMultiple
from django.template.loader import render_to_string
from django.utils import translation
from django.utils.safestring import mark_safe
from django.utils.six import string_types
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
from taggit.utils import parse_tags
import kuma.wiki.content
from kuma.core.form_fields import StrippedCharField
from kuma.core.urlresolvers import reverse
from kuma.spam.akismet import AkismetError
from kuma.spam.forms import AkismetCheckFormMixin, AkismetSubmissionFormMixin
from .constants import (DOCUMENT_PATH_RE, INVALID_DOC_SLUG_CHARS_RE,
INVALID_REV_SLUG_CHARS_RE, LOCALIZATION_FLAG_TAGS,
RESERVED_SLUGS_RES, REVIEW_FLAG_TAGS,
SLUG_CLEANSING_RE, SPAM_EXEMPTED_FLAG,
SPAM_OTHER_HEADERS, SPAM_SUBMISSION_REVISION_FIELDS,
SPAM_TRAINING_FLAG, TEMPLATE_TITLE_PREFIX)
from .events import EditDocumentEvent
from .models import (Document, DocumentSpamAttempt, DocumentTag, Revision,
RevisionIP, RevisionAkismetSubmission, valid_slug_parent)
from .tasks import send_first_edit_email
TITLE_REQUIRED = _(u'Please provide a title.')
TITLE_SHORT = _(u'The title is too short (%(show_value)s characters). '
u'It must be at least %(limit_value)s characters.')
TITLE_LONG = _(u'Please keep the length of the title to %(limit_value)s '
u'characters or less. It is currently %(show_value)s '
u'characters.')
TITLE_PLACEHOLDER = _(u'Name Your Article')
SLUG_REQUIRED = _(u'Please provide a slug.')
SLUG_INVALID = _(u'The slug provided is not valid.')
SLUG_SHORT = _(u'The slug is too short (%(show_value)s characters). '
u'It must be at least %(limit_value)s characters.')
SLUG_LONG = _(u'Please keep the length of the slug to %(limit_value)s '
u'characters or less. It is currently %(show_value)s '
u'characters.')
SUMMARY_REQUIRED = _(u'Please provide a summary.')
SUMMARY_SHORT = _(u'The summary is too short (%(show_value)s characters). '
u'It must be at least %(limit_value)s characters.')
SUMMARY_LONG = _(u'Please keep the length of the summary to '
u'%(limit_value)s characters or less. It is currently '
u'%(show_value)s characters.')
CONTENT_REQUIRED = _(u'Please provide content.')
CONTENT_SHORT = _(u'The content is too short (%(show_value)s characters). '
u'It must be at least %(limit_value)s characters.')
CONTENT_LONG = _(u'Please keep the length of the content to '
u'%(limit_value)s characters or less. It is currently '
u'%(show_value)s characters.')
COMMENT_LONG = _(u'Please keep the length of the comment to '
u'%(limit_value)s characters or less. It is currently '
u'%(show_value)s characters.')
SLUG_COLLIDES = _(u'Another document with this slug already exists.')
OTHER_COLLIDES = _(u'Another document with this metadata already exists.')
MIDAIR_COLLISION = _(u'Publishing failed. Conflicting edit attempts detected. '
u'Please copy and paste your edits to a safe place and '
u'visit the <a href="%(url)s">revision history</a> page '
u'to see what was changed before making further edits.')
MOVE_REQUIRED = _(u"Changing this document's slug requires "
u"moving it and its children.")
log = logging.getLogger('kuma.wiki.forms')
class AkismetRevisionData(object):
"""
Collect Akismet data at creation time or later.
This can be used in three different scenarios:
- A user is creating a Document
- A user is editing a Document
- A user created or edited a document in the past
Derived classes customize __init__ to gather data from the relevant
instances, and then .parameters will have the Akismet submission.
"""
def __init__(self):
"""Initialize the parameters."""
self.default_language = settings.WIKI_DEFAULT_LANGUAGE
self.parameters = {
'blog_charset': 'UTF-8',
'comment_type': 'wiki-revision'
}
def akismet_lang(self, language):
"""
Convert a Django language name to an Akismet blog_lang identifier.
E.g.: "en-US" to "en_us"
"""
return translation.to_locale(language).lower()
def content_from_form(self, cleaned_data):
"""Create a combined content string from form data."""
parts = []
for field in SPAM_SUBMISSION_REVISION_FIELDS:
value = cleaned_data.get(field, u'')
if field == 'tags':
value = self.split_tags(value)
parts.append(value)
return u'\n'.join(parts)
def content_from_document(self, document):
"""Create a combined content string from a document."""
parts = []
current_revision = document.current_revision
assert current_revision, "document must have a current revision."
for field in SPAM_SUBMISSION_REVISION_FIELDS:
if field == 'comment':
value = u''
elif field == 'content':
value = current_revision.content
elif field == 'tags':
value = self.split_tags(current_revision.tags)
else:
value = getattr(document, field, '')
parts.append(value)
return u'\n'.join(parts)
def content_from_revision(self, revision):
"""Create a combined content string from a Revision."""
parts = []
for field in SPAM_SUBMISSION_REVISION_FIELDS:
value = getattr(revision, field) or u''
if field == 'tags':
value = self.split_tags(value)
parts.append(value)
return u'\n'.join(parts)
def set_blog(self, request):
"""Set the blog parameter from the request object."""
self.parameters['blog'] = request.build_absolute_uri('/')
def set_blog_lang(self, language=None):
"""
Set the blog_lang from a Django language name.
If the language is not English, then report that the content may be a
combination of the target language and untranslated English.
"""
language = language or self.default_language
if language == self.default_language:
blog_lang = self.akismet_lang(language)
else:
blog_lang = '%s, %s' % (
self.akismet_lang(language),
self.akismet_lang(self.default_language))
self.parameters['blog_lang'] = blog_lang
def set_by_edit_request(self, request):
"""
Add data from the content creator's request object.
Includes:
- The base blog address
- The author information
- Named HTTP headers
- Other HTTP headers, as modeled by Akismet's Wordpress plugin:
https://plugins.trac.wordpress.org/browser/akismet/trunk/class.akismet.php
"""
self.set_blog(request)
self.set_comment_author(request.user)
meta = request.META
self.parameters.update({
'referrer': meta.get('HTTP_REFERER', ''),
'user_agent': meta.get('HTTP_USER_AGENT', ''),
'user_ip': meta.get('REMOTE_ADDR', ''),
})
for key, value in meta.items():
if not isinstance(value, string_types):
continue
if key.startswith('HTTP_COOKIE'):
continue
if key.startswith('HTTP_') or key in SPAM_OTHER_HEADERS:
self.parameters[key] = value
def set_comment_author(self, user):
"""Set the comment author from a User object."""
self.parameters.update({
'comment_author': (user.fullname or user.get_full_name() or
user.username),
'comment_author_email': user.email,
})
def set_content(self, new_content, existing_content=None):
"""Set comment_content to the new and changed non-empty lines."""
existing_content = existing_content or u''
diff = ndiff(existing_content.splitlines(1), new_content.splitlines(1))
lines = []
for line in diff:
if line.startswith('+ '):
diff_content = line[2:].strip()
if diff_content:
lines.append(diff_content)
self.parameters['comment_content'] = u'\n'.join(lines)
def set_permalink(self, document, request):
"""Set the permalink for the Document."""
doc_url = document.get_absolute_url()
self.parameters['permalink'] = request.build_absolute_uri(doc_url)
def split_tags(self, tag_string):
"""Turn '"Tag 2" "Tag 1"' into 'Tag 1\nTag 2'."""
return u'\n'.join(parse_tags(tag_string))
class AkismetNewDocumentData(AkismetRevisionData):
"""Collect Akismet data for a user creating a new document."""
def __init__(self, request, cleaned_data, language=None):
"""
Initialize from a new document form submission by the author.
Keyword Parameters:
request - the Request for the author
cleaned_data - the validated form data
language - the language of the revision being created
"""
super(AkismetNewDocumentData, self).__init__()
self.set_by_edit_request(request)
self.set_blog_lang(language)
new_content = self.content_from_form(cleaned_data)
self.set_content(new_content)
class AkismetNewTranslationData(AkismetRevisionData):
"""Collect Akismet data for a user creating a new translation."""
def __init__(self, request, cleaned_data, english_document, language):
"""
Initialize from a new translation form submission by the author.
Keyword Parameters:
request - the Request for the author
cleaned_data - the validated form data
english_document - the original English document
language - the language of the revision being created
"""
super(AkismetNewTranslationData, self).__init__()
self.set_by_edit_request(request)
self.set_blog_lang(language)
new_content = self.content_from_form(cleaned_data)
existing_content = self.content_from_document(english_document)
self.set_content(new_content, existing_content)
class AkismetEditDocumentData(AkismetRevisionData):
"""Collect Akismet data for a user editing an existing document."""
def __init__(self, request, cleaned_data, document):
"""
Initialize from an edit page form submission by the author.
Keyword Parameters:
request - the Request for the author
cleaned_data - the validated form data
document - the document the user is editing
"""
super(AkismetEditDocumentData, self).__init__()
self.set_by_edit_request(request)
self.set_blog_lang(document.locale)
self.set_permalink(document, request)
new_content = self.content_from_form(cleaned_data)
existing_content = self.content_from_document(document)
self.set_content(new_content, existing_content)
class AkismetHistoricalData(AkismetRevisionData):
"""Collect Akismet data from a historical revision."""
def __init__(self, revision, request=None):
"""Initialize from a historical revision.
Keyword Parameters:
revision - the historical Revision
request - an optional request object
"""
assert revision.id, "Must be a saved Revision."
assert revision.document_id, "Must be a Revision with a Document."
super(AkismetHistoricalData, self).__init__()
revision_ip = revision.revisionip_set.first()
if revision_ip:
if revision_ip.data:
# Use captured Akismet submission
self.parameters = json.loads(revision_ip.data)
return
else:
self.parameters.update({
'user_ip': revision_ip.ip,
'user_agent': revision_ip.user_agent,
'referrer': revision_ip.referrer,
})
else:
self.parameters.update({
'user_ip': '0.0.0.0',
'user_agent': '',
'referrer': '',
})
document = revision.document
self.set_blog_lang(document.locale)
if request:
self.set_blog(request)
self.set_permalink(document, request)
self.set_comment_author(revision.creator)
new_content = self.content_from_revision(revision)
old_revision = revision.get_previous()
if old_revision:
old_content = self.content_from_revision(old_revision)
else:
old_content = None
self.set_content(new_content, old_content)
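# Illustrative sketch, not part of the original module: the classes above are
# instantiated with the request, the validated form data and (for edits) the
# document, after which .parameters holds the dict sent to Akismet. `request`,
# `form` and `document` below are placeholders for the caller's objects.
#
#   akismet_data = AkismetEditDocumentData(request, form.cleaned_data, document)
#   parameters = akismet_data.parameters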
class DocumentForm(forms.ModelForm):
"""
Used for managing the wiki document data model that houses general
data of a wiki page.
"""
title = StrippedCharField(min_length=1,
max_length=255,
widget=forms.TextInput(
attrs={'placeholder': TITLE_PLACEHOLDER}),
label=_(u'Title:'),
help_text=_(u'Title of article'),
error_messages={'required': TITLE_REQUIRED,
'min_length': TITLE_SHORT,
'max_length': TITLE_LONG})
slug = StrippedCharField(min_length=1,
max_length=255,
widget=forms.TextInput(),
label=_(u'Slug:'),
help_text=_(u'Article URL'),
error_messages={'required': SLUG_REQUIRED,
'min_length': SLUG_SHORT,
'max_length': SLUG_LONG})
parent_topic = forms.ModelChoiceField(queryset=Document.objects.all(),
required=False,
label=_(u'Parent:'))
locale = forms.CharField(widget=forms.HiddenInput())
class Meta:
model = Document
fields = ('title', 'slug', 'locale')
def __init__(self, *args, **kwargs):
# when creating a new document with a parent, this will be set
self.parent_slug = kwargs.pop('parent_slug', None)
super(DocumentForm, self).__init__(*args, **kwargs)
def clean_slug(self):
slug = self.cleaned_data['slug']
if slug == '':
# Default to the title, if missing.
slug = self.cleaned_data['title']
elif self.parent_slug:
# Prepend parent slug if given from view
slug = self.parent_slug + '/' + slug
# check both for disallowed characters and match for the allowed
if (INVALID_DOC_SLUG_CHARS_RE.search(slug) or
not DOCUMENT_PATH_RE.search(slug)):
raise forms.ValidationError(SLUG_INVALID)
# Guard against slugs that match urlpatterns
for pattern in RESERVED_SLUGS_RES:
if pattern.match(slug):
raise forms.ValidationError(SLUG_INVALID)
return slug
def save(self, parent=None, *args, **kwargs):
"""Persist the Document form, and return the saved Document."""
doc = super(DocumentForm, self).save(commit=False, *args, **kwargs)
doc.parent = parent
if 'parent_topic' in self.cleaned_data:
doc.parent_topic = self.cleaned_data['parent_topic']
doc.save()
# not strictly necessary since we haven't changed
# any m2m data since instantiating the doc
self.save_m2m()
return doc
class RevisionForm(AkismetCheckFormMixin, forms.ModelForm):
"""
Form to create new revisions.
"""
title = StrippedCharField(
min_length=1,
max_length=255,
required=False,
widget=forms.TextInput(attrs={'placeholder': TITLE_PLACEHOLDER}),
label=_(u'Title:'),
help_text=_(u'Title of article'),
error_messages={
'required': TITLE_REQUIRED,
'min_length': TITLE_SHORT,
'max_length': TITLE_LONG,
}
)
slug = StrippedCharField(
min_length=1,
max_length=255,
required=False,
widget=forms.TextInput(),
label=_(u'Slug:'),
help_text=_(u'Article URL'),
error_messages={
'required': SLUG_REQUIRED,
'min_length': SLUG_SHORT,
'max_length': SLUG_LONG,
}
)
tags = StrippedCharField(
required=False,
label=_(u'Tags:'),
)
keywords = StrippedCharField(
required=False,
label=_(u'Keywords:'),
help_text=_(u'Affects search results'),
)
summary = StrippedCharField(
required=False,
min_length=5,
max_length=1000,
widget=forms.Textarea(),
label=_(u'Search result summary:'),
help_text=_(u'Only displayed on search results page'),
error_messages={
'required': SUMMARY_REQUIRED,
'min_length': SUMMARY_SHORT,
'max_length': SUMMARY_LONG
},
)
content = StrippedCharField(
min_length=5,
max_length=300000,
label=_(u'Content:'),
widget=forms.Textarea(),
error_messages={
'required': CONTENT_REQUIRED,
'min_length': CONTENT_SHORT,
'max_length': CONTENT_LONG,
}
)
comment = StrippedCharField(
max_length=255,
required=False,
label=_(u'Comment:')
)
review_tags = forms.MultipleChoiceField(
label=ugettext("Tag this revision for review?"),
widget=CheckboxSelectMultiple,
required=False,
choices=REVIEW_FLAG_TAGS,
)
localization_tags = forms.MultipleChoiceField(
label=ugettext("Tag this revision for localization?"),
widget=CheckboxSelectMultiple,
required=False,
choices=LOCALIZATION_FLAG_TAGS,
)
current_rev = forms.CharField(
required=False,
widget=forms.HiddenInput(),
)
class Meta(object):
model = Revision
fields = ('title', 'slug', 'tags', 'keywords', 'summary', 'content',
'comment', 'based_on', 'toc_depth',
'render_max_age')
def __init__(self, *args, **kwargs):
self.section_id = kwargs.pop('section_id', None)
self.is_async_submit = kwargs.pop('is_async_submit', None)
# when creating a new document with a parent, this will be set
self.parent_slug = kwargs.pop('parent_slug', None)
super(RevisionForm, self).__init__(*args, **kwargs)
self.fields['based_on'].widget = forms.HiddenInput()
if self.instance and self.instance.pk:
# Ensure both title and slug are populated from parent document,
# if last revision didn't have them
if not self.instance.title:
self.initial['title'] = self.instance.document.title
if not self.instance.slug:
self.initial['slug'] = self.instance.document.slug
content = self.instance.content
if not self.instance.document.is_template:
parsed_content = kuma.wiki.content.parse(content)
parsed_content.injectSectionIDs()
if self.section_id:
parsed_content.extractSection(self.section_id)
parsed_content.filterEditorSafety()
content = parsed_content.serialize()
self.initial['content'] = content
self.initial['review_tags'] = list(self.instance
.review_tags
.names())
self.initial['localization_tags'] = list(self.instance
.localization_tags
.names())
if self.section_id:
self.fields['toc_depth'].required = False
self.is_template = None
def clean_slug(self):
# Since this form can change the URL of the page on which the editing
# happens, changes to the slug are ignored for iframe submissions
if self.is_async_submit:
return self.instance.document.slug
# Get the cleaned slug
slug = self.cleaned_data['slug']
# first check if the given slug doesn't contain slashes and other
# characters not allowed in a revision slug component (without parent)
if slug and INVALID_REV_SLUG_CHARS_RE.search(slug):
raise forms.ValidationError(SLUG_INVALID)
# edits can come in without a slug, so default to the current doc slug
if not slug:
try:
slug = self.instance.slug = self.instance.document.slug
except ObjectDoesNotExist:
pass
# then if there is a parent document we prefix the slug with its slug
if self.parent_slug:
slug = u'/'.join([self.parent_slug, slug])
try:
doc = Document.objects.get(locale=self.instance.document.locale,
slug=slug)
if self.instance and self.instance.document:
if (not doc.get_redirect_url() and
doc.pk != self.instance.document.pk):
# There's another document with this value,
# and we're not a revision of it.
raise forms.ValidationError(SLUG_COLLIDES)
else:
# This document-and-revision doesn't exist yet, so there
# shouldn't be any collisions at all.
raise forms.ValidationError(SLUG_COLLIDES)
except Document.DoesNotExist:
# No existing document for this value, so we're good here.
pass
self.is_template = slug.startswith(TEMPLATE_TITLE_PREFIX)
return slug
def clean_tags(self):
"""
Validate the tags ensuring we have no case-sensitive duplicates.
"""
tags = self.cleaned_data['tags']
cleaned_tags = []
if tags:
for tag in parse_tags(tags):
# Note: The exact match query doesn't work correctly with
# MySQL with regard to case-sensitivity. If we move to
# PostgreSQL in the future this code may need to change.
doc_tag = (DocumentTag.objects.filter(name__exact=tag)
.values_list('name', flat=True))
# Write a log we can grep to help find pre-existing duplicate
# document tags for cleanup.
if len(doc_tag) > 1:
log.warn('Found duplicate document tags: %s' % doc_tag)
if doc_tag:
if doc_tag[0] != tag and doc_tag[0].lower() == tag.lower():
# The tag differs only by case. Do not add a new one,
# add the existing one.
cleaned_tags.append(doc_tag[0])
continue
cleaned_tags.append(tag)
return ' '.join([u'"%s"' % t for t in cleaned_tags])
def clean_content(self):
"""
Validate the content, performing any section editing if necessary
"""
content = self.cleaned_data['content']
# If we're editing a section, we need to replace the section content
# from the current revision.
if self.section_id and self.instance and self.instance.document:
# Make sure we start with content from the latest revision.
full_content = self.instance.document.current_revision.content
# Replace the section content with the form content.
parsed_content = kuma.wiki.content.parse(full_content)
parsed_content.replaceSection(self.section_id, content)
content = parsed_content.serialize()
return content
def clean_current_rev(self):
"""
If a current revision is supplied in the form, compare it against
what the document claims is the current revision. If there's a
difference, then an edit has occurred since the form was constructed
and we treat it as a mid-air collision.
"""
current_rev = self.cleaned_data.get('current_rev', None)
if not current_rev:
# If there's no current_rev, just bail.
return current_rev
try:
doc_current_rev = self.instance.document.current_revision.id
if unicode(current_rev) != unicode(doc_current_rev):
if (self.section_id and self.instance and
self.instance.document):
# This is a section edit. So, even though the revision has
# changed, it still might not be a collision if the section
# in particular hasn't changed.
orig_ct = (Revision.objects
.get(pk=current_rev)
.get_section_content(self.section_id))
curr_ct = (self.instance
.document.current_revision
.get_section_content(self.section_id))
if orig_ct != curr_ct:
# Oops. Looks like the section did actually get
# changed, so yeah this is a collision.
url = reverse(
'wiki.document_revisions',
kwargs={'document_path': self.instance.document.slug}
)
raise forms.ValidationError(MIDAIR_COLLISION % {'url': url})
return current_rev
else:
# No section edit, so this is a flat-out collision.
url = reverse(
'wiki.document_revisions',
kwargs={'document_path': self.instance.document.slug}
)
raise forms.ValidationError(MIDAIR_COLLISION % {'url': url})
except Document.DoesNotExist:
# If there's no document yet, just bail.
return current_rev
def akismet_enabled(self):
"""
Extends the mixin check with two additional ways the Akismet check
can be disabled:
* The edit is to a KumaScript template
* The user has the SPAM_EXEMPTED_FLAG
"""
client_ready = super(RevisionForm, self).akismet_enabled()
user_exempted = waffle.flag_is_active(self.request, SPAM_EXEMPTED_FLAG)
return client_ready and not user_exempted and not self.is_template
@property
def akismet_error_message(self):
request = getattr(self, 'request', None)
user = request and request.user
return mark_safe(render_to_string('wiki/includes/spam_error.html',
{'user': user}))
def akismet_error(self, parameters, exception=None):
"""
Upon errors from the Akismet API, record the user, document
and date of the attempt for further analysis, then call the
parent class' error handler.
"""
try:
document = self.instance.document
except ObjectDoesNotExist:
document = None
if exception and isinstance(exception, AkismetError):
# For Akismet errors, save the submission and exception details
dsa_params = parameters.copy()
dsa_params['akismet_status_code'] = exception.status_code
dsa_params['akismet_debug_help'] = exception.debug_help
dsa_params['akismet_response'] = exception.response.content
review = DocumentSpamAttempt.AKISMET_ERROR
else:
# For detected spam, save the details for review
dsa_params = parameters
review = DocumentSpamAttempt.NEEDS_REVIEW
# Wrapping this in a try/finally to make sure that even if
# creating a spam attempt object fails we call the parent
# method that raises a ValidationError
try:
DocumentSpamAttempt.objects.create(
title=self.cleaned_data['title'],
slug=self.cleaned_data['slug'],
user=self.request.user,
document=document,
data=json.dumps(dsa_params, indent=2, sort_keys=True),
review=review
)
finally:
if not waffle.flag_is_active(self.request, SPAM_TRAINING_FLAG):
super(RevisionForm, self).akismet_error(parameters, exception)
def akismet_parameters(self):
"""
Returns the parameters for Akismet's check-comment API endpoint.
The form cleaning also saves the data into the instance, which will
cause future calls to return different data. The results during the
initial form cleaning are cached in ._akismet_data, and returned for
future calls, such as the unit tests.
"""
if not getattr(self, '_akismet_data', None):
try:
document = self.instance.document
except ObjectDoesNotExist:
self._akismet_data = AkismetNewDocumentData(
self.request, self.cleaned_data, self.data.get('locale'))
else:
if document.current_revision:
self._akismet_data = AkismetEditDocumentData(
self.request, self.cleaned_data, document)
else:
# New translation, compare to English document
based_on = self.cleaned_data.get('based_on')
assert based_on, 'Expected a new translation.'
document = based_on.document
self._akismet_data = AkismetNewTranslationData(
self.request, self.cleaned_data, document,
self.data.get('locale'))
parameters = self._akismet_data.parameters.copy()
parameters.update(self.akismet_parameter_overrides())
return parameters
def save(self, document, **kwargs):
"""
Persists the revision and returns it.
Takes the document the revision belongs to; the view request is read
from self.request. Once the revision is saved it also handles
revision-IP logging, first-edit emails, document rendering and edit
notifications.
"""
# have to check for first edit before we save
is_first_edit = not self.request.user.wiki_revisions().exists()
# Make sure we don't commit the save right away since we
# want to do other things here first.
kwargs['commit'] = False
if self.section_id and self.instance and self.instance.document:
# The logic to save a section is slightly different and may
# need to evolve over time; a section edit doesn't submit
# all the fields, and we need to account for that when we
# construct the new Revision.
doc = Document.objects.get(pk=self.instance.document.id)
old_rev = doc.current_revision
new_rev = super(RevisionForm, self).save(**kwargs)
new_rev.document = document
new_rev.creator = self.request.user
new_rev.toc_depth = old_rev.toc_depth
new_rev.save()
new_rev.review_tags.set(*list(old_rev.review_tags.names()))
else:
new_rev = super(RevisionForm, self).save(**kwargs)
new_rev.document = document
new_rev.creator = self.request.user
new_rev.toc_depth = self.cleaned_data['toc_depth']
new_rev.save()
new_rev.review_tags.set(*self.cleaned_data['review_tags'])
new_rev.localization_tags.set(*self.cleaned_data['localization_tags'])
# when enabled store the user's IP address
if waffle.switch_is_active('store_revision_ips'):
RevisionIP.objects.log(
revision=new_rev,
headers=self.request.META,
data=json.dumps(self.akismet_parameters(),
indent=2, sort_keys=True)
)
# send first edit emails
if is_first_edit:
send_first_edit_email.delay(new_rev.pk)
# schedule a document rendering
document.schedule_rendering('max-age=0')
# schedule event notifications
EditDocumentEvent(new_rev).fire(exclude=new_rev.creator)
return new_rev
class RevisionAkismetSubmissionAdminForm(AkismetSubmissionFormMixin,
forms.ModelForm):
"""
A model form used in the admin UI to submit missed spam or ham.
In the Django admin, an admin can both mark a revision as missed spam,
and correct an incorrectly marked spam.
The ``AkismetSubmissionFormMixin`` class submits the data to Akismet in
the ``clean`` method, using the override methods in this and derived
classes. Users of the form must set the ``sender`` to the request user
before calling ``is_valid()``.
"""
class Meta(object):
model = RevisionAkismetSubmission
exclude = ['sender', 'sent']
def akismet_submission_type(self):
"""The submission type is determined from the submitted form data."""
return self.cleaned_data['type']
def akismet_parameters(self):
"""
Returns parameter dict to pass to Akismet's submission API endpoints.
"""
revision = self.cleaned_data['revision']
akismet_data = AkismetHistoricalData(revision, self.request)
return akismet_data.parameters
class RevisionAkismetSubmissionSpamForm(RevisionAkismetSubmissionAdminForm):
"""
A model form for submitting missed spam.
For public dashboards, the only valid submission type is spam, so the
type is omitted from the form and hard-coded as "spam".
"""
class Meta(RevisionAkismetSubmissionAdminForm.Meta):
exclude = ['sender', 'sent', 'type']
def akismet_submission_type(self):
"""Force the submission type to spam."""
return "spam"
class TreeMoveForm(forms.Form):
title = StrippedCharField(min_length=1, max_length=255,
required=False,
widget=forms.TextInput(
attrs={'placeholder': TITLE_PLACEHOLDER}),
label=_(u'Title:'),
help_text=_(u'Title of article'),
error_messages={'required': TITLE_REQUIRED,
'min_length': TITLE_SHORT,
'max_length': TITLE_LONG})
slug = StrippedCharField(min_length=1, max_length=255,
widget=forms.TextInput(),
label=_(u'New slug:'),
help_text=_(u'New article URL'),
error_messages={'required': SLUG_REQUIRED,
'min_length': SLUG_SHORT,
'max_length': SLUG_LONG})
locale = StrippedCharField(min_length=2, max_length=5,
widget=forms.HiddenInput())
def clean_slug(self):
slug = self.cleaned_data['slug']
# We only want the slug here; inputting a full URL would lead
# to disaster.
if '://' in slug:
raise forms.ValidationError('Please enter only the slug to move '
'to, not the full URL.')
# Removes leading slash and {locale/docs/} if necessary
# IMPORTANT: This exact same regex is used on the client side, so
# update both when changing it
slug = SLUG_CLEANSING_RE.sub('', slug)
# Remove the trailing slash if one is present, because it
# will screw up the page move, which doesn't expect one.
return slug.rstrip('/')
def clean(self):
cleaned_data = super(TreeMoveForm, self).clean()
if set(['slug', 'locale']).issubset(cleaned_data):
slug, locale = cleaned_data['slug'], cleaned_data['locale']
try:
valid_slug_parent(slug, locale)
except Exception as e:
raise forms.ValidationError(e.args[0])
return cleaned_data
class DocumentDeletionForm(forms.Form):
reason = forms.CharField(widget=forms.Textarea(attrs={'autofocus': 'true'}))
|
ollie314/kuma
|
kuma/wiki/forms.py
|
Python
|
mpl-2.0
| 37,414
|
[
"VisIt"
] |
b585ff14857a6525488aee4542a4fc1fa57520414af3ba652ad9823bf100b6fc
|
import numpy as np
import tensorflow as tf
from yadlt.models.boltzmann import dbn
from yadlt.utils import datasets, utilities
# #################### #
# Flags definition #
# #################### #
flags = tf.app.flags
FLAGS = flags.FLAGS
# Global configuration
flags.DEFINE_string('dataset', 'mnist', 'Which dataset to use. ["mnist", "cifar10", "custom"]')
flags.DEFINE_string('train_dataset', '', 'Path to train set .npy file.')
flags.DEFINE_string('train_labels', '', 'Path to train labels .npy file.')
flags.DEFINE_string('valid_dataset', '', 'Path to valid set .npy file.')
flags.DEFINE_string('valid_labels', '', 'Path to valid labels .npy file.')
flags.DEFINE_string('test_dataset', '', 'Path to test set .npy file.')
flags.DEFINE_string('test_labels', '', 'Path to test labels .npy file.')
flags.DEFINE_string('cifar_dir', '', 'Path to the cifar 10 dataset directory.')
flags.DEFINE_string('name', 'dbn', 'Name of the model.')
flags.DEFINE_string('save_predictions', '', 'Path to a .npy file to save predictions of the model.')
flags.DEFINE_string('save_layers_output_test', '', 'Path to a .npy file to save test set output from all the layers of the model.')
flags.DEFINE_string('save_layers_output_train', '', 'Path to a .npy file to save train set output from all the layers of the model.')
flags.DEFINE_boolean('do_pretrain', True, 'Whether or not pretrain the network.')
flags.DEFINE_integer('seed', -1, 'Seed for the random generators (>= 0). Useful for testing hyperparameters.')
flags.DEFINE_float('momentum', 0.5, 'Momentum parameter.')
# RBMs layers specific parameters
flags.DEFINE_string('rbm_layers', '256,', 'Comma-separated values for the layers in the DBN.')
flags.DEFINE_boolean('rbm_gauss_visible', False, 'Whether to use Gaussian units for the visible layer.')
flags.DEFINE_float('rbm_stddev', 0.1, 'Standard deviation for Gaussian visible units.')
flags.DEFINE_string('rbm_learning_rate', '0.001,', 'Initial learning rate.')
flags.DEFINE_string('rbm_num_epochs', '10,', 'Number of epochs.')
flags.DEFINE_string('rbm_batch_size', '32,', 'Size of each mini-batch.')
flags.DEFINE_string('rbm_gibbs_k', '1,', 'Gibbs sampling steps.')
# Supervised fine tuning parameters
flags.DEFINE_string('finetune_act_func', 'relu', 'Activation function.')
flags.DEFINE_float('finetune_learning_rate', 0.01, 'Learning rate.')
flags.DEFINE_float('finetune_momentum', 0.9, 'Momentum parameter.')
flags.DEFINE_integer('finetune_num_epochs', 10, 'Number of epochs.')
flags.DEFINE_integer('finetune_batch_size', 32, 'Size of each mini-batch.')
flags.DEFINE_string('finetune_opt', 'momentum', '["sgd", "ada_grad", "momentum", "adam"]')
flags.DEFINE_string('finetune_loss_func', 'softmax_cross_entropy', 'Loss function. ["mse", "softmax_cross_entropy"]')
flags.DEFINE_float('finetune_dropout', 1, 'Dropout parameter.')
# Conversion of RBM layers parameters from string to their specific type
rbm_layers = utilities.flag_to_list(FLAGS.rbm_layers, 'int')
rbm_learning_rate = utilities.flag_to_list(FLAGS.rbm_learning_rate, 'float')
rbm_num_epochs = utilities.flag_to_list(FLAGS.rbm_num_epochs, 'int')
rbm_batch_size = utilities.flag_to_list(FLAGS.rbm_batch_size, 'int')
rbm_gibbs_k = utilities.flag_to_list(FLAGS.rbm_gibbs_k, 'int')
# Parameters validation
assert FLAGS.dataset in ['mnist', 'cifar10', 'custom']
assert FLAGS.finetune_act_func in ['sigmoid', 'tanh', 'relu']
assert len(rbm_layers) > 0
if __name__ == '__main__':
utilities.random_seed_np_tf(FLAGS.seed)
if FLAGS.dataset == 'mnist':
# ################# #
# MNIST Dataset #
# ################# #
trX, trY, vlX, vlY, teX, teY = datasets.load_mnist_dataset(mode='supervised')
elif FLAGS.dataset == 'cifar10':
# ################### #
# Cifar10 Dataset #
# ################### #
trX, trY, teX, teY = datasets.load_cifar10_dataset(FLAGS.cifar_dir, mode='supervised')
vlX = teX[:5000] # Validation set is the first half of the test set
vlY = teY[:5000]
elif FLAGS.dataset == 'custom':
# ################## #
# Custom Dataset #
# ################## #
def load_from_np(dataset_path):
if dataset_path != '':
return np.load(dataset_path)
else:
return None
trX, trY = load_from_np(FLAGS.train_dataset), load_from_np(FLAGS.train_labels)
vlX, vlY = load_from_np(FLAGS.valid_dataset), load_from_np(FLAGS.valid_labels)
teX, teY = load_from_np(FLAGS.test_dataset), load_from_np(FLAGS.test_labels)
else:
trX, trY, vlX, vlY, teX, teY = None, None, None, None, None, None
# Create the object
finetune_act_func = utilities.str2actfunc(FLAGS.finetune_act_func)
srbm = dbn.DeepBeliefNetwork(
name=FLAGS.name, do_pretrain=FLAGS.do_pretrain,
rbm_layers=rbm_layers,
finetune_act_func=finetune_act_func, rbm_learning_rate=rbm_learning_rate,
rbm_num_epochs=rbm_num_epochs, rbm_gibbs_k=rbm_gibbs_k,
rbm_gauss_visible=FLAGS.rbm_gauss_visible, rbm_stddev=FLAGS.rbm_stddev,
momentum=FLAGS.momentum, rbm_batch_size=rbm_batch_size, finetune_learning_rate=FLAGS.finetune_learning_rate,
finetune_num_epochs=FLAGS.finetune_num_epochs, finetune_batch_size=FLAGS.finetune_batch_size,
finetune_opt=FLAGS.finetune_opt, finetune_loss_func=FLAGS.finetune_loss_func,
finetune_dropout=FLAGS.finetune_dropout)
# Fit the model (unsupervised pretraining)
if FLAGS.do_pretrain:
srbm.pretrain(trX, vlX)
# finetuning
print('Start deep belief net finetuning...')
srbm.fit(trX, trY, vlX, vlY)
# Test the model
print('Test set accuracy: {}'.format(srbm.score(teX, teY)))
# Save the predictions of the model
if FLAGS.save_predictions:
print('Saving the predictions for the test set...')
np.save(FLAGS.save_predictions, srbm.predict(teX))
def save_layers_output(which_set):
if which_set == 'train':
trout = srbm.get_layers_output(trX)
for i, o in enumerate(trout):
np.save(FLAGS.save_layers_output_train + '-layer-' + str(i + 1) + '-train', o)
elif which_set == 'test':
teout = srbm.get_layers_output(teX)
for i, o in enumerate(teout):
np.save(FLAGS.save_layers_output_test + '-layer-' + str(i + 1) + '-test', o)
# Save output from each layer of the model
if FLAGS.save_layers_output_test:
print('Saving the output of each layer for the test set')
save_layers_output('test')
# Save output from each layer of the model
if FLAGS.save_layers_output_train:
print('Saving the output of each layer for the train set')
save_layers_output('train')
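# Illustrative invocation sketch, not part of the original script; the flag
# values below are example assumptions. Comma-separated flags such as
# --rbm_layers carry one value per RBM layer and are converted to lists above.
#
#   python run_dbn.py --dataset mnist \
#       --rbm_layers 512,256 --rbm_num_epochs 10,10 \
#       --rbm_learning_rate 0.01,0.01 --rbm_batch_size 32,32 --rbm_gibbs_k 1,1 \
#       --finetune_act_func relu --finetune_num_epochs 20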
|
blackecho/Deep-Learning-TensorFlow
|
cmd_line/boltzmann/run_dbn.py
|
Python
|
mit
| 6,820
|
[
"Gaussian"
] |
eb1fa072b0d00c8d62c153a2674b40ad1510d2fe2258bf4587e19c8ca2e229c0
|
# $Id$
#
# Copyright (C) 2003-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" unit testing code for Lipinski parameter calculation
This provides a workout for the SMARTS matcher
"""
from __future__ import print_function
from rdkit import RDConfig
import unittest,os
from rdkit.six.moves import cPickle
from rdkit import Chem
from rdkit.Chem import Lipinski
class TestCase(unittest.TestCase):
def setUp(self):
print('\n%s: '%self.shortDescription(),end='')
self.inFileName = '%s/NCI/first_200.props.sdf'%(RDConfig.RDDataDir)
def test1(self):
" testing first 200 mols from NCI "
suppl = Chem.SDMolSupplier(self.inFileName)
idx = 1
oldDonorSmarts = Chem.MolFromSmarts('[NH1,NH2,OH1]')
OldDonorCount = lambda x,y=oldDonorSmarts:Lipinski._NumMatches(x,y)
oldAcceptorSmarts = Chem.MolFromSmarts('[N,O]')
OldAcceptorCount = lambda x,y=oldAcceptorSmarts:Lipinski._NumMatches(x,y)
for m in suppl:
if m:
calc = Lipinski.NHOHCount(m)
orig = int(m.GetProp('NUM_LIPINSKIHDONORS'))
assert calc==orig,'bad num h donors for mol %d (%s): %d != %d'%(idx,m.GetProp('SMILES'),calc,orig)
calc = Lipinski.NOCount(m)
orig = int(m.GetProp('NUM_LIPINSKIHACCEPTORS'))
assert calc==orig,'bad num h acceptors for mol %d (%s): %d != %d'%(idx,m.GetProp('SMILES'),calc,orig)
calc = Lipinski.NumHDonors(m)
orig = int(m.GetProp('NUM_HDONORS'))
assert calc==orig,'bad num h donors for mol %d (%s): %d != %d'%(idx,m.GetProp('SMILES'),calc,orig)
calc = Lipinski.NumHAcceptors(m)
orig = int(m.GetProp('NUM_HACCEPTORS'))
assert calc==orig,'bad num h acceptors for mol %d (%s): %d != %d'%(idx,m.GetProp('SMILES'),calc,orig)
calc = Lipinski.NumHeteroatoms(m)
orig = int(m.GetProp('NUM_HETEROATOMS'))
assert calc==orig,'bad num heteroatoms for mol %d (%s): %d != %d'%(idx,m.GetProp('SMILES'),calc,orig)
calc = Lipinski.NumRotatableBonds(m)
orig = int(m.GetProp('NUM_ROTATABLEBONDS'))
assert calc==orig,'bad num rotors for mol %d (%s): %d != %d'%(idx,m.GetProp('SMILES'),calc,orig)
idx += 1
def testIssue2183420(self):
" testing a problem with the acceptor definition "
self.assertTrue(Lipinski.NumHAcceptors(Chem.MolFromSmiles('NC'))==1)
self.assertTrue(Lipinski.NumHAcceptors(Chem.MolFromSmiles('CNC'))==1)
self.assertTrue(Lipinski.NumHAcceptors(Chem.MolFromSmiles('CN(C)C'))==1)
self.assertTrue(Lipinski.NumHAcceptors(Chem.MolFromSmiles('NC(=O)'))==1)
self.assertTrue(Lipinski.NumHAcceptors(Chem.MolFromSmiles('NC(=O)C'))==1)
self.assertTrue(Lipinski.NumHAcceptors(Chem.MolFromSmiles('CNC(=O)'))==1)
self.assertTrue(Lipinski.NumHAcceptors(Chem.MolFromSmiles('CNC(=O)C'))==1)
self.assertTrue(Lipinski.NumHAcceptors(Chem.MolFromSmiles('O=CNC(=O)C'))==2)
self.assertTrue(Lipinski.NumHAcceptors(Chem.MolFromSmiles('O=C(C)NC(=O)C'))==2)
if __name__ == '__main__':
unittest.main()
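# Illustrative sketch, not part of the original test file: the Lipinski
# descriptors exercised above can also be computed directly, e.g. for
# paracetamol; the expected values follow the definitions asserted in
# testIssue2183420.
#
#   from rdkit import Chem
#   from rdkit.Chem import Lipinski
#   mol = Chem.MolFromSmiles('CC(=O)Nc1ccc(O)cc1')
#   Lipinski.NumHDonors(mol)     # 2 (amide N-H and phenol O-H)
#   Lipinski.NumHAcceptors(mol)  # 2 (carbonyl O and phenol O)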
|
soerendip42/rdkit
|
rdkit/Chem/UnitTestLipinski.py
|
Python
|
bsd-3-clause
| 3,208
|
[
"RDKit"
] |
6b4ee94e728e20f16b5d788746d1eb88ae33a650a46bbb5da7e56e46a7d2d72e
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.talent_v4.services.tenant_service import TenantServiceAsyncClient
from google.cloud.talent_v4.services.tenant_service import TenantServiceClient
from google.cloud.talent_v4.services.tenant_service import pagers
from google.cloud.talent_v4.services.tenant_service import transports
from google.cloud.talent_v4.types import tenant
from google.cloud.talent_v4.types import tenant as gct_tenant
from google.cloud.talent_v4.types import tenant_service
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert TenantServiceClient._get_default_mtls_endpoint(None) is None
assert (
TenantServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
TenantServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
TenantServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
TenantServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
TenantServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
)
@pytest.mark.parametrize(
"client_class", [TenantServiceClient, TenantServiceAsyncClient,]
)
def test_tenant_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "jobs.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.TenantServiceGrpcTransport, "grpc"),
(transports.TenantServiceGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_tenant_service_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class", [TenantServiceClient, TenantServiceAsyncClient,]
)
def test_tenant_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "jobs.googleapis.com:443"
def test_tenant_service_client_get_transport_class():
transport = TenantServiceClient.get_transport_class()
available_transports = [
transports.TenantServiceGrpcTransport,
]
assert transport in available_transports
transport = TenantServiceClient.get_transport_class("grpc")
assert transport == transports.TenantServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(TenantServiceClient, transports.TenantServiceGrpcTransport, "grpc"),
(
TenantServiceAsyncClient,
transports.TenantServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
TenantServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TenantServiceClient),
)
@mock.patch.object(
TenantServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TenantServiceAsyncClient),
)
def test_tenant_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(TenantServiceClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(TenantServiceClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(TenantServiceClient, transports.TenantServiceGrpcTransport, "grpc", "true"),
(
TenantServiceAsyncClient,
transports.TenantServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(TenantServiceClient, transports.TenantServiceGrpcTransport, "grpc", "false"),
(
TenantServiceAsyncClient,
transports.TenantServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
TenantServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TenantServiceClient),
)
@mock.patch.object(
TenantServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TenantServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_tenant_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
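# get_mtls_endpoint_and_cert_source() is the classmethod used to resolve which endpoint and
# certificate source a client will use; the test below walks through the supported values of
# GOOGLE_API_USE_CLIENT_CERTIFICATE and GOOGLE_API_USE_MTLS_ENDPOINT.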
@pytest.mark.parametrize(
"client_class", [TenantServiceClient, TenantServiceAsyncClient]
)
@mock.patch.object(
TenantServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TenantServiceClient),
)
@mock.patch.object(
TenantServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(TenantServiceAsyncClient),
)
def test_tenant_service_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
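# The next two tests check that scopes and a credentials file passed via client_options are
# forwarded unchanged to the transport constructor.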
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(TenantServiceClient, transports.TenantServiceGrpcTransport, "grpc"),
(
TenantServiceAsyncClient,
transports.TenantServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_tenant_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
TenantServiceClient,
transports.TenantServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
TenantServiceAsyncClient,
transports.TenantServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_tenant_service_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_tenant_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.talent_v4.services.tenant_service.transports.TenantServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = TenantServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
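# Besides forwarding the credentials file to the transport, the test below also verifies that
# the credentials loaded from that file (not ADC) are the ones passed to
# grpc_helpers.create_channel, together with the service's default scopes and host.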
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
TenantServiceClient,
transports.TenantServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
TenantServiceAsyncClient,
transports.TenantServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_tenant_service_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"jobs.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/jobs",
),
scopes=None,
default_host="jobs.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
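# The per-RPC tests that follow repeat one pattern for each of create_tenant, get_tenant,
# update_tenant, delete_tenant and list_tenants: a basic mocked call (sync and async), a
# completely empty call, routing-header propagation, flattened-argument support, and the
# ValueError raised when a request object is combined with flattened arguments.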
@pytest.mark.parametrize("request_type", [tenant_service.CreateTenantRequest, dict,])
def test_create_tenant(request_type, transport: str = "grpc"):
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gct_tenant.Tenant(
name="name_value", external_id="external_id_value",
)
response = client.create_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.CreateTenantRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gct_tenant.Tenant)
assert response.name == "name_value"
assert response.external_id == "external_id_value"
def test_create_tenant_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_tenant), "__call__") as call:
client.create_tenant()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.CreateTenantRequest()
@pytest.mark.asyncio
async def test_create_tenant_async(
transport: str = "grpc_asyncio", request_type=tenant_service.CreateTenantRequest
):
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gct_tenant.Tenant(name="name_value", external_id="external_id_value",)
)
response = await client.create_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.CreateTenantRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gct_tenant.Tenant)
assert response.name == "name_value"
assert response.external_id == "external_id_value"
@pytest.mark.asyncio
async def test_create_tenant_async_from_dict():
await test_create_tenant_async(request_type=dict)
def test_create_tenant_field_headers():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tenant_service.CreateTenantRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_tenant), "__call__") as call:
call.return_value = gct_tenant.Tenant()
client.create_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_tenant_field_headers_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tenant_service.CreateTenantRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_tenant), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_tenant.Tenant())
await client.create_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_tenant_flattened():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gct_tenant.Tenant()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_tenant(
parent="parent_value", tenant=gct_tenant.Tenant(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].tenant
mock_val = gct_tenant.Tenant(name="name_value")
assert arg == mock_val
def test_create_tenant_flattened_error():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_tenant(
tenant_service.CreateTenantRequest(),
parent="parent_value",
tenant=gct_tenant.Tenant(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_tenant_flattened_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_tenant.Tenant())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_tenant(
parent="parent_value", tenant=gct_tenant.Tenant(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].tenant
mock_val = gct_tenant.Tenant(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_tenant_flattened_error_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_tenant(
tenant_service.CreateTenantRequest(),
parent="parent_value",
tenant=gct_tenant.Tenant(name="name_value"),
)
@pytest.mark.parametrize("request_type", [tenant_service.GetTenantRequest, dict,])
def test_get_tenant(request_type, transport: str = "grpc"):
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = tenant.Tenant(
name="name_value", external_id="external_id_value",
)
response = client.get_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.GetTenantRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, tenant.Tenant)
assert response.name == "name_value"
assert response.external_id == "external_id_value"
def test_get_tenant_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tenant), "__call__") as call:
client.get_tenant()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.GetTenantRequest()
@pytest.mark.asyncio
async def test_get_tenant_async(
transport: str = "grpc_asyncio", request_type=tenant_service.GetTenantRequest
):
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tenant.Tenant(name="name_value", external_id="external_id_value",)
)
response = await client.get_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.GetTenantRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, tenant.Tenant)
assert response.name == "name_value"
assert response.external_id == "external_id_value"
@pytest.mark.asyncio
async def test_get_tenant_async_from_dict():
await test_get_tenant_async(request_type=dict)
def test_get_tenant_field_headers():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tenant_service.GetTenantRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tenant), "__call__") as call:
call.return_value = tenant.Tenant()
client.get_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_tenant_field_headers_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tenant_service.GetTenantRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tenant), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tenant.Tenant())
await client.get_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_tenant_flattened():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = tenant.Tenant()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_tenant(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_tenant_flattened_error():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_tenant(
tenant_service.GetTenantRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_tenant_flattened_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tenant.Tenant())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_tenant(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_tenant_flattened_error_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_tenant(
tenant_service.GetTenantRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [tenant_service.UpdateTenantRequest, dict,])
def test_update_tenant(request_type, transport: str = "grpc"):
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gct_tenant.Tenant(
name="name_value", external_id="external_id_value",
)
response = client.update_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.UpdateTenantRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gct_tenant.Tenant)
assert response.name == "name_value"
assert response.external_id == "external_id_value"
def test_update_tenant_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_tenant), "__call__") as call:
client.update_tenant()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.UpdateTenantRequest()
@pytest.mark.asyncio
async def test_update_tenant_async(
transport: str = "grpc_asyncio", request_type=tenant_service.UpdateTenantRequest
):
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gct_tenant.Tenant(name="name_value", external_id="external_id_value",)
)
response = await client.update_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.UpdateTenantRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gct_tenant.Tenant)
assert response.name == "name_value"
assert response.external_id == "external_id_value"
@pytest.mark.asyncio
async def test_update_tenant_async_from_dict():
await test_update_tenant_async(request_type=dict)
def test_update_tenant_field_headers():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tenant_service.UpdateTenantRequest()
request.tenant.name = "tenant.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_tenant), "__call__") as call:
call.return_value = gct_tenant.Tenant()
client.update_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "tenant.name=tenant.name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_tenant_field_headers_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tenant_service.UpdateTenantRequest()
request.tenant.name = "tenant.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_tenant), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_tenant.Tenant())
await client.update_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "tenant.name=tenant.name/value",) in kw["metadata"]
def test_update_tenant_flattened():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gct_tenant.Tenant()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_tenant(
tenant=gct_tenant.Tenant(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].tenant
mock_val = gct_tenant.Tenant(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_tenant_flattened_error():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_tenant(
tenant_service.UpdateTenantRequest(),
tenant=gct_tenant.Tenant(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_tenant_flattened_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gct_tenant.Tenant())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_tenant(
tenant=gct_tenant.Tenant(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].tenant
mock_val = gct_tenant.Tenant(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_tenant_flattened_error_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_tenant(
tenant_service.UpdateTenantRequest(),
tenant=gct_tenant.Tenant(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize("request_type", [tenant_service.DeleteTenantRequest, dict,])
def test_delete_tenant(request_type, transport: str = "grpc"):
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.DeleteTenantRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_delete_tenant_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_tenant), "__call__") as call:
client.delete_tenant()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.DeleteTenantRequest()
@pytest.mark.asyncio
async def test_delete_tenant_async(
transport: str = "grpc_asyncio", request_type=tenant_service.DeleteTenantRequest
):
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.DeleteTenantRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_tenant_async_from_dict():
await test_delete_tenant_async(request_type=dict)
def test_delete_tenant_field_headers():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tenant_service.DeleteTenantRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_tenant), "__call__") as call:
call.return_value = None
client.delete_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_tenant_field_headers_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tenant_service.DeleteTenantRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_tenant), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_tenant(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_tenant_flattened():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_tenant(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_tenant_flattened_error():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_tenant(
tenant_service.DeleteTenantRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_tenant_flattened_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_tenant), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_tenant(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_tenant_flattened_error_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_tenant(
tenant_service.DeleteTenantRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [tenant_service.ListTenantsRequest, dict,])
def test_list_tenants(request_type, transport: str = "grpc"):
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tenants), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = tenant_service.ListTenantsResponse(
next_page_token="next_page_token_value",
)
response = client.list_tenants(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.ListTenantsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTenantsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_tenants_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tenants), "__call__") as call:
client.list_tenants()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.ListTenantsRequest()
@pytest.mark.asyncio
async def test_list_tenants_async(
transport: str = "grpc_asyncio", request_type=tenant_service.ListTenantsRequest
):
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tenants), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tenant_service.ListTenantsResponse(next_page_token="next_page_token_value",)
)
response = await client.list_tenants(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == tenant_service.ListTenantsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTenantsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_tenants_async_from_dict():
await test_list_tenants_async(request_type=dict)
def test_list_tenants_field_headers():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tenant_service.ListTenantsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tenants), "__call__") as call:
call.return_value = tenant_service.ListTenantsResponse()
client.list_tenants(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_tenants_field_headers_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = tenant_service.ListTenantsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tenants), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
tenant_service.ListTenantsResponse()
)
await client.list_tenants(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_tenants_flattened():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tenants), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = tenant_service.ListTenantsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_tenants(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_tenants_flattened_error():
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_tenants(
tenant_service.ListTenantsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_tenants_flattened_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tenants), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tenant_service.ListTenantsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_tenants(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_tenants_flattened_error_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_tenants(
tenant_service.ListTenantsRequest(), parent="parent_value",
)
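# The pager tests feed a fixed sequence of ListTenantsResponse pages through call.side_effect
# and check that the (async) pager yields all six Tenant results and exposes the per-page
# next_page_token values.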
def test_list_tenants_pager(transport_name: str = "grpc"):
client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tenants), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(), tenant.Tenant(), tenant.Tenant(),],
next_page_token="abc",
),
tenant_service.ListTenantsResponse(tenants=[], next_page_token="def",),
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(),], next_page_token="ghi",
),
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(), tenant.Tenant(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_tenants(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, tenant.Tenant) for i in results)
def test_list_tenants_pages(transport_name: str = "grpc"):
client = TenantServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_tenants), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(), tenant.Tenant(), tenant.Tenant(),],
next_page_token="abc",
),
tenant_service.ListTenantsResponse(tenants=[], next_page_token="def",),
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(),], next_page_token="ghi",
),
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(), tenant.Tenant(),],
),
RuntimeError,
)
pages = list(client.list_tenants(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_tenants_async_pager():
    client = TenantServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tenants), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(), tenant.Tenant(), tenant.Tenant(),],
next_page_token="abc",
),
tenant_service.ListTenantsResponse(tenants=[], next_page_token="def",),
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(),], next_page_token="ghi",
),
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(), tenant.Tenant(),],
),
RuntimeError,
)
async_pager = await client.list_tenants(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, tenant.Tenant) for i in responses)
@pytest.mark.asyncio
async def test_list_tenants_async_pages():
    client = TenantServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_tenants), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(), tenant.Tenant(), tenant.Tenant(),],
next_page_token="abc",
),
tenant_service.ListTenantsResponse(tenants=[], next_page_token="def",),
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(),], next_page_token="ghi",
),
tenant_service.ListTenantsResponse(
tenants=[tenant.Tenant(), tenant.Tenant(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_tenants(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
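# From here on the tests cover transport wiring rather than individual RPCs: invalid
# credential/transport combinations, custom transport instances, ADC fallback, the abstract
# base transport, scope handling, channel creation and mTLS configuration.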
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.TenantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.TenantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TenantServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.TenantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = TenantServiceClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = TenantServiceClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.TenantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = TenantServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.TenantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = TenantServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.TenantServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.TenantServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.TenantServiceGrpcTransport,
transports.TenantServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = TenantServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.TenantServiceGrpcTransport,)
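# Base transport contract: passing both credentials and a credentials_file is rejected, and
# every RPC method (plus close()) on the abstract transport raises NotImplementedError.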
def test_tenant_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.TenantServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_tenant_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.talent_v4.services.tenant_service.transports.TenantServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.TenantServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"create_tenant",
"get_tenant",
"update_tenant",
"delete_tenant",
"list_tenants",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_tenant_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.talent_v4.services.tenant_service.transports.TenantServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TenantServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/jobs",
),
quota_project_id="octopus",
)
def test_tenant_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.talent_v4.services.tenant_service.transports.TenantServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.TenantServiceTransport()
adc.assert_called_once()
def test_tenant_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
TenantServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/jobs",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.TenantServiceGrpcTransport,
transports.TenantServiceGrpcAsyncIOTransport,
],
)
def test_tenant_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/jobs",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.TenantServiceGrpcTransport, grpc_helpers),
(transports.TenantServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_tenant_service_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"jobs.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/jobs",
),
scopes=["1", "2"],
default_host="jobs.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.TenantServiceGrpcTransport,
transports.TenantServiceGrpcAsyncIOTransport,
],
)
def test_tenant_service_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_tenant_service_host_no_port():
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint="jobs.googleapis.com"),
)
assert client.transport._host == "jobs.googleapis.com:443"
def test_tenant_service_host_with_port():
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="jobs.googleapis.com:8000"
),
)
assert client.transport._host == "jobs.googleapis.com:8000"
def test_tenant_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.TenantServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_tenant_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.TenantServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.TenantServiceGrpcTransport,
transports.TenantServiceGrpcAsyncIOTransport,
],
)
def test_tenant_service_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.TenantServiceGrpcTransport,
transports.TenantServiceGrpcAsyncIOTransport,
],
)
def test_tenant_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_tenant_path():
project = "squid"
tenant = "clam"
expected = "projects/{project}/tenants/{tenant}".format(
project=project, tenant=tenant,
)
actual = TenantServiceClient.tenant_path(project, tenant)
assert expected == actual
def test_parse_tenant_path():
expected = {
"project": "whelk",
"tenant": "octopus",
}
path = TenantServiceClient.tenant_path(**expected)
# Check that the path construction is reversible.
actual = TenantServiceClient.parse_tenant_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "oyster"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = TenantServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nudibranch",
}
path = TenantServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = TenantServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "cuttlefish"
expected = "folders/{folder}".format(folder=folder,)
actual = TenantServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "mussel",
}
path = TenantServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = TenantServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "winkle"
expected = "organizations/{organization}".format(organization=organization,)
actual = TenantServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nautilus",
}
path = TenantServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = TenantServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "scallop"
expected = "projects/{project}".format(project=project,)
actual = TenantServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "abalone",
}
path = TenantServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = TenantServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "squid"
location = "clam"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = TenantServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "whelk",
"location": "octopus",
}
path = TenantServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = TenantServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.TenantServiceTransport, "_prep_wrapped_messages"
) as prep:
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.TenantServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = TenantServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = TenantServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = TenantServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(TenantServiceClient, transports.TenantServiceGrpcTransport),
(TenantServiceAsyncClient, transports.TenantServiceGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
googleapis/python-talent
|
tests/unit/gapic/talent_v4/test_tenant_service.py
|
Python
|
apache-2.0
| 92,876
|
[
"Octopus"
] |
ccd004438582b5bde9ef833efe423c66f75eef3d149b3113d319aff1e36e92e9
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
@pytest.fixture(autouse=True, scope='package')
def orca_context_fixture(request):
import os
from zoo.orca import OrcaContext, init_orca_context, stop_orca_context
OrcaContext._eager_mode = True
access_key_id = os.getenv("AWS_ACCESS_KEY_ID")
secret_access_key = os.getenv("AWS_SECRET_ACCESS_KEY")
if access_key_id is not None and secret_access_key is not None:
env = {"AWS_ACCESS_KEY_ID": access_key_id,
"AWS_SECRET_ACCESS_KEY": secret_access_key}
else:
env = None
sc = init_orca_context(cores=4, spark_log_level="INFO",
env=env, object_store_memory="1g")
yield sc
stop_orca_context()
|
intel-analytics/analytics-zoo
|
pyzoo/test/zoo/friesian/feature/conftest.py
|
Python
|
apache-2.0
| 1,289
|
[
"ORCA"
] |
5b9d0625b5d33424c02950ca0c66a14aeb9e5da514040baae527b7641ae1edeb
|
# SPDX-License-Identifier: BSD-3-Clause
#
# Config file for GUS config generator.
#
# Names of GUS instrument files to use for playing MIDI.
# These are the names of the .pat files in the \ultrasnd\midi directory
# that are loaded into the card.
GUS_INSTR_PATCHES = {
0: "acpiano", # #001 - Acoustic Grand Piano
1: "britepno", # #002 - Bright Acoustic Piano
2: "synpiano", # #003 - Electric Grand Piano
3: "honky", # #004 - Honky-tonk Piano
4: "epiano1", # #005 - Electric Piano 1
5: "epiano2", # #006 - Electric Piano 2
6: "hrpschrd", # #007 - Harpsichord
7: "clavinet", # #008 - Clavi
8: "celeste", # #009 - Celesta
9: "glocken", # #010 - Glockenspiel
10: "musicbox", # #011 - Music Box
11: "vibes", # #012 - Vibraphone
12: "marimba", # #013 - Marimba
13: "xylophon", # #014 - Xylophone
14: "tubebell", # #015 - Tubular Bells
15: "santur", # #016 - Dulcimer
16: "homeorg", # #017 - Drawbar Organ
17: "percorg", # #018 - Percussive Organ
18: "rockorg", # #019 - Rock Organ
19: "church", # #020 - Church Organ
20: "reedorg", # #021 - Reed Organ
21: "accordn", # #022 - Accordion
22: "harmonca", # #023 - Harmonica
23: "concrtna", # #024 - Tango Accordion
24: "nyguitar", # #025 - Acoustic Guitar (nylon)
25: "acguitar", # #026 - Acoustic Guitar (steel)
26: "jazzgtr", # #027 - Electric Guitar (jazz)
27: "cleangtr", # #028 - Electric Guitar (clean)
28: "mutegtr", # #029 - Electric Guitar (muted)
29: "odguitar", # #030 - Overdriven Guitar
30: "distgtr", # #031 - Distortion Guitar
31: "gtrharm", # #032 - Guitar harmonics
32: "acbass", # #033 - Acoustic Bass
33: "fngrbass", # #034 - Electric Bass (finger)
34: "pickbass", # #035 - Electric Bass (pick)
35: "fretless", # #036 - Fretless Bass
36: "slapbas1", # #037 - Slap Bass 1
37: "slapbas2", # #038 - Slap Bass 2
38: "synbass1", # #039 - Synth Bass 1
39: "synbass2", # #040 - Synth Bass 2
40: "violin", # #041 - Violin
41: "viola", # #042 - Viola
42: "cello", # #043 - Cello
43: "contraba", # #044 - Contrabass
44: "tremstr", # #045 - Tremolo Strings
45: "pizzcato", # #046 - Pizzicato Strings
46: "harp", # #047 - Orchestral Harp
47: "timpani", # #048 - Timpani
48: "marcato", # #049 - String Ensemble 1
49: "slowstr", # #050 - String Ensemble 2
50: "synstr1", # #051 - SynthStrings 1
51: "synstr2", # #052 - SynthStrings 2
52: "choir", # #053 - Choir Aahs
53: "doo", # #054 - Voice Oohs
54: "voices", # #055 - Synth Voice
55: "orchhit", # #056 - Orchestra Hit
56: "trumpet", # #057 - Trumpet
57: "trombone", # #058 - Trombone
58: "tuba", # #059 - Tuba
59: "mutetrum", # #060 - Muted Trumpet
60: "frenchrn", # #061 - French Horn
61: "hitbrass", # #062 - Brass Section
62: "synbras1", # #063 - SynthBrass 1
63: "synbras2", # #064 - SynthBrass 2
64: "sprnosax", # #065 - Soprano Sax
65: "altosax", # #066 - Alto Sax
66: "tenorsax", # #067 - Tenor Sax
67: "barisax", # #068 - Baritone Sax
68: "oboe", # #069 - Oboe
69: "englhorn", # #070 - English Horn
70: "bassoon", # #071 - Bassoon
71: "clarinet", # #072 - Clarinet
72: "piccolo", # #073 - Piccolo
73: "flute", # #074 - Flute
74: "recorder", # #075 - Recorder
75: "woodflut", # #076 - Pan Flute
76: "bottle", # #077 - Blown Bottle
77: "shakazul", # #078 - Shakuhachi
78: "whistle", # #079 - Whistle
79: "ocarina", # #080 - Ocarina
80: "sqrwave", # #081 - Lead 1 (square)
81: "sawwave", # #082 - Lead 2 (sawtooth)
82: "calliope", # #083 - Lead 3 (calliope)
83: "chiflead", # #084 - Lead 4 (chiff)
84: "charang", # #085 - Lead 5 (charang)
85: "voxlead", # #086 - Lead 6 (voice)
86: "lead5th", # #087 - Lead 7 (fifths)
87: "basslead", # #088 - Lead 8 (bass + lead)
88: "fantasia", # #089 - Pad 1 (new age)
89: "warmpad", # #090 - Pad 2 (warm)
90: "polysyn", # #091 - Pad 3 (polysynth)
91: "ghostie", # #092 - Pad 4 (choir)
92: "bowglass", # #093 - Pad 5 (bowed)
93: "metalpad", # #094 - Pad 6 (metallic)
94: "halopad", # #095 - Pad 7 (halo)
95: "sweeper", # #096 - Pad 8 (sweep)
96: "aurora", # #097 - FX 1 (rain)
97: "soundtrk", # #098 - FX 2 (soundtrack)
98: "crystal", # #099 - FX 3 (crystal)
99: "atmosphr", # #100 - FX 4 (atmosphere)
100: "freshair", # #101 - FX 5 (brightness)
101: "unicorn", # #102 - FX 6 (goblins)
102: "echovox", # #103 - FX 7 (echoes)
103: "startrak", # #104 - FX 8 (sci-fi)
104: "sitar", # #105 - Sitar
105: "banjo", # #106 - Banjo
106: "shamisen", # #107 - Shamisen
107: "koto", # #108 - Koto
108: "kalimba", # #109 - Kalimba
109: "bagpipes", # #110 - Bag pipe
110: "fiddle", # #111 - Fiddle
111: "shannai", # #112 - Shanai
112: "carillon", # #113 - Tinkle Bell
113: "agogo", # #114 - Agogo
114: "steeldrm", # #115 - Steel Drums
115: "woodblk", # #116 - Woodblock
116: "taiko", # #117 - Taiko Drum
117: "toms", # #118 - Melodic Tom
118: "syntom", # #119 - Synth Drum
119: "revcym", # #120 - Reverse Cymbal
120: "fx-fret", # #121 - Guitar Fret Noise
121: "fx-blow", # #122 - Breath Noise
122: "seashore", # #123 - Seashore
123: "jungle", # #124 - Bird Tweet
124: "telephon", # #125 - Telephone Ring
125: "helicptr", # #126 - Helicopter
126: "applause", # #127 - Applause
127: "pistol", # #128 - Gunshot
128: "blank",
163: "kick1", # #35 Acoustic Bass Drum
164: "kick2", # #36 Bass Drum 1
165: "stickrim", # #37 Side Stick
166: "snare1", # #38 Acoustic Snare
167: "claps", # #39 Hand Clap
168: "snare2", # #40 Electric Snare
169: "tomlo2", # #41 Low Floor Tom
170: "hihatcl", # #42 Closed Hi Hat
171: "tomlo1", # #43 High Floor Tom
172: "hihatpd", # #44 Pedal Hi-Hat
173: "tommid2", # #45 Low Tom
174: "hihatop", # #46 Open Hi-Hat
175: "tommid1", # #47 Low-Mid Tom
176: "tomhi2", # #48 Hi-Mid Tom
177: "cymcrsh1", # #49 Crash Cymbal 1
178: "tomhi1", # #50 High Tom
179: "cymride1", # #51 Ride Cymbal 1
180: "cymchina", # #52 Chinese Cymbal
181: "cymbell", # #53 Ride Bell
182: "tamborin", # #54 Tambourine
183: "cymsplsh", # #55 Splash Cymbal
184: "cowbell", # #56 Cowbell
185: "cymcrsh2", # #57 Crash Cymbal 2
186: "vibslap", # #58 Vibraslap
187: "cymride2", # #59 Ride Cymbal 2
188: "bongohi", # #60 Hi Bongo
189: "bongolo", # #61 Low Bongo
190: "congahi1", # #62 Mute Hi Conga
191: "congahi2", # #63 Open Hi Conga
192: "congalo", # #64 Low Conga
193: "timbaleh", # #65 High Timbale
194: "timbalel", # #66 Low Timbale
195: "agogohi", # #67 High Agogo
196: "agogolo", # #68 Low Agogo
197: "cabasa", # #69 Cabasa
198: "maracas", # #70 Maracas
199: "whistle1", # #71 Short Whistle
200: "whistle2", # #72 Long Whistle
201: "guiro1", # #73 Short Guiro
202: "guiro2", # #74 Long Guiro
203: "clave", # #75 Claves
204: "woodblk1", # #76 Hi Wood Block
205: "woodblk2", # #77 Low Wood Block
206: "cuica1", # #78 Mute Cuica
207: "cuica2", # #79 Open Cuica
208: "triangl1", # #80 Mute Triangle
209: "triangl2", # #81 Open Triangle
}
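# Illustrative sketch (not part of the original config): the comment at the
# top of this table describes these names as .pat files under the
# \ultrasnd\midi directory, so resolving a General MIDI melodic program
# number (0-127) to a patch file might look like the following. The drive
# letter is an assumption.
def _example_patch_file(program):
    return "C:\\ultrasnd\\midi\\{}.pat".format(GUS_INSTR_PATCHES[program])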
# These are the data sizes of the patch files distributed with the
# GUS drivers. These are used to calculate the size in RAM of the
# generated patch sets and check that they are within the RAM limit.
PATCH_FILE_SIZES = {
"acbass": 5248, "accordn": 9616, "acguitar": 26080,
"acpiano": 32256, "agogo": 13696, "agogohi": 3488,
"agogolo": 3488, "altosax": 5616, "applause": 30064,
"atmosphr": 31360, "aurora": 31088, "bagpipes": 7760,
"banjo": 32016, "barisax": 10544, "basslead": 26496,
"bassoon": 8000, "belltree": 31888, "blank": 1520,
"bongohi": 3456, "bongolo": 4448, "bottle": 12368,
"bowglass": 24688, "britepno": 36000, "cabasa": 8448,
"calliope": 22992, "carillon": 5888, "castinet": 6016,
"celeste": 9936, "cello": 9120, "charang": 45056,
"chiflead": 31536, "choir": 22480, "church": 2144,
"claps": 5696, "clarinet": 9184, "clave": 2352,
"clavinet": 1440, "cleangtr": 22768, "concrtna": 8784,
"congahi1": 4224, "congahi2": 4704, "congalo": 4704,
"contraba": 4704, "cowbell": 3168, "crystal": 30224,
"cuica1": 9344, "cuica2": 12848, "cymbell": 17248,
"cymchina": 24112, "cymcrsh1": 31520, "cymcrsh2": 31040,
"cymride1": 17664, "cymride2": 17664, "cymsplsh": 31520,
"distgtr": 18848, "doo": 8464, "echovox": 14976,
"englhorn": 12096, "epiano1": 7344, "epiano2": 21936,
"fantasia": 23456, "fiddle": 5904, "flute": 6032,
"fngrbass": 9744, "frenchrn": 14128, "freshair": 28992,
"fretless": 2640, "fx-blow": 28688, "fx-fret": 13648,
"ghostie": 31488, "glocken": 5184, "gtrharm": 4928,
"guiro1": 4128, "guiro2": 9248, "halopad": 29984,
"harmonca": 7408, "harp": 11728, "helicptr": 25008,
"highq": 1808, "hihatcl": 4560, "hihatop": 20048,
"hihatpd": 1808, "hitbrass": 31520, "homeorg": 992,
"honky": 65680, "hrpschrd": 3584, "jazzgtr": 27712,
"jingles": 16944, "jungle": 13616, "kalimba": 2208,
"kick1": 4544, "kick2": 5024, "koto": 20832,
"lead5th": 6464, "maracas": 4560, "marcato": 61232,
"marimba": 2064, "metalpad": 30288, "metbell": 112,
"metclick": 112, "musicbox": 15312, "mutegtr": 17008,
"mutetrum": 9168, "nyguitar": 19200, "oboe": 3952,
"ocarina": 1616, "odguitar": 12640, "orchhit": 14208,
"percorg": 7520, "piccolo": 4320, "pickbass": 16416,
"pistol": 18144, "pizzcato": 19888, "polysyn": 30224,
"recorder": 2656, "reedorg": 1568, "revcym": 13536,
"rockorg": 30288, "santur": 21760, "sawwave": 27056,
"scratch1": 4384, "scratch2": 2288, "seashore": 31040,
"shakazul": 31136, "shaker": 3104, "shamisen": 13136,
"shannai": 9792, "sitar": 18288, "slap": 5856,
"slapbas1": 27872, "slapbas2": 20592, "slowstr": 18192,
"snare1": 8544, "snare2": 4096, "soundtrk": 19888,
"sprnosax": 7072, "sqrclick": 112, "sqrwave": 15056,
"startrak": 27376, "steeldrm": 11952, "stickrim": 2848,
"sticks": 4224, "surdo1": 9600, "surdo2": 9600,
"sweeper": 31216, "synbass1": 6160, "synbass2": 2928,
"synbras1": 30704, "synbras2": 30160, "synpiano": 5456,
"synstr1": 31216, "synstr2": 16416, "syntom": 30512,
"taiko": 18672, "tamborin": 8944, "telephon": 4416,
"tenorsax": 8448, "timbaleh": 5264, "timbalel": 9728,
"timpani": 7072, "tomhi1": 6576, "tomhi2": 6560,
"tomlo1": 6560, "tomlo2": 9600, "tommid1": 6560,
"tommid2": 6560, "toms": 6576, "tremstr": 61232,
"triangl1": 2224, "triangl2": 15792, "trombone": 12896,
"trumpet": 6608, "tuba": 5760, "tubebell": 9120,
"unicorn": 30096, "vibes": 10640, "vibslap": 9456,
"viola": 27952, "violin": 12160, "voices": 14976,
"voxlead": 14992, "warmpad": 18080, "whistle": 5872,
"whistle1": 2000, "whistle2": 928, "woodblk": 3680,
"woodblk1": 2352, "woodblk2": 3680, "woodflut": 1936,
"xylophon": 9376,
}
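# Illustrative sketch (not part of the original config): the comment above
# says these sizes are used to check that a generated patch set fits in
# card RAM. Such a check might look like this; the 256KB default mirrors
# the smallest config size mentioned in the grouping comment below and is
# otherwise an assumption.
def _example_patch_set_fits(patch_names, ram_limit=256 * 1024):
    total = sum(PATCH_FILE_SIZES[name] for name in patch_names)
    return total <= ram_limit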
# Groups of "similar sounding" instruments. The first instrument in each
# group is the "leader" and will be used as the fallback for other
# instruments in the group if they are not popular enough to be included.
#
# These groups are based on having listened to the instruments in the
# GUS patch set using the generated comparison mid (see comparison.py),
# with similar sounding instruments being grouped together.
#
# If you want to improve the generated config, here's where to start.
# Separating out into more, smaller groups helps, but the 256KB
# config's limited size is quite restrictive. In particular, it's
# important that the "leader" instrument for each group is small
# (see table above of patch sizes).
SIMILAR_GROUPS = [
# Pianos.
('synpiano', 'acpiano', 'britepno', 'honky', 'epiano1', 'epiano2',
'celeste', 'glocken'),
# Harpsichord sounds noticeably different to pianos:
('hrpschrd', 'clavinet'),
# Xylophone etc.
('marimba', 'musicbox', 'vibes', 'xylophon', 'tubebell', 'carillon',
'santur', 'kalimba'),
# Organs.
('homeorg', 'percorg', 'rockorg', 'church', 'reedorg'),
# Accordion/Harmonica:
('accordn', 'harmonca', 'concrtna'),
# Guitars.
('nyguitar', 'acguitar', 'jazzgtr', 'cleangtr', 'mutegtr'),
# Overdriven/distortion guitars sound different. Besides, we
# definitely want at least one of these.
('odguitar', 'distgtr', 'gtrharm'),
# Basses.
('synbass2', 'acbass', 'fngrbass', 'pickbass', 'fretless', 'slapbas1',
'slapbas2', 'synbass1', 'basslead'),
# Violin and similar string instruments.
('violin', 'viola', 'cello', 'contraba', 'tremstr', 'pizzcato',
'harp'),
# Other stringed (?)
('synstr2', 'slowstr', 'marcato', 'synstr1', 'choir', 'doo', 'voices',
'orchhit', 'polysyn', 'bowglass'),
# Trumpet and other brass.
('trumpet', 'trombone', 'tuba', 'mutetrum', 'frenchrn', 'hitbrass',
'synbras1', 'synbras2'),
# Reed instruments.
('altosax', 'sprnosax', 'tenorsax', 'barisax', 'oboe', 'englhorn',
'bassoon', 'clarinet'),
# Pipe instruments.
('recorder', 'flute', 'piccolo', 'woodflut', 'bottle', 'shakazul',
'whistle', 'ocarina', 'bagpipes', 'fiddle', 'shannai',
'calliope', 'chiflead', 'charang'),
# Leads:
('sqrwave', 'sawwave', 'voxlead', 'lead5th'),
# Odd stringed instruments.
('sitar', 'banjo', 'shamisen', 'koto'),
# Special effects. Blank unless popular enough to appear.
('blank', 'fantasia', 'warmpad', 'ghostie',
'metalpad', 'halopad', 'sweeper', 'aurora', 'soundtrk', 'crystal',
'atmosphr', 'freshair', 'unicorn', 'echovox', 'startrak', 'fx-fret',
'fx-blow', 'seashore', 'jungle', 'telephon', 'helicptr', 'applause',
'pistol'),
# Percussion sounds.
# Kick:
('kick2', 'steeldrm', 'taiko', 'kick1'),
# Conga:
('congahi2', 'congahi1', 'congalo'),
# Snare drums:
('snare2', 'claps', 'snare1'),
# Toms:
('tomlo1', 'toms', 'syntom', 'tomlo2', 'tommid1', 'tommid2', 'tomhi2',
'tomhi1', 'timpani'),
# Cymbal crash:
('cymsplsh', 'cymcrsh2', 'cymcrsh1', 'revcym', 'cymchina'),
# Cymbal ride:
('cymride1', 'cymride2', 'cymbell'),
# Hi-hat:
('hihatpd', 'hihatcl', 'hihatop'),
# Metallic sounding:
('bongohi', 'bongolo', 'timbaleh', 'timbalel', 'cowbell',
'agogohi', 'agogolo', 'agogo', 'triangl1', 'triangl2'),
# Click:
('stickrim', 'woodblk1', 'woodblk2', 'woodblk', 'tamborin', 'clave'),
# Random things.
('cabasa', 'whistle1', 'whistle2', 'vibslap', 'maracas',
'guiro1', 'guiro2', 'cuica1', 'cuica2'),
]
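# Illustrative sketch (not part of the original config): the grouping comment
# above describes falling back to each group's first ("leader") instrument
# when a member is not popular enough to include. One way to resolve such a
# fallback is a simple name -> leader lookup:
_example_fallback_leader = {name: group[0] for group in SIMILAR_GROUPS for name in group}
# e.g. _example_fallback_leader['distgtr'] == 'odguitar'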
|
CWolfRU/freedoom
|
lumps/dmxgus/config.py
|
Python
|
bsd-3-clause
| 14,982
|
[
"CRYSTAL"
] |
9520e8f3983d03bf5309a7a7fbf487705b3dd401890603d4014c7e9536aaacde
|
"""
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of a raccoon face image, first using online :ref:`DictionaryLearning` and
then various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the result of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). In addition, it is closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
try: # SciPy >= 0.16 has face in misc
from scipy.misc import face
face = face(gray=True)
except ImportError:
face = sp.face(gray=True)
# Convert from uint8 representation with values between 0 and 255 to
# a floating point representation with values between 0 and 1.
face = face / 255.
# downsample for higher speed
face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]
face /= 4.0
height, width = face.shape
# Distort the right half of the image
print('Distorting image...')
distorted = face.copy()
distorted[:, width // 2:] += 0.075 * np.random.randn(height, width // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :width // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
# #############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from face patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
# #############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, face, 'Distorted image')
# #############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, width // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = face.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, width // 2:] = reconstruct_from_patches_2d(
patches, (height, width // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], face,
title + ' (time: %.1fs)' % dt)
plt.show()
|
chenyyx/scikit-learn-doc-zh
|
examples/zh/decomposition/plot_image_denoising.py
|
Python
|
gpl-3.0
| 5,878
|
[
"Gaussian"
] |
cac9a763471a7761a296128b36e54623e5679bdbbbc2faf7e28e8d09785ba2c2
|
import os
from math import pi, cos, sin
from ase import Atom, Atoms
from ase.parallel import rank, barrier
from gpaw import GPAW
from gpaw.test import equal, gen
import numpy as np
# Generate setup for oxygen with half a core-hole:
gen('O', name='hch1s', corehole=(1, 0, 0.5))
if 1:
a = 5.0
d = 0.9575
t = pi / 180 * 104.51
H2O = Atoms([Atom('O', (0, 0, 0)),
Atom('H', (d, 0, 0)),
Atom('H', (d * cos(t), d * sin(t), 0))],
cell=(a, a, a), pbc=False)
H2O.center()
calc = GPAW(nbands=10, h=0.2, setups={'O': 'hch1s'})
H2O.set_calculator(calc)
e = H2O.get_potential_energy()
niter = calc.get_number_of_iterations()
calc.write('h2o.gpw')
else:
calc = GPAW('h2o.gpw')
calc.initialize_positions()
from gpaw.xas import RecursionMethod
if 1:
r = RecursionMethod(calc)
r.run(400)
r.write('h2o.pckl')
else:
r = RecursionMethod(filename='h2o.pckl')
if 0:
from pylab import *
x = -30 + 40 * np.arange(300) / 300.0
for n in range(50, 401, 50):
y = r.get_spectra(x, imax=n)
plot(x, y[0], label=str(n))
legend()
show()
print(e, niter)
energy_tolerance = 0.0002
niter_tolerance = 0
equal(e, -17.9621, energy_tolerance)
|
robwarm/gpaw-symm
|
gpaw/test/h2o_xas_recursion.py
|
Python
|
gpl-3.0
| 1,259
|
[
"ASE",
"GPAW"
] |
11d9285d902ff95401265c18a94b1a4f42db90e989ffff2280b76b14d3bb5719
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from paddle.utils import gast
from paddle.fluid import unique_name
from paddle.fluid.dygraph.dygraph_to_static.utils import index_in_list
from paddle.fluid.dygraph.dygraph_to_static.utils import ForNodeVisitor
from paddle.fluid.dygraph.dygraph_to_static.utils import BaseNodeVisitor
from paddle.fluid.dygraph.dygraph_to_static.variable_trans_func import create_fill_constant_node
__all__ = ['BreakContinueTransformer']
BREAK_NAME_PREFIX = '__break'
CONTINUE_NAME_PREFIX = '__continue'
class ForToWhileTransformer(gast.NodeTransformer):
"""
    Transform a python for loop into a while loop and add a condition node to
    the loop test
"""
def __init__(self, parent_node, loop_node, condition_node):
assert isinstance(
loop_node,
gast.For), "loop_node is not gast.For in ForToWhileTransformer"
self.parent_node = parent_node
self.loop_node = loop_node
self.condition_node = condition_node
def transform(self):
if hasattr(self.parent_node, 'body'):
body_list = self.parent_node.body
i = index_in_list(body_list, self.loop_node)
if i != -1:
new_stmts = self.get_for_stmt_nodes(body_list[i])
body_list[i:i + 1] = new_stmts
i += len(new_stmts)
return new_stmts
if hasattr(self.parent_node, 'orelse'):
body_list = self.parent_node.orelse
i = index_in_list(body_list, self.loop_node)
if i != -1:
new_stmts = self.get_for_stmt_nodes(body_list[i])
body_list[i:i + 1] = new_stmts
i += len(new_stmts)
return new_stmts
raise ValueError(
"parent_node doesn't contain the loop_node in ForToWhileTransformer")
def get_for_stmt_nodes(self, node):
assert isinstance(
node, gast.For), "Input node is NOT gast.For in get_for_stmt_nodes"
# 1. parse current gast.For node
current_for_node_parser = ForNodeVisitor(node)
stmts_tuple = current_for_node_parser.parse()
if stmts_tuple is None:
return [node]
init_stmts, cond_stmt, body_stmts = stmts_tuple
# 2. append break statement
new_cond_stmt = gast.BoolOp(
op=gast.And(), values=[cond_stmt, self.condition_node])
# 3. construct gast.While node
while_node = gast.While(
test=new_cond_stmt, body=body_stmts, orelse=node.orelse)
init_stmts.append(while_node)
return init_stmts
class BreakContinueTransformer(BaseNodeVisitor):
"""
    Rewrite the 'break' and 'continue' keywords using if-else statements so
    that the transformed code is equivalent to the original control flow
    The main idea of this class is:
    1. Map the 'break/continue' stmt to a unique boolean variable V.
    2. Find the first ancestor block containing this 'break/continue' (a
    block is a node holding a stmt list), remove all stmts after the
    'break/continue' and set V to True there.
    3. Add 'if not V' for stmts in ancestor blocks between the first one
    (exclusive) and the ancestor loop (inclusive)
    4. For 'break', fold 'not V' into the loop condition. For 'continue',
    reset V to False at the beginning of each loop iteration
    TODO: more details should be summarized as design document
    Note: The class is inherited from BaseNodeVisitor instead of NodeTransformer,
    because ancestor nodes will be modified inplace for `Break/Continue` here.
    In general, we recommend inheriting NodeTransformer to modify nodes!
"""
def __init__(self, wrapper_root):
super(BreakContinueTransformer, self).__init__()
self.wrapper_root = wrapper_root
self.root = wrapper_root.node
def transform(self):
self.visit(self.root)
def visit_Break(self, node):
loop_node_index = _find_ancestor_loop_index(node, self.ancestor_nodes)
assert loop_node_index != -1, "SyntaxError: 'break' outside loop"
loop_node = self.ancestor_nodes[loop_node_index]
        # 1. Map the 'break/continue' stmt to a unique boolean variable V.
variable_name = unique_name.generate(BREAK_NAME_PREFIX)
# 2. Find the first ancestor block containing this 'break/continue', a
# block can be a node containing stmt list. We should remove all stmts
# after the 'break/continue' and set the V to True here.
first_block_index = self._remove_stmts_after_break_continue(
node, variable_name, loop_node_index)
# 3. Add 'if not V' for stmts in ancestor blocks between the first one
# (exclusive) and the ancestor loop (inclusive)
self._replace_if_stmt(loop_node_index, first_block_index, variable_name)
        # 4. For 'break', initialize V to False before the loop and fold 'not V' into the loop condition.
assign_false_node = create_fill_constant_node(variable_name, False)
self._add_stmt_before_cur_node(loop_node_index, assign_false_node)
cond_var_node = gast.UnaryOp(
op=gast.Not(),
operand=gast.Name(
id=variable_name,
ctx=gast.Load(),
annotation=None,
type_comment=None))
if isinstance(loop_node, gast.While):
loop_node.test = gast.BoolOp(
op=gast.And(), values=[loop_node.test, cond_var_node])
elif isinstance(loop_node, gast.For):
parent_node = self.ancestor_nodes[loop_node_index - 1]
for_to_while = ForToWhileTransformer(parent_node, loop_node,
cond_var_node)
for_to_while.transform()
def visit_Continue(self, node):
loop_node_index = _find_ancestor_loop_index(node, self.ancestor_nodes)
assert loop_node_index != -1, "SyntaxError: 'continue' outside loop"
loop_node = self.ancestor_nodes[loop_node_index]
        # 1. Map the 'break/continue' stmt to a unique boolean variable V.
variable_name = unique_name.generate(CONTINUE_NAME_PREFIX)
# 2. Find the first ancestor block containing this 'break/continue', a
# block can be a node containing stmt list. We should remove all stmts
# after the 'break/continue' and set the V to True here.
first_block_index = self._remove_stmts_after_break_continue(
node, variable_name, loop_node_index)
# 3. Add 'if not V' for stmts in ancestor blocks between the first one
# (exclusive) and the ancestor loop (inclusive)
self._replace_if_stmt(loop_node_index, first_block_index, variable_name)
        # 4. For 'continue', reset V to False at the beginning of each loop iteration
assign_false_node = create_fill_constant_node(variable_name, False)
loop_node.body.insert(0, assign_false_node)
def _remove_stmts_after_break_continue(
self, break_continue_node, break_continue_name, loop_node_index):
for first_block_index in range(
len(self.ancestor_nodes) - 1, loop_node_index - 1, -1):
first_block = self.ancestor_nodes[first_block_index]
if hasattr(first_block,
"body") and self._replace_break_continue_in_stmt_list(
first_block.body, break_continue_node,
break_continue_name):
return first_block_index
if hasattr(first_block,
"orelse") and self._replace_break_continue_in_stmt_list(
first_block.orelse, break_continue_node,
break_continue_name):
return first_block_index
return first_block_index
def _replace_if_stmt(self, loop_node_index, first_block_index,
break_continue_name):
for i in range(first_block_index - 1, loop_node_index - 1, -1):
cur_node = self.ancestor_nodes[i]
son_node = self.ancestor_nodes[i + 1]
if hasattr(cur_node,
'body') and self._replace_after_node_to_if_in_stmt_list(
cur_node.body, son_node, break_continue_name):
continue
if hasattr(
cur_node,
'orelse') and self._replace_after_node_to_if_in_stmt_list(
cur_node.orelse, son_node, break_continue_name):
continue
def _replace_break_continue_in_stmt_list(
self, stmt_list, break_continue_node, break_continue_name):
i = index_in_list(stmt_list, break_continue_node)
if i == -1:
return False
assign_true_node = create_fill_constant_node(break_continue_name, True)
stmt_list[i:] = [assign_true_node]
return True
def _replace_after_node_to_if_in_stmt_list(self, stmt_list, node,
break_continue_name):
i = index_in_list(stmt_list, node)
if i == -1:
return False
if i == len(stmt_list) - 1:
# No need to add, we consider this as added successfully
return True
if_stmt = gast.If(test=gast.UnaryOp(
op=gast.Not(),
operand=gast.Name(
id=break_continue_name,
ctx=gast.Store(),
annotation=None,
type_comment=None)),
body=stmt_list[i + 1:],
orelse=[])
stmt_list[i + 1:] = []
stmt_list.append(if_stmt)
return True
def _add_stmt_before_cur_node(self, cur_node_index, stmt_node):
cur_node = self.ancestor_nodes[cur_node_index]
parent_node = self.ancestor_nodes[cur_node_index - 1]
if hasattr(parent_node,
"body") and self._add_stmt_into_list_before_node(
parent_node.body, cur_node, stmt_node):
return True
if hasattr(parent_node,
"orelse") and self._add_stmt_into_list_before_node(
parent_node.orelse, cur_node, stmt_node):
return True
return False
def _add_stmt_into_list_before_node(self, stmt_list, node, stmt_node):
i = index_in_list(stmt_list, node)
if i == -1:
return False
stmt_list.insert(i, stmt_node)
return True
def _find_ancestor_loop_index(node, ancestor_nodes):
for i in range(len(ancestor_nodes) - 1, -1, -1):
if isinstance(ancestor_nodes[i], (gast.For, gast.While)):
return i
return -1
class BreakTransformOptimizer(BaseNodeVisitor):
"""
    In a specific pattern, the transformed code can be optimized by joining the
    If.test with the While.test.
    The currently supported pattern is:
```
while cond1: while cond1 and not cond2:
if cond2: ---> do_something()
break
do_something()
```
See following example:
>>> def foo(x):
... i = paddle.to_tensor(1, dtype='int32')
... while i < 10:
... if x.mean() > 5:
... break
... x += i
... i += 1
... return x
The generated code after applying optimization will be:
```
def foo(x):
i = paddle.to_tensor(1, dtype='int32')
while i < 10 and not x.mean() > 5:
x += i
i += 1
return x
```
    This avoids wrapping all ops after the `break` statement into `cond_op`,
    which usually brings very heavy overhead.
"""
def __init__(self, wrapper_root):
super(BreakTransformOptimizer, self).__init__()
self.wrapper_root = wrapper_root
self.root = wrapper_root.node
def transform(self):
self.visit(self.root)
def visit_Break(self, node):
loop_node_index = _find_ancestor_loop_index(node, self.ancestor_nodes)
assert loop_node_index != -1, "SyntaxError: 'break' outside loop"
loop_node = self.ancestor_nodes[loop_node_index]
if self._is_break_cond_pattern(node, loop_node):
cond_var_node = self._join_with_while_cond(node, loop_node)
if isinstance(loop_node, gast.While):
loop_node.test = gast.BoolOp(
op=gast.And(), values=[loop_node.test, cond_var_node])
elif isinstance(loop_node, gast.For):
parent_node = self.ancestor_nodes[loop_node_index - 1]
for_to_while = ForToWhileTransformer(parent_node, loop_node,
cond_var_node)
for_to_while.transform()
def _is_break_cond_pattern(self, break_node, loop_node):
"""
        Judge whether the node matches the pattern for joining `If.test` with `While.test`
"""
# while/for -> if -> break
if len(self.ancestor_nodes) < 3 or self.ancestor_nodes[-3] != loop_node:
return False
assert self.ancestor_nodes[-1] == break_node
parent_if_node = self.ancestor_nodes[-2]
is_matched = False
if isinstance(parent_if_node, gast.If):
# gast.If only contains `break`
break_first_in_if = parent_if_node.body[0] == break_node and len(
parent_if_node.orelse) == 0
# gast.If is first node of loop_node
if_first_in_loop = loop_node.body[0] == parent_if_node
is_matched = if_first_in_loop and break_first_in_if
return is_matched
def _join_with_while_cond(self, break_node, loop_node):
"""
Join the `If.test` with `While.test` together.
"""
parent_if_node = self.ancestor_nodes[-2]
cond_var_node = gast.UnaryOp(op=gast.Not(), operand=parent_if_node.test)
# remove the gast.If node that contains the gast.Break.
assert loop_node.body[0] == parent_if_node
loop_node.body.pop(0)
return cond_var_node
|
luotao1/Paddle
|
python/paddle/fluid/dygraph/dygraph_to_static/break_continue_transformer.py
|
Python
|
apache-2.0
| 14,751
|
[
"VisIt"
] |
6c2afcb726ab0e5b42134014b747b9e16f08685c303ff81ffc56a0e2ac657317
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os, sys
from TestHarness import util
from FileTester import FileTester
class AnalyzeJacobian(FileTester):
@staticmethod
def validParams():
params = FileTester.validParams()
params.addRequiredParam('input', "The input file to use for this test.")
params.addParam('test_name', "The name of the test - populated automatically")
params.addParam('expect_out', "A regular expression that must occur in the input in order for the test to be considered passing.")
params.addParam('resize_mesh', False, "Resize the input mesh")
params.addParam('off_diagonal', True, "Also test the off-diagonal Jacobian entries")
        params.addParam('mesh_size', 1, "The size to use when resizing the input mesh")
return params
def __init__(self, name, params):
FileTester.__init__(self, name, params)
def getOutputFiles(self):
        # analyzejacobian.py outputs files prefixed with the input file name
return [self.specs['input']]
def prepare(self, options):
        # We do not know what file(s) analyzejacobian.py produces
return
# Check if numpy is available
def checkRunnable(self, options):
try:
import numpy
assert numpy # silence pyflakes warning
return True
except Exception:
self.addCaveats('skipped (no numpy)')
return False
def getCommand(self, options):
specs = self.specs
# Create the command line string to run
command = os.path.join(specs['moose_dir'], 'python', 'jacobiandebug', 'analyzejacobian.py')
# Check for built application
if not options.dry_run and not os.path.exists(command):
print('Application not found: ' + str(specs['executable']))
sys.exit(1)
mesh_options = ' -m %s' % options.method
if specs['resize_mesh'] :
mesh_options += ' -r -s %d' % specs['mesh_size']
if not specs['off_diagonal'] :
mesh_options += ' -D'
command += mesh_options + ' ' + specs['input'] + ' -e ' + specs['executable'] + ' '
if len(specs['cli_args']):
command += '--cli-args "' + (' '.join(specs['cli_args']) + '"')
return command
def processResults(self, moose_dir, options, output):
reason = ''
specs = self.specs
if specs.isValid('expect_out'):
out_ok = util.checkOutputForPattern(output, specs['expect_out'])
if (out_ok and self.exit_code != 0):
reason = 'OUT FOUND BUT CRASH'
elif (not out_ok):
reason = 'NO EXPECTED OUT'
if reason == '':
if self.exit_code != 0 :
reason = 'CRASH'
if reason != '':
self.setStatus(self.fail, reason)
return output
|
nuclear-wizard/moose
|
python/TestHarness/testers/AnalyzeJacobian.py
|
Python
|
lgpl-2.1
| 3,154
|
[
"MOOSE"
] |
56d1a840db06af91b8fe512d1713fa9f184f860e1c8b98e130aca90c79cc634c
|
# -*- coding: utf-8 -*-
"""
Called by ~/local/vim/rc/custom_misc_functions.vim
References:
# The vim python module documentation
http://vimdoc.sourceforge.net/htmldoc/if_pyth.html
ToLookAt:
https://github.com/ivanov/ipython-vimception
FIXME:
    the indexing is inconsistent: in some places row2 means the last line
    included, rather than the first line you don't want
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from os.path import expanduser, exists, join, isdir, abspath
import sys
import os
import re
import itertools as it
# try:
# import importlib
# reload = importlib.reload
# except (AttributeError, ImportError) as ex:
# # print('ex = {!r}'.format(ex))
# # import imp
# raise
# # reload = imp.reload
def get_bibtex_dict():
import ubelt as ub
# HACK: custom current bibtex file
possible_bib_fpaths = [
ub.truepath('./My_Library_clean.bib'),
#ub.truepath('~/latex/crall-thesis-2017/My_Library_clean.bib'),
]
bib_fpath = None
for bib_fpath_ in possible_bib_fpaths:
if exists(bib_fpath_):
bib_fpath = bib_fpath_
break
if bib_fpath is None:
raise Exception('cant find bibtex file')
# import bibtexparser
from bibtexparser import bparser
parser = bparser.BibTexParser()
parser.ignore_nonstandard_types = True
bib_text = ub.readfrom(bib_fpath)
bibtex_db = parser.parse(bib_text)
bibtex_dict = bibtex_db.get_entry_dict()
return bibtex_dict
def available_fonts():
win32_fonts = [
r'Inconsolata:h10',
r'Mono_Dyslexic:h10:cANSI',
r'OpenDyslexicMono\ 10',
# r'monofur:h11',
#r'Mono\ Dyslexic:h10',
#r'Inconsolata:h11',
#r'Source_Code_Pro:h11:cANSI',
#r'peep:h11:cOEM',
#r'Consolas',
#r'Liberation Mono',
#r'Lucida_Console:h10',
#r'Fixedsys',
#r'Courier:h10:cANSI',
#r'Courier New',
#r'DejaVu Sans Mono',
]
#win32_alts = {
# 'monodyslexic': [r'Mono_Dyslexic:h10:cANSI']
#}
linux_fonts = [
r'Inconsolata\ Medium\ 9',
r'Inconsolata\ Medium\ 11',
r'MonoDyslexic\ 9.4',
# r'OpenDyslexicMono\ 10',
r'FreeMono\ Bold\ 10',
# r'monofur\ 11',
# r'EversonMono',
]
#linux_extended = [
# r'MonoDyslexic\ 10',
# r'Inconsolata\ Medium\ 10',
# r'Courier\ New\ 11',
# #r'OpenDyslexic\ 10',
# #r'Neep\ 11',
# #r'Nimbus\ Mono\ L\ 11',
# r'Ubuntu\ Mono\ 9',
# r'Neep\ Alt\ Medium\ Semi-Condensed\ 11'
# r'White\ Rabbit\ 10',
#]
#linux_fonts = sorted(linux_fonts + linux_extended)
if sys.platform.startswith('win32'):
known_fonts = win32_fonts
else:
known_fonts = linux_fonts
return known_fonts
def pyrun_fuzzyfont(request):
"""
Sets a font from an index or a string
"""
import vim
try:
import six
string_types = six.string_types
except Exception:
string_types = (str,)
from operator import itemgetter
def vimprint(message):
#print('message = %r' % (message,))
        # this doesn't even work: #vim.command(':silent !echom %r' % message)
# vim.command(':echom %r' % message)
pass
vimprint('--- Called Fuzyzfont ---')
known_fonts = available_fonts()
vimprint('numfonts=%r' % (len(known_fonts)))
vimprint('request=%r %r' % (type(request), request))
    int_str = list(map(str, range(0, 10)))  # list() so repeated membership tests work on Python 3
try:
is_integer_str = all([_ in int_str for _ in request])
except TypeError:
is_integer_str = False
if isinstance(request, string_types) and not is_integer_str:
        # Calculate edit distance to each known font
try:
import Levenshtein # Edit distance algorithm
except ImportError:
vim.command(":echom 'error no python module Levenshtein"
"(pip install python-levenshtein)'")
else:
edit_distance = Levenshtein.distance
known_dists = [edit_distance(known.lower(), request.lower())
for known in known_fonts]
# Pick the minimum distance
min_index = min(enumerate(known_dists), key=itemgetter(1))[0]
fontindex = min_index
else:
fontindex = int(request) % len(known_fonts)
fontstr = known_fonts[fontindex]
# Set as current font
vimprint('fontindex=%r fontstr=%r' % (fontindex, fontstr))
vim.command('set gfn=' + fontstr)
vimprint('--- /Called Fuzyzfont ---')
def get_line_at_cursor():
import vim
buf = vim.current.buffer
(row, col) = vim.current.window.cursor
line = buf[row - 1]
return line
def get_first_nonempty_line_after_cursor():
import vim
buf = vim.current.buffer
(row, col) = vim.current.window.cursor
for i in range(len(buf) - row):
line = buf[row + i]
if line:
return line
def get_indentation(line_):
"""
returns the number of preceding spaces
"""
return len(line_) - len(line_.lstrip())
def get_minimum_indentation(text):
r"""
returns the number of preceding spaces
Args:
text (str): unicode text
Returns:
int: indentation
CommandLine:
xdoctest -m utool.util_str --exec-get_minimum_indentation --show
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_str import * # NOQA
>>> text = ' foo\n bar'
>>> result = get_minimum_indentation(text)
>>> print(result)
3
"""
lines = text.split('\n')
indentations = [get_indentation(line_)
for line_ in lines if len(line_.strip()) > 0]
if len(indentations) == 0:
return 0
return min(indentations)
def get_cursor_py_indent():
"""
checks current and next line for indentation
"""
# Check current line for cues
    curr_line = get_line_at_cursor()
    if curr_line is None:
        curr_line = ''
    curr_indent = get_minimum_indentation(curr_line)
if curr_line.strip().endswith(':'):
curr_indent += 4
# Check next line for cues
next_line = get_first_nonempty_line_after_cursor()
if next_line is None:
next_line = ''
next_indent = get_minimum_indentation(next_line)
if next_indent <= curr_indent + 8:
# hack for overindented lines
min_indent = max(curr_indent, next_indent)
else:
min_indent = curr_indent
indent = (' ' * min_indent)
if curr_line.strip().startswith('>>>'):
indent += '>>> '
return indent
def get_word_at_cursor(url_ok=False):
""" returns the word highlighted by the curor """
import vim
buf = vim.current.buffer
(row, col) = vim.current.window.cursor
line = buf[row - 1] # Original end of the file
if url_ok:
nonword_chars_left = ' \t\n\r{},"\'\\'
nonword_chars_right = nonword_chars_left
else:
nonword_chars_left = ' \t\n\r[](){}:;,"\'\\/=$*'
nonword_chars_right = ' \t\n\r[](){}:;,"\'\\/=$*.'
word = get_word_in_line_at_col(line, col,
nonword_chars_left=nonword_chars_left,
nonword_chars_right=nonword_chars_right)
return word
def get_word_in_line_at_col(line, col,
nonword_chars_left=' \t\n\r[](){}:;,"\'\\/',
nonword_chars_right=None):
r"""
Args:
line (?):
col (?):
CommandLine:
python ~/local/vim/rc/pyvim_funcs.py --test-get_word_in_line_at_col
Example:
>>> # DISABLE_DOCTEST
>>> from pyvim_funcs import * # NOQA
>>> line = 'myvar.foo = yourvar.foobar'
>>> line = 'def loadfunc(self):'
>>> col = 6
>>> nonword_chars=' \t\n\r[](){}:;.,"\'\\/'
>>> word = get_word_in_line_at_col(line, col, nonword_chars)
>>> result = ('word = %r' % (word,))
>>> print(result)
"""
if nonword_chars_right is None:
nonword_chars_right = nonword_chars_left
lpos = col
rpos = col
while lpos > 0:
# Expand to the left
if line[lpos] in nonword_chars_left:
lpos += 1
break
lpos -= 1
while rpos < len(line):
# Expand to the right
if line[rpos] in nonword_chars_right:
break
rpos += 1
word = line[lpos:rpos]
return word
# --- Find file markers
def find_pyclass_above_row(line_list, row):
""" originally part of the vim plugin """
    # Get text position
pattern = '^class [a-zA-Z_]'
classline, classpos = find_pattern_above_row(pattern, line_list, row, maxIter=None)
return classline, classpos
def parse_callname(searchline, sentinal='def '):
"""
Parses the function or class name from a signature line
originally part of the vim plugin
"""
rparen_pos = searchline.find('(')
if rparen_pos > 0:
callname = searchline[len(sentinal):rparen_pos].strip(' ')
return callname
return None
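# Illustrative usage (hypothetical inputs): parse_callname('def loadfunc(self):')
# returns 'loadfunc', and parse_callname('class Foo(object):', sentinal='class ')
# returns 'Foo'.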
def find_pattern_above_row(pattern, line_list='current', row='current', maxIter=50):
"""
searches a few lines above the curror until it **matches** a pattern
"""
if row == 'current':
import vim
row = vim.current.window.cursor[0] - 1
line_list = vim.current.buffer
# Iterate until we match.
# Janky way to find function / class name
for ix in it.count(0):
pos = row - ix
if maxIter is not None and ix > maxIter:
break
if pos < 0:
break
searchline = line_list[pos]
if re.match(pattern, searchline) is not None:
return searchline, pos
return None
def find_pyfunc_above_row(line_list, row, orclass=False):
"""
originally part of the vim plugin
CommandLine:
python ~/local/vim/rc/pyvim_funcs.py find_pyfunc_above_row
Example:
>>> import ubelt as ub
>>> import six
>>> func = find_pyfunc_above_row
>>> fpath = six.get_function_globals(func)['__file__'].replace('.pyc', '.py')
>>> line_list = ub.readfrom(fpath, aslines=True)
>>> row = six.get_function_code(func).co_firstlineno + 1
>>> funcname, searchlines, func_pos, foundline = find_pyfunc_above_row(line_list, row)
>>> print(funcname)
find_pyfunc_above_row
"""
import ubelt as ub
searchlines = [] # for debugging
funcname = None
# Janky way to find function name
func_sentinal = 'def '
method_sentinal = ' def '
class_sentinal = 'class '
for ix in range(200):
func_pos = row - ix
searchline = line_list[func_pos]
searchline = ub.ensure_unicode(searchline)
cleanline = searchline.strip(' ')
searchlines.append(cleanline)
if searchline.startswith(func_sentinal): # and cleanline.endswith(':'):
# Found a valid function name
funcname = parse_callname(searchline, func_sentinal)
if funcname is not None:
break
if orclass and searchline.startswith(class_sentinal):
# Found a valid class name (as funcname)
funcname = parse_callname(searchline, class_sentinal)
if funcname is not None:
break
if searchline.startswith(method_sentinal): # and cleanline.endswith(':'):
# Found a valid function name
funcname = parse_callname(searchline, method_sentinal)
if funcname is not None:
classline, classpos = find_pyclass_above_row(line_list, func_pos)
classname = parse_callname(classline, class_sentinal)
if classname is not None:
funcname = '.'.join([classname, funcname])
break
else:
funcname = None
foundline = searchline
return funcname, searchlines, func_pos, foundline
def find_pyfunc_above_cursor():
import vim
    # Get text position
(row, col) = vim.current.window.cursor
line_list = vim.current.buffer
funcname, searchlines, pos, foundline = find_pyfunc_above_row(line_list, row, True)
info = {
'funcname': funcname,
'searchlines': searchlines,
'pos': pos,
'foundline': foundline,
}
return info
def is_paragraph_end(line_):
# Hack, par_marker_list should be an argument
import ubelt as ub
striped_line = ub.ensure_unicode(line_.strip())
isblank = striped_line == ''
if isblank:
return True
par_marker_list = [
#'\\noindent',
'\\begin{equation}',
'\\end{equation}',
'% ---',
]
return any(striped_line.startswith(marker)
for marker in par_marker_list)
def find_paragraph_end(row_, direction=1):
"""
returns the line that a paragraph ends on in some direction
TODO Rectify with ut.find_block_end
"""
import vim
line_list = vim.current.buffer
line_ = line_list[row_ - 1]
if (row_ == 0 or row_ == len(line_list) - 1):
return row_
if is_paragraph_end(line_):
return row_
while True:
if (row_ == -1 or row_ == len(line_list)):
break
line_ = line_list[row_ - 1]
if is_paragraph_end(line_):
break
row_ += direction
row_ -= direction
return row_
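# Editor's note (illustrative, not part of the original file): the function
# walks one line at a time in ``direction`` (+1 down, -1 up) until it hits a
# blank line or a paragraph marker, then steps back one line so the returned
# row is still inside the paragraph. Rows follow vim's 1-based convention.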
def get_paragraph_line_range_at_cursor():
"""
Fixme row2 should be the first row you do not want
"""
# Get cursor position
import vim
(row, col) = vim.current.window.cursor
row1 = find_paragraph_end(row, -1)
row2 = find_paragraph_end(row, +1)
return row1, row2
# --- Text extractors
def get_selected_text(select_at_cursor=False):
""" make sure the vim function calling this has a range after ()
Currently used by <ctrl+g>
References:
http://stackoverflow.com/questions/18165973/vim-obtain-string-between-visual-selection-range-with-python
SeeAlso:
~/local/vim/rc/custom_misc_functions.vim
Test paragraph.
Far out in the uncharted backwaters of the unfashionable end of the western
spiral arm of the Galaxy lies a small unregarded yellow sun. Orbiting this at a
distance of roughly ninety-two million miles is an utterly insignificant little
blue green planet whose ape-descended life forms are so amazingly primitive
that they still think digital watches are a pretty neat idea.
% ---
one. two three. four.
"""
import vim
buf = vim.current.buffer
(lnum1, col1) = buf.mark('<')
(lnum2, col2) = buf.mark('>')
text = get_text_between_lines(lnum1, lnum2, col1, col2)
return text
def get_text_between_lines(lnum1, lnum2, col1=0, col2=sys.maxsize - 1):
import vim
lines = vim.eval('getline({}, {})'.format(lnum1, lnum2))
import ubelt as ub
lines = list(map(ub.ensure_unicode, lines))
try:
if len(lines) == 0:
pass
elif len(lines) == 1:
lines[0] = lines[0][col1:col2 + 1]
else:
lines[0] = lines[0][col1:]
lines[-1] = lines[-1][:col2 + 1]
text = '\n'.join(lines)
except Exception:
print(ub.repr2(lines))
raise
return text
def get_codelines_around_buffer(rows_before=0, rows_after=10):
import vim
(row, col) = vim.current.window.cursor
codelines = [vim.current.buffer[row - ix] for ix in range(rows_before, rows_after)]
return codelines
# --- INSERT TEXT CODE
def get_cursor_position():
import vim
(row, col) = vim.current.window.cursor
return row, col
class CursorContext(object):
"""
moves back to original position after context is done
"""
def __init__(self, offset=0):
self.pos = None
self.offset = offset
def __enter__(self):
self.pos = get_cursor_position()
return self
def __exit__(self, *exc_info):
row, col = self.pos
row += self.offset
move_cursor(row, col)
def close_matching_folds(pattern, search_range=None, limit=1):
"""
Looks in a range of lines for a pattern and executes a close fold command
anywhere that matches.
Example:
>>> import os, sys
>>> sys.path.append(os.path.expanduser('~/local/vim/rc'))
>>> import pyvim_funcs
>>> pyvim_funcs.dummy_import_vim(pyvim_funcs.__file__)
>>> import vim
>>> #pyvim_funcs.close_matching_folds('def ')
"""
import vim
if isinstance(search_range, (tuple, list)):
search_range = slice(*search_range)
if search_range is not None:
text = '\n'.join(vim.current.buffer[search_range])
offset = search_range.start
else:
text = '\n'.join(vim.current.buffer)
offset = 0
flags = re.MULTILINE | re.DOTALL
patre = re.compile(pattern, flags=flags)
# The context will remember and reset the current cursor position
# with CursorContext():
for count, match in enumerate(patre.finditer(text)):
if limit is not None and count >= limit:
break
# Find the matching line
lineno = text[:match.start()].count('\n') + offset + 1
# Move to the fold
move_cursor(lineno)
# close the fold
try:
vim.command(':foldclose')
except vim.error:
pass
def move_cursor(row, col=0):
import vim
vim.command('cal cursor({},{})'.format(row, col))
def insert_codeblock_over_selection(text):
import vim
buf = vim.current.buffer
# These are probably 1 based
(row1, col1) = buf.mark('<')
(row2, col2) = buf.mark('>')
insert_codeblock_between_lines(text, row1, row2)
#buffer_tail = vim.current.buffer[row2:] # Original end of the file
#lines = [line.encode('utf-8') for line in text.split('\n')]
#new_tail = lines + buffer_tail
#del(vim.current.buffer[row1 - 1:]) # delete old data
#vim.current.buffer.append(new_tail) # append new data
def insert_codeblock_between_lines(text, row1, row2):
import vim
buffer_tail = vim.current.buffer[row2:] # Original end of the file
lines = [line.encode('utf-8') for line in text.split('\n')]
new_tail = lines + buffer_tail
del(vim.current.buffer[row1 - 1:]) # delete old data
vim.current.buffer.append(new_tail) # append new data
# TODO:
#ut.insert_block_between_lines(text, row1, row2, vim.buffer, inplace=True)
def find_python_import_row():
"""
Find lines where import block begins (after __future__)
"""
in_comment = False
import vim
row = 0
for row, line in enumerate(vim.current.buffer):
if not in_comment:
if line.strip().startswith('#'):
pass
elif line.strip().startswith('"""'):
in_comment = '"'
elif line.strip().startswith("''''"):
in_comment = "'"
elif line.startswith('from __future__'):
pass
elif line.startswith('import'):
break
elif line.startswith('from'):
break
else:
break
else:
if line.strip().endswith(in_comment * 3):
in_comment = False
return row
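# Editor's note (illustrative sketch, not part of the original file): for a
# current buffer such as
#   ['#!/usr/bin/env python', 'from __future__ import division', 'import os']
# the function returns 2, the row index where the regular import block begins.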
def prepend_import_block(text):
import vim
row = find_python_import_row()
    # FIXME: doesn't work right when row=0
buffer_tail = vim.current.buffer[row:]
lines = [line.encode('utf-8') for line in text.split('\n')]
print('lines = {!r}'.format(lines))
new_tail = lines + buffer_tail
del(vim.current.buffer[row:]) # delete old data
# vim's buffer __del__ method seems to not work when the slice is 0:None.
# It should remove everything, but it seems that one item still exists
# It seems we can never remove that last item, so we have to hack.
hackaway_row0 = row == 0 and len(vim.current.buffer) == 1
# print(len(vim.current.buffer))
# print('vim.current.buffer = {!r}'.format(vim.current.buffer[:]))
vim.current.buffer.append(new_tail) # append new data
if hackaway_row0:
del vim.current.buffer[0]
class DummyVimBuffer(object):
def __init__(self, _list):
self._list = _list
def __repr__(self):
return repr(self._list)
def __str__(self):
return str(self._list)
def __delitem__(self, idx):
del self._list[idx]
def __getitem__(self, idx):
return self._list[idx]
def append(self, item):
return self._list.extend(item)
def dummy_import_vim(fpath=None):
if fpath is not None:
fpath = abspath(expanduser(fpath))
try:
import vim
dohack = False
except ImportError:
dohack = True
vim = None
if vim is not None:
if getattr(vim, '__ishack__', False):
if fpath != vim.current.buffer.name:
dohack = True
if dohack:
import sys
import utool as ut
vim = ut.DynStruct()
vim.__ishack__ = True
vim.current = ut.DynStruct()
vim.current.window = ut.DynStruct()
vim.current.window.cursor = (0, 0)
if fpath is None:
lines = [
'line1',
'line2',
'line3',
]
else:
lines = ut.readfrom(fpath).splitlines()
vim.current.buffer = DummyVimBuffer(lines)
vim.current.buffer.name = fpath
# VERY HACKY
sys.modules['vim'] = vim
return vim
def _insert_codeblock(vim, text, pos):
"""
Example:
>>> import os, sys
>>> sys.path.append(os.path.expanduser('~/local/vim/rc'))
>>> from pyvim_funcs import *
>>> from pyvim_funcs import _insert_codeblock
>>> vim = dummy_import_vim()
>>> text = 'foobar'
>>> pos = 0
>>> _insert_codeblock(vim, text, pos)
>>> print(vim.current.buffer)
"""
lines = [line.encode('utf-8') for line in text.split('\n')]
buffer_tail = vim.current.buffer[pos:] # Original end of the file
new_tail = lines + buffer_tail # Prepend our data
del(vim.current.buffer[pos:]) # delete old data
print(type(vim.current.buffer))
vim.current.buffer.append(new_tail) # extend new data
def insert_codeblock_above_cursor(text):
"""
Inserts code into a vim buffer
"""
import vim
(row, col) = vim.current.window.cursor
pos = row - 1
# Rows are 1 indexed?
_insert_codeblock(vim, text, pos)
def insert_codeblock_under_cursor(text):
"""
Inserts code into a vim buffer
"""
import vim
(row, col) = vim.current.window.cursor
lines = [line.encode('utf-8') for line in text.split('\n')]
buffer_tail = vim.current.buffer[row:] # Original end of the file
new_tail = lines + buffer_tail # Prepend our data
del(vim.current.buffer[row:]) # delete old data
vim.current.buffer.append(new_tail) # extend new data
def append_text(text):
""" Appends to existing text in the current buffer with new text """
import vim
lines = text.split('\n')
vim.current.buffer.append(lines)
def overwrite_text(text):
""" Overwrites existing text in the current buffer with new text """
import vim
lines = text.split('\n')
del (vim.current.buffer[:])
vim.current.buffer.append(lines)
# --- Docstr Stuff
def get_current_fpath():
import vim
fpath = vim.current.buffer.name
return fpath
def is_module_pythonfile():
from os.path import splitext
import vim
modpath = vim.current.buffer.name
ext = splitext(modpath)[1]
ispyfile = ext == '.py'
verbose = False
if verbose:
print('is_module_pythonfile?')
print(' * modpath = %r' % (modpath,))
print(' * ext = %r' % (ext,))
print(' * ispyfile = %r' % (ispyfile,))
return ispyfile
def get_current_filetype():
import vim
# from os.path import splitext
# modpath = vim.current.buffer.name
# ext = splitext(modpath)[1]
filetype = vim.eval('&ft')
return filetype
def get_current_modulename():
"""
returns current module being edited
buffer_name = ub.truepath('~/local/vim/rc/pyvim_funcs.py')
"""
import vim
import ubelt as ub
buffer_name = vim.current.buffer.name
modname = ub.modpath_to_modname(buffer_name)
moddir, rel_modpath = ub.split_modpath(buffer_name)
# moddir = dirname(buffer_name)
return modname, moddir
def auto_cmdline():
import ubelt as ub
from xdoctest import static_analysis as static
import vim
modname, moddir = get_current_modulename()
findfunc_info = find_pyfunc_above_cursor()
funcname = findfunc_info['funcname']
if static.is_modname_importable(modname, exclude=['.']):
text = ub.codeblock(
'''
CommandLine:
xdoctest -m {modname} {funcname}
''').format(funcname=funcname, modname=modname)
else:
modpath = ub.compressuser(vim.current.buffer.name)
text = ub.codeblock(
'''
CommandLine:
xdoctest -m {modpath} {funcname}
''').format(funcname=funcname, modpath=modpath)
def get_indent(line):
"""
returns the preceding whitespace
"""
n_whitespace = len(line) - len(line.lstrip())
prefix = line[:n_whitespace]
return prefix
prefix = get_indent(findfunc_info['foundline'])
text = ub.indent(text, prefix + ' ')
return text
def auto_docstr(**kwargs):
import ubelt as ub
import vim
modname = None
funcname = None
flag = False
dbgtext = ''
docstr = ''
dbgmsg = ''
def make_docstr_block(header, block):
indented_block = '\n' + ub.indent(block)
docstr_block = ''.join([header, ':', indented_block])
return docstr_block
def new_autodoc(modname, funcname, moddir=None, modpath=None):
# get_indentation() # TODO
num_indent = 4
command = 'xdoctest -m {modname} {funcname}'.format(
**locals())
docstr = make_docstr_block('CommandLine', command)
docstr = ub.indent(docstr, ' ' * num_indent)
return docstr
try:
findfunc_info = find_pyfunc_above_cursor()
funcname = findfunc_info['funcname']
modname, moddir = get_current_modulename()
modpath = vim.current.buffer.name
print('modpath = {!r}'.format(modpath))
if funcname is None:
funcname = '[vimerr] UNKNOWN_FUNC: funcname is None'
flag = True
else:
# Text to insert into the current buffer
verbose = True
autodockw = dict(verbose=verbose)
autodockw.update(kwargs)
docstr = new_autodoc(modname, funcname, moddir=moddir,
modpath=modpath)
if docstr[:].strip() == 'error':
flag = True
except vim.error as ex:
dbgmsg = 'vim_error: ' + str(ex)
flag = False
except Exception as ex:
dbgmsg = 'exception(%r): %s' % (type(ex), str(ex))
print(repr(ex))
flag = False
if flag:
dbgtext += '\n+======================'
dbgtext += '\n| --- DEBUG OUTPUT --- '
if len(dbgmsg) > 0:
dbgtext += '\n| Message: '
dbgtext += dbgmsg
dbgtext += '\n+----------------------'
dbgtext += '\n| InsertDoctstr(modname=%r, funcname=%r' % (modname, funcname)
dbgtext += '\n| ' + ub.repr2(findfunc_info, nl=1)
dbgtext += '\nL----------------------'
elif len(dbgmsg) > 0:
dbgtext += '\n| Message: '
dbgtext += dbgmsg
text = '\n'.join([docstr + dbgtext])
if text == '':
print('No Text! For some reason flag=%r' % (flag,))
return text
def open_fpath(fpath, mode='e', nofoldenable=False, verbose=0):
"""
Execs new splits / tabs / etc
    Weird: this won't work with directories (on my machine):
https://superuser.com/questions/1243344/vim-wont-split-open-a-directory-from-python-but-it-works-interactively
Args:
fpath : file path to open
mode: how to open the new file
(valid options: split, vsplit, tabe, e, new, ...)
Ignore:
~/.bashrc
~/code
"""
import vim
fpath = expanduser(fpath)
if not exists(fpath):
print("FPATH DOES NOT EXIST")
# command = '{cmd} {fpath}'.format(cmd=cmd, fpath=fpath)
if isdir(fpath):
# Hack around directory problem
if mode.startswith('e'):
command = ':Explore! {fpath}'.format(fpath=fpath)
elif mode.startswith('sp'):
command = ':Hexplore! {fpath}'.format(fpath=fpath)
elif mode.startswith('vs'):
command = ':Vexplore! {fpath}'.format(fpath=fpath)
else:
raise NotImplementedError('implement fpath cmd for me')
else:
command = ":exec ':{mode} {fpath}'".format(mode=mode, fpath=fpath)
if verbose:
print('command = {!r}\n'.format(command))
try:
vim.command(command)
except Exception as ex:
print('FAILED TO OPEN PATH')
print('ex = {!r}'.format(ex))
raise
pass
if nofoldenable:
vim.command(":set nofoldenable")
def ensure_normalmode():
"""
References:
http://stackoverflow.com/questions/14013294/vim-how-to-detect-the-mode-in-which-the-user-is-in-for-statusline
"""
allmodes = {
'n' : 'Normal',
'no' : 'NOperatorPending',
'v' : 'Visual',
'V' : 'VLine',
#'^V' : 'VBlock',
's' : 'Select',
'S' : 'SLine',
#'^S' : 'SBlock',
'i' : 'Insert',
'R' : 'Replace',
'Rv' : 'VReplace',
'c' : 'Command',
'cv' : 'VimEx',
'ce' : 'Ex',
'r' : 'Prompt',
'rm' : 'More',
'r?' : 'Confirm',
'!' : 'Shell',
}
import vim
current_mode_code = vim.eval('mode()')
current_mode = allmodes.get(current_mode_code, current_mode_code)
if current_mode == 'Normal':
return
else:
        print('current_mode_code = %r' % current_mode_code)
print('current_mode = %r' % current_mode)
#vim.command("ESC")
def open_fpath_list(fpath_list, num_hsplits=2):
"""
Very hacky function to nicely open a bunch of files
Not well tested
    num_hsplits is for horizontal splits
"""
import vim
from six.moves import range
index = 0
try:
assert index < len(fpath_list)
# First file opens new tab
open_fpath(fpath_list[index], mode='tabe')
index += 1
# Second file opens a vsplit
assert index < len(fpath_list)
open_fpath(fpath=fpath_list[index], mode='vsplit')
index += 1
if num_hsplits == 3:
assert index < len(fpath_list)
open_fpath(fpath=fpath_list[index], mode='vsplit')
index += 1
# The next 3 splits are horizontal splits
for index in range(index, index + 3):
assert index < len(fpath_list)
open_fpath(fpath=fpath_list[index], mode='split')
# Move to the left screen
vim.command(":exec ':wincmd l'")
# Continue doing horizontal splits
for index in range(index, index + 3):
assert index < len(fpath_list)
open_fpath(fpath=fpath_list[index], mode='split')
except AssertionError:
pass
if index < len(fpath_list):
print('WARNING: Too many files specified')
print('Can only handle %d' % index)
def vim_grep(pat, mode='normal', hashid=None):
import vim
import utool as ut
import ubelt as ub
ut.ENABLE_COLORS = False
ut.util_str.ENABLE_COLORS = False
if hashid is None:
hashid = ub.hash_data(pat)
print('Grepping for pattern = %r' % (pat,))
import os
def _grep_dpath(dpath):
grep_tup = ut.grep([pat], dpath_list=[dpath],
exclude_patterns=['*.pyc'], verbose=False)
reflags = 0
(found_fpath_list, found_lines_list, found_lxs_list) = grep_tup
regex_list = [pat]
_exprs_flags = [ut.util_regex.extend_regex2(expr, reflags)
for expr in regex_list]
extended_regex_list = ut.take_column(_exprs_flags, 0)
grep_result = ut.GrepResult(found_fpath_list, found_lines_list,
found_lxs_list, extended_regex_list,
reflags=reflags)
text = '\n'.join([
            'Grepping Directory "{}"'.format(dpath),
'tofind_list={}'.format(ub.repr2(extended_regex_list)),
grep_result.make_resultstr(colored=False),
'=============',
'found_fpath_list = {}'.format(ub.repr2(found_fpath_list, nl=1))
])
return text
if mode == 'normal':
text = _grep_dpath(os.getcwd())
elif mode == 'repo':
for path in ut.ancestor_paths(limit={'~/code', '~'}):
if exists(join(path, '.git')):
break
text = _grep_dpath(path)
elif mode == 'project':
msg_list = ut.grep_projects([pat], verbose=False, colored=False)
text = '\n'.join(msg_list)
else:
raise KeyError('unknown pyvim_funcs.vim_grep mode={}'.format(mode))
fname = 'tmp_grep_' + hashid + '.txt'
dpath = ub.ensure_app_cache_dir('pyvim_funcs')
fpath = join(dpath, fname)
# Display the text in a new vim split
open_fpath(fpath=fpath, mode='new')
overwrite_text(text)
vim.command(":exec ':w'")
def vim_argv(defaults=None):
import vim
nargs = int(vim.eval('a:0'))
argv = [vim.eval('a:{}'.format(i + 1)) for i in range(nargs)]
    if defaults is not None:
        # fill the remaining unspecified args with defaults
        n_remain = len(defaults) - len(argv)
        if n_remain > 0:
            argv += defaults[-n_remain:]
    return argv
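# Editor's note (illustrative sketch, not part of the original file): with
# defaults=['split', 0] and a single vim argument 'vsplit', vim_argv returns
# ['vsplit', 0]; with no vim arguments it returns ['split', 0].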
def vim_popup_menu(options):
""" http://stackoverflow.com/questions/13537521/custom-popup-menu-in-vim """
import vim
import utool as ut
vim.command('echohl Title')
vim.command("echo 'Code fragments:'")
vim.command("echohl None")
id_list = ut.chr_range(len(options), base='1')
for id_, opt in zip(id_list, options):
vim.command("echo '%s. %s'" % (id_, opt))
vim.command("echo 'Enter the number of your choice '")
choice = chr(int(vim.eval('getchar()')))
print('choice = %r' % (choice,))
try:
chosen = options[int(choice) - 1]
    except (ValueError, IndexError):
chosen = None
print('chosen = %r' % (chosen,))
return chosen
def ancestor_paths(start=None, limit={}):
"""
All paths above you
"""
limit = {expanduser(p) for p in limit}.union(set(limit))
if start is None:
start = os.getcwd()
path = start
prev = None
while path != prev and prev not in limit:
yield path
prev = path
path = os.path.dirname(path)
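# Editor's note (illustrative sketch, not part of the original file):
#   >>> list(ancestor_paths('/home/user/code/repo', limit={'/home/user'}))
#   ['/home/user/code/repo', '/home/user/code', '/home/user']
# The walk stops once the previously yielded path is in ``limit``.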
def search_candidate_paths(candidate_path_list, candidate_name_list=None,
priority_paths=None, required_subpaths=[],
verbose=None):
"""
    searches for existing paths that meet a requirement
Args:
candidate_path_list (list): list of paths to check. If
candidate_name_list is specified this is the dpath list instead
candidate_name_list (list): specifies several names to check
(default = None)
priority_paths (None): specifies paths to check first.
Ignore candidate_name_list (default = None)
required_subpaths (list): specified required directory structure
(default = [])
verbose (bool): verbosity flag(default = True)
Returns:
str: return_path
CommandLine:
xdoctest -m utool.util_path --test-search_candidate_paths
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> candidate_path_list = [ub.truepath('~/RPI/code/utool'),
>>> ub.truepath('~/code/utool')]
>>> candidate_name_list = None
>>> required_subpaths = []
>>> verbose = True
>>> priority_paths = None
>>> return_path = search_candidate_paths(candidate_path_list,
>>> candidate_name_list,
>>> priority_paths, required_subpaths,
>>> verbose)
>>> result = ('return_path = %s' % (str(return_path),))
>>> print(result)
"""
if verbose is None:
verbose = 1
if verbose >= 1:
print('[search_candidate_paths] Searching for candidate paths')
if candidate_name_list is not None:
candidate_path_list_ = [join(dpath, fname) for dpath, fname in
it.product(candidate_path_list,
candidate_name_list)]
else:
candidate_path_list_ = candidate_path_list
if priority_paths is not None:
candidate_path_list_ = priority_paths + candidate_path_list_
return_path = None
for path in candidate_path_list_:
if path is not None and exists(path):
if verbose >= 2:
print('[search_candidate_paths] Found candidate directory %r' % (path,))
                print('[search_candidate_paths] ... checking for appropriate structure')
# tomcat directory exists. Make sure it also contains a webapps dir
subpath_list = [join(path, subpath) for subpath in required_subpaths]
if all(exists(path_) for path_ in subpath_list):
return_path = path
if verbose >= 2:
print('[search_candidate_paths] Found acceptable path')
return return_path
break
if verbose >= 1:
print('[search_candidate_paths] Failed to find acceptable path')
return return_path
def find_and_open_path(path, mode='split', verbose=0,
enable_python=True,
enable_url=True, enable_cli=True):
"""
Fancy-Find. Does some magic to try and find the correct path.
Currently supports:
        * well-formed absolute and relative paths
* ill-formed relative paths when you are in a descendant directory
* python modules that exist in the PYTHONPATH
"""
import os
def try_open(path):
# base = '/home/joncrall/code/VIAME/packages/kwiver/sprokit/src/bindings/python/sprokit/pipeline'
# base = '/home'
if path and exists(path):
if verbose:
print('EXISTS path = {!r}\n'.format(path))
open_fpath(path, mode=mode, verbose=verbose)
return True
def expand_module(path):
# if True or filetype in {'py', 'pyx'}:
# filetype = get_current_filetype()
# import sys
# sys.executable
# import ubelt as ub
# print('ub = {!r}'.format(ub))
# xdoc = ub.import_module_from_path('/home/joncrall/code/xdoctest/xdoctest')
# print('xdoc = {!r}'.format(xdoc))
# print('sys.executable = {!r}'.format(sys.executable))
# print('sys.prefix = {!r}'.format(sys.prefix))
from xdoctest import static_analysis as static
# print('static = {!r}'.format(static))
try:
print('expand path = {!r}'.format(path))
path = static.modname_to_modpath(path)
print('expanded path = {!r}'.format(path))
# print('rectified module to path = {!r}'.format(path))
except Exception as ex:
print('ex = {!r}'.format(ex))
# if True or filetype in {'py', 'pyx'}:
return None
return path
def expand_module_prefix(path):
# TODO: we could parse the AST to figure out if the prefix is an alias
# for a known module.
from xdoctest import static_analysis as static
# Check if the path certainly looks like it could be a chain of python
# attribute accessors.
if re.match(r'^[\w\d_.]*$', path):
parts = path.split('.')
for i in reversed(range(len(parts))):
prefix = '.'.join(parts[:i])
path = static.modname_to_modpath(prefix)
if path is not None:
print('expanded prefix = {!r}'.format(path))
return path
print('expanded prefix = {!r}'.format(None))
return None
if enable_url:
# https://github.com/Erotemic
url = extract_url_embeding(path)
if is_url(url):
import webbrowser
            # webbrowser.open() already opens the URL and returns a bool,
            # so don't call .open() on its result
            webbrowser.open(url)
            # browser = webbrowser.get('google-chrome')
# ut.open_url_in_browser(url, 'google-chrome')
return
path = expanduser(path)
if try_open(path):
return
if try_open(os.path.expandvars(path)):
return
if enable_cli:
# Strip off the --argname= prefix
match = re.match(r'--[\w_]*=', path)
if match:
path = path[match.end():]
# path = 'sprokit/pipeline/pipeline.h'
# base = os.getcwd()
# base = '/home/joncrall/code/VIAME/packages/kwiver/sprokit/src/bindings/python/sprokit/pipeline'
if path.startswith('<') and path.endswith('>'):
path = path[1:-1]
if path.startswith('`') and path.endswith('`'):
path = path[1:-1]
if path.endswith(':'):
path = path[:-1]
path = os.path.expandvars(path)
path = expanduser(path) # expand again in case a prefix was removed
if try_open(path):
return
# Search downwards for relative paths
candidates = []
if not os.path.isabs(path):
limit = {'~', os.path.expanduser('~')}
start = os.getcwd()
candidates += list(ancestor_paths(start, limit=limit))
    candidates += os.environ['PATH'].split(os.pathsep)  # path-list separator, not the dir separator
result = search_candidate_paths(candidates, [path], verbose=verbose)
if result is not None:
path = result
current_fpath = get_current_fpath()
if os.path.islink(current_fpath):
newbase = os.path.dirname(os.path.realpath(current_fpath))
resolved_path = os.path.join(newbase, path)
if try_open(resolved_path):
return
if try_open(path):
return
else:
print('enable_python = {!r}'.format(enable_python))
if enable_python:
pypath = expand_module(path)
print('pypath = {!r}'.format(pypath))
if try_open(pypath):
return
pypath = expand_module_prefix(path)
print('pypath = {!r}'.format(pypath))
if try_open(pypath):
return
if re.match(r'--\w*=.*', path):
# try and open if its a command line arg
stripped_path = expanduser(re.sub(r'--\w*=', '', path))
if try_open(stripped_path):
return
#vim.command('echoerr "Could not find path={}"'.format(path))
print('Could not find path={!r}'.format(path))
def extract_url_embeding(word):
"""
parse several common ways to embed url within a "word"
"""
# rst url embedding
if word.startswith('<') and word.endswith('>`_'):
word = word[1:-3]
# markdown url embedding
if word.startswith('[') and word.endswith(')'):
import parse
pres = parse.parse('[{tag}]({ref})', word)
if pres:
word = pres.named['ref']
return word
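# Editor's note (illustrative sketch, not part of the original file): both
# embeddings unwrap to the bare URL, e.g.
#   '<https://example.com>`_'     -> 'https://example.com'   (rst)
#   '[docs](https://example.com)' -> 'https://example.com'   (markdown, via the `parse` package)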
def getvar(key, default=None, context='g'):
""" gets the value of a vim variable and defaults if it does not exist """
import vim
varname = '{}:{}'.format(context, key)
var_exists = int(vim.eval('exists("{}")'.format(varname)))
if var_exists:
value = vim.eval('get({}:, "{}")'.format(context, key))
else:
value = default
return value
def wmctrl_terminal_pattern():
# Make sure regexes are bash escaped
terminal_pattern = getvar('vpy_terminal_pattern', default=None)
if terminal_pattern is None:
terminal_pattern = r'\|'.join([
'terminal',
re.escape('terminator.Terminator'), # gtk3 terminator
re.escape('x-terminal-emulator.X-terminal-emulator'), # gtk2 terminator
# other common terminal applications
'tilix',
'konsole',
'rxvt',
'terminology',
'xterm',
'tilda',
'Yakuake',
])
return terminal_pattern
def keypress(keys):
"""
Simulates keypress commands
"""
import vim
vim.command('call feedkeys("{}")'.format(keys))
def enter_text_in_terminal(text, return_to_vim=True):
"""
Takes a block of text, copies it to the clipboard, pastes it into the most
recently used terminal, presses enter (if needed) to run what presumably is
a command or script, and then returns to vim.
    DEPRECATE:
use vimtk instead
TODO:
* User specified terminal pattern
* User specified paste keypress
* Allow usage from non-gui terminal vim.
(ensure we can detect if we are running in a terminal and
register our window as the active vim, and then paste into
the second mru terminal)
"""
import utool as ut
# Copy the text to the clipboard
copy_text_to_clipboard(text)
# Build xdtool script
import sys
if sys.platform.startswith('win32'):
print('win32 cannot copy to terminal yet. Just copied to clipboard. '
' Needs AHK support for motion?')
return
terminal_pattern = wmctrl_terminal_pattern()
# Sequence of key presses that will trigger a paste event
paste_keypress = 'ctrl+shift+v'
doscript = [
('remember_window_id', 'ACTIVE_GVIM'),
('focus', terminal_pattern),
('key', paste_keypress),
('key', 'KP_Enter'),
]
if '\n' in text:
# Press enter twice for multiline texts
doscript += [
('key', 'KP_Enter'),
]
if return_to_vim:
doscript += [
('focus_id', '$ACTIVE_GVIM'),
]
# execute script
ut.util_ubuntu.XCtrl.do(*doscript, sleeptime=.01)
#file=debug_file , verbose=DEBUG)
def is_url(text):
""" heuristic check if str is url formatted """
return any([
text.startswith('http://'),
text.startswith('https://'),
text.startswith('www.'),
'.org/' in text,
'.com/' in text,
])
def make_default_module_maintest(modpath, test_code=None, argv=None,
force_full=False):
"""
Args:
modname (str): module name
Returns:
str: text source code
CommandLine:
xdoctest -m utool.util_autogen --test-make_default_module_maintest
References:
http://legacy.python.org/dev/peps/pep-0338/
Example:
>>> import sys, ubelt as ub
>>> sys.path.append(ub.truepath('~/local/vim/rc/'))
>>> from pyvim_funcs import *
>>> import pyvim_funcs
>>> modpath = pyvim_funcs.__file__
>>> argv = None
>>> text = make_default_module_maintest(modpath)
>>> print(text)
"""
# if not use_modrun:
# if ub.WIN32:
# augpath = 'set PYTHONPATH=%PYTHONPATH%' + os.pathsep + moddir
# else:
# augpath = 'export PYTHONPATH=$PYTHONPATH' + os.pathsep + moddir
# cmdline = augpath + '\n' + cmdline
import ubelt as ub
from xdoctest import static_analysis as static
modname = static.modpath_to_modname(modpath)
moddir, rel_modpath = static.split_modpath(modpath)
if not force_full:
info = ub.cmd('python -c "import sys; print(sys.path)"')
default_path = eval(info['out'], {})
is_importable = static.is_modname_importable(modname, exclude=['.'],
sys_path=default_path)
if not force_full and is_importable:
cmdline = 'xdoctest -m ' + modname
else:
if ub.WIN32:
modpath = ub.compressuser(modpath, home='%HOME%')
cmdline = 'python -B ' + modpath.replace('\\', '/')
else:
modpath = ub.compressuser(modpath, home='~')
cmdline = 'python ' + modpath
if test_code is None:
test_code = ub.codeblock(
r'''
import xdoctest
xdoctest.doctest_module(__file__)
''')
if argv is None:
# argv = ['all']
argv = []
cmdline_ = ub.indent(cmdline + ' ' + ' '.join(argv), ' ' * 8).lstrip(' ')
test_code = ub.indent(test_code, ' ' * 4).lstrip(' ')
text = ub.codeblock(
r'''
if __name__ == '__main__':
{rr}"""
CommandLine:
{cmdline_}
"""
{test_code}
'''
).format(cmdline_=cmdline_, test_code=test_code, rr='{r}')
text = text.format(r='r' if '\\' in text else '')
return text
def format_text_as_docstr(text):
r"""
CommandLine:
python ~/local/vim/rc/pyvim_funcs.py --test-format_text_as_docstr
Example:
>>> # DISABLE_DOCTEST
>>> from pyvim_funcs import * # NOQA
>>> text = testdata_text()
>>> formated_text = format_text_as_docstr(text)
>>> result = ('formated_text = \n%s' % (str(formated_text),))
>>> print(result)
"""
min_indent = get_minimum_indentation(text)
indent_ = ' ' * min_indent
formated_text = re.sub('^' + indent_, '' + indent_ + '>>> ', text,
flags=re.MULTILINE)
formated_text = re.sub('^$', '' + indent_ + '>>> #', formated_text,
flags=re.MULTILINE)
return formated_text
def unformat_text_as_docstr(formated_text):
r"""
CommandLine:
python ~/local/vim/rc/pyvim_funcs.py --test-unformat_text_as_docstr
Example:
>>> # DISABLE_DOCTEST
>>> from pyvim_funcs import * # NOQA
>>> text = testdata_text()
>>> formated_text = format_text_as_docstr(text)
>>> unformated_text = unformat_text_as_docstr(formated_text)
>>> result = ('unformated_text = \n%s' % (str(unformated_text),))
>>> print(result)
"""
min_indent = get_minimum_indentation(formated_text)
indent_ = ' ' * min_indent
unformated_text = re.sub('^' + indent_ + '>>> ', '' + indent_,
formated_text, flags=re.MULTILINE)
return unformated_text
def copy_text_to_clipboard(text):
"""
Copies text to the clipboard
CommandLine:
pip install pyperclip
sudo apt-get install xclip
sudo apt-get install xsel
References:
http://stackoverflow.com/questions/11063458/python-script-to-copy-text-to-clipboard
http://stackoverflow.com/questions/579687/how-do-i-copy-a-string-to-the-clipboard-on-windows-using-python
Ignore:
import pyperclip
# Qt is by far the fastest, followed by xsel, and then xclip
#
backend_order = ['xclip', 'xsel', 'qt', 'gtk']
backend_order = ['qt', 'xsel', 'xclip', 'gtk']
for be in backend_order:
print('be = %r' % (be,))
pyperclip.set_clipboard(be)
%timeit pyperclip.copy('a line of reasonable length text')
%timeit pyperclip.paste()
"""
import pyperclip
import ubelt as ub
def _check_clipboard_backend(backend):
if backend == 'qt':
try:
import PyQt5 # NOQA
return True
except ImportError:
return False
elif backend == 'gtk':
try:
import gtk # NOQA
return True
except ImportError:
return False
else:
return pyperclip._executable_exists(backend)
def _ensure_clipboard_backend():
# TODO: vimtk can do this, use that instead
if ub.POSIX:
backend_order = ['xclip', 'xsel', 'qt', 'gtk']
for backend in backend_order:
if getattr(pyperclip, '_hacked_clipboard', 'no') == backend:
break
elif _check_clipboard_backend(backend):
pyperclip.set_clipboard(backend)
pyperclip._hacked_clipboard = backend
break
else:
print('warning %r not installed' % (backend,))
_ensure_clipboard_backend()
pyperclip.copy(text)
# from Tkinter import Tk
# tk_inst = Tk()
# tk_inst.withdraw()
# tk_inst.clipboard_clear()
# tk_inst.clipboard_append(text)
# tk_inst.destroy()
def open_url_in_browser(url, browsername=None, fallback=False):
r"""
Opens a url in the specified or default browser
Args:
url (str): web url
CommandLine:
xdoctest -m utool.util_grabdata --test-open_url_in_browser
Example:
>>> # DISABLE_DOCTEST
>>> # SCRIPT
>>> url = 'http://www.jrsoftware.org/isdl.php'
>>> open_url_in_browser(url, 'chrome')
"""
import webbrowser
print('[utool] Opening url=%r in browser' % (url,))
if browsername is None:
browser = webbrowser.open(url)
else:
browser = get_prefered_browser(pref_list=[browsername], fallback=fallback)
return browser.open(url)
def get_prefered_browser(pref_list=[], fallback=True):
r"""
Args:
browser_preferences (list): (default = [])
        fallback (bool): uses the default browser if none of the preferences work (default = True)
CommandLine:
xdoctest -m utool.util_grabdata --test-get_prefered_browser
Ignore:
import webbrowser
webbrowser._tryorder
pref_list = ['chrome', 'firefox', 'google-chrome']
pref_list = ['firefox', 'google-chrome']
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_grabdata import * # NOQA
>>> browser_preferences = ['firefox', 'chrome', 'safari']
>>> fallback = True
>>> browser = get_prefered_browser(browser_preferences, fallback)
>>> result = ('browser = %s' % (str(browser),))
>>> print(result)
>>> ut.quit_if_noshow()
"""
import webbrowser
import ubelt as ub
pref_list = pref_list if ub.iterable(pref_list) else [pref_list]
error_list = []
def listfind(list_, tofind):
try:
return list_.index(tofind)
except ValueError:
return None
# Hack for finding chrome on win32
if ub.WIN32:
# http://stackoverflow.com/questions/24873302/webbrowser-chrome-exe-does-not-work
win32_chrome_fpath = 'C:/Program Files (x86)/Google/Chrome/Application/chrome.exe'
win32_chrome_browsername = win32_chrome_fpath + ' %s'
win32_map = {
'chrome': win32_chrome_browsername,
'google-chrome': win32_chrome_browsername,
}
for browsername, win32_browsername in win32_map.items():
index = listfind(pref_list, browsername)
            if index is not None:
pref_list.insert(index + 1, win32_browsername)
for browsername in pref_list:
try:
browser = webbrowser.get(browsername)
return browser
except webbrowser.Error as ex:
error_list.append(ex)
print(str(browsername) + ' failed. Reason: ' + str(ex))
if fallback:
browser = webbrowser
return browser
else:
raise AssertionError('No browser meets preferences=%r. error_list=%r' %
(pref_list, error_list,))
if __name__ == '__main__':
"""
CommandLine:
python ~/local/vim/rc/pyvim_funcs.py
"""
import xdoctest
xdoctest.doctest_module(__file__)
|
Erotemic/local
|
vim/rc/pyvim_funcs.py
|
Python
|
gpl-3.0
| 56,333
|
[
"Galaxy"
] |
f13ca7bf7a524d4dd9a52ada91ff9582895606c12813ea4bf91b666c8f92fe37
|
# coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for the visualization code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from disentanglement_lib.utils import resources
import numpy as np
from PIL import Image
import scipy
from six.moves import range
import tensorflow.compat.v1 as tf
import imageio
def save_image(image, image_path):
"""Saves an image in the [0,1]-valued Numpy array to image_path.
Args:
image: Numpy array of shape (height, width, {1,3}) with values in [0, 1].
image_path: String with path to output image.
"""
# Copy the single channel if we are provided a grayscale image.
if image.shape[2] == 1:
image = np.repeat(image, 3, axis=2)
image = np.ascontiguousarray(image)
image *= 255.
image = image.astype("uint8")
with tf.gfile.Open(image_path, "wb") as path:
img = Image.fromarray(image, mode="RGB")
img.save(path)
def grid_save_images(images, image_path):
"""Saves images in list of [0,1]-valued np.arrays on a grid.
Args:
images: List of Numpy arrays of shape (height, width, {1,3}) with values in
[0, 1].
image_path: String with path to output image.
"""
side_length = int(math.floor(math.sqrt(len(images))))
image_rows = [
np.concatenate(
images[side_length * i:side_length * i + side_length], axis=0)
for i in range(side_length)
]
tiled_image = np.concatenate(image_rows, axis=1)
save_image(tiled_image, image_path)
def padded_grid(images, num_rows=None, padding_px=10, value=None):
"""Creates a grid with padding in between images."""
num_images = len(images)
if num_rows is None:
num_rows = best_num_rows(num_images)
# Computes how many empty images we need to add.
num_cols = int(np.ceil(float(num_images) / num_rows))
num_missing = num_rows * num_cols - num_images
# Add the empty images at the end.
all_images = images + [np.ones_like(images[0])] * num_missing
# Create the final grid.
rows = [padded_stack(all_images[i * num_cols:(i + 1) * num_cols], padding_px,
1, value=value) for i in range(num_rows)]
return padded_stack(rows, padding_px, axis=0, value=value)
def padded_stack(images, padding_px=10, axis=0, value=None):
"""Stacks images along axis with padding in between images."""
padding_arr = padding_array(images[0], padding_px, axis, value=value)
new_images = [images[0]]
for image in images[1:]:
new_images.append(padding_arr)
new_images.append(image)
return np.concatenate(new_images, axis=axis)
def padding_array(image, padding_px, axis, value=None):
"""Creates padding image of proper shape to pad image along the axis."""
shape = list(image.shape)
shape[axis] = padding_px
if value is None:
return np.ones(shape, dtype=image.dtype)
else:
assert len(value) == shape[-1]
shape[-1] = 1
return np.tile(value, shape)
def best_num_rows(num_elements, max_ratio=4):
"""Automatically selects a smart number of rows."""
best_remainder = num_elements
best_i = None
i = int(np.sqrt(num_elements))
while True:
if num_elements > max_ratio * i * i:
return best_i
remainder = (i - num_elements % i) % i
if remainder == 0:
return i
if remainder < best_remainder:
best_remainder = remainder
best_i = i
i -= 1
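# Editor's note (illustrative sketch, not part of the original file): the
# search starts at int(sqrt(num_elements)) and walks downward, preferring a
# row count that divides the elements evenly, e.g. best_num_rows(12) == 3
# and best_num_rows(7) == 2.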
def pad_around(image, padding_px=10, axis=None, value=None):
"""Adds a padding around each image."""
# If axis is None, pad both the first and the second axis.
if axis is None:
image = pad_around(image, padding_px, axis=0, value=value)
axis = 1
padding_arr = padding_array(image, padding_px, axis, value=value)
return np.concatenate([padding_arr, image, padding_arr], axis=axis)
def add_below(image, padding_px=10, value=None):
"""Adds a footer below."""
if len(image.shape) == 2:
image = np.expand_dims(image, -1)
if image.shape[2] == 1:
image = np.repeat(image, 3, 2)
if image.shape[2] != 3:
raise ValueError("Could not convert image to have three channels.")
with tf.gfile.Open(resources.get_file("disentanglement_lib.png"), "rb") as f:
footer = np.array(Image.open(f).convert("RGB")) * 1.0 / 255.
missing_px = image.shape[1] - footer.shape[1]
if missing_px < 0:
return image
if missing_px > 0:
padding_arr = padding_array(footer, missing_px, axis=1, value=value)
footer = np.concatenate([padding_arr, footer], axis=1)
return padded_stack([image, footer], padding_px, axis=0, value=value)
def save_animation(list_of_animated_images, image_path, fps):
full_size_images = []
for single_images in zip(*list_of_animated_images):
full_size_images.append(
pad_around(add_below(padded_grid(list(single_images)))))
imageio.mimwrite(image_path, full_size_images, fps=fps)
def cycle_factor(starting_index, num_indices, num_frames):
"""Cycles through the state space in a single cycle."""
grid = np.linspace(starting_index, starting_index + 2*num_indices,
num=num_frames, endpoint=False)
grid = np.array(np.ceil(grid), dtype=np.int64)
grid -= np.maximum(0, 2*grid - 2*num_indices + 1)
grid += np.maximum(0, -2*grid - 1)
return grid
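# Editor's note (illustrative sketch, not part of the original file): the
# clipping arithmetic above produces a triangle wave over integer indices,
# e.g. cycle_factor(0, 3, 6) returns the values [0, 1, 2, 2, 1, 0], one
# up-and-down sweep of the factor index spread over num_frames frames.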
def cycle_gaussian(starting_value, num_frames, loc=0., scale=1.):
"""Cycles through the quantiles of a Gaussian in a single cycle."""
starting_prob = scipy.stats.norm.cdf(starting_value, loc=loc, scale=scale)
grid = np.linspace(starting_prob, starting_prob + 2.,
num=num_frames, endpoint=False)
grid -= np.maximum(0, 2*grid - 2)
grid += np.maximum(0, -2*grid)
grid = np.minimum(grid, 0.999)
grid = np.maximum(grid, 0.001)
return np.array([scipy.stats.norm.ppf(i, loc=loc, scale=scale) for i in grid])
def cycle_interval(starting_value, num_frames, min_val, max_val):
"""Cycles through the state space in a single cycle."""
starting_in_01 = (starting_value - min_val)/(max_val - min_val)
grid = np.linspace(starting_in_01, starting_in_01 + 2.,
num=num_frames, endpoint=False)
grid -= np.maximum(0, 2*grid - 2)
grid += np.maximum(0, -2*grid)
return grid * (max_val - min_val) + min_val
|
google-research/disentanglement_lib
|
disentanglement_lib/visualize/visualize_util.py
|
Python
|
apache-2.0
| 6,783
|
[
"Gaussian"
] |
fe2f12187a56ca96c3e0de6736eee1aee0e16dfd6522449025d5688ba8d6a745
|
"""Visitor/traversal interface and library functions.
SQLAlchemy schema and expression constructs rely on a Python-centric
version of the classic "visitor" pattern as the primary way in which
they apply functionality. The most common use of this pattern
is statement compilation, where individual expression classes match
up to rendering methods that produce a string result. Beyond this,
the visitor system is also used to inspect expressions for various
information and patterns, as well as for usage in
some kinds of expression transformation. Other kinds of transformation
use a non-visitor traversal system.
For many examples of how the visit system is used, see the
sqlalchemy.sql.util and the sqlalchemy.sql.compiler modules.
For an introduction to clause adaption, see
http://techspot.zzzeek.org/?p=19 .
"""
from collections import deque
import re
from sqlalchemy import util
import operator
__all__ = ['VisitableType', 'Visitable', 'ClauseVisitor',
'CloningVisitor', 'ReplacingCloningVisitor', 'iterate',
'iterate_depthfirst', 'traverse_using', 'traverse',
'cloned_traverse', 'replacement_traverse']
class VisitableType(type):
"""Metaclass which checks for a `__visit_name__` attribute and
applies `_compiler_dispatch` method to classes.
"""
def __init__(cls, clsname, bases, clsdict):
if cls.__name__ == 'Visitable' or not hasattr(cls, '__visit_name__'):
super(VisitableType, cls).__init__(clsname, bases, clsdict)
return
# set up an optimized visit dispatch function
# for use by the compiler
visit_name = cls.__visit_name__
if isinstance(visit_name, str):
getter = operator.attrgetter("visit_%s" % visit_name)
def _compiler_dispatch(self, visitor, **kw):
return getter(visitor)(self, **kw)
else:
def _compiler_dispatch(self, visitor, **kw):
return getattr(visitor, 'visit_%s' % self.__visit_name__)(self, **kw)
cls._compiler_dispatch = _compiler_dispatch
super(VisitableType, cls).__init__(clsname, bases, clsdict)
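# Editor's note (illustrative sketch, not part of the original file): with
# this metaclass, a class such as
#     class Select(Visitable):
#         __visit_name__ = 'select'
# gets a ``_compiler_dispatch`` equivalent to ``visitor.visit_select(self, **kw)``,
# so the 'visit_select' attribute name is computed once at class-creation time
# instead of being rebuilt from a string on every dispatch.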
class Visitable(object):
"""Base class for visitable objects, applies the
``VisitableType`` metaclass.
"""
__metaclass__ = VisitableType
class ClauseVisitor(object):
"""Base class for visitor objects which can traverse using
the traverse() function.
"""
__traverse_options__ = {}
def traverse_single(self, obj):
for v in self._visitor_iterator:
meth = getattr(v, "visit_%s" % obj.__visit_name__, None)
if meth:
return meth(obj)
def iterate(self, obj):
"""traverse the given expression structure, returning an iterator of all elements."""
return iterate(obj, self.__traverse_options__)
def traverse(self, obj):
"""traverse and visit the given expression structure."""
return traverse(obj, self.__traverse_options__, self._visitor_dict)
@util.memoized_property
def _visitor_dict(self):
visitors = {}
for name in dir(self):
if name.startswith('visit_'):
visitors[name[6:]] = getattr(self, name)
return visitors
@property
def _visitor_iterator(self):
"""iterate through this visitor and each 'chained' visitor."""
v = self
while v:
yield v
v = getattr(v, '_next', None)
def chain(self, visitor):
"""'chain' an additional ClauseVisitor onto this ClauseVisitor.
the chained visitor will receive all visit events after this one.
"""
tail = list(self._visitor_iterator)[-1]
tail._next = visitor
return self
class CloningVisitor(ClauseVisitor):
"""Base class for visitor objects which can traverse using
the cloned_traverse() function.
"""
def copy_and_process(self, list_):
"""Apply cloned traversal to the given list of elements, and return the new list."""
return [self.traverse(x) for x in list_]
def traverse(self, obj):
"""traverse and visit the given expression structure."""
return cloned_traverse(obj, self.__traverse_options__, self._visitor_dict)
class ReplacingCloningVisitor(CloningVisitor):
"""Base class for visitor objects which can traverse using
the replacement_traverse() function.
"""
def replace(self, elem):
"""receive pre-copied elements during a cloning traversal.
If the method returns a new element, the element is used
instead of creating a simple copy of the element. Traversal
will halt on the newly returned element if it is re-encountered.
"""
return None
def traverse(self, obj):
"""traverse and visit the given expression structure."""
def replace(elem):
for v in self._visitor_iterator:
e = v.replace(elem)
if e is not None:
return e
return replacement_traverse(obj, self.__traverse_options__, replace)
def iterate(obj, opts):
"""traverse the given expression structure, returning an iterator.
traversal is configured to be breadth-first.
"""
stack = deque([obj])
while stack:
t = stack.popleft()
yield t
for c in t.get_children(**opts):
stack.append(c)
def iterate_depthfirst(obj, opts):
"""traverse the given expression structure, returning an iterator.
traversal is configured to be depth-first.
"""
stack = deque([obj])
traversal = deque()
while stack:
t = stack.pop()
traversal.appendleft(t)
for c in t.get_children(**opts):
stack.append(c)
return iter(traversal)
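# Editor's note (illustrative, not part of the original file): ``iterate``
# yields elements breadth-first (an element before any of its grandchildren),
# while ``iterate_depthfirst`` appends popped elements to the left of the
# deque, so its iterator yields leaves before the constructs that contain them.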
def traverse_using(iterator, obj, visitors):
"""visit the given expression structure using the given iterator of objects."""
for target in iterator:
meth = visitors.get(target.__visit_name__, None)
if meth:
meth(target)
return obj
def traverse(obj, opts, visitors):
"""traverse and visit the given expression structure using the default iterator."""
return traverse_using(iterate(obj, opts), obj, visitors)
def traverse_depthfirst(obj, opts, visitors):
"""traverse and visit the given expression structure using the depth-first iterator."""
return traverse_using(iterate_depthfirst(obj, opts), obj, visitors)
def cloned_traverse(obj, opts, visitors):
"""clone the given expression structure, allowing modifications by visitors."""
cloned = util.column_dict()
def clone(element):
if element not in cloned:
cloned[element] = element._clone()
return cloned[element]
obj = clone(obj)
stack = [obj]
while stack:
t = stack.pop()
if t in cloned:
continue
t._copy_internals(clone=clone)
meth = visitors.get(t.__visit_name__, None)
if meth:
meth(t)
for c in t.get_children(**opts):
stack.append(c)
return obj
def replacement_traverse(obj, opts, replace):
"""clone the given expression structure, allowing element replacement by a given replacement function."""
cloned = util.column_dict()
stop_on = util.column_set(opts.get('stop_on', []))
def clone(element):
newelem = replace(element)
if newelem is not None:
stop_on.add(newelem)
return newelem
if element not in cloned:
cloned[element] = element._clone()
return cloned[element]
obj = clone(obj)
stack = [obj]
while stack:
t = stack.pop()
if t in stop_on:
continue
t._copy_internals(clone=clone)
for c in t.get_children(**opts):
stack.append(c)
return obj
|
obeattie/sqlalchemy
|
lib/sqlalchemy/sql/visitors.py
|
Python
|
mit
| 7,999
|
[
"VisIt"
] |
61fdf5b4a724a2dd4097e6028a2a1b0096cf20bc0d12d751277bd095cc3628e2
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pyscf import lib
from pyscf import scf
from pyscf import gto
from pyscf import grad
from pyscf.grad import dhf
h2o = gto.Mole()
h2o.verbose = 5
h2o.output = '/dev/null'
h2o.atom = [
["O" , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ]
h2o.basis = {"H": '6-31g',
"O": '6-31g',}
h2o.build()
def tearDownModule():
global h2o
h2o.stdout.close()
del h2o
class KnownValues(unittest.TestCase):
def test_dhf_grad_with_ssss_high_cost(self):
with lib.light_speed(30):
mf = scf.DHF(h2o).run(conv_tol=1e-12)
g = mf.nuc_grad_method().kernel()
self.assertAlmostEqual(lib.finger(g), 0.0074947016737157545, 7)
ms = mf.as_scanner()
pmol = h2o.copy()
e1 = ms(pmol.set_geom_([["O" , (0. , 0. ,-0.001)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]], unit='Ang'))
e2 = ms(pmol.set_geom_([["O" , (0. , 0. , 0.001)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]], unit='Ang'))
self.assertAlmostEqual(g[0,2], (e2-e1)/0.002*lib.param.BOHR, 5)
# def test_dhf_grad_without_ssss(self):
# with lib.light_speed(30):
# mf = scf.DHF(h2o).set(with_ssss=False).run()
# g = mf.nuc_grad_method().kernel() # NotImplemented
# self.assertAlmostEqual(lib.finger(g), 0.035838032078025273, 7)
#
# ms = mf.as_scanner()
# pmol = h2o.copy()
# e1 = ms(pmol.set_geom_([["O" , (0. , 0. ,-0.001)],
# [1 , (0. , -0.757 , 0.587)],
# [1 , (0. , 0.757 , 0.587)]], unit='Ang'))
# e2 = ms(pmol.set_geom_([["O" , (0. , 0. , 0.001)],
# [1 , (0. , -0.757 , 0.587)],
# [1 , (0. , 0.757 , 0.587)]], unit='Ang'))
# self.assertAlmostEqual(g[0,2], (e2-e1)/0.002*lib.param.BOHR, 6)
if __name__ == "__main__":
print("Full Tests for DHF")
unittest.main()
|
gkc1000/pyscf
|
pyscf/grad/test/test_dhf.py
|
Python
|
apache-2.0
| 2,886
|
[
"PySCF"
] |
53dc1bf02e730616678d91663a8c25472fcf38b0091908992b82ba131df5c268
|
#!/usr/bin/env python
import os
try:
__IPYTHON__
import sys
del sys.argv[1:]
except:
pass
import srwl_bl
import srwlib
import srwlpy
import math
import srwl_uti_smp
def set_optics(v=None):
el = []
pp = []
names = ['ApM1', 'M1', 'M1_Watchpoint', 'Watchpoint', 'ApKB', 'VFM', 'VFM_HFM', 'HFM', 'HFM_Sample', 'Sample']
for el_name in names:
if el_name == 'ApM1':
# ApM1: aperture 270.0m
el.append(srwlib.SRWLOptA(
_shape=v.op_ApM1_shape,
_ap_or_ob='a',
_Dx=v.op_ApM1_Dx,
_Dy=v.op_ApM1_Dy,
_x=v.op_ApM1_x,
_y=v.op_ApM1_y,
))
pp.append(v.op_ApM1_pp)
elif el_name == 'M1':
# M1: mirror 270.0m
mirror_file = v.op_M1_hfn
assert os.path.isfile(mirror_file), \
'Missing input file {}, required by M1 beamline element'.format(mirror_file)
el.append(srwlib.srwl_opt_setup_surf_height_1d(
srwlib.srwl_uti_read_data_cols(mirror_file, "\t", 0, 1),
_dim=v.op_M1_dim,
_ang=abs(v.op_M1_ang),
_amp_coef=v.op_M1_amp_coef,
_size_x=v.op_M1_size_x,
_size_y=v.op_M1_size_y,
))
pp.append(v.op_M1_pp)
elif el_name == 'M1_Watchpoint':
# M1_Watchpoint: drift 270.0m
el.append(srwlib.SRWLOptD(
_L=v.op_M1_Watchpoint_L,
))
pp.append(v.op_M1_Watchpoint_pp)
elif el_name == 'Watchpoint':
# Watchpoint: watch 928.3m
pass
elif el_name == 'ApKB':
# ApKB: aperture 928.3m
el.append(srwlib.SRWLOptA(
_shape=v.op_ApKB_shape,
_ap_or_ob='a',
_Dx=v.op_ApKB_Dx,
_Dy=v.op_ApKB_Dy,
_x=v.op_ApKB_x,
_y=v.op_ApKB_y,
))
pp.append(v.op_ApKB_pp)
elif el_name == 'VFM':
# VFM: ellipsoidMirror 928.3m
el.append(srwlib.SRWLOptMirEl(
_p=v.op_VFM_p,
_q=v.op_VFM_q,
_ang_graz=v.op_VFM_ang,
_size_tang=v.op_VFM_size_tang,
_size_sag=v.op_VFM_size_sag,
_nvx=v.op_VFM_nvx,
_nvy=v.op_VFM_nvy,
_nvz=v.op_VFM_nvz,
_tvx=v.op_VFM_tvx,
_tvy=v.op_VFM_tvy,
_x=v.op_VFM_x,
_y=v.op_VFM_y,
))
pp.append(v.op_VFM_pp)
mirror_file = v.op_VFM_hfn
assert os.path.isfile(mirror_file), \
'Missing input file {}, required by VFM beamline element'.format(mirror_file)
el.append(srwlib.srwl_opt_setup_surf_height_1d(
srwlib.srwl_uti_read_data_cols(mirror_file, "\t", 0, 1),
_dim=v.op_VFM_dim,
_ang=abs(v.op_VFM_ang),
_amp_coef=v.op_VFM_amp_coef,
))
pp.append([0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0])
elif el_name == 'VFM_HFM':
# VFM_HFM: drift 928.3m
el.append(srwlib.SRWLOptD(
_L=v.op_VFM_HFM_L,
))
pp.append(v.op_VFM_HFM_pp)
elif el_name == 'HFM':
# HFM: ellipsoidMirror 928.9m
el.append(srwlib.SRWLOptMirEl(
_p=v.op_HFM_p,
_q=v.op_HFM_q,
_ang_graz=v.op_HFM_ang,
_size_tang=v.op_HFM_size_tang,
_size_sag=v.op_HFM_size_sag,
_nvx=v.op_HFM_nvx,
_nvy=v.op_HFM_nvy,
_nvz=v.op_HFM_nvz,
_tvx=v.op_HFM_tvx,
_tvy=v.op_HFM_tvy,
_x=v.op_HFM_x,
_y=v.op_HFM_y,
))
pp.append(v.op_HFM_pp)
mirror_file = v.op_HFM_hfn
assert os.path.isfile(mirror_file), \
'Missing input file {}, required by HFM beamline element'.format(mirror_file)
el.append(srwlib.srwl_opt_setup_surf_height_1d(
srwlib.srwl_uti_read_data_cols(mirror_file, "\t", 0, 1),
_dim=v.op_HFM_dim,
_ang=abs(v.op_HFM_ang),
_amp_coef=v.op_HFM_amp_coef,
))
pp.append([0, 0, 1.0, 0, 0, 1.0, 1.0, 1.0, 1.0])
elif el_name == 'HFM_Sample':
# HFM_Sample: drift 928.9m
el.append(srwlib.SRWLOptD(
_L=v.op_HFM_Sample_L,
))
pp.append(v.op_HFM_Sample_pp)
elif el_name == 'Sample':
# Sample: watch 930.0m
pass
pp.append(v.op_fin_pp)
return srwlib.SRWLOptC(el, pp)
varParam = [
['name', 's', 'Gaussian X-ray beam through a Beamline containing Imperfect Mirrors', 'simulation name'],
#---Data Folder
['fdir', 's', '', 'folder (directory) name for reading-in input and saving output data files'],
['gbm_x', 'f', 0.0, 'average horizontal coordinates of waist [m]'],
['gbm_y', 'f', 0.0, 'average vertical coordinates of waist [m]'],
['gbm_z', 'f', 0.0, 'average longitudinal coordinate of waist [m]'],
['gbm_xp', 'f', 0.0, 'average horizontal angle at waist [rad]'],
    ['gbm_yp', 'f', 0.0, 'average vertical angle at waist [rad]'],
['gbm_ave', 'f', 12400.0, 'average photon energy [eV]'],
['gbm_pen', 'f', 0.001, 'energy per pulse [J]'],
['gbm_rep', 'f', 1, 'rep. rate [Hz]'],
['gbm_pol', 'f', 1, 'polarization 1- lin. hor., 2- lin. vert., 3- lin. 45 deg., 4- lin.135 deg., 5- circ. right, 6- circ. left'],
['gbm_sx', 'f', 9.787229999999999e-06, 'rms beam size vs horizontal position [m] at waist (for intensity)'],
['gbm_sy', 'f', 9.787229999999999e-06, 'rms beam size vs vertical position [m] at waist (for intensity)'],
['gbm_st', 'f', 1e-14, 'rms pulse duration [s] (for intensity)'],
['gbm_mx', 'f', 0, 'transverse Gauss-Hermite mode order in horizontal direction'],
['gbm_my', 'f', 0, 'transverse Gauss-Hermite mode order in vertical direction'],
['gbm_ca', 's', 'c', 'treat _sigX, _sigY as sizes in [m] in coordinate representation (_presCA="c") or as angular divergences in [rad] in angular representation (_presCA="a")'],
['gbm_ft', 's', 't', 'treat _sigT as pulse duration in [s] in time domain/representation (_presFT="t") or as bandwidth in [eV] in frequency domain/representation (_presFT="f")'],
#---Calculation Types
# Electron Trajectory
['tr', '', '', 'calculate electron trajectory', 'store_true'],
['tr_cti', 'f', 0.0, 'initial time moment (c*t) for electron trajectory calculation [m]'],
['tr_ctf', 'f', 0.0, 'final time moment (c*t) for electron trajectory calculation [m]'],
['tr_np', 'f', 10000, 'number of points for trajectory calculation'],
['tr_mag', 'i', 1, 'magnetic field to be used for trajectory calculation: 1- approximate, 2- accurate'],
['tr_fn', 's', 'res_trj.dat', 'file name for saving calculated trajectory data'],
    ['tr_pl', 's', '', 'plot the resulting trajectory in graph(s): ""- dont plot, otherwise the string should list the trajectory components to plot'],
#Single-Electron Spectrum vs Photon Energy
['ss', '', '', 'calculate single-e spectrum vs photon energy', 'store_true'],
['ss_ei', 'f', 100.0, 'initial photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ef', 'f', 20000.0, 'final photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ne', 'i', 10000, 'number of points vs photon energy for single-e spectrum vs photon energy calculation'],
['ss_x', 'f', 0.0, 'horizontal position [m] for single-e spectrum vs photon energy calculation'],
['ss_y', 'f', 0.0, 'vertical position [m] for single-e spectrum vs photon energy calculation'],
['ss_meth', 'i', 0, 'method to use for single-e spectrum vs photon energy calculation: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['ss_prec', 'f', 0.01, 'relative precision for single-e spectrum vs photon energy calculation (nominal value is 0.01)'],
['ss_pol', 'i', 6, 'polarization component to extract after spectrum vs photon energy calculation: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['ss_mag', 'i', 1, 'magnetic field to be used for single-e spectrum vs photon energy calculation: 1- approximate, 2- accurate'],
['ss_ft', 's', 'f', 'presentation/domain: "f"- frequency (photon energy), "t"- time'],
['ss_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
['ss_fn', 's', 'res_spec_se.dat', 'file name for saving calculated single-e spectrum vs photon energy'],
['ss_pl', 's', '', 'plot the resulting single-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#Multi-Electron Spectrum vs Photon Energy (taking into account e-beam emittance, energy spread and collection aperture size)
['sm', '', '', 'calculate multi-e spectrum vs photon energy', 'store_true'],
['sm_ei', 'f', 100.0, 'initial photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ef', 'f', 20000.0, 'final photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ne', 'i', 10000, 'number of points vs photon energy for multi-e spectrum vs photon energy calculation'],
['sm_x', 'f', 0.0, 'horizontal center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_rx', 'f', 0.001, 'range of horizontal position / horizontal aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_nx', 'i', 1, 'number of points vs horizontal position for multi-e spectrum vs photon energy calculation'],
['sm_y', 'f', 0.0, 'vertical center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_ry', 'f', 0.001, 'range of vertical position / vertical aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_ny', 'i', 1, 'number of points vs vertical position for multi-e spectrum vs photon energy calculation'],
    ['sm_mag', 'i', 1, 'magnetic field to be used for calculation of multi-e spectrum or intensity distribution: 1- approximate, 2- accurate'],
['sm_hi', 'i', 1, 'initial UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
['sm_hf', 'i', 15, 'final UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
['sm_prl', 'f', 1.0, 'longitudinal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_pra', 'f', 1.0, 'azimuthal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_meth', 'i', -1, 'method to use for spectrum vs photon energy calculation in case of arbitrary input magnetic field: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler", -1- dont use this accurate integration method (rather use approximate if possible)'],
['sm_prec', 'f', 0.01, 'relative precision for spectrum vs photon energy calculation in case of arbitrary input magnetic field (nominal value is 0.01)'],
['sm_nm', 'i', 1, 'number of macro-electrons for calculation of spectrum in case of arbitrary input magnetic field'],
['sm_na', 'i', 5, 'number of macro-electrons to average on each node at parallel (MPI-based) calculation of spectrum in case of arbitrary input magnetic field'],
['sm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons) for intermediate intensity at calculation of multi-electron spectrum in case of arbitrary input magnetic field'],
['sm_type', 'i', 1, 'calculate flux (=1) or flux per unit surface (=2)'],
['sm_pol', 'i', 6, 'polarization component to extract after calculation of multi-e flux or intensity: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['sm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
    ['sm_fn', 's', 'res_spec_me.dat', 'file name for saving calculated multi-e spectrum vs photon energy'],
    ['sm_pl', 's', '', 'plot the resulting multi-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#to add options for the multi-e calculation from "accurate" magnetic field
#Power Density Distribution vs horizontal and vertical position
['pw', '', '', 'calculate SR power density distribution', 'store_true'],
['pw_x', 'f', 0.0, 'central horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_rx', 'f', 0.015, 'range of horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_nx', 'i', 100, 'number of points vs horizontal position for calculation of power density distribution'],
['pw_y', 'f', 0.0, 'central vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ry', 'f', 0.015, 'range of vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ny', 'i', 100, 'number of points vs vertical position for calculation of power density distribution'],
['pw_pr', 'f', 1.0, 'precision factor for calculation of power density distribution'],
['pw_meth', 'i', 1, 'power density computation method (1- "near field", 2- "far field")'],
['pw_zst', 'f', 0., 'initial longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
['pw_zfi', 'f', 0., 'final longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
['pw_mag', 'i', 1, 'magnetic field to be used for power density calculation: 1- approximate, 2- accurate'],
['pw_fn', 's', 'res_pow.dat', 'file name for saving calculated power density distribution'],
['pw_pl', 's', '', 'plot the resulting power density distribution in a graph: ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
#Single-Electron Intensity distribution vs horizontal and vertical position
['si', '', '', 'calculate single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position', 'store_true'],
#Single-Electron Wavefront Propagation
['ws', '', '', 'calculate single-electron (/ fully coherent) wavefront propagation', 'store_true'],
#Multi-Electron (partially-coherent) Wavefront Propagation
['wm', '', '', 'calculate multi-electron (/ partially coherent) wavefront propagation', 'store_true'],
['w_e', 'f', 12400.0, 'photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ef', 'f', -1.0, 'final photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ne', 'i', 1, 'number of points vs photon energy for calculation of intensity distribution'],
['w_x', 'f', 0.0, 'central horizontal position [m] for calculation of intensity distribution'],
['w_rx', 'f', 0.00175601622, 'range of horizontal position [m] for calculation of intensity distribution'],
['w_nx', 'i', 100, 'number of points vs horizontal position for calculation of intensity distribution'],
['w_y', 'f', 0.0, 'central vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ry', 'f', 0.00175601622, 'range of vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ny', 'i', 100, 'number of points vs vertical position for calculation of intensity distribution'],
['w_smpf', 'f', 2.0, 'sampling factor for calculation of intensity distribution vs horizontal and vertical position'],
['w_meth', 'i', 2, 'method to use for calculation of intensity distribution vs horizontal and vertical position: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['w_prec', 'f', 0.01, 'relative precision for calculation of intensity distribution vs horizontal and vertical position'],
['w_u', 'i', 1, 'electric field units: 0- arbitrary, 1- sqrt(Phot/s/0.1%bw/mm^2), 2- sqrt(J/eV/mm^2) or sqrt(W/mm^2), depending on representation (freq. or time)'],
['si_pol', 'i', 6, 'polarization component to extract after calculation of intensity distribution: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['si_type', 'i', 0, 'type of a characteristic to be extracted after calculation of intensity distribution: 0- Single-Electron Intensity, 1- Multi-Electron Intensity, 2- Single-Electron Flux, 3- Multi-Electron Flux, 4- Single-Electron Radiation Phase, 5- Re(E): Real part of Single-Electron Electric Field, 6- Im(E): Imaginary part of Single-Electron Electric Field, 7- Single-Electron Intensity, integrated over Time or Photon Energy'],
['w_mag', 'i', 1, 'magnetic field to be used for calculation of intensity distribution vs horizontal and vertical position: 1- approximate, 2- accurate'],
['si_fn', 's', 'res_int_se.dat', 'file name for saving calculated single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position'],
['si_pl', 's', '', 'plot the input intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['ws_fni', 's', 'res_int_pr_se.dat', 'file name for saving propagated single-e intensity distribution vs horizontal and vertical position'],
['ws_pl', 's', '', 'plot the resulting intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['wm_nm', 'i', 1000, 'number of macro-electrons (coherent wavefronts) for calculation of multi-electron wavefront propagation'],
['wm_na', 'i', 5, 'number of macro-electrons (coherent wavefronts) to average on each node for parallel (MPI-based) calculation of multi-electron wavefront propagation'],
['wm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons / coherent wavefronts) for intermediate intensity at multi-electron wavefront propagation calculation'],
['wm_ch', 'i', 0, 'type of a characteristic to be extracted after calculation of multi-electron wavefront propagation: #0- intensity (s0); 1- four Stokes components; 2- mutual intensity cut vs x; 3- mutual intensity cut vs y; 40- intensity(s0), mutual intensity cuts and degree of coherence vs X & Y'],
['wm_ap', 'i', 0, 'switch specifying representation of the resulting Stokes parameters: coordinate (0) or angular (1)'],
['wm_x0', 'f', 0.0, 'horizontal center position for mutual intensity cut calculation'],
['wm_y0', 'f', 0.0, 'vertical center position for mutual intensity cut calculation'],
['wm_ei', 'i', 0, 'integration over photon energy is required (1) or not (0); if the integration is required, the limits are taken from w_e, w_ef'],
['wm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
['wm_am', 'i', 0, 'multi-electron integration approximation method: 0- no approximation (use the standard 5D integration method), 1- integrate numerically only over e-beam energy spread and use convolution to treat transverse emittance'],
['wm_fni', 's', 'res_int_pr_me.dat', 'file name for saving propagated multi-e intensity distribution vs horizontal and vertical position'],
['wm_fbk', '', '', 'create backup file(s) with propagated multi-e intensity distribution vs horizontal and vertical position and other radiation characteristics', 'store_true'],
#to add options
['op_r', 'f', 20.0, 'longitudinal position of the first optical element [m]'],
# Former appParam:
    ['rs_type', 's', 'g', 'source type, (u) idealized undulator, (t) tabulated undulator, (m) multipole, (g) gaussian beam'],
#---Beamline optics:
# ApM1: aperture
['op_ApM1_shape', 's', 'r', 'shape'],
['op_ApM1_Dx', 'f', 0.01, 'horizontalSize'],
['op_ApM1_Dy', 'f', 0.0009, 'verticalSize'],
['op_ApM1_x', 'f', 0.0, 'horizontalOffset'],
['op_ApM1_y', 'f', 0.0, 'verticalOffset'],
# M1: mirror
['op_M1_hfn', 's', 'mirror2_1d.dat', 'heightProfileFile'],
['op_M1_dim', 's', 'y', 'orientation'],
['op_M1_ang', 'f', 0.0018, 'grazingAngle'],
['op_M1_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_M1_size_x', 'f', 0.0, 'horizontalTransverseSize'],
['op_M1_size_y', 'f', 0.0, 'verticalTransverseSize'],
# M1_Watchpoint: drift
['op_M1_Watchpoint_L', 'f', 658.3, 'length'],
# ApKB: aperture
['op_ApKB_shape', 's', 'r', 'shape'],
['op_ApKB_Dx', 'f', 0.0018, 'horizontalSize'],
['op_ApKB_Dy', 'f', 0.0018, 'verticalSize'],
['op_ApKB_x', 'f', 0.0, 'horizontalOffset'],
['op_ApKB_y', 'f', 0.0, 'verticalOffset'],
# VFM: ellipsoidMirror
['op_VFM_hfn', 's', 'mirror2_1d.dat', 'heightProfileFile'],
['op_VFM_dim', 's', 'y', 'orientation'],
['op_VFM_p', 'f', 928.3, 'firstFocusLength'],
['op_VFM_q', 'f', 1.7, 'focalLength'],
['op_VFM_ang', 'f', 0.0036, 'grazingAngle'],
['op_VFM_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_VFM_size_tang', 'f', 0.5, 'tangentialSize'],
['op_VFM_size_sag', 'f', 0.01, 'sagittalSize'],
['op_VFM_nvx', 'f', 0.0, 'normalVectorX'],
['op_VFM_nvy', 'f', 0.9999935200069984, 'normalVectorY'],
['op_VFM_nvz', 'f', -0.0035999922240050387, 'normalVectorZ'],
['op_VFM_tvx', 'f', 0.0, 'tangentialVectorX'],
['op_VFM_tvy', 'f', -0.0035999922240050387, 'tangentialVectorY'],
['op_VFM_x', 'f', 0.0, 'horizontalOffset'],
['op_VFM_y', 'f', 0.0, 'verticalOffset'],
# VFM_HFM: drift
['op_VFM_HFM_L', 'f', 0.6000000000000227, 'length'],
# HFM: ellipsoidMirror
['op_HFM_hfn', 's', 'mirror2_1d.dat', 'heightProfileFile'],
['op_HFM_dim', 's', 'x', 'orientation'],
['op_HFM_p', 'f', 928.9, 'firstFocusLength'],
['op_HFM_q', 'f', 1.1, 'focalLength'],
['op_HFM_ang', 'f', 0.0036, 'grazingAngle'],
['op_HFM_amp_coef', 'f', 1.0, 'heightAmplification'],
['op_HFM_size_tang', 'f', 0.5, 'tangentialSize'],
['op_HFM_size_sag', 'f', 0.01, 'sagittalSize'],
['op_HFM_nvx', 'f', 0.9999935200069984, 'normalVectorX'],
['op_HFM_nvy', 'f', 0.0, 'normalVectorY'],
['op_HFM_nvz', 'f', -0.0035999922240050387, 'normalVectorZ'],
['op_HFM_tvx', 'f', -0.0035999922240050387, 'tangentialVectorX'],
['op_HFM_tvy', 'f', 0.0, 'tangentialVectorY'],
['op_HFM_x', 'f', 0.0, 'horizontalOffset'],
['op_HFM_y', 'f', 0.0, 'verticalOffset'],
# HFM_Sample: drift
['op_HFM_Sample_L', 'f', 1.1000000000000227, 'length'],
#---Propagation parameters
['op_ApM1_pp', 'f', [0, 0, 1.0, 1, 0, 2.0, 5.0, 2.0, 3.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'ApM1'],
['op_M1_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'M1'],
['op_M1_Watchpoint_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'M1_Watchpoint'],
['op_ApKB_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'ApKB'],
['op_VFM_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'VFM'],
['op_VFM_HFM_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'VFM_HFM'],
['op_HFM_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'HFM'],
['op_HFM_Sample_pp', 'f', [0, 0, 1.0, 1, 0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'HFM_Sample'],
['op_fin_pp', 'f', [0, 0, 1.0, 1, 0, 0.06, 3.0, 0.1, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'final post-propagation (resize) parameters'],
#[ 0]: Auto-Resize (1) or not (0) Before propagation
#[ 1]: Auto-Resize (1) or not (0) After propagation
#[ 2]: Relative Precision for propagation with Auto-Resizing (1. is nominal)
#[ 3]: Allow (1) or not (0) for semi-analytical treatment of the quadratic (leading) phase terms at the propagation
#[ 4]: Do any Resizing on Fourier side, using FFT, (1) or not (0)
#[ 5]: Horizontal Range modification factor at Resizing (1. means no modification)
#[ 6]: Horizontal Resolution modification factor at Resizing
#[ 7]: Vertical Range modification factor at Resizing
#[ 8]: Vertical Resolution modification factor at Resizing
#[ 9]: Type of wavefront Shift before Resizing (not yet implemented)
#[10]: New Horizontal wavefront Center position after Shift (not yet implemented)
#[11]: New Vertical wavefront Center position after Shift (not yet implemented)
#[12]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Horizontal Coordinate
#[13]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Vertical Coordinate
#[14]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Longitudinal Coordinate
#[15]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Horizontal Coordinate
#[16]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Vertical Coordinate
]
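# Illustrative mapping (added for readability only; this name is not part of the generated
# script): the first nine values of every *_pp list above follow the index descriptions
# listed just before the closing bracket.
_PP_FIELD_NAMES = ['auto_resize_before', 'auto_resize_after', 'rel_precision',
                   'semi_analytical_phase', 'fft_resizing', 'x_range_factor',
                   'x_resolution_factor', 'y_range_factor', 'y_resolution_factor']
# e.g. dict(zip(_PP_FIELD_NAMES, [0, 0, 1.0, 1, 0, 0.06, 3.0, 0.1, 2.0])) labels the
# 'op_fin_pp' final resize parameters.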
def main():
v = srwl_bl.srwl_uti_parse_options(srwl_bl.srwl_uti_ext_options(varParam), use_sys_argv=True)
op = set_optics(v)
v.si = True
v.si_pl = 'xy'
v.ws = True
v.ws_pl = 'xy'
mag = None
if v.rs_type == 'm':
mag = srwlib.SRWLMagFldC()
mag.arXc.append(0)
mag.arYc.append(0)
mag.arMagFld.append(srwlib.SRWLMagFldM(v.mp_field, v.mp_order, v.mp_distribution, v.mp_len))
mag.arZc.append(v.mp_zc)
srwl_bl.SRWLBeamline(_name=v.name, _mag_approx=mag).calc_all(v, op)
main()
|
mkeilman/sirepo
|
tests/template/srw_generate_data/gaussian-x-ray-beam-through-a-beamline-containing-imperfect-mirrors.py
|
Python
|
apache-2.0
| 26,637
|
[
"Gaussian"
] |
ee54682e8dd80275b5eb755b59d0132c041abf6d793954650cbbc9e07dd08637
|
r"""
Definition
----------
This model describes a Gaussian shaped peak on a flat background
.. math::
I(q) = (\text{scale}) \exp\left[ -\tfrac12 (q-q_0)^2 / \sigma^2 \right]
+ \text{background}
with the peak having a height of *scale*, centered at $q_0$, and a standard
deviation of $\sigma$. The FWHM (full-width half-maximum) is $2.354 \sigma$.
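For reference, this numerical factor follows from the exact relation
.. math::
    \text{FWHM} = 2\sqrt{2\ln 2}\,\sigma \approx 2.3548\,\sigma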
For 2D data, scattering intensity is calculated in the same way as 1D,
where the $q$ vector is defined as
.. math::
q = \sqrt{q_x^2 + q_y^2}
References
----------
None.
Authorship and Verification
----------------------------
* **Author:**
* **Last Modified by:**
* **Last Reviewed by:**
"""
import numpy as np
from numpy import inf
name = "gaussian_peak"
title = "Gaussian shaped peak"
description = """
Model describes a Gaussian shaped peak including a flat background
Provide F(q) = scale*exp( -1/2 *[(q-peak_pos)/sigma]^2 )+ background
"""
category = "shape-independent"
# ["name", "units", default, [lower, upper], "type","description"],
parameters = [["peak_pos", "1/Ang", 0.05, [-inf, inf], "", "Peak position"],
["sigma", "1/Ang", 0.005, [0, inf], "",
"Peak width (standard deviation)"],
]
Iq = """
double scaled_dq = (q - peak_pos)/sigma;
return exp(-0.5*scaled_dq*scaled_dq); //sqrt(2*M_PI*sigma*sigma);
"""
def random():
"""Return a random parameter set for the model."""
peak_pos = 10**np.random.uniform(-3, -1)
sigma = 10**np.random.uniform(-1.3, -0.3)*peak_pos
scale = 10**np.random.uniform(0, 4)
pars = dict(
#background=1e-8,
scale=scale,
peak_pos=peak_pos,
sigma=sigma,
)
return pars
|
SasView/sasmodels
|
sasmodels/models/gaussian_peak.py
|
Python
|
bsd-3-clause
| 1,720
|
[
"Gaussian"
] |
46afaa704097e6fbf64f2540e2a0102187dd00a3f345d0813c5eec1ff296d635
|
import os
import logging
from sklearn.cluster import KMeans, SpectralClustering, DBSCAN, MeanShift,\
Birch, AffinityPropagation, AgglomerativeClustering
# Type checkers taken from here. http://stackoverflow.com/questions/25039626/find-numeric-columns-in-pandas-python
def is_type(df, baseType):
import numpy as np
import pandas as pd
test = [issubclass(np.dtype(d).type, baseType) for d in df.dtypes]
return pd.DataFrame(data = test, index = df.columns, columns = ["test"])
def calculate_anova(df, targetCol, sourceCol):
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
lm = ols('%s ~ C(%s, Sum) + c'% (targetCol, sourceCol),
data=df).fit()
table = anova_lm(lm, typ=2)
return table
def is_float(df):
    import numpy as np
    # use numpy's abstract floating type so float32 and float64 columns both match
    # (np.float was an alias for the builtin float and has been removed from recent numpy)
    return is_type(df, np.floating)
def is_number(df):
import numpy as np
return is_type(df, np.number)
def is_integer(df):
import numpy as np
return is_type(df, np.integer)
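# Usage sketch (illustrative only): each is_* helper returns a one-column boolean
# DataFrame indexed by the input frame's columns, so numeric columns can be selected
# with e.g.
#   mask = is_number(df)
#   numeric_cols = mask[mask['test']].index.tolist()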
def chunks(combos, size=9):
for i in range(0, len(combos), size):
yield combos[i:i + size]
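# e.g. list(chunks(list(range(5)), size=2)) -> [[0, 1], [2, 3], [4]]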
# Sigh, lightgbm insists this is the only way
os.environ['LIGHTGBM_EXEC'] = os.path.join(os.getenv("HOME"), 'bin', 'lightgbm')
def get_model_obj(modelType, n_clusters=None, **kwargs):
if modelType == 'knn':
from sklearn.neighbors import KNeighborsClassifier
# 6 seems to give the best trade-off between accuracy and precision
knn = KNeighborsClassifier(n_neighbors=6, **kwargs)
return knn
elif modelType == 'gaussianNB':
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB(**kwargs)
return gnb
elif modelType == 'multinomialNB':
from sklearn.naive_bayes import MultinomialNB
# TODO: figure out how to configure binomial distribution
mnb = MultinomialNB(**kwargs)
return mnb
elif modelType == 'bernoulliNB':
from sklearn.naive_bayes import BernoulliNB
bnb = BernoulliNB(**kwargs)
return bnb
elif modelType == 'randomForest':
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(random_state=234, **kwargs)
return rfc
elif modelType == 'svm':
from sklearn.svm import SVC
svc = SVC(random_state=0, probability=True, **kwargs)
return svc
elif modelType == 'LinearRegression':
#assert column, "Column name required for building a linear model"
#assert dataframe[column].shape == target.shape
from sklearn import linear_model
l_reg = linear_model.LinearRegression(**kwargs)
return l_reg
elif modelType == 'RidgeRegression':
from sklearn.linear_model import Ridge
if not kwargs:
kwargs = {'alpha': 0.5}
ridge_reg = Ridge(**kwargs)
return ridge_reg
elif modelType == 'RidgeRegressionCV':
from sklearn import linear_model
if not kwargs:
kwargs = {'alphas': [0.1, 1.0, 10.0] }
ridge_cv_reg = linear_model.RidgeCV(**kwargs)
return ridge_cv_reg
elif modelType == 'LassoRegression':
from sklearn import linear_model
if not kwargs:
kwargs = {'alpha': 0.1}
lasso_reg = linear_model.Lasso(**kwargs)
return lasso_reg
elif modelType == 'ElasticNetRegression':
from sklearn.metrics import r2_score
from sklearn import linear_model
if not kwargs:
kwargs = {'alpha': 0.1, 'l1_ratio': 0.7}
enet_reg = linear_model.ElasticNet(**kwargs)
return enet_reg
elif modelType == 'LogisticRegression':
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression(random_state=123, **kwargs)
return log_reg
elif modelType == 'RANSACRegression':
from sklearn.linear_model import LinearRegression, RANSACRegressor
ransac_model = RANSACRegressor(LinearRegression())
return ransac_model
elif modelType == 'kde':
        from sklearn.neighbors import KernelDensity
kde = KernelDensity(kernel='gaussian', bandwidth=0.2, **kwargs)
return kde
    elif modelType == 'AR':
        import statsmodels.api as sm
        # NOTE: unlike the other branches this one needs the data itself; it is assumed
        # to be passed in through kwargs as 'dataframe' since get_model_obj has no data argument.
        dataframe = kwargs.pop('dataframe')
        # fit an AR model and forecast
        ar_fitted = sm.tsa.AR(dataframe).fit(maxlag=9, method='mle', disp=-1, **kwargs)
        #ts_forecast = ar_fitted.predict(start='2008', end='2050')
        return ar_fitted
    elif modelType == 'SARIMAX':
        import statsmodels.api as sm
        # NOTE: same assumption as the AR branch; the frame (with a 'riders' column) comes via kwargs.
        df = kwargs.pop('dataframe')
        mod = sm.tsa.statespace.SARIMAX(df.riders, trend='n', order=(0,1,0),
                                        seasonal_order=(1,1,1,12), **kwargs)
        return mod
elif modelType == 'sgd':
# Online classifiers http://scikit-learn.org/stable/auto_examples/linear_model/plot_sgd_comparison.html
from sklearn.linear_model import SGDClassifier
sgd = SGDClassifier(**kwargs)
return sgd
elif modelType == 'perceptron':
from sklearn.linear_model import Perceptron
perceptron = Perceptron(**kwargs)
return perceptron
elif modelType == 'xgboost':
import xgboost as xgb
xgbm = xgb.XGBClassifier(**kwargs)
return xgbm
    elif modelType == 'baseNN':
        from keras.models import Sequential
        from keras.layers import Dense
        # NOTE: the original referenced undefined names (args, inputParams, outputParams,
        # compileParams); here they are assumed to arrive through **kwargs instead.
        inputParams = kwargs.get('inputParams', None)
        outputParams = kwargs.get('outputParams', None)
        assert inputParams, "inputParams required for baseNN"
        assert outputParams, "outputParams required for baseNN"
        # create model
        model = Sequential()
        model.add(Dense(inputParams))
        model.add(Dense(outputParams))
        if kwargs.get('compileParams'):
            # Compile model, e.g. loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']
            model.compile(**kwargs['compileParams'])
        return model
elif modelType == 'lightGBMRegression':
from pylightgbm.models import GBMRegressor
lgbm_lreg = GBMRegressor( num_iterations=100, early_stopping_round=10,
num_leaves=10, min_data_in_leaf=10)
return lgbm_lreg
elif modelType == 'lightGBMBinaryClass':
from pylightgbm.models import GBMClassifier
lgbm_bc = GBMClassifier(metric='binary_error', min_data_in_leaf=1)
return lgbm_bc
# Clustering models
elif modelType == 'KMeans':
assert n_clusters, "Number of clusters argument mandatory"
cluster_callable = KMeans
# seed of 10 for reproducibility.
clusterer = cluster_callable(n_clusters=n_clusters, random_state=10)
return clusterer
elif modelType == 'dbscan':
if not n_clusters:
logging.warn("Number of clusters irrelevant for cluster type : %s"%(modelType))
cluster_callable = DBSCAN
clusterer = cluster_callable(eps=0.5)
return clusterer
elif modelType == 'affinity_prop':
if not n_clusters:
logging.warn("Number of clusters irrelevant for cluster type : %s"%(modelType))
clusterer = AffinityPropagation(damping=.9, preference=-200)
return clusterer
elif modelType == 'spectral':
assert n_clusters, "Number of clusters argument mandatory"
clusterer = SpectralClustering(n_clusters=n_clusters,
eigen_solver='arpack',
affinity="nearest_neighbors")
return clusterer
elif modelType == 'birch':
if not n_clusters:
logging.warn("Number of clusters irrelevant for cluster type : %s"%(modelType))
clusterer = Birch(n_clusters=2)
return clusterer
    elif modelType == 'agglomerativeCluster':
        from sklearn.neighbors import kneighbors_graph
        # NOTE: this branch needs the data itself to build the connectivity matrix;
        # it is assumed to be passed in through kwargs as 'dataframe'.
        dataframe = kwargs.pop('dataframe')
        # connectivity matrix for structured Ward
        connectivity = kneighbors_graph(dataframe, n_neighbors=10, include_self=False)
        # make connectivity symmetric
        connectivity = 0.5 * (connectivity + connectivity.T)
        clusterer = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward',
                                            connectivity=connectivity)
        return clusterer
    elif modelType == 'meanShift':
        from sklearn.cluster import estimate_bandwidth
        # NOTE: same assumption as above for obtaining the data.
        dataframe = kwargs.pop('dataframe')
        # estimate bandwidth for mean shift
        bandwidth = estimate_bandwidth(dataframe, quantile=0.3)
        clusterer = MeanShift(bandwidth=bandwidth, bin_seeding=True)
        return clusterer
elif modelType == 'gmm':
from sklearn import mixture
gmm = mixture.GaussianMixture(n_components=5, covariance_type='full')
return gmm
elif modelType == 'dgmm':
from sklearn import mixture
dgmm = mixture.BayesianGaussianMixture(n_components=5,
covariance_type='full')
return dgmm
else:
        raise ValueError('Unknown model type: see utils.py for available')
|
greytip/data-science-utils
|
datascienceutils/utils.py
|
Python
|
gpl-3.0
| 8,765
|
[
"Gaussian"
] |
3a9d7869e282f0de46c5d208012e73c92a644d533c73ad2fd0a139c99c19fc6f
|
########################################################################
# $HeadURL$
########################################################################
""" DIRAC FileCatalog utilities
"""
__RCSID__ = "$Id$"
try:
import hashlib
md5 = hashlib
except:
import md5
import random, os, time
from types import StringTypes, ListType, DictType
from DIRAC import S_OK, S_ERROR
def checkArgumentFormat( path ):
""" Bring the various possible form of arguments to FileCatalog methods to
the standard dictionary form
"""
def checkArgumentDict( path ):
""" Check and process format of the arguments to FileCatalog methods """
if type( path ) in StringTypes:
urls = {path:True}
elif type( path ) == ListType:
urls = {}
for url in path:
urls[url] = True
elif type( path ) == DictType:
urls = path
else:
return S_ERROR( "checkArgumentDict: Supplied path is not of the correct format" )
return S_OK( urls )
result = checkArgumentDict( path )
if not result['OK']:
return result
pathDict = result['Value']
# Bring the lfn path to the normalized form
urls = {}
for url in pathDict:
mUrl = url
if url.startswith( 'lfn:' ):
mUrl = url[4:]
elif url.startswith( 'LFN:' ):
mUrl = url[4:]
if mUrl.startswith('/grid'):
mUrl = mUrl[5:]
normpath = os.path.normpath( mUrl )
urls[normpath] = pathDict[url]
return S_OK( urls )
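# Example (illustrative only, not part of the DIRAC API documentation):
#   checkArgumentFormat( 'LFN:/grid/vo/user/file.txt' )
# returns S_OK( { '/vo/user/file.txt': True } ) - the 'LFN:' prefix and the leading
# '/grid' component are stripped and the remaining path is normalized.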
def generateGuid( checksum, checksumtype ):
""" Generate a GUID based on the file checksum
"""
if checksum:
if checksumtype == "MD5":
checksumString = checksum
elif checksumtype == "Adler32":
checksumString = str( checksum ).zfill( 32 )
else:
checksumString = ''
if checksumString:
guid = "%s-%s-%s-%s-%s" % ( checksumString[0:8],
checksumString[8:12],
checksumString[12:16],
checksumString[16:20],
checksumString[20:32] )
guid = guid.upper()
return guid
# Failed to use the check sum, generate a new guid
myMd5 = md5.md5()
myMd5.update( str( random.getrandbits( 128 ) ) )
md5HexString = myMd5.hexdigest()
guid = "%s-%s-%s-%s-%s" % ( md5HexString[0:8],
md5HexString[8:12],
md5HexString[12:16],
md5HexString[16:20],
md5HexString[20:32] )
guid = guid.upper()
return guid
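# Example (illustrative only): an Adler32 checksum such as '3d5a0e9b' is zero-padded to
# 32 characters and sliced into the 8-4-4-4-12 layout, giving
# '00000000-0000-0000-0000-00003D5A0E9B'. Without a usable checksum a random MD5-based
# GUID is produced instead.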
def queryTime(f):
""" Decorator to measure the function call time
"""
def measureQueryTime(*args, **kwargs):
start = time.time()
result = f(*args, **kwargs)
if not 'QueryTime' in result:
result['QueryTime'] = time.time() - start
return result
return measureQueryTime
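# Usage sketch (illustrative only):
#
#   @queryTime
#   def listDirectory( path ):
#       ...
#       return S_OK( resultDict )
#
# The dictionary returned by the decorated call then carries a 'QueryTime' key holding
# the elapsed wall-clock time in seconds, unless the function already set one.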
|
avedaee/DIRAC
|
DataManagementSystem/DB/FileCatalogComponents/Utilities.py
|
Python
|
gpl-3.0
| 2,862
|
[
"DIRAC"
] |
b52839b29e8231d9de5b95020c9378cd841d164d478df04a1453971f626c2c5e
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module is used for analysis of materials with potential application as
intercalation batteries.
"""
__author__ = "Anubhav Jain, Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Anubhav Jain"
__email__ = "ajain@lbl.gov"
__date__ = "Jan 13, 2012"
__status__ = "Beta"
import itertools
from pymatgen.core.composition import Composition
from pymatgen.core.units import Charge, Time
from pymatgen.phasediagram.maker import PhaseDiagram
from pymatgen.phasediagram.entries import PDEntry
from pymatgen.apps.battery.battery_abc import AbstractElectrode, \
AbstractVoltagePair
from pymatgen.core.periodic_table import Element
from scipy.constants import N_A
class InsertionElectrode(AbstractElectrode):
"""
A set of topotactically related compounds, with different amounts of a
single element, e.g. TiO2 and LiTiO2, that can be used to define an
insertion battery electrode.
"""
def __init__(self, entries, working_ion_entry):
"""
Create a new InsertionElectrode.
Args:
entries: A list of ComputedStructureEntries (or subclasses)
representing the different topotactic states of the battery,
e.g. TiO2 and LiTiO2.
working_ion_entry: A single ComputedEntry or PDEntry
representing the element that carries charge across the
battery, e.g. Li.
"""
self._entries = entries
self._working_ion = working_ion_entry.composition.elements[0]
self._working_ion_entry = working_ion_entry
#Prepare to make phase diagram: determine elements and set their energy
#to be very high
elements = set()
for entry in entries:
elements.update(entry.composition.elements)
#Set an artificial energy for each element for convex hull generation
element_energy = max([entry.energy_per_atom for entry in entries]) + 10
pdentries = []
pdentries.extend(entries)
pdentries.extend([PDEntry(Composition({el:1}), element_energy)
for el in elements])
#Make phase diagram to determine which entries are stable vs. unstable
pd = PhaseDiagram(pdentries)
lifrac = lambda e: e.composition.get_atomic_fraction(self._working_ion)
#stable entries ordered by amount of Li asc
self._stable_entries = tuple(sorted([e for e in pd.stable_entries
if e in entries], key=lifrac))
#unstable entries ordered by amount of Li asc
self._unstable_entries = tuple(sorted([e for e in pd.unstable_entries
if e in entries], key=lifrac))
#create voltage pairs
self._vpairs = tuple([InsertionVoltagePair(self._stable_entries[i],
self._stable_entries[i + 1],
working_ion_entry)
for i in range(len(self._stable_entries) - 1)])
@property
def working_ion(self):
"""
The working ion as an Element object
"""
return self._working_ion
@property
def working_ion_entry(self):
return self._working_ion_entry
@property
def voltage_pairs(self):
return self._vpairs
def get_stable_entries(self, charge_to_discharge=True):
"""
Get the stable entries.
Args:
charge_to_discharge: order from most charge to most discharged
state? Default to True.
Returns:
A list of stable entries in the electrode, ordered by amount of the
working ion.
"""
list_copy = list(self._stable_entries)
        # list.reverse() reverses in place and returns None, so slice instead
        return list_copy if charge_to_discharge else list_copy[::-1]
def get_unstable_entries(self, charge_to_discharge=True):
"""
Returns the unstable entries for the electrode.
Args:
charge_to_discharge: Order from most charge to most discharged
state? Defaults to True.
Returns:
A list of unstable entries in the electrode, ordered by amount of
the working ion.
"""
list_copy = list(self._unstable_entries)
        return list_copy if charge_to_discharge else list_copy[::-1]
def get_all_entries(self, charge_to_discharge=True):
"""
Return all entries input for the electrode.
Args:
charge_to_discharge:
order from most charge to most discharged state? Defaults to
True.
Returns:
A list of all entries in the electrode (both stable and unstable),
ordered by amount of the working ion.
"""
all_entries = list(self.get_stable_entries())
all_entries.extend(self.get_unstable_entries())
#sort all entries by amount of working ion ASC
fsrt = lambda e: e.composition.get_atomic_fraction(self.working_ion)
all_entries = sorted([e for e in all_entries],
key=fsrt)
        return all_entries if charge_to_discharge else all_entries[::-1]
@property
def fully_charged_entry(self):
"""
The most charged entry along the topotactic path.
"""
return self._stable_entries[0]
@property
def fully_discharged_entry(self):
"""
The most discharged entry along the topotactic path.
"""
return self._stable_entries[-1]
def get_max_instability(self, min_voltage=None, max_voltage=None):
"""
The maximum instability along a path for a specific voltage range.
Args:
min_voltage: The minimum allowable voltage.
max_voltage: The maximum allowable voltage.
Returns:
Maximum decomposition energy of all compounds along the insertion
path (a subset of the path can be chosen by the optional arguments)
"""
data = []
for pair in self._select_in_voltage_range(min_voltage, max_voltage):
if pair.decomp_e_charge is not None:
data.append(pair.decomp_e_charge)
if pair.decomp_e_discharge is not None:
data.append(pair.decomp_e_discharge)
return max(data) if len(data) > 0 else None
def get_min_instability(self, min_voltage=None, max_voltage=None):
"""
The minimum instability along a path for a specific voltage range.
Args:
min_voltage: The minimum allowable voltage.
max_voltage: The maximum allowable voltage.
Returns:
Minimum decomposition energy of all compounds along the insertion
path (a subset of the path can be chosen by the optional arguments)
"""
data = []
for pair in self._select_in_voltage_range(min_voltage, max_voltage):
if pair.decomp_e_charge is not None:
data.append(pair.decomp_e_charge)
if pair.decomp_e_discharge is not None:
data.append(pair.decomp_e_discharge)
return min(data) if len(data) > 0 else None
def get_max_muO2(self, min_voltage=None, max_voltage=None):
"""
Maximum critical oxygen chemical potential along path.
Args:
min_voltage: The minimum allowable voltage.
max_voltage: The maximum allowable voltage.
Returns:
Maximum critical oxygen chemical of all compounds along the
insertion path (a subset of the path can be chosen by the optional
arguments).
"""
data = []
for pair in self._select_in_voltage_range(min_voltage, max_voltage):
if pair.muO2_discharge is not None:
                data.append(pair.muO2_discharge)
if pair.muO2_charge is not None:
data.append(pair.muO2_charge)
return max(data) if len(data) > 0 else None
def get_min_muO2(self, min_voltage=None, max_voltage=None):
"""
Minimum critical oxygen chemical potential along path.
Args:
min_voltage: The minimum allowable voltage for a given step
max_voltage: The maximum allowable voltage allowable for a given
step
Returns:
Minimum critical oxygen chemical of all compounds along the
insertion path (a subset of the path can be chosen by the optional
arguments).
"""
data = []
for pair in self._select_in_voltage_range(min_voltage, max_voltage):
            if pair.muO2_discharge is not None:
                data.append(pair.muO2_discharge)
if pair.muO2_charge is not None:
data.append(pair.muO2_charge)
return min(data) if len(data) > 0 else None
def get_sub_electrodes(self, adjacent_only=True, include_myself=True):
"""
If this electrode contains multiple voltage steps, then it is possible
to use only a subset of the voltage steps to define other electrodes.
For example, an LiTiO2 electrode might contain three subelectrodes:
[LiTiO2 --> TiO2, LiTiO2 --> Li0.5TiO2, Li0.5TiO2 --> TiO2]
This method can be used to return all the subelectrodes with some
options
Args:
adjacent_only: Only return electrodes from compounds that are
adjacent on the convex hull, i.e. no electrodes returned
will have multiple voltage steps if this is set True.
include_myself: Include this identical electrode in the list of
results.
Returns:
A list of InsertionElectrode objects
"""
battery_list = []
pair_it = self._vpairs if adjacent_only \
else itertools.combinations_with_replacement(self._vpairs, 2)
ion = self._working_ion
for pair in pair_it:
entry_charge = pair.entry_charge if adjacent_only \
else pair[0].entry_charge
entry_discharge = pair.entry_discharge if adjacent_only \
else pair[1].entry_discharge
chg_frac = entry_charge.composition.get_atomic_fraction(ion)
dischg_frac = entry_discharge.composition.get_atomic_fraction(ion)
def in_range(entry):
frac = entry.composition.get_atomic_fraction(ion)
return chg_frac <= frac <= dischg_frac
if include_myself or entry_charge != self.fully_charged_entry \
or entry_discharge != self.fully_discharged_entry:
unstable_entries = filter(in_range,
self.get_unstable_entries())
stable_entries = filter(in_range, self.get_stable_entries())
all_entries = list(stable_entries)
all_entries.extend(unstable_entries)
battery_list.append(self.__class__(all_entries,
self.working_ion_entry))
return battery_list
def as_dict_summary(self, print_subelectrodes=True):
"""
Generate a summary dict.
Args:
print_subelectrodes: Also print data on all the possible
subelectrodes.
Returns:
        A summary of this electrode's properties in dict format.
"""
chg_comp = self.fully_charged_entry.composition
dischg_comp = self.fully_discharged_entry.composition
ion = self.working_ion
d = {"average_voltage": self.get_average_voltage(),
"max_voltage": self.max_voltage,
"min_voltage": self.min_voltage,
"max_delta_volume": self.max_delta_volume,
"max_voltage_step": self.max_voltage_step,
"capacity_grav": self.get_capacity_grav(),
"capacity_vol": self.get_capacity_vol(),
"energy_grav": self.get_specific_energy(),
"energy_vol": self.get_energy_density(),
"working_ion": self._working_ion.symbol,
"nsteps": self.num_steps,
"framework": self._vpairs[0].framework.to_data_dict,
"formula_charge": chg_comp.reduced_formula,
"formula_discharge": dischg_comp.reduced_formula,
"fracA_charge": chg_comp.get_atomic_fraction(ion),
"fracA_discharge": dischg_comp.get_atomic_fraction(ion),
"max_instability": self.get_max_instability(),
"min_instability": self.get_min_instability()}
if print_subelectrodes:
f_dict = lambda c: c.as_dict_summary(print_subelectrodes=False)
d["adj_pairs"] = map(f_dict,
self.get_sub_electrodes(adjacent_only=True))
d["all_pairs"] = map(f_dict,
self.get_sub_electrodes(adjacent_only=False))
return d
def __str__(self):
return self.__repr__()
def __repr__(self):
output = []
chg_form = self.fully_charged_entry.composition.reduced_formula
dischg_form = self.fully_discharged_entry.composition.reduced_formula
output.append("InsertionElectrode with endpoints at {} and {}".format(
chg_form, dischg_form))
output.append("Avg. volt. = {} V".format(self.get_average_voltage()))
output.append("Grav. cap. = {} mAh/g".format(self.get_capacity_grav()))
output.append("Vol. cap. = {}".format(self.get_capacity_vol()))
return "\n".join(output)
@classmethod
def from_dict(cls, d):
from monty.json import MontyDecoder
dec = MontyDecoder()
return cls(dec.process_decoded(d["entries"]),
dec.process_decoded(d["working_ion_entry"]))
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entries": [entry.as_dict() for entry in self._entries],
"working_ion_entry": self.working_ion_entry.as_dict()}
class InsertionVoltagePair(AbstractVoltagePair):
"""
Defines an Insertion Voltage Pair.
Args:
entry1: Entry corresponding to one of the entries in the voltage step.
entry2: Entry corresponding to the other entry in the voltage step.
working_ion_entry: A single ComputedEntry or PDEntry representing
the element that carries charge across the battery, e.g. Li.
"""
def __init__(self, entry1, entry2, working_ion_entry):
#initialize some internal variables
working_element = working_ion_entry.composition.elements[0]
entry_charge = entry1
entry_discharge = entry2
if entry_charge.composition.get_atomic_fraction(working_element) \
> entry2.composition.get_atomic_fraction(working_element):
(entry_charge, entry_discharge) = (entry_discharge, entry_charge)
comp_charge = entry_charge.composition
comp_discharge = entry_discharge.composition
ion_sym = working_element.symbol
frame_charge_comp = Composition({el: comp_charge[el]
for el in comp_charge
if el.symbol != ion_sym})
frame_discharge_comp = Composition({el: comp_discharge[el]
for el in comp_discharge
if el.symbol != ion_sym})
#Data validation
#check that the ion is just a single element
if not working_ion_entry.composition.is_element:
raise ValueError("VoltagePair: The working ion specified must be "
"an element")
#check that at least one of the entries contains the working element
if not comp_charge.get_atomic_fraction(working_element) > 0 and \
not comp_discharge.get_atomic_fraction(working_element) > 0:
raise ValueError("VoltagePair: The working ion must be present in "
"one of the entries")
        #check that the entries do not contain the same amount of the working
#element
if comp_charge.get_atomic_fraction(working_element) == \
comp_discharge.get_atomic_fraction(working_element):
raise ValueError("VoltagePair: The working ion atomic percentage "
"cannot be the same in both the entries")
#check that the frameworks of the entries are equivalent
if not frame_charge_comp.reduced_formula == \
frame_discharge_comp.reduced_formula:
raise ValueError("VoltagePair: the specified entries must have the"
" same compositional framework")
#Initialize normalization factors, charged and discharged entries
valence_list = Element(ion_sym).oxidation_states
working_ion_valence = max(valence_list)
(self.framework,
norm_charge) = frame_charge_comp.get_reduced_composition_and_factor()
norm_discharge = \
frame_discharge_comp.get_reduced_composition_and_factor()[1]
self._working_ion_entry = working_ion_entry
#Initialize normalized properties
self._vol_charge = entry_charge.structure.volume / norm_charge
self._vol_discharge = entry_discharge.structure.volume / norm_discharge
comp_charge = entry_charge.composition
comp_discharge = entry_discharge.composition
self._mass_charge = comp_charge.weight / norm_charge
self._mass_discharge = comp_discharge.weight / norm_discharge
self._num_ions_transferred = \
(comp_discharge[working_element] / norm_discharge) \
- (comp_charge[working_element] / norm_charge)
self._voltage = \
(((entry_charge.energy / norm_charge) -
(entry_discharge.energy / norm_discharge)) / \
self._num_ions_transferred + working_ion_entry.energy_per_atom) / working_ion_valence
self._mAh = self._num_ions_transferred * Charge(1, "e").to("C") * \
Time(1, "s").to("h") * N_A * 1000 * working_ion_valence
#Step 4: add (optional) hull and muO2 data
self.decomp_e_charge = \
entry_charge.data.get("decomposition_energy", None)
self.decomp_e_discharge = \
entry_discharge.data.get("decomposition_energy", None)
self.muO2_charge = entry_charge.data.get("muO2", None)
self.muO2_discharge = entry_discharge.data.get("muO2", None)
self.entry_charge = entry_charge
self.entry_discharge = entry_discharge
self.normalization_charge = norm_charge
self.normalization_discharge = norm_discharge
self._frac_charge = comp_charge.get_atomic_fraction(working_element)
self._frac_discharge = \
comp_discharge.get_atomic_fraction(working_element)
@property
def frac_charge(self):
return self._frac_charge
@property
def frac_discharge(self):
return self._frac_discharge
@property
def voltage(self):
return self._voltage
@property
def mAh(self):
return self._mAh
@property
def mass_charge(self):
return self._mass_charge
@property
def mass_discharge(self):
return self._mass_discharge
@property
def vol_charge(self):
return self._vol_charge
@property
def vol_discharge(self):
return self._vol_discharge
@property
def working_ion_entry(self):
return self._working_ion_entry
def __repr__(self):
output = ["Insertion voltage pair with working ion {}"
.format(self._working_ion_entry.composition.reduced_formula),
"V = {}, mAh = {}".format(self.voltage, self.mAh),
"mass_charge = {}, mass_discharge = {}"
.format(self.mass_charge, self.mass_discharge),
"vol_charge = {}, vol_discharge = {}"
.format(self.vol_charge, self.vol_discharge),
"frac_charge = {}, frac_discharge = {}"
.format(self.frac_charge, self.frac_discharge)]
return "\n".join(output)
def __str__(self):
return self.__repr__()
|
xhqu1981/pymatgen
|
pymatgen/apps/battery/insertion_battery.py
|
Python
|
mit
| 20,634
|
[
"pymatgen"
] |
cf920e56d008b8cd87a48665b572fc8395cb159c993aa32e354d9d933bcbd3d6
|
#!/usr/bin/env python
# encoding: utf-8
r"""
Simple fit
==========
This tutorial shows how to do the simplest inversion case, a curve fit, by
setting up a custom forward operator. The function to be fitted is`
.. math::
f(x) = A * e^{-x/X}
with the two unknown coefficients A (a signal amplitude) and X (a decay rate).
Both A and X are assumed to be positive which is often the case for physical
properties. The easiest way to do this is via a logarithmic transformation of
the model vector (containing A and X) which is very easily done in pyGIMLi.
First we import the pygimli library under a short name pg and the numerics
library numpy. Additionally we load the python plotting module of the library
matplotlib. Both are contained in most python distributions and systems.
"""
import pygimli as pg
import numpy as np
import matplotlib.pyplot as plt
###############################################################################
# We set up the modelling operator, i.e. to return :math:`{\bf f}({\bf x})` for
# given model parameters A and X subsumed in a vector. In order to be able to
# use the operator in inversion, we derive from the abstract modelling base class.
# The latter provides the main machinery for generating the Jacobian and administering
# the model, the regularization and so on. The only function to overwrite is
# **response()**. If no **createJacobian** function is provided, the Jacobian is
# computed by brute force (forward calculations with altered parameters).
class ExpModelling(pg.Modelling):
def __init__(self, xvec, verbose=False):
super().__init__()
self.x = xvec
def response(self, model):
return model[0] * pg.exp(-self.x / model[1])
def createStartModel(self, dataVals):
return pg.Vector([1.0, 3.0])
###############################################################################
# The init function saves the x vector and defines the parameterization, i.e.
# two independent parameters (a 1D mesh with 1 cell and 2 properties).
# The response function computes the function using A=model[0] and X=model[1]
# The function startModel defines a meaningful starting vector. There are other
# methods to set the starting model, such as inv.setModel(), but this one is a default
# for people who use the class and forget about a starting model.
# We first create an abscissa vector using numpy (note that pygimli also
# provides an exp function) and generate synthetic data with two arbitrary A and
# X values.
x = np.arange(0, 1, 1e-2)
data = 10.5 * np.exp(- x / 550e-3)
###############################################################################
# We define an (absolute) error level and add Gaussian noise to the data.
error = 0.5
data += pg.randn(*data.shape)*error
relError = error / data
###############################################################################
# Next, an instance of the forward operator is created. We could use it for
# calculating the synthetic data using f.response([10.5, 0.55]) or just
# f([10.5, 0.55]). We create a real-valued (R) inversion passing the forward
# operator and the data. A verbose boolean flag could be added to provide some
# output during the inversion; another one prints more and saves files for debugging.
f = ExpModelling(x)
inv = pg.Inversion(f)
###############################################################################
# We create a real-valued logarithmic transformation and apply it to the model.
# Similar could be done for the data which are by default treated linearly.
# We then set the error level that is used for data weighting. It can be a
# float number or a vector of data length. One can also set a relative error.
# Finally, we define the inversion style as Marquardt scheme (pure local damping
# with decreasing the regularization parameter subsequently) and start with a
# relatively large regularization strength to avoid overshoot.
# Finally run yields the coefficient vector and we plot some statistics.
tLog = pg.trans.TransLog()
f.modelTrans = tLog
inv._inv.setMarquardtScheme()
inv._inv.setLambda(100)
coeff = inv.run(data, relError, verbose=True)
print(inv.relrms(), inv.chi2())
print(coeff)
###############################################################################
# We see that after 5 iterations the absolute rms value equals the noise level
# corresponding to a chi-squared misfit value of 1 as it should be the case for
# synthetic data. The relative rms (in %) is less relevant here but can be for
# other applications. Additionally the ranges for model and model response are
# given and the objective function consisting of data misfit and model
# roughness times lambda. Note that due to the local regularization the second
# term does not contribute to Phi. Set verbose to True to see the whole course
# of inversion. The values of the coefficient vector (a GIMLi real vector) are
# as expected close (i.e. equivalent) to the synthetic model.
###############################################################################
# We finally create a plotting figure and plot both data and model response.
plt.figure()
plt.plot(x, data, 'rx', x, inv.response, 'b-')
###############################################################################
# The createMesh1D automatically attributed the markers 0 and 1 to the two
# model parameters A and X, respectively. Each marker leads to a region that
# can be individually treated, e.g. the starting value, lower or upper bounds,
# or all three at the same time (setParameters). This changes the model
# transformation which can of course be region-specific.
# f.region(0).setLowerBound(0.1)
# f.region(0).setStartModel(3)
# f.region(1).setParameters(0.3, 0.01, 1.0)
###############################################################################
# If these are set before the inversion is used, they are used automatically.
# We set the model by hand using the new starting model
# inv.setVerbose(True)
# inv.setModel(f.createStartModel())
# print(inv.run())
# inv.echoStatus()
###############################################################################
# The result is pretty much the same as before but for stronger equivalence or
# smoothness-constrained regularization prior information might help a lot.
|
gimli-org/gimli
|
doc/tutorials/3_inversion/plot_0-expfit.py
|
Python
|
apache-2.0
| 6,241
|
[
"Gaussian"
] |
a3c250f3e13d80804d8f09c41d66fc34ca909855c8f61bca730225d830e0255d
|
import sys, json, os
from solver.commonSolver import CommonSolver
from logic.smbool import SMBool
from logic.smboolmanager import SMBoolManagerPlando as SMBoolManager
from logic.helpers import Pickup
from rom.rompatcher import RomPatcher
from rom.rom_patches import RomPatches
from graph.graph import AccessGraphSolver as AccessGraph
from graph.graph_utils import vanillaTransitions, vanillaBossesTransitions, vanillaEscapeTransitions, GraphUtils
from graph.location import define_location
from utils.utils import removeChars
from solver.conf import Conf
from utils.parameters import hard, infinity
from solver.solverState import SolverState
from solver.comeback import ComeBack
from rando.ItemLocContainer import ItemLocation
from utils.doorsmanager import DoorsManager
from logic.logic import Logic
from utils.objectives import Objectives
import utils.log
class InteractiveSolver(CommonSolver):
def __init__(self, output, logic):
self.interactive = True
self.errorMsg = ""
self.checkDuplicateMajor = False
self.vcr = None
self.log = utils.log.get('Solver')
self.outputFileName = output
self.firstLogFile = None
Logic.factory(logic)
self.locations = Logic.locations
(self.locsAddressName, self.locsWeb2Internal) = self.initLocsAddressName()
self.transWeb2Internal = self.initTransitionsName()
Conf.difficultyTarget = infinity
self.objectives = Objectives()
# no time limitation
self.runtimeLimit_s = 0
# used by auto tracker to know how many locs have changed
self.locDelta = 0
def initLocsAddressName(self):
addressName = {}
web2Internal = {}
for loc in Logic.locations:
webName = self.locNameInternal2Web(loc.Name)
addressName[loc.Address % 0x10000] = webName
web2Internal[webName] = loc.Name
return (addressName, web2Internal)
def initTransitionsName(self):
web2Internal = {}
for (startPoint, endPoint) in vanillaTransitions + vanillaBossesTransitions + vanillaEscapeTransitions:
for point in [startPoint, endPoint]:
web2Internal[self.apNameInternal2Web(point)] = point
return web2Internal
def dumpState(self):
state = SolverState(self.debug)
state.fromSolver(self)
state.toJson(self.outputFileName)
def initialize(self, mode, rom, presetFileName, magic, fill, startLocation):
# load rom and preset, return first state
self.debug = mode == "debug"
self.mode = mode
if self.mode != "seedless":
self.seed = os.path.basename(os.path.splitext(rom)[0])+'.sfc'
else:
self.seed = "seedless"
self.smbm = SMBoolManager()
self.presetFileName = presetFileName
self.loadPreset(self.presetFileName)
self.loadRom(rom, interactive=True, magic=magic, startLocation=startLocation)
# in plando/tracker always consider that we're doing full
self.majorsSplit = 'Full'
# hide doors
if self.doorsRando and mode in ['standard', 'race']:
DoorsManager.initTracker()
self.clearItems()
# in debug mode don't load plando locs/transitions
if self.mode == 'plando' and self.debug == False:
if fill == True:
# load the source seed transitions and items/locations
self.curGraphTransitions = self.bossTransitions + self.areaTransitions + self.escapeTransition
self.areaGraph = AccessGraph(Logic.accessPoints, self.curGraphTransitions)
self.fillPlandoLocs()
else:
if self.areaRando == True or self.bossRando == True:
plandoTrans = self.loadPlandoTransitions()
if len(plandoTrans) > 0:
self.curGraphTransitions = plandoTrans
self.areaGraph = AccessGraph(Logic.accessPoints, self.curGraphTransitions)
self.loadPlandoLocs()
# compute new available locations
self.computeLocationsDifficulty(self.majorLocations)
self.dumpState()
def iterate(self, stateJson, scope, action, params):
self.debug = params["debug"]
self.smbm = SMBoolManager()
state = SolverState()
state.fromJson(stateJson)
state.toSolver(self)
self.loadPreset(self.presetFileName)
# add already collected items to smbm
self.smbm.addItems(self.collectedItems)
if scope == 'item':
if action == 'clear':
self.clearItems(True)
else:
if action == 'add':
if self.mode in ['plando', 'seedless', 'race', 'debug']:
if params['loc'] != None:
if self.mode == 'plando':
self.setItemAt(params['loc'], params['item'], params['hide'])
else:
itemName = params.get('item', 'Nothing')
if itemName is None:
itemName = 'Nothing'
self.setItemAt(params['loc'], itemName, False)
else:
self.increaseItem(params['item'])
else:
# pickup item at locName
self.pickItemAt(params['loc'])
elif action == 'remove':
if 'loc' in params:
self.removeItemAt(params['loc'])
elif 'count' in params:
# remove last collected item
self.cancelLastItems(params['count'])
else:
self.decreaseItem(params['item'])
elif action == 'replace':
self.replaceItemAt(params['loc'], params['item'], params['hide'])
elif action == 'toggle':
self.toggleItem(params['item'])
elif action == 'upload_scav':
self.updatePlandoScavengerOrder(params['plandoScavengerOrder'])
elif scope == 'area':
if action == 'clear':
self.clearTransitions()
else:
if action == 'add':
startPoint = params['startPoint']
endPoint = params['endPoint']
self.addTransition(self.transWeb2Internal[startPoint], self.transWeb2Internal[endPoint])
elif action == 'remove':
if 'startPoint' in params:
self.cancelTransition(self.transWeb2Internal[params['startPoint']])
else:
# remove last transition
self.cancelLastTransition()
elif scope == 'door':
if action == 'replace':
doorName = params['doorName']
newColor = params['newColor']
DoorsManager.setColor(doorName, newColor)
elif action == 'toggle':
doorName = params['doorName']
DoorsManager.switchVisibility(doorName)
elif action == 'clear':
DoorsManager.initTracker()
elif scope == 'dump':
if action == 'import':
self.importDump(params["dump"])
self.areaGraph = AccessGraph(Logic.accessPoints, self.curGraphTransitions)
if scope == 'common':
if action == 'save':
return self.savePlando(params['lock'], params['escapeTimer'])
elif action == 'randomize':
self.randoPlando(params)
rewindLimit = self.locDelta if scope == 'dump' and self.locDelta > 0 else 1
lastVisitedLocs = []
# if the last loc added was a sequence break, recompute its difficulty,
# as it may be available with the newly placed item.
# generalized for the auto-tracker, which can add more than one loc at once.
if len(self.visitedLocations) > 0:
for i in range(1, rewindLimit+1):
if i > len(self.visitedLocations):
break
else:
loc = self.visitedLocations[-i]
if loc.difficulty.difficulty == -1:
lastVisitedLocs.append(loc)
for loc in lastVisitedLocs:
self.visitedLocations.remove(loc)
self.majorLocations.append(loc)
# compute new available locations
self.clearLocs(self.majorLocations)
self.computeLocationsDifficulty(self.majorLocations)
while True:
remainLocs = []
okLocs = []
for loc in lastVisitedLocs:
if loc.difficulty == False:
remainLocs.append(loc)
else:
okLocs.append(loc)
if len(remainLocs) == len(lastVisitedLocs):
# all remaining locs are seq break
for loc in lastVisitedLocs:
self.majorLocations.remove(loc)
self.visitedLocations.append(loc)
if loc.difficulty == False:
# if the loc is still sequence break, put it back as sequence break
loc.difficulty = SMBool(True, -1)
break
else:
# add available locs
for loc in okLocs:
lastVisitedLocs.remove(loc)
self.majorLocations.remove(loc)
self.visitedLocations.append(loc)
# compute again
self.clearLocs(self.majorLocations)
self.computeLocationsDifficulty(self.majorLocations)
# return them
self.dumpState()
def getLocNameFromAddress(self, address):
return self.locsAddressName[address]
def loadPlandoTransitions(self):
# add escape transition
transitionsAddr = self.romLoader.getPlandoTransitions(len(vanillaBossesTransitions) + len(vanillaTransitions) + 1)
return GraphUtils.getTransitions(transitionsAddr)
def loadPlandoLocs(self):
# get the addresses of the already filled locs, with the correct order
addresses = self.romLoader.getPlandoAddresses()
# create a copy of the locations to avoid removing locs from self.locations
self.majorLocations = self.locations[:]
for address in addresses:
# TODO::compute only the difficulty of the current loc
self.computeLocationsDifficulty(self.majorLocations)
locName = self.getLocNameFromAddress(address)
self.pickItemAt(locName)
def fillPlandoLocs(self):
self.pickup = Pickup("all")
self.comeBack = ComeBack(self)
# backup
mbLoc = self.getLoc("Mother Brain")
locationsBck = self.locations[:]
self.lastAP = self.startLocation
self.lastArea = self.startArea
(self.difficulty, self.itemsOk) = self.computeDifficulty()
# put back mother brain location
if mbLoc not in self.majorLocations and mbLoc not in self.visitedLocations:
self.majorLocations.append(mbLoc)
if self.itemsOk == False:
# add remaining locs as sequence break
for loc in self.majorLocations[:]:
loc.difficulty = SMBool(True, -1)
if loc.accessPoint is not None:
# take first ap of the loc
loc.accessPoint = list(loc.AccessFrom)[0]
self.collectMajor(loc)
self.locations = locationsBck
def fillGraph(self):
# add self-looping transitions on unused access points
usedAPs = {}
for (src, dst) in self.curGraphTransitions:
usedAPs[src] = True
usedAPs[dst] = True
singleAPs = []
for ap in Logic.accessPoints:
if ap.isInternal() == True:
continue
if ap.Name not in usedAPs:
singleAPs.append(ap.Name)
transitions = self.curGraphTransitions[:]
for apName in singleAPs:
transitions.append((apName, apName))
return transitions
def randoPlando(self, parameters):
# if all the locations are visited, do nothing
if len(self.majorLocations) == 0:
return
plandoLocsItems = {}
for loc in self.visitedLocations:
plandoLocsItems[loc.Name] = loc.itemName
plandoCurrent = {
"locsItems": plandoLocsItems,
"transitions": self.fillGraph(),
"patches": RomPatches.ActivePatches,
"doors": DoorsManager.serialize(),
"forbiddenItems": parameters["forbiddenItems"]
}
plandoCurrentJson = json.dumps(plandoCurrent)
from utils.utils import getPythonExec
params = [
getPythonExec(), os.path.expanduser("~/RandomMetroidSolver/randomizer.py"),
'--runtime', '10',
'--param', self.presetFileName,
'--output', self.outputFileName,
'--plandoRando', plandoCurrentJson,
'--progressionSpeed', 'speedrun',
'--minorQty', parameters["minorQty"],
'--maxDifficulty', 'hardcore',
'--energyQty', parameters["energyQty"],
'--startLocation', self.startLocation
]
import subprocess
subprocess.call(params)
with open(self.outputFileName, 'r') as jsonFile:
data = json.load(jsonFile)
self.errorMsg = data["errorMsg"]
# load the locations
if "itemLocs" in data:
self.clearItems(reload=True)
itemsLocs = data["itemLocs"]
# create a copy because we need self.locations to be full, else the state will be empty
self.majorLocations = self.locations[:]
for itemLoc in itemsLocs:
locName = itemLoc["Location"]["Name"]
loc = self.getLoc(locName)
# we can have locations from non-connected areas
if "difficulty" in itemLoc["Location"]:
difficulty = itemLoc["Location"]["difficulty"]
smbool = SMBool(difficulty["bool"], difficulty["difficulty"], difficulty["knows"], difficulty["items"])
loc.difficulty = smbool
itemName = itemLoc["Item"]["Type"]
loc.itemName = itemName
loc.accessPoint = itemLoc["Location"]["accessPoint"]
self.collectMajor(loc)
def savePlando(self, lock, escapeTimer):
# store filled location addresses in the ROM for the next plando creation session
errorMsg = ""
from rando.Items import ItemManager
locsItems = {}
itemLocs = []
for loc in self.visitedLocations:
locsItems[loc.Name] = loc.itemName
for loc in self.locations:
if loc.Name in locsItems:
itemLocs.append(ItemLocation(ItemManager.getItem(loc.itemName), loc))
else:
# put nothing items in unused locations
itemLocs.append(ItemLocation(ItemManager.getItem("Nothing"), loc))
# patch the ROM
if lock == True:
import random
magic = random.randint(1, 0xffff)
else:
magic = None
romPatcher = RomPatcher(magic=magic)
patches = ['credits_varia.ips', 'tracking.ips', "Escape_Animals_Disable"]
if DoorsManager.isRandom():
patches += RomPatcher.IPSPatches['DoorsColors']
patches.append("Enable_Backup_Saves")
if magic != None:
patches.insert(0, 'race_mode.ips')
patches.append('race_mode_post.ips')
romPatcher.addIPSPatches(patches)
plms = []
if self.areaRando == True or self.bossRando == True or self.escapeRando == True:
doors = GraphUtils.getDoorConnections(AccessGraph(Logic.accessPoints, self.fillGraph()), self.areaRando, self.bossRando, self.escapeRando, False)
romPatcher.writeDoorConnections(doors)
if magic == None:
doorsPtrs = GraphUtils.getAps2DoorsPtrs()
romPatcher.writePlandoTransitions(self.curGraphTransitions, doorsPtrs,
len(vanillaBossesTransitions) + len(vanillaTransitions))
if self.escapeRando == True and escapeTimer != None:
# convert from '03:00' to number of seconds
escapeTimer = int(escapeTimer[0:2]) * 60 + int(escapeTimer[3:5])
romPatcher.applyEscapeAttributes({'Timer': escapeTimer, 'Animals': None, 'patches': []}, plms)
# write plm table & random doors
romPatcher.writePlmTable(plms, self.areaRando, self.bossRando, self.startLocation)
romPatcher.writeItemsLocs(itemLocs)
romPatcher.writeItemsNumber()
romPatcher.writeSpoiler(itemLocs)
# plando is considered Full
majorsSplit = self.masterMajorsSplit if self.masterMajorsSplit in ["FullWithHUD", "Scavenger"] else "Full"
progItemLocs = []
if majorsSplit == "Scavenger":
def getLoc(locName):
for loc in self.locations:
if loc.Name == locName:
return loc
for locName in self.plandoScavengerOrder:
progItemLocs.append(ItemLocation(Location=getLoc(locName)))
if locName not in locsItems:
errorMsg = "Nothing at a Scavenger location, seed is unfinishable"
romPatcher.writeSplitLocs(majorsSplit, itemLocs, progItemLocs)
romPatcher.writeMajorsSplit(majorsSplit)
class FakeRandoSettings:
def __init__(self):
self.qty = {'energy': 'plando'}
self.progSpeed = 'plando'
self.progDiff = 'plando'
self.restrictions = {'Suits': False, 'Morph': 'plando'}
self.superFun = {}
randoSettings = FakeRandoSettings()
romPatcher.writeRandoSettings(randoSettings, itemLocs)
if magic != None:
romPatcher.writeMagic()
else:
romPatcher.writePlandoAddresses(self.visitedLocations)
romPatcher.commitIPS()
romPatcher.end()
data = romPatcher.romFile.data
preset = os.path.splitext(os.path.basename(self.presetFileName))[0]
seedCode = 'FX'
if self.bossRando == True:
seedCode = 'B'+seedCode
if DoorsManager.isRandom():
seedCode = 'D'+seedCode
if self.areaRando == True:
seedCode = 'A'+seedCode
from time import gmtime, strftime
fileName = 'VARIA_Plandomizer_{}{}_{}.sfc'.format(seedCode, strftime("%Y%m%d%H%M%S", gmtime()), preset)
data["fileName"] = fileName
# error msg in json to be displayed by the web site
data["errorMsg"] = errorMsg
with open(self.outputFileName, 'w') as jsonFile:
json.dump(data, jsonFile)
def locNameInternal2Web(self, locName):
return removeChars(locName, " ,()-")
def locNameWeb2Internal(self, locNameWeb):
return self.locsWeb2Internal[locNameWeb]
def apNameInternal2Web(self, apName):
return apName[0].lower() + removeChars(apName[1:], " ")
def getWebLoc(self, locNameWeb):
locName = self.locNameWeb2Internal(locNameWeb)
for loc in self.locations:
if loc.Name == locName:
return loc
raise Exception("Location '{}' not found".format(locName))
def pickItemAt(self, locName):
# collect the new item at locName
loc = self.getWebLoc(locName)
# check that location has not already been visited
if loc in self.visitedLocations:
self.errorMsg = "Location '{}' has already been visited".format(loc.Name)
return
if loc.difficulty is None or loc.difficulty == False:
# sequence break
loc.difficulty = SMBool(True, -1)
if loc.accessPoint is None:
# take first ap of the loc
loc.accessPoint = list(loc.AccessFrom)[0]
self.collectMajor(loc)
def setItemAt(self, locName, itemName, hide):
# set itemName at locName
loc = self.getWebLoc(locName)
# check that the location has not already been visited
if loc in self.visitedLocations:
self.errorMsg = "Location {} has already been visited".format(loc.Name)
return
# plando mode
loc.itemName = itemName
if loc.difficulty is None:
# sequence break
loc.difficulty = SMBool(True, -1)
if loc.accessPoint is None:
# take first ap of the loc
loc.accessPoint = list(loc.AccessFrom)[0]
if hide == True:
loc.Visibility = 'Hidden'
self.collectMajor(loc, itemName)
def replaceItemAt(self, locName, itemName, hide):
# replace itemName at locName
loc = self.getWebLoc(locName)
oldItemName = loc.itemName
# replace item at the old item spot in collectedItems
try:
index = next(i for i, vloc in enumerate(self.visitedLocations) if vloc.Name == loc.Name)
except Exception as e:
self.errorMsg = "Empty location {}".format(locName)
return
# major item can be set multiple times in plando mode
count = self.collectedItems.count(oldItemName)
isCount = self.smbm.isCountItem(oldItemName)
# update item in collected items after we check the count
self.collectedItems[index] = itemName
loc.itemName = itemName
# update smbm if count item or major was only there once
if isCount == True or count == 1:
self.smbm.removeItem(oldItemName)
if hide == True:
loc.Visibility = 'Hidden'
elif loc.CanHidden == True and loc.Visibility == 'Hidden':
# the loc was previously hidden, set it back to visible
loc.Visibility = 'Visible'
self.smbm.addItem(itemName)
def increaseItem(self, item):
# add item at the beginning of collectedItems to not mess with item removal when cancelling a location
self.collectedItems.insert(0, item)
self.smbm.addItem(item)
def decreaseItem(self, item):
if item in self.collectedItems:
self.collectedItems.remove(item)
self.smbm.removeItem(item)
def toggleItem(self, item):
# add or remove a major item
if item in self.collectedItems:
self.collectedItems.remove(item)
self.smbm.removeItem(item)
else:
self.collectedItems.insert(0, item)
self.smbm.addItem(item)
def clearItems(self, reload=False):
self.collectedItems = []
self.visitedLocations = []
self.lastAP = self.startLocation
self.lastArea = self.startArea
self.majorLocations = self.locations
if reload == True:
for loc in self.majorLocations:
loc.difficulty = None
self.smbm.resetItems()
def updatePlandoScavengerOrder(self, plandoScavengerOrder):
self.plandoScavengerOrder = plandoScavengerOrder
def addTransition(self, startPoint, endPoint):
# already checked in the controller that the transition is valid for the seed
self.curGraphTransitions.append((startPoint, endPoint))
def cancelLastTransition(self):
if self.areaRando == True and self.bossRando == True:
if len(self.curGraphTransitions) > 0:
self.curGraphTransitions.pop()
elif self.areaRando == True:
if len(self.curGraphTransitions) > len(self.bossTransitions) + (1 if self.escapeRando == False else 0):
self.curGraphTransitions.pop()
elif self.bossRando == True:
print("len cur graph: {} len area: {} len escape: {} len sum: {}".format(len(self.curGraphTransitions), len(self.areaTransitions), 1 if self.escapeRando == False else 0, len(self.areaTransitions) + (1 if self.escapeRando == False else 0)))
if len(self.curGraphTransitions) > len(self.areaTransitions) + (1 if self.escapeRando == False else 0):
self.curGraphTransitions.pop()
elif self.escapeRando == True:
if len(self.curGraphTransitions) > len(self.areaTransitions) + len(self.bossTransitions):
self.curGraphTransitions.pop()
def cancelTransition(self, startPoint):
# get end point
endPoint = None
for (i, (start, end)) in enumerate(self.curGraphTransitions):
if start == startPoint:
endPoint = end
break
elif end == startPoint:
endPoint = start
break
if endPoint == None:
# shouldn't happen
return
# check that transition is cancelable
if self.areaRando == True and self.bossRando == True and self.escapeRando == True:
if len(self.curGraphTransitions) == 0:
return
elif self.areaRando == True and self.escapeRando == False:
if len(self.curGraphTransitions) == len(self.bossTransitions) + len(self.escapeTransition):
return
elif [startPoint, endPoint] in self.bossTransitions or [endPoint, startPoint] in self.bossTransitions:
return
elif [startPoint, endPoint] in self.escapeTransition or [endPoint, startPoint] in self.escapeTransition:
return
elif self.bossRando == True and self.escapeRando == False:
if len(self.curGraphTransitions) == len(self.areaTransitions) + len(self.escapeTransition):
return
elif [startPoint, endPoint] in self.areaTransitions or [endPoint, startPoint] in self.areaTransitions:
return
elif [startPoint, endPoint] in self.escapeTransition or [endPoint, startPoint] in self.escapeTransition:
return
elif self.areaRando == True and self.escapeRando == True:
if len(self.curGraphTransitions) == len(self.bossTransitions):
return
elif [startPoint, endPoint] in self.bossTransitions or [endPoint, startPoint] in self.bossTransitions:
return
elif self.bossRando == True and self.escapeRando == True:
if len(self.curGraphTransitions) == len(self.areaTransitions):
return
elif [startPoint, endPoint] in self.areaTransitions or [endPoint, startPoint] in self.areaTransitions:
return
elif self.escapeRando == True and self.areaRando == False and self.bossRando == False:
if len(self.curGraphTransitions) == len(self.areaTransitions) + len(self.bossTransitions):
return
elif [startPoint, endPoint] in self.areaTransitions or [endPoint, startPoint] in self.areaTransitions:
return
elif [startPoint, endPoint] in self.bossTransitions or [endPoint, startPoint] in self.bossTransitions:
return
# remove transition
self.curGraphTransitions.pop(i)
def clearTransitions(self):
if self.areaRando == True and self.bossRando == True:
self.curGraphTransitions = []
elif self.areaRando == True:
self.curGraphTransitions = self.bossTransitions[:]
elif self.bossRando == True:
self.curGraphTransitions = self.areaTransitions[:]
else:
self.curGraphTransitions = self.bossTransitions + self.areaTransitions
if self.escapeRando == False:
self.curGraphTransitions += self.escapeTransition
def clearLocs(self, locs):
for loc in locs:
loc.difficulty = None
def getDiffThreshold(self):
# in interactive solver we don't have the max difficulty parameter
epsilon = 0.001
return hard - epsilon
# byteIndex is area index
bossBitMasks = {
"Kraid": {"byteIndex": 0x01, "bitMask": 0x01},
"Ridley": {"byteIndex": 0x02, "bitMask": 0x01},
"Phantoon": {"byteIndex": 0x03, "bitMask": 0x01},
"Draygon": {"byteIndex": 0x04, "bitMask": 0x01},
"Mother Brain": {"byteIndex": 0x05, "bitMask": 0x02},
"Spore Spawn": {"byteIndex": 0x01, "bitMask": 0x02},
"Crocomire": {"byteIndex": 0x02, "bitMask": 0x02},
"Botwoon": {"byteIndex": 0x04, "bitMask": 0x02},
"Golden Torizo": {"byteIndex": 0x02, "bitMask": 0x04}
}
areaAccessPoints = {
"Lower Mushrooms Left": {"byteIndex": 36, "bitMask": 1, "room": 0x9969, "area": "Crateria"},
"Green Pirates Shaft Bottom Right": {"byteIndex": 37, "bitMask": 16, "room": 0x99bd, "area": "Crateria"},
"Moat Right": {"byteIndex": 148, "bitMask": 4, "room": 0x95ff, "area": "Crateria"},
"Keyhunter Room Bottom": {"byteIndex": 156, "bitMask": 32, "room": 0x948c, "area": "Crateria"},
"Morph Ball Room Left": {"byteIndex": 46, "bitMask": 4, "room": 0x9e9f, "area": "Brinstar"},
"Green Brinstar Elevator": {"byteIndex": 36, "bitMask": 2, "room": 0x9938, "area": "Crateria"},
"Green Hill Zone Top Right": {"byteIndex": 46, "bitMask": 8, "room": 0x9e52, "area": "Brinstar"},
"Noob Bridge Right": {"byteIndex": 184, "bitMask": 128, "room": 0x9fba, "area": "Brinstar"},
"West Ocean Left": {"byteIndex": 148, "bitMask": 2, "room": 0x93fe, "area": "Crateria"},
"Crab Maze Left": {"byteIndex": 170, "bitMask": 4, "room": 0x957d, "area": "Crateria"},
"Lava Dive Right": {"byteIndex": 47, "bitMask": 64, "room": 0xaf14, "area": "Norfair"},
"Three Muskateers Room Left": {"byteIndex": 19, "bitMask": 2, "room": 0xb656, "area": "Norfair"},
"Warehouse Zeela Room Left": {"byteIndex": 205, "bitMask": 8, "room": 0xa471, "area": "Brinstar"},
"Warehouse Entrance Left": {"byteIndex": 205, "bitMask": 64, "room": 0xa6a1, "area": "Brinstar"},
"Warehouse Entrance Right": {"byteIndex": 205, "bitMask": 16, "room": 0xa6a1, "area": "Brinstar"},
"Single Chamber Top Right": {"byteIndex": 19, "bitMask": 4, "room": 0xad5e, "area": "Norfair"},
"Kronic Boost Room Bottom Left": {"byteIndex": 47, "bitMask": 32, "room": 0xae74, "area": "Norfair"},
"Crocomire Speedway Bottom": {"byteIndex": 41, "bitMask": 1, "room": 0xa923, "area": "Norfair"},
"Crocomire Room Top": {"byteIndex": 45, "bitMask": 1, "room": 0xa98d, "area": "Norfair"},
"Main Street Bottom": {"byteIndex": 69, "bitMask": 16, "room": 0xcfc9, "area": "Maridia"},
"Crab Hole Bottom Left": {"byteIndex": 74, "bitMask": 128, "room": 0xd21c, "area": "Maridia"},
"Red Fish Room Left": {"byteIndex": 33, "bitMask": 8, "room": 0xd104, "area": "Maridia"},
"Crab Shaft Right": {"byteIndex": 46, "bitMask": 16, "room": 0xd1a3, "area": "Maridia"},
"Aqueduct Top Left": {"byteIndex": 46, "bitMask": 8, "room": 0xd5a7, "area": "Maridia"},
"Le Coude Right": {"byteIndex": 170, "bitMask": 8, "room": 0x95a8, "area": "Crateria"},
"Red Tower Top Left": {"byteIndex": 184, "bitMask": 64, "room": 0xa253, "area": "Brinstar"},
"Caterpillar Room Top Right": {"byteIndex": 160, "bitMask": 1, "room": 0xa322, "area": "Brinstar"},
"Red Brinstar Elevator": {"byteIndex": 160, "bitMask": 32, "room": 0x962a, "area": "Crateria"},
"East Tunnel Right": {"byteIndex": 77, "bitMask": 8, "room": 0xcf80, "area": "Maridia"},
"East Tunnel Top Right": {"byteIndex": 73, "bitMask": 1, "room": 0xcf80, "area": "Maridia"},
"Glass Tunnel Top": {"byteIndex": 73, "bitMask": 16, "room": 0xcefb, "area": "Maridia"},
"Golden Four": {"byteIndex": 37, "bitMask": 8, "room": 0xa5ed, "area": "Crateria"}
}
bossAccessPoints = {
"PhantoonRoomOut": {"byteIndex": 82, "bitMask": 32, "room": 0xcc6f, "area": "WreckedShip"},
"PhantoonRoomIn": {"byteIndex": 82, "bitMask": 16, "room": 0xcd13, "area": "WreckedShip"},
"RidleyRoomOut": {"byteIndex": 71, "bitMask": 128, "room": 0xb37a, "area": "Norfair"},
"RidleyRoomIn": {"byteIndex": 70, "bitMask": 1, "room": 0xb32e, "area": "Norfair"},
"KraidRoomOut": {"byteIndex": 210, "bitMask": 2, "room": 0xa56b, "area": "Brinstar"},
"KraidRoomIn": {"byteIndex": 210, "bitMask": 1, "room": 0xa59f, "area": "Brinstar"},
"DraygonRoomOut": {"byteIndex": 169, "bitMask": 64, "room": 0xd78f, "area": "Maridia"},
"DraygonRoomIn": {"byteIndex": 169, "bitMask": 128, "room": 0xda60, "area": "Maridia"}
}
nothingScreens = {
"Energy Tank, Gauntlet": {"byteIndex": 14, "bitMask": 64, "room": 0x965b, "area": "Crateria"},
"Bomb": {"byteIndex": 31, "bitMask": 64, "room": 0x9804, "area": "Crateria"},
"Energy Tank, Terminator": {"byteIndex": 29, "bitMask": 8, "room": 0x990d, "area": "Crateria"},
"Reserve Tank, Brinstar": {"byteIndex": 21, "bitMask": 4, "room": 0x9c07, "area": "Brinstar"},
"Charge Beam": {"byteIndex": 50, "bitMask": 64, "room": 0x9d19, "area": "Brinstar"},
"Morphing Ball": {"byteIndex": 47, "bitMask": 64, "room": 0x9e9f, "area": "Brinstar"},
"Energy Tank, Brinstar Ceiling": {"byteIndex": 47, "bitMask": 1, "room": 0x9f64, "area": "Brinstar"},
"Energy Tank, Etecoons": {"byteIndex": 44, "bitMask": 2, "room": 0xa011, "area": "Brinstar"},
"Energy Tank, Waterway": {"byteIndex": 57, "bitMask": 128, "room": 0xa0d2, "area": "Brinstar"},
"Energy Tank, Brinstar Gate": {"byteIndex": 38, "bitMask": 4, "room": 0xa15b, "area": "Brinstar"},
"X-Ray Scope": {"byteIndex": 66, "bitMask": 1, "room": 0xa2ce, "area": "Brinstar"},
"Spazer": {"byteIndex": 200, "bitMask": 2, "room": 0xa447, "area": "Brinstar"},
"Energy Tank, Kraid": {"byteIndex": 209, "bitMask": 16, "room": 0xa4b1, "area": "Brinstar"},
"Varia Suit": {"byteIndex": 211, "bitMask": 64, "room": 0xa6e2, "area": "Brinstar"},
"Ice Beam": {"byteIndex": 12, "bitMask": 4, "room": 0xa890, "area": "Norfair"},
"Energy Tank, Crocomire": {"byteIndex": 46, "bitMask": 16, "room": 0xa98d, "area": "Norfair"},
"Hi-Jump Boots": {"byteIndex": 28, "bitMask": 1, "room": 0xa9e5, "area": "Norfair"},
"Grapple Beam": {"byteIndex": 68, "bitMask": 16, "room": 0xac2b, "area": "Norfair"},
"Reserve Tank, Norfair": {"byteIndex": 14, "bitMask": 32, "room": 0xac5a, "area": "Norfair"},
"Speed Booster": {"byteIndex": 140, "bitMask": 4, "room": 0xad1b, "area": "Norfair"},
"Wave Beam": {"byteIndex": 23, "bitMask": 4, "room": 0xadde, "area": "Norfair"},
"Energy Tank, Ridley": {"byteIndex": 74, "bitMask": 2, "room": 0xb698, "area": "Norfair"},
"Screw Attack": {"byteIndex": 70, "bitMask": 8, "room": 0xb6c1, "area": "Norfair"},
"Energy Tank, Firefleas": {"byteIndex": 176, "bitMask": 4, "room": 0xb6ee, "area": "Norfair"},
"Reserve Tank, Wrecked Ship": {"byteIndex": 49, "bitMask": 1, "room": 0xc98e, "area": "WreckedShip"},
"Energy Tank, Wrecked Ship": {"byteIndex": 58, "bitMask": 32, "room": 0xcc27, "area": "WreckedShip"},
"Right Super, Wrecked Ship": {"byteIndex": 74, "bitMask": 4, "room": 0xcdf1, "area": "WreckedShip"},
"Gravity Suit": {"byteIndex": 57, "bitMask": 32, "room": 0xce40, "area": "WreckedShip"},
"Energy Tank, Mama turtle": {"byteIndex": 54, "bitMask": 16, "room": 0xd055, "area": "Maridia"},
"Plasma Beam": {"byteIndex": 15, "bitMask": 8, "room": 0xd2aa, "area": "Maridia"},
"Reserve Tank, Maridia": {"byteIndex": 62, "bitMask": 8, "room": 0xd4ef, "area": "Maridia"},
"Spring Ball": {"byteIndex": 196, "bitMask": 64, "room": 0xd6d0, "area": "Maridia"},
"Energy Tank, Botwoon": {"byteIndex": 39, "bitMask": 4, "room": 0xd7e4, "area": "Maridia"},
"Space Jump": {"byteIndex": 172, "bitMask": 2, "room": 0xd9aa, "area": "Maridia"},
"Power Bomb (Crateria surface)": {"byteIndex": 136, "bitMask": 64, "room": 0x93aa, "area": "Crateria"},
"Missile (outside Wrecked Ship bottom)": {"byteIndex": 152, "bitMask": 2, "room": 0x93fe, "area": "Crateria"},
"Missile (outside Wrecked Ship top)": {"byteIndex": 132, "bitMask": 1, "room": 0x93fe, "area": "Crateria"},
"Missile (outside Wrecked Ship middle)": {"byteIndex": 140, "bitMask": 2, "room": 0x93fe, "area": "Crateria"},
"Missile (Crateria moat)": {"byteIndex": 148, "bitMask": 8, "room": 0x95ff, "area": "Crateria"},
"Missile (Crateria bottom)": {"byteIndex": 78, "bitMask": 8, "room": 0x975c, "area": "Crateria"},
"Missile (Crateria gauntlet right)": {"byteIndex": 17, "bitMask": 16, "room": 0x99bd, "area": "Crateria"},
"Missile (Crateria gauntlet left)": {"byteIndex": 17, "bitMask": 16, "room": 0x99bd, "area": "Crateria"},
"Super Missile (Crateria)": {"byteIndex": 43, "bitMask": 128, "room": 0x99f9, "area": "Crateria"},
"Missile (Crateria middle)": {"byteIndex": 34, "bitMask": 128, "room": 0x9a90, "area": "Crateria"},
"Power Bomb (green Brinstar bottom)": {"byteIndex": 33, "bitMask": 8, "room": 0x9ad9, "area": "Brinstar"},
"Super Missile (pink Brinstar)": {"byteIndex": 43, "bitMask": 128, "room": 0x9b5b, "area": "Brinstar"},
"Missile (green Brinstar below super missile)": {"byteIndex": 21, "bitMask": 16, "room": 0x9bc8, "area": "Brinstar"},
"Super Missile (green Brinstar top)": {"byteIndex": 17, "bitMask": 32, "room": 0x9bc8, "area": "Brinstar"},
"Missile (green Brinstar behind missile)": {"byteIndex": 21, "bitMask": 2, "room": 0x9c07, "area": "Brinstar"},
"Missile (green Brinstar behind reserve tank)": {"byteIndex": 21, "bitMask": 2, "room": 0x9c07, "area": "Brinstar"},
"Missile (pink Brinstar top)": {"byteIndex": 34, "bitMask": 64, "room": 0x9d19, "area": "Brinstar"},
"Missile (pink Brinstar bottom)": {"byteIndex": 46, "bitMask": 64, "room": 0x9d19, "area": "Brinstar"},
"Power Bomb (pink Brinstar)": {"byteIndex": 37, "bitMask": 1, "room": 0x9e11, "area": "Brinstar"},
"Missile (green Brinstar pipe)": {"byteIndex": 50, "bitMask": 2, "room": 0x9e52, "area": "Brinstar"},
"Power Bomb (blue Brinstar)": {"byteIndex": 46, "bitMask": 1, "room": 0x9e9f, "area": "Brinstar"},
"Missile (blue Brinstar middle)": {"byteIndex": 172, "bitMask": 128, "room": 0x9f64, "area": "Brinstar"},
"Super Missile (green Brinstar bottom)": {"byteIndex": 44, "bitMask": 4, "room": 0xa051, "area": "Brinstar"},
"Missile (blue Brinstar bottom)": {"byteIndex": 51, "bitMask": 8, "room": 0xa107, "area": "Brinstar"},
"Missile (blue Brinstar top)": {"byteIndex": 39, "bitMask": 4, "room": 0xa1d8, "area": "Brinstar"},
"Missile (blue Brinstar behind missile)": {"byteIndex": 39, "bitMask": 4, "room": 0xa1d8, "area": "Brinstar"},
"Power Bomb (red Brinstar sidehopper room)": {"byteIndex": 164, "bitMask": 16, "room": 0xa37c, "area": "Brinstar"},
"Power Bomb (red Brinstar spike room)": {"byteIndex": 176, "bitMask": 16, "room": 0xa3ae, "area": "Brinstar"},
"Missile (red Brinstar spike room)": {"byteIndex": 176, "bitMask": 32, "room": 0xa3ae, "area": "Brinstar"},
"Missile (Kraid)": {"byteIndex": 205, "bitMask": 1, "room": 0xa4da, "area": "Brinstar"},
"Missile (lava room)": {"byteIndex": 22, "bitMask": 128, "room": 0xa788, "area": "Norfair"},
"Missile (below Ice Beam)": {"byteIndex": 20, "bitMask": 32, "room": 0xa8f8, "area": "Norfair"},
"Missile (above Crocomire)": {"byteIndex": 29, "bitMask": 16, "room": 0xaa0e, "area": "Norfair"},
"Missile (Hi-Jump Boots)": {"byteIndex": 25, "bitMask": 128, "room": 0xaa41, "area": "Norfair"},
"Energy Tank (Hi-Jump Boots)": {"byteIndex": 25, "bitMask": 64, "room": 0xaa41, "area": "Norfair"},
"Power Bomb (Crocomire)": {"byteIndex": 45, "bitMask": 64, "room": 0xaade, "area": "Norfair"},
"Missile (below Crocomire)": {"byteIndex": 65, "bitMask": 2, "room": 0xab3b, "area": "Norfair"},
"Missile (Grapple Beam)": {"byteIndex": 65, "bitMask": 128, "room": 0xab8f, "area": "Norfair"},
"Missile (Norfair Reserve Tank)": {"byteIndex": 14, "bitMask": 32, "room": 0xac5a, "area": "Norfair"},
"Missile (bubble Norfair green door)": {"byteIndex": 14, "bitMask": 4, "room": 0xac83, "area": "Norfair"},
"Missile (bubble Norfair)": {"byteIndex": 26, "bitMask": 1, "room": 0xacb3, "area": "Norfair"},
"Missile (Speed Booster)": {"byteIndex": 140, "bitMask": 8, "room": 0xacf0, "area": "Norfair"},
"Missile (Wave Beam)": {"byteIndex": 23, "bitMask": 32, "room": 0xadad, "area": "Norfair"},
"Missile (Gold Torizo)": {"byteIndex": 66, "bitMask": 32, "room": 0xb283, "area": "Norfair"},
"Super Missile (Gold Torizo)": {"byteIndex": 66, "bitMask": 16, "room": 0xb283, "area": "Norfair"},
"Missile (Mickey Mouse room)": {"byteIndex": 47, "bitMask": 8, "room": 0xb40a, "area": "Norfair"},
"Missile (lower Norfair above fire flea room)": {"byteIndex": 152, "bitMask": 16, "room": 0xb510, "area": "Norfair"},
"Power Bomb (lower Norfair above fire flea room)": {"byteIndex": 156, "bitMask": 4, "room": 0xb55a, "area": "Norfair"},
"Power Bomb (Power Bombs of shame)": {"byteIndex": 188, "bitMask": 128, "room": 0xb5d5, "area": "Norfair"},
"Missile (lower Norfair near Wave Beam)": {"byteIndex": 27, "bitMask": 4, "room": 0xb656, "area": "Norfair"},
"Missile (Wrecked Ship middle)": {"byteIndex": 69, "bitMask": 8, "room": 0xcaf6, "area": "WreckedShip"},
"Missile (Gravity Suit)": {"byteIndex": 57, "bitMask": 4, "room": 0xc98e, "area": "WreckedShip"},
"Missile (Wrecked Ship top)": {"byteIndex": 46, "bitMask": 4, "room": 0xcaae, "area": "WreckedShip"},
"Super Missile (Wrecked Ship left)": {"byteIndex": 73, "bitMask": 1, "room": 0xcda8, "area": "WreckedShip"},
"Missile (green Maridia shinespark)": {"byteIndex": 53, "bitMask": 32, "room": 0xcfc9, "area": "Maridia"},
"Super Missile (green Maridia)": {"byteIndex": 49, "bitMask": 16, "room": 0xcfc9, "area": "Maridia"},
"Missile (green Maridia tatori)": {"byteIndex": 58, "bitMask": 16, "room": 0xd055, "area": "Maridia"},
# TODO::check these two if they are not swapped on the map ?
"Super Missile (yellow Maridia)": {"byteIndex": 29, "bitMask": 8, "room": 0xd13b, "area": "Maridia"},
"Missile (yellow Maridia super missile)": {"byteIndex": 29, "bitMask": 8, "room": 0xd13b, "area": "Maridia"},
"Missile (yellow Maridia false wall)": {"byteIndex": 30, "bitMask": 8, "room": 0xd1dd, "area": "Maridia"},
"Missile (left Maridia sand pit room)": {"byteIndex": 62, "bitMask": 8, "room": 0xd4ef, "area": "Maridia"},
"Missile (right Maridia sand pit room)": {"byteIndex": 62, "bitMask": 1, "room": 0xd51e, "area": "Maridia"},
"Power Bomb (right Maridia sand pit room)": {"byteIndex": 67, "bitMask": 128, "room": 0xd51e, "area": "Maridia"},
"Missile (pink Maridia)": {"byteIndex": 43, "bitMask": 128, "room": 0xd5a7, "area": "Maridia"},
"Super Missile (pink Maridia)": {"byteIndex": 43, "bitMask": 64, "room": 0xd5a7, "area": "Maridia"},
"Missile (Draygon)": {"byteIndex": 161, "bitMask": 32, "room": 0xd78f, "area": "Maridia"}
}
doorsScreen = {
# crateria
'LandingSiteRight': {"byteIndex": 23, "bitMask": 1, "room": 0x91f8, "area": "Crateria"},
'LandingSiteTopRight': {"byteIndex": 11, "bitMask": 1, "room": 0x91f8, "area": "Crateria"},
'KihunterBottom': {"byteIndex": 156, "bitMask": 32, "room": 0x948c, "area": "Crateria"},
'KihunterRight': {"byteIndex": 148, "bitMask": 16, "room": 0x948c, "area": "Crateria"},
'FlywayRight': {"byteIndex": 31, "bitMask": 128, "room": 0x9879, "area": "Crateria"},
'GreenPiratesShaftBottomRight': {"byteIndex": 37, "bitMask": 16, "room": 0x99bd, "area": "Crateria"},
'RedBrinstarElevatorTop': {"byteIndex": 160, "bitMask": 32, "room": 0x962a, "area": "Crateria"},
'ClimbRight': {"byteIndex": 70, "bitMask": 8, "room": 0x96ba, "area": "Crateria"},
# blue brinstar
'ConstructionZoneRight': {"byteIndex": 47, "bitMask": 4, "room": 0x9f11, "area": "Brinstar"},
# green brinstar
'GreenHillZoneTopRight': {"byteIndex": 46, "bitMask": 8, "room": 0x9e52, "area": "Brinstar"},
'NoobBridgeRight': {"byteIndex": 184, "bitMask": 128, "room": 0x9fba, "area": "Brinstar"},
'MainShaftRight': {"byteIndex": 21, "bitMask": 64, "room": 0x9ad9, "area": "Brinstar"},
'MainShaftBottomRight': {"byteIndex": 29, "bitMask": 64, "room": 0x9ad9, "area": "Brinstar"},
'EarlySupersRight': {"byteIndex": 21, "bitMask": 8, "room": 0x9bc8, "area": "Brinstar"},
'EtecoonEnergyTankLeft': {"byteIndex": 44, "bitMask": 2, "room": 0xa011, "area": "Brinstar"},
# pink brinstar
'BigPinkTopRight': {"byteIndex": 22, "bitMask": 32, "room": 0x9d19, "area": "Brinstar"},
'BigPinkRight': {"byteIndex": 38, "bitMask": 32, "room": 0x9d19, "area": "Brinstar"},
'BigPinkBottomRight': {"byteIndex": 46, "bitMask": 32, "room": 0x9d19, "area": "Brinstar"},
'BigPinkBottomLeft': {"byteIndex": 57, "bitMask": 1, "room": 0x9d19, "area": "Brinstar"},
# red brinstar
'RedTowerLeft': {"byteIndex": 192, "bitMask": 64, "room": 0xa253, "area": "Brinstar"},
'RedBrinstarFirefleaLeft': {"byteIndex": 67, "bitMask": 64, "room": 0xa293, "area": "Brinstar"},
'RedTowerElevatorTopLeft': {"byteIndex": 160, "bitMask": 4, "room": 0xa322, "area": "Brinstar"},
'RedTowerElevatorLeft': {"byteIndex": 168, "bitMask": 4, "room": 0xa322, "area": "Brinstar"},
'RedTowerElevatorBottomLeft': {"byteIndex": 176, "bitMask": 4, "room": 0xa322, "area": "Brinstar"},
'BelowSpazerTopRight': {"byteIndex": 200, "bitMask": 4, "room": 0xa408, "area": "Brinstar"},
# Wrecked ship
'WestOceanRight': {"byteIndex": 149, "bitMask": 4, "room": 0x93fe, "area": "Crateria"},
'LeCoudeBottom': {"byteIndex": 170, "bitMask": 8, "room": 0x95a8, "area": "Crateria"},
'WreckedShipMainShaftBottom': {"byteIndex": 78, "bitMask": 128, "room": 0xcaf6, "area": "WreckedShip"},
'ElectricDeathRoomTopLeft': {"byteIndex": 58, "bitMask": 4, "room": 0xcbd5, "area": "WreckedShip"},
# Upper Norfair
'BusinessCenterTopLeft': {"byteIndex": 17, "bitMask": 32, "room": 0xa7de, "area": "Norfair"},
'BusinessCenterBottomLeft': {"byteIndex": 25, "bitMask": 32, "room": 0xa7de, "area": "Norfair"},
'CathedralEntranceRight': {"byteIndex": 17, "bitMask": 4, "room": 0xa7b3, "area": "Norfair"},
'CathedralRight': {"byteIndex": 22, "bitMask": 128, "room": 0xa788, "area": "Norfair"},
'BubbleMountainTopRight': {"byteIndex": 14, "bitMask": 1, "room": 0xacb3, "area": "Norfair"},
'BubbleMountainTopLeft': {"byteIndex": 14, "bitMask": 2, "room": 0xacb3, "area": "Norfair"},
'SpeedBoosterHallRight': {"byteIndex": 140, "bitMask": 8, "room": 0xacf0, "area": "Norfair"},
'SingleChamberRight': {"byteIndex": 23, "bitMask": 128, "room": 0xad5e, "area": "Norfair"},
'DoubleChamberRight': {"byteIndex": 23, "bitMask": 8, "room": 0xadad, "area": "Norfair"},
'KronicBoostBottomLeft': {"byteIndex": 47, "bitMask": 32, "room": 0xae74, "area": "Norfair"},
'CrocomireSpeedwayBottom': {"byteIndex": 41, "bitMask": 1, "room": 0xa923, "area": "Norfair"},
# Crocomire
'PostCrocomireUpperLeft': {"byteIndex": 45, "bitMask": 32, "room": 0xaa82, "area": "Norfair"},
'PostCrocomireShaftRight': {"byteIndex": 65, "bitMask": 32, "room": 0xab07, "area": "Norfair"},
# Lower Norfair
'RedKihunterShaftBottom': {"byteIndex": 184, "bitMask": 4, "room": 0xb585, "area": "Norfair"},
'WastelandLeft': {"byteIndex": 196, "bitMask": 64, "room": 0xb5d5, "area": "Norfair"},
# Maridia
'MainStreetBottomRight': {"byteIndex": 69, "bitMask": 16, "room": 0xcfc9, "area": "Maridia"},
'FishTankRight': {"byteIndex": 66, "bitMask": 128, "room": 0xd017, "area": "Maridia"},
'CrabShaftRight': {"byteIndex": 46, "bitMask": 16, "room": 0xd1a3, "area": "Maridia"},
'ColosseumBottomRight': {"byteIndex": 161, "bitMask": 128, "room": 0xd72a, "area": "Maridia"},
'PlasmaSparkBottom': {"byteIndex": 22, "bitMask": 2, "room": 0xd340, "area": "Maridia"},
'OasisTop': {"byteIndex": 66, "bitMask": 2, "room": 0xd48e, "area": "Maridia"}
}
mapOffsetEnum = {
"Crateria": 0,
"Brinstar": 0x100,
"Norfair": 0x200,
"WreckedShip": 0x300,
"Maridia": 0x400
}
def importDump(self, dumpFileName):
with open(dumpFileName, 'r') as jsonFile:
dumpData = json.load(jsonFile)
# first update current access point
self.lastAP = dumpData["newAP"]
dataEnum = {
"state": '1',
"map": '2',
"curMap": '3',
"samus": '4',
"items": '5',
"boss": '6'
}
currentState = dumpData["currentState"]
self.locDelta = 0
for dataType, offset in dumpData["stateDataOffsets"].items():
if dataType == dataEnum["items"]:
# get item data, loop on all locations to check if they have been visited
for loc in self.locations:
# loc Id is used to index into the items data; boss locations don't have an Id
if loc.Id is None:
continue
# nothing locs are handled later
if loc.itemName == 'Nothing':
continue
byteIndex = loc.Id >> 3
bitMask = 0x01 << (loc.Id & 7)
if currentState[offset + byteIndex] & bitMask != 0:
if loc not in self.visitedLocations:
self.pickItemAt(self.locNameInternal2Web(loc.Name))
self.locDelta += 1
else:
if loc in self.visitedLocations:
self.removeItemAt(self.locNameInternal2Web(loc.Name))
elif dataType == dataEnum["boss"]:
for boss, bossData in self.bossBitMasks.items():
byteIndex = bossData["byteIndex"]
bitMask = bossData["bitMask"]
loc = self.getLoc(boss)
if currentState[offset + byteIndex] & bitMask != 0:
if loc not in self.visitedLocations:
self.pickItemAt(self.locNameInternal2Web(loc.Name))
self.locDelta += 1
else:
if loc in self.visitedLocations:
self.removeItemAt(self.locNameInternal2Web(loc.Name))
elif dataType == dataEnum["map"]:
if self.areaRando or self.bossRando:
availAPs = set()
for apName, apData in self.areaAccessPoints.items():
if self.isElemAvailable(currentState, offset, apData):
availAPs.add(apName)
for apName, apData in self.bossAccessPoints.items():
if self.isElemAvailable(currentState, offset, apData):
availAPs.add(apName)
# static transitions
if self.areaRando == True and self.bossRando == True:
staticTransitions = []
possibleTransitions = self.bossTransitions + self.areaTransitions
elif self.areaRando == True:
staticTransitions = self.bossTransitions[:]
possibleTransitions = self.areaTransitions[:]
elif self.bossRando == True:
staticTransitions = self.areaTransitions[:]
possibleTransitions = self.bossTransitions[:]
if self.escapeRando == False:
staticTransitions += self.escapeTransition
# remove static transitions from current transitions
dynamicTransitions = self.curGraphTransitions[:]
for transition in self.curGraphTransitions:
if transition in staticTransitions:
dynamicTransitions.remove(transition)
# remove dynamic transitions not visited
for transition in dynamicTransitions:
if transition[0] not in availAPs and transition[1] not in availAPs:
self.curGraphTransitions.remove(transition)
# add new transitions
for transition in possibleTransitions:
if transition[0] in availAPs and transition[1] in availAPs:
self.curGraphTransitions.append(transition)
if self.hasNothing:
# get locs with nothing
locsNothing = [loc for loc in self.locations if loc.itemName == 'Nothing']
for loc in locsNothing:
locData = self.nothingScreens[loc.Name]
if self.isElemAvailable(currentState, offset, locData):
# nothing has been seen, check if loc is already visited
if not loc in self.visitedLocations:
# visit it
self.pickItemAt(self.locNameInternal2Web(loc.Name))
self.locDelta += 1
else:
# nothing not yet seen, check if loc is already visited
if loc in self.visitedLocations:
# unvisit it
self.removeItemAt(self.locNameInternal2Web(loc.Name))
if self.doorsRando:
# get the currently hidden / revealed door names as sets
(hiddenDoors, revealedDoor) = DoorsManager.getDoorsState()
for doorName in hiddenDoors:
# check if door is still hidden
doorData = self.doorsScreen[doorName]
if self.isElemAvailable(currentState, offset, doorData):
DoorsManager.switchVisibility(doorName)
for doorName in revealedDoor:
# check if door is still visible
doorData = self.doorsScreen[doorName]
if not self.isElemAvailable(currentState, offset, doorData):
DoorsManager.switchVisibility(doorName)
def isElemAvailable(self, currentState, offset, apData):
byteIndex = apData["byteIndex"]
bitMask = apData["bitMask"]
return currentState[offset + byteIndex + self.mapOffsetEnum[apData["area"]]] & bitMask != 0
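# Illustrative sketch (added for clarity, not part of the original solver):
# how importDump() locates a location's flag in the items bitmap. Each
# location occupies one bit; Id >> 3 selects the byte and Id & 7 selects the
# bit within that byte, mirroring the computation used in importDump() above.
def _itemBitmapPosition(locId):
    # 8 location flags per byte
    byteIndex = locId >> 3
    # bit within the selected byte
    bitMask = 0x01 << (locId & 7)
    return (byteIndex, bitMask)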
|
theonlydude/RandomMetroidSolver
|
solver/interactiveSolver.py
|
Python
|
mit
| 55,648
|
[
"VisIt"
] |
3c1e519dc418a075751085cdfb67987192a1410647a5623222a47dfc9f7a2c82
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import espressopp.unittest
from espressopp.interaction.FENE import *
from espressopp import Real3D, infinity
class Test0FENE(espressopp.unittest.TestCase) :
def test0Energy(self) :
fene=FENE(K=1.0, r0=1.0, rMax=0.5)
# root = minimum
self.assertAlmostEqual(fene.computeEnergy(1.0), 0.0)
self.assertAlmostEqual(fene.computeEnergy(1.0, 0.0, 0.0), 0.0)
self.assertAlmostEqual((fene.computeForce(1.0, 0.0, 0.0) - Real3D(0.0, 0.0, 0.0)).sqr(), 0.0)
if __name__ == "__main__":
unittest.main()
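# Illustrative reference (added for clarity, not part of the original test):
# the FENE bond energy is commonly written as
#   U(r) = -0.5 * K * rMax**2 * ln(1 - ((r - r0) / rMax)**2),
# so U(r0) == 0 and the force vanishes there, which is what test0Energy
# checks at the minimum r = 1.0. The helper below assumes this standard form.
import math
def fene_energy_reference(r, K=1.0, r0=1.0, rMax=0.5):
    return -0.5 * K * rMax**2 * math.log(1.0 - ((r - r0) / rMax)**2)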
|
MrTheodor/espressopp
|
testsuite/interaction_potentials/unittest/PTestFENE.py
|
Python
|
gpl-3.0
| 1,424
|
[
"ESPResSo"
] |
0ec43c982d9c2c15394a8e519ade99f2840785aadd3a793c7349d1229da72baa
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 16.3.8
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'}
DOCUMENTATION = '''
---
module: avi_healthmonitor
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of HealthMonitor Avi RESTful Object
description:
- This module is used to configure the HealthMonitor object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
description:
description:
- User defined description for the object.
dns_monitor:
description:
- Healthmonitordns settings for healthmonitor.
external_monitor:
description:
- Healthmonitorexternal settings for healthmonitor.
failed_checks:
description:
- Number of continuous failed health checks before the server is marked down.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.
http_monitor:
description:
- Healthmonitorhttp settings for healthmonitor.
https_monitor:
description:
- Healthmonitorhttp settings for healthmonitor.
monitor_port:
description:
- Use this port instead of the port defined for the server in the pool.
- If the monitor succeeds to this port, the load balanced traffic will still be sent to the port of the server defined within the pool.
name:
description:
- A user-friendly name for this health monitor.
required: true
receive_timeout:
description:
- A valid response from the server is expected within the receive timeout window.
- This timeout must be less than the send interval.
- If server status is regularly flapping up and down, consider increasing this value.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.
send_interval:
description:
- Frequency, in seconds, that monitors are sent to a server.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
successful_checks:
description:
- Number of continuous successful health checks before server is marked up.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.
tcp_monitor:
description:
- Healthmonitortcp settings for healthmonitor.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Type of the health monitor.
required: true
udp_monitor:
description:
- Healthmonitorudp settings for healthmonitor.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the health monitor.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a HTTPS health monitor
avi_healthmonitor:
controller: 10.10.27.90
username: admin
password: AviNetworks123!
https_monitor:
http_request: HEAD / HTTP/1.0
http_response_code:
- HTTP_2XX
- HTTP_3XX
receive_timeout: 4
failed_checks: 3
send_interval: 10
successful_checks: 3
type: HEALTH_MONITOR_HTTPS
name: MyWebsite-HTTPS
'''
RETURN = '''
obj:
description: HealthMonitor (api/healthmonitor) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi_ansible_utils import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
description=dict(type='str',),
dns_monitor=dict(type='dict',),
external_monitor=dict(type='dict',),
failed_checks=dict(type='int',),
http_monitor=dict(type='dict',),
https_monitor=dict(type='dict',),
monitor_port=dict(type='int',),
name=dict(type='str', required=True),
receive_timeout=dict(type='int',),
send_interval=dict(type='int',),
successful_checks=dict(type='int',),
tcp_monitor=dict(type='dict',),
tenant_ref=dict(type='str',),
type=dict(type='str', required=True),
udp_monitor=dict(type='dict',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=16.3.5.post1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'healthmonitor',
set([]))
if __name__ == '__main__':
main()
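# Illustrative playbook sketch (added for clarity, not part of the original
# module): removing the monitor created in the EXAMPLES block by setting
# state to absent. The connection parameters are placeholders reused from the
# example above.
#
# - name: Delete the HTTPS health monitor
#   avi_healthmonitor:
#     controller: 10.10.27.90
#     username: admin
#     password: AviNetworks123!
#     name: MyWebsite-HTTPS
#     type: HEALTH_MONITOR_HTTPS
#     state: absent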
|
0x46616c6b/ansible
|
lib/ansible/modules/network/avi/avi_healthmonitor.py
|
Python
|
gpl-3.0
| 6,100
|
[
"VisIt"
] |
2aafe9cc0824eb189828b706b043d8026c4c192ac56acd4ecebec7586368218f
|
#__docformat__ = "restructuredtext en"
# ******NOTICE***************
# optimize.py module by Travis E. Oliphant
#
# You may copy and use this module as you see fit with no
# guarantee implied provided you keep this notice in all copies.
# *****END NOTICE************
# A collection of optimization algorithms. Version 0.5
# CHANGES
# Added fminbound (July 2001)
# Added brute (Aug. 2002)
# Finished line search satisfying strong Wolfe conditions (Mar. 2004)
# Updated strong Wolfe conditions line search to use
# cubic-interpolation (Mar. 2004)
from __future__ import division, print_function, absolute_import
# Minimization routines
__all__ = ['fmin', 'fmin_powell', 'fmin_bfgs', 'fmin_ncg', 'fmin_cg',
'fminbound', 'brent', 'golden', 'bracket', 'rosen', 'rosen_der',
'rosen_hess', 'rosen_hess_prod', 'brute', 'approx_fprime',
'line_search', 'check_grad', 'OptimizeResult', 'show_options',
'OptimizeWarning']
__docformat__ = "restructuredtext en"
import warnings
import sys
import numpy
from scipy._lib.six import callable
from numpy import (atleast_1d, eye, mgrid, argmin, zeros, shape, squeeze,
vectorize, asarray, sqrt, Inf, asfarray, isinf)
import numpy as np
from .linesearch import (line_search_wolfe1, line_search_wolfe2,
line_search_wolfe2 as line_search,
LineSearchWarning)
from scipy._lib._util import getargspec_no_self as _getargspec
# standard status messages of optimizers
_status_message = {'success': 'Optimization terminated successfully.',
'maxfev': 'Maximum number of function evaluations has '
'been exceeded.',
'maxiter': 'Maximum number of iterations has been '
'exceeded.',
'pr_loss': 'Desired error not necessarily achieved due '
'to precision loss.'}
class MemoizeJac(object):
""" Decorator that caches the value gradient of function each time it
is called. """
def __init__(self, fun):
self.fun = fun
self.jac = None
self.x = None
def __call__(self, x, *args):
self.x = numpy.asarray(x).copy()
fg = self.fun(x, *args)
self.jac = fg[1]
return fg[0]
def derivative(self, x, *args):
if self.jac is not None and numpy.alltrue(x == self.x):
return self.jac
else:
self(x, *args)
return self.jac
class OptimizeResult(dict):
""" Represents the optimization result.
Attributes
----------
x : ndarray
The solution of the optimization.
success : bool
Whether or not the optimizer exited successfully.
status : int
Termination status of the optimizer. Its value depends on the
underlying solver. Refer to `message` for details.
message : str
Description of the cause of the termination.
fun, jac, hess: ndarray
Values of objective function, its Jacobian and its Hessian (if
available). The Hessians may be approximations, see the documentation
of the function in question.
hess_inv : object
Inverse of the objective function's Hessian; may be an approximation.
Not available for all solvers. The type of this attribute may be
either np.ndarray or scipy.sparse.linalg.LinearOperator.
nfev, njev, nhev : int
Number of evaluations of the objective functions and of its
Jacobian and Hessian.
nit : int
Number of iterations performed by the optimizer.
maxcv : float
The maximum constraint violation.
Notes
-----
There may be additional attributes not listed above depending on the
specific solver. Since this class is essentially a subclass of dict
with attribute accessors, one can see which attributes are available
using the `keys()` method.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if self.keys():
m = max(map(len, list(self.keys()))) + 1
return '\n'.join([k.rjust(m) + ': ' + repr(v)
for k, v in self.items()])
else:
return self.__class__.__name__ + "()"
class OptimizeWarning(UserWarning):
pass
def _check_unknown_options(unknown_options):
if unknown_options:
msg = ", ".join(map(str, unknown_options.keys()))
# Stack level 4: this is called from _minimize_*, which is
# called from another function in Scipy. Level 4 is the first
# level in user code.
warnings.warn("Unknown solver options: %s" % msg, OptimizeWarning, 4)
def is_array_scalar(x):
"""Test whether `x` is either a scalar or an array scalar.
"""
return np.size(x) == 1
_epsilon = sqrt(numpy.finfo(float).eps)
def vecnorm(x, ord=2):
if ord == Inf:
return numpy.amax(numpy.abs(x))
elif ord == -Inf:
return numpy.amin(numpy.abs(x))
else:
return numpy.sum(numpy.abs(x)**ord, axis=0)**(1.0 / ord)
def rosen(x):
"""
The Rosenbrock function.
The function computed is::
sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)
Parameters
----------
x : array_like
1-D array of points at which the Rosenbrock function is to be computed.
Returns
-------
f : float
The value of the Rosenbrock function.
See Also
--------
rosen_der, rosen_hess, rosen_hess_prod
"""
x = asarray(x)
r = numpy.sum(100.0 * (x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0,
axis=0)
return r
def rosen_der(x):
"""
The derivative (i.e. gradient) of the Rosenbrock function.
Parameters
----------
x : array_like
1-D array of points at which the derivative is to be computed.
Returns
-------
rosen_der : (N,) ndarray
The gradient of the Rosenbrock function at `x`.
See Also
--------
rosen, rosen_hess, rosen_hess_prod
"""
x = asarray(x)
xm = x[1:-1]
xm_m1 = x[:-2]
xm_p1 = x[2:]
der = numpy.zeros_like(x)
der[1:-1] = (200 * (xm - xm_m1**2) -
400 * (xm_p1 - xm**2) * xm - 2 * (1 - xm))
der[0] = -400 * x[0] * (x[1] - x[0]**2) - 2 * (1 - x[0])
der[-1] = 200 * (x[-1] - x[-2]**2)
return der
def rosen_hess(x):
"""
The Hessian matrix of the Rosenbrock function.
Parameters
----------
x : array_like
1-D array of points at which the Hessian matrix is to be computed.
Returns
-------
rosen_hess : ndarray
The Hessian matrix of the Rosenbrock function at `x`.
See Also
--------
rosen, rosen_der, rosen_hess_prod
"""
x = atleast_1d(x)
H = numpy.diag(-400 * x[:-1], 1) - numpy.diag(400 * x[:-1], -1)
diagonal = numpy.zeros(len(x), dtype=x.dtype)
diagonal[0] = 1200 * x[0]**2 - 400 * x[1] + 2
diagonal[-1] = 200
diagonal[1:-1] = 202 + 1200 * x[1:-1]**2 - 400 * x[2:]
H = H + numpy.diag(diagonal)
return H
def rosen_hess_prod(x, p):
"""
Product of the Hessian matrix of the Rosenbrock function with a vector.
Parameters
----------
x : array_like
1-D array of points at which the Hessian matrix is to be computed.
p : array_like
1-D array, the vector to be multiplied by the Hessian matrix.
Returns
-------
rosen_hess_prod : ndarray
The Hessian matrix of the Rosenbrock function at `x` multiplied
by the vector `p`.
See Also
--------
rosen, rosen_der, rosen_hess
"""
x = atleast_1d(x)
Hp = numpy.zeros(len(x), dtype=x.dtype)
Hp[0] = (1200 * x[0]**2 - 400 * x[1] + 2) * p[0] - 400 * x[0] * p[1]
Hp[1:-1] = (-400 * x[:-2] * p[:-2] +
(202 + 1200 * x[1:-1]**2 - 400 * x[2:]) * p[1:-1] -
400 * x[1:-1] * p[2:])
Hp[-1] = -400 * x[-2] * p[-2] + 200*p[-1]
return Hp
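# Illustrative sketch (added for clarity, not part of the original module):
# the Rosenbrock helpers above agree at the global minimum x = (1, ..., 1),
# where the function value and the gradient are both zero.
def _rosen_minimum_check(n=5):
    x_star = numpy.ones(n)
    return rosen(x_star) == 0.0 and numpy.allclose(rosen_der(x_star), 0.0)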
def wrap_function(function, args):
ncalls = [0]
if function is None:
return ncalls, None
def function_wrapper(*wrapper_args):
ncalls[0] += 1
return function(*(wrapper_args + args))
return ncalls, function_wrapper
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
full_output=0, disp=1, retall=0, callback=None):
"""
Minimize a function using the downhill simplex algorithm.
This algorithm only uses function values, not derivatives or second
derivatives.
Parameters
----------
func : callable func(x,*args)
The objective function to be minimized.
x0 : ndarray
Initial guess.
args : tuple, optional
Extra arguments passed to func, i.e. ``f(x,*args)``.
callback : callable, optional
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
xtol : float, optional
Relative error in xopt acceptable for convergence.
ftol : number, optional
Relative error in func(xopt) acceptable for convergence.
maxiter : int, optional
Maximum number of iterations to perform.
maxfun : number, optional
Maximum number of function evaluations to make.
full_output : bool, optional
Set to True if fopt and warnflag outputs are desired.
disp : bool, optional
Set to True to print convergence messages.
retall : bool, optional
Set to True to return list of solutions at each iteration.
Returns
-------
xopt : ndarray
Parameter that minimizes function.
fopt : float
Value of function at minimum: ``fopt = func(xopt)``.
iter : int
Number of iterations performed.
funcalls : int
Number of function calls made.
warnflag : int
1 : Maximum number of function evaluations made.
2 : Maximum number of iterations reached.
allvecs : list
Solution at each iteration.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'Nelder-Mead' `method` in particular.
Notes
-----
Uses a Nelder-Mead simplex algorithm to find the minimum of function of
one or more variables.
This algorithm has a long history of successful use in applications.
But it will usually be slower than an algorithm that uses first or
second derivative information. In practice it can have poor
performance in high-dimensional problems and is not robust to
minimizing complicated functions. Additionally, there currently is no
complete theory describing when the algorithm will successfully
converge to the minimum, or how fast it will if it does.
References
----------
.. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function
minimization", The Computer Journal, 7, pp. 308-313
.. [2] Wright, M.H. (1996), "Direct Search Methods: Once Scorned, Now
Respectable", in Numerical Analysis 1995, Proceedings of the
1995 Dundee Biennial Conference in Numerical Analysis, D.F.
Griffiths and G.A. Watson (Eds.), Addison Wesley Longman,
Harlow, UK, pp. 191-208.
"""
opts = {'xtol': xtol,
'ftol': ftol,
'maxiter': maxiter,
'maxfev': maxfun,
'disp': disp,
'return_all': retall}
res = _minimize_neldermead(func, x0, args, callback=callback, **opts)
if full_output:
retlist = res['x'], res['fun'], res['nit'], res['nfev'], res['status']
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
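# --- Illustrative sketch (editor's addition) ---
# Minimal use of `fmin` (Nelder-Mead) on the Rosenbrock function defined above.
# From this classic starting point the simplex converges to the known minimizer
# at all-ones well within the default iteration budget. The helper name
# `_example_fmin_rosen` is hypothetical; numpy is assumed available as elsewhere
# in this file, and nothing runs at import time.
def _example_fmin_rosen():
    import numpy as np
    x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
    xopt = fmin(rosen, x0, xtol=1e-8, disp=False)
    assert np.allclose(xopt, 1.0, atol=1e-4)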
def _minimize_neldermead(func, x0, args=(), callback=None,
xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None,
disp=False, return_all=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
Nelder-Mead algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
xtol : float
Relative error in solution `xopt` acceptable for convergence.
ftol : float
Relative error in ``fun(xopt)`` acceptable for convergence.
maxiter : int
Maximum number of iterations to perform.
maxfev : int
Maximum number of function evaluations to make.
"""
_check_unknown_options(unknown_options)
maxfun = maxfev
retall = return_all
fcalls, func = wrap_function(func, args)
x0 = asfarray(x0).flatten()
N = len(x0)
if maxiter is None:
maxiter = N * 200
if maxfun is None:
maxfun = N * 200
rho = 1
chi = 2
psi = 0.5
sigma = 0.5
one2np1 = list(range(1, N + 1))
sim = numpy.zeros((N + 1, N), dtype=x0.dtype)
fsim = numpy.zeros((N + 1,), float)
sim[0] = x0
if retall:
allvecs = [sim[0]]
fsim[0] = func(x0)
nonzdelt = 0.05
zdelt = 0.00025
for k in range(0, N):
y = numpy.array(x0, copy=True)
if y[k] != 0:
y[k] = (1 + nonzdelt)*y[k]
else:
y[k] = zdelt
sim[k + 1] = y
f = func(y)
fsim[k + 1] = f
ind = numpy.argsort(fsim)
fsim = numpy.take(fsim, ind, 0)
# sort so sim[0,:] has the lowest function value
sim = numpy.take(sim, ind, 0)
iterations = 1
while (fcalls[0] < maxfun and iterations < maxiter):
if (numpy.max(numpy.ravel(numpy.abs(sim[1:] - sim[0]))) <= xtol and
numpy.max(numpy.abs(fsim[0] - fsim[1:])) <= ftol):
break
xbar = numpy.add.reduce(sim[:-1], 0) / N
xr = (1 + rho) * xbar - rho * sim[-1]
fxr = func(xr)
doshrink = 0
if fxr < fsim[0]:
xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
fxe = func(xe)
if fxe < fxr:
sim[-1] = xe
fsim[-1] = fxe
else:
sim[-1] = xr
fsim[-1] = fxr
else: # fsim[0] <= fxr
if fxr < fsim[-2]:
sim[-1] = xr
fsim[-1] = fxr
else: # fxr >= fsim[-2]
# Perform contraction
if fxr < fsim[-1]:
xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
fxc = func(xc)
if fxc <= fxr:
sim[-1] = xc
fsim[-1] = fxc
else:
doshrink = 1
else:
# Perform an inside contraction
xcc = (1 - psi) * xbar + psi * sim[-1]
fxcc = func(xcc)
if fxcc < fsim[-1]:
sim[-1] = xcc
fsim[-1] = fxcc
else:
doshrink = 1
if doshrink:
for j in one2np1:
sim[j] = sim[0] + sigma * (sim[j] - sim[0])
fsim[j] = func(sim[j])
ind = numpy.argsort(fsim)
sim = numpy.take(sim, ind, 0)
fsim = numpy.take(fsim, ind, 0)
if callback is not None:
callback(sim[0])
iterations += 1
if retall:
allvecs.append(sim[0])
x = sim[0]
fval = numpy.min(fsim)
warnflag = 0
if fcalls[0] >= maxfun:
warnflag = 1
msg = _status_message['maxfev']
if disp:
print('Warning: ' + msg)
elif iterations >= maxiter:
warnflag = 2
msg = _status_message['maxiter']
if disp:
print('Warning: ' + msg)
else:
msg = _status_message['success']
if disp:
print(msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % iterations)
print(" Function evaluations: %d" % fcalls[0])
result = OptimizeResult(fun=fval, nit=iterations, nfev=fcalls[0],
status=warnflag, success=(warnflag == 0),
message=msg, x=x, final_simplex=(sim, fsim))
if retall:
result['allvecs'] = allvecs
return result
def _approx_fprime_helper(xk, f, epsilon, args=(), f0=None):
"""
See ``approx_fprime``. An optional initial function value arg is added.
"""
if f0 is None:
f0 = f(*((xk,) + args))
grad = numpy.zeros((len(xk),), float)
ei = numpy.zeros((len(xk),), float)
for k in range(len(xk)):
ei[k] = 1.0
d = epsilon * ei
grad[k] = (f(*((xk + d,) + args)) - f0) / d[k]
ei[k] = 0.0
return grad
def approx_fprime(xk, f, epsilon, *args):
"""Finite-difference approximation of the gradient of a scalar function.
Parameters
----------
xk : array_like
The coordinate vector at which to determine the gradient of `f`.
f : callable
The function of which to determine the gradient (partial derivatives).
Should take `xk` as first argument, other arguments to `f` can be
supplied in ``*args``. Should return a scalar, the value of the
function at `xk`.
epsilon : array_like
Increment to `xk` to use for determining the function gradient.
If a scalar, uses the same finite difference delta for all partial
derivatives. If an array, should contain one value per element of
`xk`.
\*args : args, optional
Any other arguments that are to be passed to `f`.
Returns
-------
grad : ndarray
        The partial derivatives of `f` with respect to `xk`.
See Also
--------
check_grad : Check correctness of gradient function against approx_fprime.
Notes
-----
The function gradient is determined by the forward finite difference
formula::
f(xk[i] + epsilon[i]) - f(xk[i])
f'[i] = ---------------------------------
epsilon[i]
The main use of `approx_fprime` is in scalar function optimizers like
`fmin_bfgs`, to determine numerically the Jacobian of a function.
Examples
--------
>>> from scipy import optimize
>>> def func(x, c0, c1):
... "Coordinate vector `x` should be an array of size two."
... return c0 * x[0]**2 + c1*x[1]**2
>>> x = np.ones(2)
>>> c0, c1 = (1, 200)
>>> eps = np.sqrt(np.finfo(float).eps)
>>> optimize.approx_fprime(x, func, [eps, np.sqrt(200) * eps], c0, c1)
array([ 2. , 400.00004198])
"""
return _approx_fprime_helper(xk, f, epsilon, args=args)
def check_grad(func, grad, x0, *args, **kwargs):
"""Check the correctness of a gradient function by comparing it against a
(forward) finite-difference approximation of the gradient.
Parameters
----------
func : callable ``func(x0, *args)``
Function whose derivative is to be checked.
grad : callable ``grad(x0, *args)``
Gradient of `func`.
x0 : ndarray
Points to check `grad` against forward difference approximation of grad
using `func`.
args : \*args, optional
Extra arguments passed to `func` and `grad`.
epsilon : float, optional
Step size used for the finite difference approximation. It defaults to
``sqrt(numpy.finfo(float).eps)``, which is approximately 1.49e-08.
Returns
-------
err : float
The square root of the sum of squares (i.e. the 2-norm) of the
difference between ``grad(x0, *args)`` and the finite difference
approximation of `grad` using func at the points `x0`.
See Also
--------
approx_fprime
Examples
--------
>>> def func(x):
... return x[0]**2 - 0.5 * x[1]**3
>>> def grad(x):
... return [2 * x[0], -1.5 * x[1]**2]
>>> from scipy.optimize import check_grad
>>> check_grad(func, grad, [1.5, -1.5])
2.9802322387695312e-08
"""
step = kwargs.pop('epsilon', _epsilon)
if kwargs:
raise ValueError("Unknown keyword arguments: %r" %
(list(kwargs.keys()),))
return sqrt(sum((grad(x0, *args) -
approx_fprime(x0, func, step, *args))**2))
def approx_fhess_p(x0, p, fprime, epsilon, *args):
f2 = fprime(*((x0 + epsilon*p,) + args))
f1 = fprime(*((x0,) + args))
return (f2 - f1) / epsilon
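# --- Illustrative sketch (editor's addition) ---
# `approx_fhess_p` approximates a Hessian-vector product by forward-differencing
# the gradient along `p`. For the Rosenbrock helpers above it should be close to
# the exact `rosen_hess_prod`. The helper name `_example_approx_fhess_p` is
# hypothetical; numpy is assumed available; nothing runs at import time.
def _example_approx_fhess_p():
    import numpy as np
    x = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
    p = np.array([0.1, -0.2, 0.3, -0.4, 0.5])
    h = np.sqrt(np.finfo(float).eps)
    approx = approx_fhess_p(x, p, rosen_der, h)
    assert np.allclose(approx, rosen_hess_prod(x, p), rtol=1e-4, atol=1e-3)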
class _LineSearchError(RuntimeError):
pass
def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval,
**kwargs):
"""
Same as line_search_wolfe1, but fall back to line_search_wolfe2 if
suitable step length is not found, and raise an exception if a
suitable step length is not found.
Raises
------
_LineSearchError
If no suitable step size is found
"""
ret = line_search_wolfe1(f, fprime, xk, pk, gfk,
old_fval, old_old_fval,
**kwargs)
if ret[0] is None:
        # line search failed: try a different one.
with warnings.catch_warnings():
warnings.simplefilter('ignore', LineSearchWarning)
ret = line_search_wolfe2(f, fprime, xk, pk, gfk,
old_fval, old_old_fval)
if ret[0] is None:
raise _LineSearchError()
return ret
def fmin_bfgs(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf,
epsilon=_epsilon, maxiter=None, full_output=0, disp=1,
retall=0, callback=None):
"""
Minimize a function using the BFGS algorithm.
Parameters
----------
f : callable f(x,*args)
Objective function to be minimized.
x0 : ndarray
Initial guess.
fprime : callable f'(x,*args), optional
Gradient of f.
args : tuple, optional
Extra arguments passed to f and fprime.
gtol : float, optional
Gradient norm must be less than gtol before successful termination.
norm : float, optional
Order of norm (Inf is max, -Inf is min)
epsilon : int or ndarray, optional
If fprime is approximated, use this value for the step size.
callback : callable, optional
An optional user-supplied function to call after each
iteration. Called as callback(xk), where xk is the
current parameter vector.
maxiter : int, optional
Maximum number of iterations to perform.
full_output : bool, optional
        If True, return fopt, func_calls, grad_calls, and warnflag
in addition to xopt.
disp : bool, optional
Print convergence message if True.
retall : bool, optional
Return a list of results at each iteration if True.
Returns
-------
xopt : ndarray
Parameters which minimize f, i.e. f(xopt) == fopt.
fopt : float
Minimum value.
gopt : ndarray
Value of gradient at minimum, f'(xopt), which should be near 0.
Bopt : ndarray
Value of 1/f''(xopt), i.e. the inverse hessian matrix.
func_calls : int
Number of function_calls made.
grad_calls : int
Number of gradient calls made.
warnflag : integer
1 : Maximum number of iterations exceeded.
2 : Gradient and/or function calls not changing.
allvecs : list
`OptimizeResult` at each iteration. Only returned if retall is True.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'BFGS' `method` in particular.
Notes
-----
Optimize the function, f, whose gradient is given by fprime
using the quasi-Newton method of Broyden, Fletcher, Goldfarb,
and Shanno (BFGS)
References
----------
Wright, and Nocedal 'Numerical Optimization', 1999, pg. 198.
"""
opts = {'gtol': gtol,
'norm': norm,
'eps': epsilon,
'disp': disp,
'maxiter': maxiter,
'return_all': retall}
res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **opts)
if full_output:
retlist = (res['x'], res['fun'], res['jac'], res['hess_inv'],
res['nfev'], res['njev'], res['status'])
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
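# --- Illustrative sketch (editor's addition) ---
# Minimal use of `fmin_bfgs` with the analytic gradient `rosen_der` defined
# above; supplying an exact gradient lets BFGS converge to the Rosenbrock
# minimizer at all-ones from this classic starting point. The helper name
# `_example_fmin_bfgs_rosen` is hypothetical; nothing runs at import time.
def _example_fmin_bfgs_rosen():
    import numpy as np
    x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
    xopt = fmin_bfgs(rosen, x0, fprime=rosen_der, disp=False)
    assert np.allclose(xopt, 1.0, atol=1e-4)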
def _minimize_bfgs(fun, x0, args=(), jac=None, callback=None,
gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
disp=False, return_all=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
BFGS algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
maxiter : int
Maximum number of iterations to perform.
gtol : float
Gradient norm must be less than `gtol` before successful
termination.
norm : float
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
"""
_check_unknown_options(unknown_options)
f = fun
fprime = jac
epsilon = eps
retall = return_all
x0 = asarray(x0).flatten()
if x0.ndim == 0:
x0.shape = (1,)
if maxiter is None:
maxiter = len(x0) * 200
func_calls, f = wrap_function(f, args)
if fprime is None:
grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
else:
grad_calls, myfprime = wrap_function(fprime, args)
gfk = myfprime(x0)
k = 0
N = len(x0)
I = numpy.eye(N, dtype=int)
Hk = I
old_fval = f(x0)
old_old_fval = None
xk = x0
if retall:
allvecs = [x0]
sk = [2 * gtol]
warnflag = 0
gnorm = vecnorm(gfk, ord=norm)
while (gnorm > gtol) and (k < maxiter):
pk = -numpy.dot(Hk, gfk)
try:
alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
_line_search_wolfe12(f, myfprime, xk, pk, gfk,
old_fval, old_old_fval)
except _LineSearchError:
# Line search failed to find a better solution.
warnflag = 2
break
xkp1 = xk + alpha_k * pk
if retall:
allvecs.append(xkp1)
sk = xkp1 - xk
xk = xkp1
if gfkp1 is None:
gfkp1 = myfprime(xkp1)
yk = gfkp1 - gfk
gfk = gfkp1
if callback is not None:
callback(xk)
k += 1
gnorm = vecnorm(gfk, ord=norm)
if (gnorm <= gtol):
break
if not numpy.isfinite(old_fval):
# We correctly found +-Inf as optimal value, or something went
# wrong.
warnflag = 2
break
        try:  # this was handled in numeric; leave it in for extra safety
rhok = 1.0 / (numpy.dot(yk, sk))
except ZeroDivisionError:
rhok = 1000.0
if disp:
print("Divide-by-zero encountered: rhok assumed large")
        if isinf(rhok):  # this is a patch for numpy
rhok = 1000.0
if disp:
print("Divide-by-zero encountered: rhok assumed large")
A1 = I - sk[:, numpy.newaxis] * yk[numpy.newaxis, :] * rhok
A2 = I - yk[:, numpy.newaxis] * sk[numpy.newaxis, :] * rhok
Hk = numpy.dot(A1, numpy.dot(Hk, A2)) + (rhok * sk[:, numpy.newaxis] *
sk[numpy.newaxis, :])
fval = old_fval
if np.isnan(fval):
# This can happen if the first call to f returned NaN;
# the loop is then never entered.
warnflag = 2
if warnflag == 2:
msg = _status_message['pr_loss']
if disp:
print("Warning: " + msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % func_calls[0])
print(" Gradient evaluations: %d" % grad_calls[0])
elif k >= maxiter:
warnflag = 1
msg = _status_message['maxiter']
if disp:
print("Warning: " + msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % func_calls[0])
print(" Gradient evaluations: %d" % grad_calls[0])
else:
msg = _status_message['success']
if disp:
print(msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % func_calls[0])
print(" Gradient evaluations: %d" % grad_calls[0])
result = OptimizeResult(fun=fval, jac=gfk, hess_inv=Hk, nfev=func_calls[0],
njev=grad_calls[0], status=warnflag,
success=(warnflag == 0), message=msg, x=xk,
nit=k)
if retall:
result['allvecs'] = allvecs
return result
def fmin_cg(f, x0, fprime=None, args=(), gtol=1e-5, norm=Inf, epsilon=_epsilon,
maxiter=None, full_output=0, disp=1, retall=0, callback=None):
"""
Minimize a function using a nonlinear conjugate gradient algorithm.
Parameters
----------
f : callable, ``f(x, *args)``
Objective function to be minimized. Here `x` must be a 1-D array of
the variables that are to be changed in the search for a minimum, and
`args` are the other (fixed) parameters of `f`.
x0 : ndarray
A user-supplied initial estimate of `xopt`, the optimal value of `x`.
It must be a 1-D array of values.
fprime : callable, ``fprime(x, *args)``, optional
A function that returns the gradient of `f` at `x`. Here `x` and `args`
are as described above for `f`. The returned value must be a 1-D array.
Defaults to None, in which case the gradient is approximated
numerically (see `epsilon`, below).
args : tuple, optional
Parameter values passed to `f` and `fprime`. Must be supplied whenever
additional fixed parameters are needed to completely specify the
functions `f` and `fprime`.
gtol : float, optional
Stop when the norm of the gradient is less than `gtol`.
norm : float, optional
Order to use for the norm of the gradient
(``-np.Inf`` is min, ``np.Inf`` is max).
epsilon : float or ndarray, optional
Step size(s) to use when `fprime` is approximated numerically. Can be a
scalar or a 1-D array. Defaults to ``sqrt(eps)``, with eps the
floating point machine precision. Usually ``sqrt(eps)`` is about
1.5e-8.
maxiter : int, optional
Maximum number of iterations to perform. Default is ``200 * len(x0)``.
full_output : bool, optional
If True, return `fopt`, `func_calls`, `grad_calls`, and `warnflag` in
addition to `xopt`. See the Returns section below for additional
information on optional return values.
disp : bool, optional
        If True, print a convergence message before returning `xopt`.
retall : bool, optional
If True, add to the returned values the results of each iteration.
callback : callable, optional
An optional user-supplied function, called after each iteration.
Called as ``callback(xk)``, where ``xk`` is the current value of `x0`.
Returns
-------
xopt : ndarray
Parameters which minimize f, i.e. ``f(xopt) == fopt``.
fopt : float, optional
Minimum value found, f(xopt). Only returned if `full_output` is True.
func_calls : int, optional
The number of function_calls made. Only returned if `full_output`
is True.
grad_calls : int, optional
The number of gradient calls made. Only returned if `full_output` is
True.
warnflag : int, optional
Integer value with warning status, only returned if `full_output` is
True.
0 : Success.
1 : The maximum number of iterations was exceeded.
2 : Gradient and/or function calls were not changing. May indicate
that precision was lost, i.e., the routine did not converge.
allvecs : list of ndarray, optional
List of arrays, containing the results at each iteration.
Only returned if `retall` is True.
See Also
--------
minimize : common interface to all `scipy.optimize` algorithms for
unconstrained and constrained minimization of multivariate
functions. It provides an alternative way to call
``fmin_cg``, by specifying ``method='CG'``.
Notes
-----
This conjugate gradient algorithm is based on that of Polak and Ribiere
[1]_.
Conjugate gradient methods tend to work better when:
1. `f` has a unique global minimizing point, and no local minima or
other stationary points,
2. `f` is, at least locally, reasonably well approximated by a
quadratic function of the variables,
3. `f` is continuous and has a continuous gradient,
4. `fprime` is not too large, e.g., has a norm less than 1000,
5. The initial guess, `x0`, is reasonably close to `f` 's global
minimizing point, `xopt`.
References
----------
.. [1] Wright & Nocedal, "Numerical Optimization", 1999, pp. 120-122.
Examples
--------
Example 1: seek the minimum value of the expression
``a*u**2 + b*u*v + c*v**2 + d*u + e*v + f`` for given values
of the parameters and an initial guess ``(u, v) = (0, 0)``.
>>> args = (2, 3, 7, 8, 9, 10) # parameter values
>>> def f(x, *args):
... u, v = x
... a, b, c, d, e, f = args
... return a*u**2 + b*u*v + c*v**2 + d*u + e*v + f
>>> def gradf(x, *args):
... u, v = x
... a, b, c, d, e, f = args
... gu = 2*a*u + b*v + d # u-component of the gradient
... gv = b*u + 2*c*v + e # v-component of the gradient
... return np.asarray((gu, gv))
>>> x0 = np.asarray((0, 0)) # Initial guess.
>>> from scipy import optimize
>>> res1 = optimize.fmin_cg(f, x0, fprime=gradf, args=args)
Optimization terminated successfully.
Current function value: 1.617021
Iterations: 2
Function evaluations: 5
Gradient evaluations: 5
>>> res1
array([-1.80851064, -0.25531915])
Example 2: solve the same problem using the `minimize` function.
(This `myopts` dictionary shows all of the available options,
although in practice only non-default values would be needed.
The returned value will be a dictionary.)
>>> opts = {'maxiter' : None, # default value.
... 'disp' : True, # non-default value.
... 'gtol' : 1e-5, # default value.
... 'norm' : np.inf, # default value.
... 'eps' : 1.4901161193847656e-08} # default value.
>>> res2 = optimize.minimize(f, x0, jac=gradf, args=args,
... method='CG', options=opts)
Optimization terminated successfully.
Current function value: 1.617021
Iterations: 2
Function evaluations: 5
Gradient evaluations: 5
>>> res2.x # minimum found
array([-1.80851064, -0.25531915])
"""
opts = {'gtol': gtol,
'norm': norm,
'eps': epsilon,
'disp': disp,
'maxiter': maxiter,
'return_all': retall}
res = _minimize_cg(f, x0, args, fprime, callback=callback, **opts)
if full_output:
retlist = res['x'], res['fun'], res['nfev'], res['njev'], res['status']
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_cg(fun, x0, args=(), jac=None, callback=None,
gtol=1e-5, norm=Inf, eps=_epsilon, maxiter=None,
disp=False, return_all=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
conjugate gradient algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
maxiter : int
Maximum number of iterations to perform.
gtol : float
Gradient norm must be less than `gtol` before successful
termination.
norm : float
Order of norm (Inf is max, -Inf is min).
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
"""
_check_unknown_options(unknown_options)
f = fun
fprime = jac
epsilon = eps
retall = return_all
x0 = asarray(x0).flatten()
if maxiter is None:
maxiter = len(x0) * 200
func_calls, f = wrap_function(f, args)
if fprime is None:
grad_calls, myfprime = wrap_function(approx_fprime, (f, epsilon))
else:
grad_calls, myfprime = wrap_function(fprime, args)
gfk = myfprime(x0)
k = 0
xk = x0
old_fval = f(xk)
old_old_fval = None
if retall:
allvecs = [xk]
warnflag = 0
pk = -gfk
gnorm = vecnorm(gfk, ord=norm)
while (gnorm > gtol) and (k < maxiter):
deltak = numpy.dot(gfk, gfk)
try:
alpha_k, fc, gc, old_fval, old_old_fval, gfkp1 = \
_line_search_wolfe12(f, myfprime, xk, pk, gfk, old_fval,
old_old_fval, c2=0.4)
except _LineSearchError:
# Line search failed to find a better solution.
warnflag = 2
break
xk = xk + alpha_k * pk
if retall:
allvecs.append(xk)
if gfkp1 is None:
gfkp1 = myfprime(xk)
yk = gfkp1 - gfk
beta_k = max(0, numpy.dot(yk, gfkp1) / deltak)
pk = -gfkp1 + beta_k * pk
gfk = gfkp1
gnorm = vecnorm(gfk, ord=norm)
if callback is not None:
callback(xk)
k += 1
fval = old_fval
if warnflag == 2:
msg = _status_message['pr_loss']
if disp:
print("Warning: " + msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % func_calls[0])
print(" Gradient evaluations: %d" % grad_calls[0])
elif k >= maxiter:
warnflag = 1
msg = _status_message['maxiter']
if disp:
print("Warning: " + msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % func_calls[0])
print(" Gradient evaluations: %d" % grad_calls[0])
else:
msg = _status_message['success']
if disp:
print(msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % func_calls[0])
print(" Gradient evaluations: %d" % grad_calls[0])
result = OptimizeResult(fun=fval, jac=gfk, nfev=func_calls[0],
njev=grad_calls[0], status=warnflag,
success=(warnflag == 0), message=msg, x=xk,
nit=k)
if retall:
result['allvecs'] = allvecs
return result
def fmin_ncg(f, x0, fprime, fhess_p=None, fhess=None, args=(), avextol=1e-5,
epsilon=_epsilon, maxiter=None, full_output=0, disp=1, retall=0,
callback=None):
"""
Unconstrained minimization of a function using the Newton-CG method.
Parameters
----------
f : callable ``f(x, *args)``
Objective function to be minimized.
x0 : ndarray
Initial guess.
fprime : callable ``f'(x, *args)``
Gradient of f.
fhess_p : callable ``fhess_p(x, p, *args)``, optional
Function which computes the Hessian of f times an
arbitrary vector, p.
fhess : callable ``fhess(x, *args)``, optional
Function to compute the Hessian matrix of f.
args : tuple, optional
Extra arguments passed to f, fprime, fhess_p, and fhess
(the same set of extra arguments is supplied to all of
these functions).
epsilon : float or ndarray, optional
If fhess is approximated, use this value for the step size.
callback : callable, optional
An optional user-supplied function which is called after
each iteration. Called as callback(xk), where xk is the
current parameter vector.
avextol : float, optional
Convergence is assumed when the average relative error in
the minimizer falls below this amount.
maxiter : int, optional
Maximum number of iterations to perform.
full_output : bool, optional
If True, return the optional outputs.
disp : bool, optional
If True, print convergence message.
retall : bool, optional
If True, return a list of results at each iteration.
Returns
-------
xopt : ndarray
Parameters which minimize f, i.e. ``f(xopt) == fopt``.
fopt : float
Value of the function at xopt, i.e. ``fopt = f(xopt)``.
fcalls : int
Number of function calls made.
gcalls : int
Number of gradient calls made.
hcalls : int
Number of hessian calls made.
warnflag : int
Warnings generated by the algorithm.
1 : Maximum number of iterations exceeded.
allvecs : list
The result at each iteration, if retall is True (see below).
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'Newton-CG' `method` in particular.
Notes
-----
    Only one of `fhess_p` or `fhess` needs to be given. If `fhess`
is provided, then `fhess_p` will be ignored. If neither `fhess`
nor `fhess_p` is provided, then the hessian product will be
approximated using finite differences on `fprime`. `fhess_p`
must compute the hessian times an arbitrary vector. If it is not
given, finite-differences on `fprime` are used to compute
it.
Newton-CG methods are also called truncated Newton methods. This
function differs from scipy.optimize.fmin_tnc because
1. scipy.optimize.fmin_ncg is written purely in python using numpy
and scipy while scipy.optimize.fmin_tnc calls a C function.
2. scipy.optimize.fmin_ncg is only for unconstrained minimization
while scipy.optimize.fmin_tnc is for unconstrained minimization
or box constrained minimization. (Box constraints give
lower and upper bounds for each variable separately.)
References
----------
Wright & Nocedal, 'Numerical Optimization', 1999, pg. 140.
"""
opts = {'xtol': avextol,
'eps': epsilon,
'maxiter': maxiter,
'disp': disp,
'return_all': retall}
res = _minimize_newtoncg(f, x0, args, fprime, fhess, fhess_p,
callback=callback, **opts)
if full_output:
retlist = (res['x'], res['fun'], res['nfev'], res['njev'],
res['nhev'], res['status'])
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
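# --- Illustrative sketch (editor's addition) ---
# Minimal use of `fmin_ncg` supplying the gradient and a Hessian-vector product
# (`fhess_p=rosen_hess_prod`), so the full Hessian never has to be formed.
# The helper name `_example_fmin_ncg_rosen` is hypothetical; nothing runs at
# import time.
def _example_fmin_ncg_rosen():
    import numpy as np
    x0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])
    xopt = fmin_ncg(rosen, x0, fprime=rosen_der, fhess_p=rosen_hess_prod,
                    avextol=1e-8, disp=False)
    assert np.allclose(xopt, 1.0, atol=1e-4)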
def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
callback=None, xtol=1e-5, eps=_epsilon, maxiter=None,
disp=False, return_all=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
Newton-CG algorithm.
Note that the `jac` parameter (Jacobian) is required.
Options
-------
disp : bool
Set to True to print convergence messages.
xtol : float
Average relative error in solution `xopt` acceptable for
convergence.
maxiter : int
Maximum number of iterations to perform.
eps : float or ndarray
If `jac` is approximated, use this value for the step size.
"""
_check_unknown_options(unknown_options)
if jac is None:
raise ValueError('Jacobian is required for Newton-CG method')
f = fun
fprime = jac
fhess_p = hessp
fhess = hess
avextol = xtol
epsilon = eps
retall = return_all
x0 = asarray(x0).flatten()
fcalls, f = wrap_function(f, args)
gcalls, fprime = wrap_function(fprime, args)
hcalls = 0
if maxiter is None:
maxiter = len(x0)*200
xtol = len(x0) * avextol
update = [2 * xtol]
xk = x0
if retall:
allvecs = [xk]
k = 0
old_fval = f(x0)
old_old_fval = None
float64eps = numpy.finfo(numpy.float64).eps
warnflag = 0
while (numpy.add.reduce(numpy.abs(update)) > xtol) and (k < maxiter):
# Compute a search direction pk by applying the CG method to
# del2 f(xk) p = - grad f(xk) starting from 0.
b = -fprime(xk)
maggrad = numpy.add.reduce(numpy.abs(b))
eta = numpy.min([0.5, numpy.sqrt(maggrad)])
termcond = eta * maggrad
xsupi = zeros(len(x0), dtype=x0.dtype)
ri = -b
psupi = -ri
i = 0
dri0 = numpy.dot(ri, ri)
        if fhess is not None:  # compute the Hessian only once per outer loop
A = fhess(*(xk,) + args)
hcalls = hcalls + 1
while numpy.add.reduce(numpy.abs(ri)) > termcond:
if fhess is None:
if fhess_p is None:
Ap = approx_fhess_p(xk, psupi, fprime, epsilon)
else:
Ap = fhess_p(xk, psupi, *args)
hcalls = hcalls + 1
else:
Ap = numpy.dot(A, psupi)
# check curvature
Ap = asarray(Ap).squeeze() # get rid of matrices...
curv = numpy.dot(psupi, Ap)
if 0 <= curv <= 3 * float64eps:
break
elif curv < 0:
if (i > 0):
break
else:
# fall back to steepest descent direction
xsupi = dri0 / (-curv) * b
break
alphai = dri0 / curv
xsupi = xsupi + alphai * psupi
ri = ri + alphai * Ap
dri1 = numpy.dot(ri, ri)
betai = dri1 / dri0
psupi = -ri + betai * psupi
i = i + 1
dri0 = dri1 # update numpy.dot(ri,ri) for next time.
pk = xsupi # search direction is solution to system.
gfk = -b # gradient at xk
try:
alphak, fc, gc, old_fval, old_old_fval, gfkp1 = \
_line_search_wolfe12(f, fprime, xk, pk, gfk,
old_fval, old_old_fval)
except _LineSearchError:
# Line search failed to find a better solution.
warnflag = 2
break
update = alphak * pk
xk = xk + update # upcast if necessary
if callback is not None:
callback(xk)
if retall:
allvecs.append(xk)
k += 1
fval = old_fval
if warnflag == 2:
msg = _status_message['pr_loss']
if disp:
print("Warning: " + msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % fcalls[0])
print(" Gradient evaluations: %d" % gcalls[0])
print(" Hessian evaluations: %d" % hcalls)
elif k >= maxiter:
warnflag = 1
msg = _status_message['maxiter']
if disp:
print("Warning: " + msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % fcalls[0])
print(" Gradient evaluations: %d" % gcalls[0])
print(" Hessian evaluations: %d" % hcalls)
else:
msg = _status_message['success']
if disp:
print(msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % k)
print(" Function evaluations: %d" % fcalls[0])
print(" Gradient evaluations: %d" % gcalls[0])
print(" Hessian evaluations: %d" % hcalls)
result = OptimizeResult(fun=fval, jac=gfk, nfev=fcalls[0], njev=gcalls[0],
nhev=hcalls, status=warnflag,
success=(warnflag == 0), message=msg, x=xk,
nit=k)
if retall:
result['allvecs'] = allvecs
return result
def fminbound(func, x1, x2, args=(), xtol=1e-5, maxfun=500,
full_output=0, disp=1):
"""Bounded minimization for scalar functions.
Parameters
----------
func : callable f(x,*args)
Objective function to be minimized (must accept and return scalars).
x1, x2 : float or array scalar
The optimization bounds.
args : tuple, optional
Extra arguments passed to function.
xtol : float, optional
The convergence tolerance.
maxfun : int, optional
Maximum number of function evaluations allowed.
full_output : bool, optional
If True, return optional outputs.
disp : int, optional
If non-zero, print messages.
0 : no message printing.
1 : non-convergence notification messages only.
2 : print a message on convergence too.
3 : print iteration results.
Returns
-------
xopt : ndarray
Parameters (over given interval) which minimize the
objective function.
fval : number
The function value at the minimum point.
ierr : int
An error flag (0 if converged, 1 if maximum number of
function calls reached).
numfunc : int
The number of function calls made.
See also
--------
minimize_scalar: Interface to minimization algorithms for scalar
univariate functions. See the 'Bounded' `method` in particular.
Notes
-----
Finds a local minimizer of the scalar function `func` in the
interval x1 < xopt < x2 using Brent's method. (See `brent`
for auto-bracketing).
"""
options = {'xatol': xtol,
'maxiter': maxfun,
'disp': disp}
res = _minimize_scalar_bounded(func, (x1, x2), args, **options)
if full_output:
return res['x'], res['fun'], res['status'], res['nfev']
else:
return res['x']
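# --- Illustrative sketch (editor's addition) ---
# `fminbound` minimizes a scalar function on a closed interval. Here the
# minimizer of (x - 2)**2 on [0, 10] is recovered to within the default
# tolerance. The helper name `_example_fminbound` is hypothetical; nothing runs
# at import time.
def _example_fminbound():
    xopt = fminbound(lambda x: (x - 2.0) ** 2, 0.0, 10.0, disp=0)
    assert abs(xopt - 2.0) < 1e-4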
def _minimize_scalar_bounded(func, bounds, args=(),
xatol=1e-5, maxiter=500, disp=0,
**unknown_options):
"""
Options
-------
maxiter : int
Maximum number of iterations to perform.
disp : bool
Set to True to print convergence messages.
xatol : float
Absolute error in solution `xopt` acceptable for convergence.
"""
_check_unknown_options(unknown_options)
maxfun = maxiter
# Test bounds are of correct form
if len(bounds) != 2:
raise ValueError('bounds must have two elements.')
x1, x2 = bounds
if not (is_array_scalar(x1) and is_array_scalar(x2)):
raise ValueError("Optimisation bounds must be scalars"
" or array scalars.")
if x1 > x2:
raise ValueError("The lower bound exceeds the upper bound.")
flag = 0
header = ' Func-count x f(x) Procedure'
step = ' initial'
sqrt_eps = sqrt(2.2e-16)
golden_mean = 0.5 * (3.0 - sqrt(5.0))
a, b = x1, x2
fulc = a + golden_mean * (b - a)
nfc, xf = fulc, fulc
rat = e = 0.0
x = xf
fx = func(x, *args)
num = 1
fmin_data = (1, xf, fx)
ffulc = fnfc = fx
xm = 0.5 * (a + b)
tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0
tol2 = 2.0 * tol1
if disp > 2:
print(" ")
print(header)
print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)))
while (numpy.abs(xf - xm) > (tol2 - 0.5 * (b - a))):
golden = 1
# Check for parabolic fit
if numpy.abs(e) > tol1:
golden = 0
r = (xf - nfc) * (fx - ffulc)
q = (xf - fulc) * (fx - fnfc)
p = (xf - fulc) * q - (xf - nfc) * r
q = 2.0 * (q - r)
if q > 0.0:
p = -p
q = numpy.abs(q)
r = e
e = rat
# Check for acceptability of parabola
if ((numpy.abs(p) < numpy.abs(0.5*q*r)) and (p > q*(a - xf)) and
(p < q * (b - xf))):
rat = (p + 0.0) / q
x = xf + rat
step = ' parabolic'
if ((x - a) < tol2) or ((b - x) < tol2):
si = numpy.sign(xm - xf) + ((xm - xf) == 0)
rat = tol1 * si
else: # do a golden section step
golden = 1
if golden: # Do a golden-section step
if xf >= xm:
e = a - xf
else:
e = b - xf
rat = golden_mean*e
step = ' golden'
si = numpy.sign(rat) + (rat == 0)
x = xf + si * numpy.max([numpy.abs(rat), tol1])
fu = func(x, *args)
num += 1
fmin_data = (num, x, fu)
if disp > 2:
print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)))
if fu <= fx:
if x >= xf:
a = xf
else:
b = xf
fulc, ffulc = nfc, fnfc
nfc, fnfc = xf, fx
xf, fx = x, fu
else:
if x < xf:
a = x
else:
b = x
if (fu <= fnfc) or (nfc == xf):
fulc, ffulc = nfc, fnfc
nfc, fnfc = x, fu
elif (fu <= ffulc) or (fulc == xf) or (fulc == nfc):
fulc, ffulc = x, fu
xm = 0.5 * (a + b)
tol1 = sqrt_eps * numpy.abs(xf) + xatol / 3.0
tol2 = 2.0 * tol1
if num >= maxfun:
flag = 1
break
fval = fx
if disp > 0:
_endprint(x, flag, fval, maxfun, xatol, disp)
result = OptimizeResult(fun=fval, status=flag, success=(flag == 0),
message={0: 'Solution found.',
1: 'Maximum number of function calls '
'reached.'}.get(flag, ''),
x=xf, nfev=num)
return result
class Brent:
#need to rethink design of __init__
def __init__(self, func, args=(), tol=1.48e-8, maxiter=500,
full_output=0):
self.func = func
self.args = args
self.tol = tol
self.maxiter = maxiter
self._mintol = 1.0e-11
self._cg = 0.3819660
self.xmin = None
self.fval = None
self.iter = 0
self.funcalls = 0
# need to rethink design of set_bracket (new options, etc)
def set_bracket(self, brack=None):
self.brack = brack
def get_bracket_info(self):
#set up
func = self.func
args = self.args
brack = self.brack
### BEGIN core bracket_info code ###
### carefully DOCUMENT any CHANGES in core ##
if brack is None:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
elif len(brack) == 2:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],
xb=brack[1], args=args)
elif len(brack) == 3:
xa, xb, xc = brack
if (xa > xc): # swap so xa < xc can be assumed
xc, xa = xa, xc
if not ((xa < xb) and (xb < xc)):
raise ValueError("Not a bracketing interval.")
fa = func(*((xa,) + args))
fb = func(*((xb,) + args))
fc = func(*((xc,) + args))
if not ((fb < fa) and (fb < fc)):
raise ValueError("Not a bracketing interval.")
funcalls = 3
else:
raise ValueError("Bracketing interval must be "
"length 2 or 3 sequence.")
### END core bracket_info code ###
return xa, xb, xc, fa, fb, fc, funcalls
def optimize(self):
# set up for optimization
func = self.func
xa, xb, xc, fa, fb, fc, funcalls = self.get_bracket_info()
_mintol = self._mintol
_cg = self._cg
#################################
#BEGIN CORE ALGORITHM
#################################
x = w = v = xb
fw = fv = fx = func(*((x,) + self.args))
if (xa < xc):
a = xa
b = xc
else:
a = xc
b = xa
deltax = 0.0
funcalls = 1
iter = 0
while (iter < self.maxiter):
tol1 = self.tol * numpy.abs(x) + _mintol
tol2 = 2.0 * tol1
xmid = 0.5 * (a + b)
# check for convergence
if numpy.abs(x - xmid) < (tol2 - 0.5 * (b - a)):
break
# XXX In the first iteration, rat is only bound in the true case
# of this conditional. This used to cause an UnboundLocalError
# (gh-4140). It should be set before the if (but to what?).
if (numpy.abs(deltax) <= tol1):
if (x >= xmid):
deltax = a - x # do a golden section step
else:
deltax = b - x
rat = _cg * deltax
else: # do a parabolic step
tmp1 = (x - w) * (fx - fv)
tmp2 = (x - v) * (fx - fw)
p = (x - v) * tmp2 - (x - w) * tmp1
tmp2 = 2.0 * (tmp2 - tmp1)
if (tmp2 > 0.0):
p = -p
tmp2 = numpy.abs(tmp2)
dx_temp = deltax
deltax = rat
# check parabolic fit
if ((p > tmp2 * (a - x)) and (p < tmp2 * (b - x)) and
(numpy.abs(p) < numpy.abs(0.5 * tmp2 * dx_temp))):
rat = p * 1.0 / tmp2 # if parabolic step is useful.
u = x + rat
if ((u - a) < tol2 or (b - u) < tol2):
if xmid - x >= 0:
rat = tol1
else:
rat = -tol1
else:
if (x >= xmid):
deltax = a - x # if it's not do a golden section step
else:
deltax = b - x
rat = _cg * deltax
if (numpy.abs(rat) < tol1): # update by at least tol1
if rat >= 0:
u = x + tol1
else:
u = x - tol1
else:
u = x + rat
fu = func(*((u,) + self.args)) # calculate new output value
funcalls += 1
if (fu > fx): # if it's bigger than current
if (u < x):
a = u
else:
b = u
if (fu <= fw) or (w == x):
v = w
w = u
fv = fw
fw = fu
elif (fu <= fv) or (v == x) or (v == w):
v = u
fv = fu
else:
if (u >= x):
a = x
else:
b = x
v = w
w = x
x = u
fv = fw
fw = fx
fx = fu
iter += 1
#################################
#END CORE ALGORITHM
#################################
self.xmin = x
self.fval = fx
self.iter = iter
self.funcalls = funcalls
def get_result(self, full_output=False):
if full_output:
return self.xmin, self.fval, self.iter, self.funcalls
else:
return self.xmin
def brent(func, args=(), brack=None, tol=1.48e-8, full_output=0, maxiter=500):
"""
Given a function of one-variable and a possible bracketing interval,
return the minimum of the function isolated to a fractional precision of
tol.
Parameters
----------
func : callable f(x,*args)
Objective function.
args : tuple, optional
Additional arguments (if present).
brack : tuple, optional
Either a triple (xa,xb,xc) where xa<xb<xc and func(xb) <
func(xa), func(xc) or a pair (xa,xb) which are used as a
starting interval for a downhill bracket search (see
`bracket`). Providing the pair (xa,xb) does not always mean
the obtained solution will satisfy xa<=x<=xb.
tol : float, optional
        Stop if the change between iterations is less than `tol`.
full_output : bool, optional
If True, return all output args (xmin, fval, iter,
funcalls).
maxiter : int, optional
Maximum number of iterations in solution.
Returns
-------
xmin : ndarray
Optimum point.
fval : float
Optimum value.
iter : int
Number of iterations.
funcalls : int
Number of objective function evaluations made.
See also
--------
minimize_scalar: Interface to minimization algorithms for scalar
univariate functions. See the 'Brent' `method` in particular.
Notes
-----
Uses inverse parabolic interpolation when possible to speed up
convergence of golden section method.
"""
options = {'xtol': tol,
'maxiter': maxiter}
res = _minimize_scalar_brent(func, brack, args, **options)
if full_output:
return res['x'], res['fun'], res['nit'], res['nfev']
else:
return res['x']
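# --- Illustrative sketch (editor's addition) ---
# `brent` refines a one-dimensional minimum, optionally from an explicit
# bracketing triple (xa, xb, xc) with func(xb) below both endpoint values.
# The helper name `_example_brent` is hypothetical; nothing runs at import time.
def _example_brent():
    f = lambda x: (x - 2.0) ** 2 + 1.0
    # (0, 1, 4) is a valid bracket: f(1) = 2 is below f(0) = 5 and f(4) = 5
    xmin, fval, nit, nfev = brent(f, brack=(0.0, 1.0, 4.0), full_output=True)
    assert abs(xmin - 2.0) < 1e-5
    assert abs(fval - 1.0) < 1e-8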
def _minimize_scalar_brent(func, brack=None, args=(),
xtol=1.48e-8, maxiter=500,
**unknown_options):
"""
Options
-------
maxiter : int
Maximum number of iterations to perform.
xtol : float
Relative error in solution `xopt` acceptable for convergence.
Notes
-----
Uses inverse parabolic interpolation when possible to speed up
convergence of golden section method.
"""
_check_unknown_options(unknown_options)
tol = xtol
if tol < 0:
raise ValueError('tolerance should be >= 0, got %r' % tol)
brent = Brent(func=func, args=args, tol=tol,
full_output=True, maxiter=maxiter)
brent.set_bracket(brack)
brent.optimize()
x, fval, nit, nfev = brent.get_result(full_output=True)
return OptimizeResult(fun=fval, x=x, nit=nit, nfev=nfev,
success=nit < maxiter)
def golden(func, args=(), brack=None, tol=_epsilon, full_output=0):
"""
Return the minimum of a function of one variable.
Given a function of one variable and a possible bracketing interval,
return the minimum of the function isolated to a fractional precision of
tol.
Parameters
----------
func : callable func(x,*args)
Objective function to minimize.
args : tuple, optional
Additional arguments (if present), passed to func.
brack : tuple, optional
Triple (a,b,c), where (a<b<c) and func(b) <
func(a),func(c). If bracket consists of two numbers (a,
c), then they are assumed to be a starting interval for a
        downhill bracket search (see `bracket`); it does not always
        mean that the obtained solution will satisfy a <= x <= c.
tol : float, optional
x tolerance stop criterion
full_output : bool, optional
If True, return optional outputs.
See also
--------
minimize_scalar: Interface to minimization algorithms for scalar
univariate functions. See the 'Golden' `method` in particular.
Notes
-----
Uses analog of bisection method to decrease the bracketed
interval.
"""
options = {'xtol': tol}
res = _minimize_scalar_golden(func, brack, args, **options)
if full_output:
return res['x'], res['fun'], res['nfev']
else:
return res['x']
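# --- Illustrative sketch (editor's addition) ---
# `golden` performs golden-section reduction of a bracketing triple. Using the
# same quadratic and bracket as the `brent` sketch above, it isolates the
# minimizer near x = 2. The helper name `_example_golden` is hypothetical;
# nothing runs at import time.
def _example_golden():
    f = lambda x: (x - 2.0) ** 2 + 1.0
    xmin = golden(f, brack=(0.0, 1.0, 4.0))
    assert abs(xmin - 2.0) < 1e-6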
def _minimize_scalar_golden(func, brack=None, args=(),
xtol=_epsilon, **unknown_options):
"""
Options
-------
maxiter : int
Maximum number of iterations to perform.
xtol : float
Relative error in solution `xopt` acceptable for convergence.
"""
_check_unknown_options(unknown_options)
tol = xtol
if brack is None:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, args=args)
elif len(brack) == 2:
xa, xb, xc, fa, fb, fc, funcalls = bracket(func, xa=brack[0],
xb=brack[1], args=args)
elif len(brack) == 3:
xa, xb, xc = brack
if (xa > xc): # swap so xa < xc can be assumed
xc, xa = xa, xc
if not ((xa < xb) and (xb < xc)):
raise ValueError("Not a bracketing interval.")
fa = func(*((xa,) + args))
fb = func(*((xb,) + args))
fc = func(*((xc,) + args))
if not ((fb < fa) and (fb < fc)):
raise ValueError("Not a bracketing interval.")
funcalls = 3
else:
raise ValueError("Bracketing interval must be length 2 or 3 sequence.")
_gR = 0.61803399
_gC = 1.0 - _gR
x3 = xc
x0 = xa
if (numpy.abs(xc - xb) > numpy.abs(xb - xa)):
x1 = xb
x2 = xb + _gC * (xc - xb)
else:
x2 = xb
x1 = xb - _gC * (xb - xa)
f1 = func(*((x1,) + args))
f2 = func(*((x2,) + args))
funcalls += 2
while (numpy.abs(x3 - x0) > tol * (numpy.abs(x1) + numpy.abs(x2))):
if (f2 < f1):
x0 = x1
x1 = x2
x2 = _gR * x1 + _gC * x3
f1 = f2
f2 = func(*((x2,) + args))
else:
x3 = x2
x2 = x1
x1 = _gR * x2 + _gC * x0
f2 = f1
f1 = func(*((x1,) + args))
funcalls += 1
if (f1 < f2):
xmin = x1
fval = f1
else:
xmin = x2
fval = f2
return OptimizeResult(fun=fval, nfev=funcalls, x=xmin)
def bracket(func, xa=0.0, xb=1.0, args=(), grow_limit=110.0, maxiter=1000):
"""
Bracket the minimum of the function.
Given a function and distinct initial points, search in the
    downhill direction (as defined by the initial points) and return
    new points xa, xb, xc that bracket the minimum of the function
    f(xa) > f(xb) < f(xc). It does not always mean that the obtained
    solution will satisfy xa <= x <= xb.
Parameters
----------
func : callable f(x,*args)
Objective function to minimize.
xa, xb : float, optional
Bracketing interval. Defaults `xa` to 0.0, and `xb` to 1.0.
args : tuple, optional
Additional arguments (if present), passed to `func`.
grow_limit : float, optional
Maximum grow limit. Defaults to 110.0
maxiter : int, optional
Maximum number of iterations to perform. Defaults to 1000.
Returns
-------
xa, xb, xc : float
Bracket.
fa, fb, fc : float
Objective function values in bracket.
funcalls : int
Number of function evaluations made.
"""
_gold = 1.618034
_verysmall_num = 1e-21
fa = func(*(xa,) + args)
fb = func(*(xb,) + args)
if (fa < fb): # Switch so fa > fb
xa, xb = xb, xa
fa, fb = fb, fa
xc = xb + _gold * (xb - xa)
fc = func(*((xc,) + args))
funcalls = 3
iter = 0
while (fc < fb):
tmp1 = (xb - xa) * (fb - fc)
tmp2 = (xb - xc) * (fb - fa)
val = tmp2 - tmp1
if numpy.abs(val) < _verysmall_num:
denom = 2.0 * _verysmall_num
else:
denom = 2.0 * val
w = xb - ((xb - xc) * tmp2 - (xb - xa) * tmp1) / denom
wlim = xb + grow_limit * (xc - xb)
if iter > maxiter:
raise RuntimeError("Too many iterations.")
iter += 1
if (w - xc) * (xb - w) > 0.0:
fw = func(*((w,) + args))
funcalls += 1
if (fw < fc):
xa = xb
xb = w
fa = fb
fb = fw
return xa, xb, xc, fa, fb, fc, funcalls
elif (fw > fb):
xc = w
fc = fw
return xa, xb, xc, fa, fb, fc, funcalls
w = xc + _gold * (xc - xb)
fw = func(*((w,) + args))
funcalls += 1
elif (w - wlim)*(wlim - xc) >= 0.0:
w = wlim
fw = func(*((w,) + args))
funcalls += 1
elif (w - wlim)*(xc - w) > 0.0:
fw = func(*((w,) + args))
funcalls += 1
if (fw < fc):
xb = xc
xc = w
w = xc + _gold * (xc - xb)
fb = fc
fc = fw
fw = func(*((w,) + args))
funcalls += 1
else:
w = xc + _gold * (xc - xb)
fw = func(*((w,) + args))
funcalls += 1
xa = xb
xb = xc
xc = w
fa = fb
fb = fc
fc = fw
return xa, xb, xc, fa, fb, fc, funcalls
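# --- Illustrative sketch (editor's addition) ---
# `bracket` walks downhill from two starting points until it finds a triple
# (xa, xb, xc) with f(xa) > f(xb) < f(xc). The helper name `_example_bracket`
# is hypothetical; nothing runs at import time.
def _example_bracket():
    f = lambda x: (x - 3.0) ** 2
    xa, xb, xc, fa, fb, fc, funcalls = bracket(f, xa=0.0, xb=1.0)
    assert fb < fa and fb < fc              # the middle point is the lowest
    assert min(xa, xc) < 3.0 < max(xa, xc)  # the true minimizer lies inside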
def _linesearch_powell(func, p, xi, tol=1e-3):
"""Line-search algorithm using fminbound.
    Find the minimum of the function ``func(x0 + alpha*direc)``.
"""
def myfunc(alpha):
return func(p + alpha*xi)
alpha_min, fret, iter, num = brent(myfunc, full_output=1, tol=tol)
xi = alpha_min*xi
return squeeze(fret), p + xi, xi
def fmin_powell(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None,
maxfun=None, full_output=0, disp=1, retall=0, callback=None,
direc=None):
"""
Minimize a function using modified Powell's method. This method
only uses function values, not derivatives.
Parameters
----------
func : callable f(x,*args)
Objective function to be minimized.
x0 : ndarray
Initial guess.
args : tuple, optional
Extra arguments passed to func.
callback : callable, optional
An optional user-supplied function, called after each
iteration. Called as ``callback(xk)``, where ``xk`` is the
current parameter vector.
direc : ndarray, optional
Initial direction set.
xtol : float, optional
Line-search error tolerance.
ftol : float, optional
Relative error in ``func(xopt)`` acceptable for convergence.
maxiter : int, optional
Maximum number of iterations to perform.
maxfun : int, optional
Maximum number of function evaluations to make.
full_output : bool, optional
If True, fopt, xi, direc, iter, funcalls, and
warnflag are returned.
disp : bool, optional
If True, print convergence messages.
retall : bool, optional
If True, return a list of the solution at each iteration.
Returns
-------
xopt : ndarray
Parameter which minimizes `func`.
fopt : number
Value of function at minimum: ``fopt = func(xopt)``.
direc : ndarray
Current direction set.
iter : int
Number of iterations.
funcalls : int
Number of function calls made.
warnflag : int
Integer warning flag:
1 : Maximum number of function evaluations.
2 : Maximum number of iterations.
allvecs : list
List of solutions at each iteration.
See also
--------
minimize: Interface to unconstrained minimization algorithms for
multivariate functions. See the 'Powell' `method` in particular.
Notes
-----
Uses a modification of Powell's method to find the minimum of
a function of N variables. Powell's method is a conjugate
direction method.
The algorithm has two loops. The outer loop
merely iterates over the inner loop. The inner loop minimizes
over each current direction in the direction set. At the end
of the inner loop, if certain conditions are met, the direction
that gave the largest decrease is dropped and replaced with
the difference between the current estimated x and the estimated
x from the beginning of the inner-loop.
The technical conditions for replacing the direction of greatest
increase amount to checking that
1. No further gain can be made along the direction of greatest increase
from that iteration.
    2. The direction of greatest increase accounted for a sufficiently large
fraction of the decrease in the function value from that iteration of
the inner loop.
References
----------
Powell M.J.D. (1964) An efficient method for finding the minimum of a
function of several variables without calculating derivatives,
Computer Journal, 7 (2):155-162.
Press W., Teukolsky S.A., Vetterling W.T., and Flannery B.P.:
Numerical Recipes (any edition), Cambridge University Press
"""
opts = {'xtol': xtol,
'ftol': ftol,
'maxiter': maxiter,
'maxfev': maxfun,
'disp': disp,
'direc': direc,
'return_all': retall}
res = _minimize_powell(func, x0, args, callback=callback, **opts)
if full_output:
retlist = (res['x'], res['fun'], res['direc'], res['nit'],
res['nfev'], res['status'])
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
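# --- Illustrative sketch (editor's addition) ---
# Minimal use of `fmin_powell`, which, like `fmin`, needs only function values.
# On a simple separable quadratic the direction-set iterations should land very
# close to the analytic minimizer. The helper name `_example_fmin_powell` is
# hypothetical; numpy is assumed available; nothing runs at import time.
def _example_fmin_powell():
    import numpy as np
    f = lambda x: (x[0] - 1.0) ** 2 + 10.0 * (x[1] + 2.0) ** 2
    xopt = fmin_powell(f, np.array([0.0, 0.0]), disp=False)
    assert np.allclose(xopt, [1.0, -2.0], atol=1e-3)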
def _minimize_powell(func, x0, args=(), callback=None,
xtol=1e-4, ftol=1e-4, maxiter=None, maxfev=None,
disp=False, direc=None, return_all=False,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
modified Powell algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
xtol : float
Relative error in solution `xopt` acceptable for convergence.
ftol : float
Relative error in ``fun(xopt)`` acceptable for convergence.
maxiter : int
Maximum number of iterations to perform.
maxfev : int
Maximum number of function evaluations to make.
direc : ndarray
Initial set of direction vectors for the Powell method.
"""
_check_unknown_options(unknown_options)
maxfun = maxfev
retall = return_all
# we need to use a mutable object here that we can update in the
# wrapper function
fcalls, func = wrap_function(func, args)
x = asarray(x0).flatten()
if retall:
allvecs = [x]
N = len(x)
if maxiter is None:
maxiter = N * 1000
if maxfun is None:
maxfun = N * 1000
if direc is None:
direc = eye(N, dtype=float)
else:
direc = asarray(direc, dtype=float)
fval = squeeze(func(x))
x1 = x.copy()
iter = 0
ilist = list(range(N))
while True:
fx = fval
bigind = 0
delta = 0.0
for i in ilist:
direc1 = direc[i]
fx2 = fval
fval, x, direc1 = _linesearch_powell(func, x, direc1,
tol=xtol * 100)
if (fx2 - fval) > delta:
delta = fx2 - fval
bigind = i
iter += 1
if callback is not None:
callback(x)
if retall:
allvecs.append(x)
bnd = ftol * (numpy.abs(fx) + numpy.abs(fval)) + 1e-20
if 2.0 * (fx - fval) <= bnd:
break
if fcalls[0] >= maxfun:
break
if iter >= maxiter:
break
# Construct the extrapolated point
direc1 = x - x1
x2 = 2*x - x1
x1 = x.copy()
fx2 = squeeze(func(x2))
if (fx > fx2):
t = 2.0*(fx + fx2 - 2.0*fval)
temp = (fx - fval - delta)
t *= temp*temp
temp = fx - fx2
t -= delta*temp*temp
if t < 0.0:
fval, x, direc1 = _linesearch_powell(func, x, direc1,
tol=xtol*100)
direc[bigind] = direc[-1]
direc[-1] = direc1
warnflag = 0
if fcalls[0] >= maxfun:
warnflag = 1
msg = _status_message['maxfev']
if disp:
print("Warning: " + msg)
elif iter >= maxiter:
warnflag = 2
msg = _status_message['maxiter']
if disp:
print("Warning: " + msg)
else:
msg = _status_message['success']
if disp:
print(msg)
print(" Current function value: %f" % fval)
print(" Iterations: %d" % iter)
print(" Function evaluations: %d" % fcalls[0])
x = squeeze(x)
result = OptimizeResult(fun=fval, direc=direc, nit=iter, nfev=fcalls[0],
status=warnflag, success=(warnflag == 0),
message=msg, x=x)
if retall:
result['allvecs'] = allvecs
return result
def _endprint(x, flag, fval, maxfun, xtol, disp):
if flag == 0:
if disp > 1:
print("\nOptimization terminated successfully;\n"
"The returned value satisfies the termination criteria\n"
"(using xtol = ", xtol, ")")
if flag == 1:
if disp:
print("\nMaximum number of function evaluations exceeded --- "
"increase maxfun argument.\n")
return
def brute(func, ranges, args=(), Ns=20, full_output=0, finish=fmin,
disp=False):
"""Minimize a function over a given range by brute force.
Uses the "brute force" method, i.e. computes the function's value
at each point of a multidimensional grid of points, to find the global
minimum of the function.
The function is evaluated everywhere in the range with the datatype of the
first call to the function, as enforced by the ``vectorize`` NumPy
function. The value and type of the function evaluation returned when
``full_output=True`` are affected in addition by the ``finish`` argument
(see Notes).
Parameters
----------
func : callable
The objective function to be minimized. Must be in the
form ``f(x, *args)``, where ``x`` is the argument in
the form of a 1-D array and ``args`` is a tuple of any
additional fixed parameters needed to completely specify
the function.
ranges : tuple
Each component of the `ranges` tuple must be either a
"slice object" or a range tuple of the form ``(low, high)``.
The program uses these to create the grid of points on which
the objective function will be computed. See `Note 2` for
more detail.
args : tuple, optional
Any additional fixed parameters needed to completely specify
the function.
Ns : int, optional
Number of grid points along the axes, if not otherwise
        specified. See `Note 2`.
full_output : bool, optional
If True, return the evaluation grid and the objective function's
values on it.
finish : callable, optional
An optimization function that is called with the result of brute force
minimization as initial guess. `finish` should take `func` and
the initial guess as positional arguments, and take `args` as
keyword arguments. It may additionally take `full_output`
and/or `disp` as keyword arguments. Use None if no "polishing"
function is to be used. See Notes for more details.
disp : bool, optional
Set to True to print convergence messages.
Returns
-------
x0 : ndarray
A 1-D array containing the coordinates of a point at which the
objective function had its minimum value. (See `Note 1` for
which point is returned.)
fval : float
Function value at the point `x0`. (Returned when `full_output` is
True.)
grid : tuple
Representation of the evaluation grid. It has the same
length as `x0`. (Returned when `full_output` is True.)
Jout : ndarray
Function values at each point of the evaluation
grid, `i.e.`, ``Jout = func(*grid)``. (Returned
when `full_output` is True.)
See Also
--------
basinhopping, differential_evolution
Notes
-----
*Note 1*: The program finds the gridpoint at which the lowest value
of the objective function occurs. If `finish` is None, that is the
point returned. When the global minimum occurs within (or not very far
outside) the grid's boundaries, and the grid is fine enough, that
    point will be in the neighborhood of the global minimum.
However, users often employ some other optimization program to
"polish" the gridpoint values, `i.e.`, to seek a more precise
(local) minimum near `brute's` best gridpoint.
The `brute` function's `finish` option provides a convenient way to do
that. Any polishing program used must take `brute's` output as its
initial guess as a positional argument, and take `brute's` input values
for `args` as keyword arguments, otherwise an error will be raised.
It may additionally take `full_output` and/or `disp` as keyword arguments.
`brute` assumes that the `finish` function returns either an
`OptimizeResult` object or a tuple in the form:
``(xmin, Jmin, ... , statuscode)``, where ``xmin`` is the minimizing
value of the argument, ``Jmin`` is the minimum value of the objective
function, "..." may be some other returned values (which are not used
by `brute`), and ``statuscode`` is the status code of the `finish` program.
Note that when `finish` is not None, the values returned are those
of the `finish` program, *not* the gridpoint ones. Consequently,
while `brute` confines its search to the input grid points,
the `finish` program's results usually will not coincide with any
gridpoint, and may fall outside the grid's boundary.
*Note 2*: The grid of points is a `numpy.mgrid` object.
For `brute` the `ranges` and `Ns` inputs have the following effect.
Each component of the `ranges` tuple can be either a slice object or a
two-tuple giving a range of values, such as (0, 5). If the component is a
slice object, `brute` uses it directly. If the component is a two-tuple
range, `brute` internally converts it to a slice object that interpolates
`Ns` points from its low-value to its high-value, inclusive.
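    A minimal illustration of this conversion (an added sketch, not part of
    the original wording; ``Ns=3`` is used here only for brevity): the range
    tuple ``(0, 5)`` is handled like ``slice(0, 5, complex(3))``, which
    `numpy.mgrid` expands to 3 evenly spaced points from 0 to 5 inclusive.

    >>> import numpy as np
    >>> np.mgrid[slice(0, 5, complex(3))].tolist()
    [0.0, 2.5, 5.0]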
Examples
--------
We illustrate the use of `brute` to seek the global minimum of a function
of two variables that is given as the sum of a positive-definite
quadratic and two deep "Gaussian-shaped" craters. Specifically, define
the objective function `f` as the sum of three other functions,
``f = f1 + f2 + f3``. We suppose each of these has a signature
``(z, *params)``, where ``z = (x, y)``, and ``params`` and the functions
are as defined below.
>>> params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 1, -2, 0.5)
>>> def f1(z, *params):
... x, y = z
... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
... return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)
>>> def f2(z, *params):
... x, y = z
... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
... return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))
>>> def f3(z, *params):
... x, y = z
... a, b, c, d, e, f, g, h, i, j, k, l, scale = params
... return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))
>>> def f(z, *params):
... return f1(z, *params) + f2(z, *params) + f3(z, *params)
Thus, the objective function may have local minima near the minimum
of each of the three functions of which it is composed. To
use `fmin` to polish its gridpoint result, we may then continue as
follows:
>>> rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))
>>> from scipy import optimize
>>> resbrute = optimize.brute(f, rranges, args=params, full_output=True,
... finish=optimize.fmin)
>>> resbrute[0] # global minimum
array([-1.05665192, 1.80834843])
>>> resbrute[1] # function value at global minimum
-3.4085818767
Note that if `finish` had been set to None, we would have gotten the
gridpoint [-1.0 1.75] where the rounded function value is -2.892.
"""
N = len(ranges)
if N > 40:
raise ValueError("Brute Force not possible with more "
"than 40 variables.")
lrange = list(ranges)
for k in range(N):
if type(lrange[k]) is not type(slice(None)):
if len(lrange[k]) < 3:
lrange[k] = tuple(lrange[k]) + (complex(Ns),)
lrange[k] = slice(*lrange[k])
if (N == 1):
lrange = lrange[0]
def _scalarfunc(*params):
params = squeeze(asarray(params))
return func(params, *args)
vecfunc = vectorize(_scalarfunc)
grid = mgrid[lrange]
if (N == 1):
grid = (grid,)
Jout = vecfunc(*grid)
Nshape = shape(Jout)
indx = argmin(Jout.ravel(), axis=-1)
Nindx = zeros(N, int)
xmin = zeros(N, float)
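    # (descriptive note, added for clarity; not in the original source)
    # `argmin` above returns a flat index into the raveled evaluation grid.
    # The loop below converts that flat C-order index into one index per axis
    # (the manual equivalent of `numpy.unravel_index`), and the loop after it
    # reads the winning grid point's coordinates into `xmin`.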
for k in range(N - 1, -1, -1):
thisN = Nshape[k]
Nindx[k] = indx % Nshape[k]
indx = indx // thisN
for k in range(N):
xmin[k] = grid[k][tuple(Nindx)]
Jmin = Jout[tuple(Nindx)]
if (N == 1):
grid = grid[0]
xmin = xmin[0]
if callable(finish):
# set up kwargs for `finish` function
finish_args = _getargspec(finish).args
finish_kwargs = dict()
if 'full_output' in finish_args:
finish_kwargs['full_output'] = 1
if 'disp' in finish_args:
finish_kwargs['disp'] = disp
elif 'options' in finish_args:
# pass 'disp' as `options`
# (e.g. if `finish` is `minimize`)
finish_kwargs['options'] = {'disp': disp}
# run minimizer
res = finish(func, xmin, args=args, **finish_kwargs)
if isinstance(res, OptimizeResult):
xmin = res.x
Jmin = res.fun
success = res.success
else:
xmin = res[0]
Jmin = res[1]
success = res[-1] == 0
if not success:
if disp:
print("Warning: Either final optimization did not succeed "
"or `finish` does not return `statuscode` as its last "
"argument.")
if full_output:
return xmin, Jmin, grid, Jout
else:
return xmin
def show_options(solver=None, method=None, disp=True):
"""
Show documentation for additional options of optimization solvers.
These are method-specific options that can be supplied through the
``options`` dict.
Parameters
----------
solver : str
Type of optimization solver. One of 'minimize', 'minimize_scalar',
'root', or 'linprog'.
method : str, optional
If not given, shows all methods of the specified solver. Otherwise,
show only the options for the specified method. Valid values
        correspond to the method names of the respective solver (e.g. 'BFGS' for
'minimize').
disp : bool, optional
Whether to print the result rather than returning it.
Returns
-------
text
        Either None (for disp=True) or the text string (for disp=False).
Notes
-----
The solver-specific methods are:
`scipy.optimize.minimize`
- :ref:`Nelder-Mead <optimize.minimize-neldermead>`
- :ref:`Powell <optimize.minimize-powell>`
- :ref:`CG <optimize.minimize-cg>`
- :ref:`BFGS <optimize.minimize-bfgs>`
- :ref:`Newton-CG <optimize.minimize-newtoncg>`
- :ref:`L-BFGS-B <optimize.minimize-lbfgsb>`
- :ref:`TNC <optimize.minimize-tnc>`
- :ref:`COBYLA <optimize.minimize-cobyla>`
- :ref:`SLSQP <optimize.minimize-slsqp>`
- :ref:`dogleg <optimize.minimize-dogleg>`
- :ref:`trust-ncg <optimize.minimize-trustncg>`
`scipy.optimize.root`
- :ref:`hybr <optimize.root-hybr>`
- :ref:`lm <optimize.root-lm>`
- :ref:`broyden1 <optimize.root-broyden1>`
- :ref:`broyden2 <optimize.root-broyden2>`
- :ref:`anderson <optimize.root-anderson>`
- :ref:`linearmixing <optimize.root-linearmixing>`
- :ref:`diagbroyden <optimize.root-diagbroyden>`
- :ref:`excitingmixing <optimize.root-excitingmixing>`
- :ref:`krylov <optimize.root-krylov>`
- :ref:`df-sane <optimize.root-dfsane>`
`scipy.optimize.minimize_scalar`
- :ref:`brent <optimize.minimize_scalar-brent>`
- :ref:`golden <optimize.minimize_scalar-golden>`
- :ref:`bounded <optimize.minimize_scalar-bounded>`
`scipy.optimize.linprog`
- :ref:`simplex <optimize.linprog-simplex>`
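    Examples
    --------
    A brief usage sketch (an illustrative addition, not part of the original
    docstring); note that this version of the function expects the lowercase
    method names listed in its internal table:

    >>> from scipy.optimize import show_options
    >>> show_options(solver='minimize', method='nelder-mead')  # doctest: +SKIP
    >>> text = show_options(solver='root', disp=False)  # doctest: +SKIP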
"""
import textwrap
doc_routines = {
'minimize': (
('bfgs', 'scipy.optimize.optimize._minimize_bfgs'),
('cg', 'scipy.optimize.optimize._minimize_cg'),
('cobyla', 'scipy.optimize.cobyla._minimize_cobyla'),
('dogleg', 'scipy.optimize._trustregion_dogleg._minimize_dogleg'),
('l-bfgs-b', 'scipy.optimize.lbfgsb._minimize_lbfgsb'),
('nelder-mead', 'scipy.optimize.optimize._minimize_neldermead'),
('newtoncg', 'scipy.optimize.optimize._minimize_newtoncg'),
('powell', 'scipy.optimize.optimize._minimize_powell'),
('slsqp', 'scipy.optimize.slsqp._minimize_slsqp'),
('tnc', 'scipy.optimize.tnc._minimize_tnc'),
('trust-ncg', 'scipy.optimize._trustregion_ncg._minimize_trust_ncg'),
),
'root': (
('hybr', 'scipy.optimize.minpack._root_hybr'),
('lm', 'scipy.optimize._root._root_leastsq'),
('broyden1', 'scipy.optimize._root._root_broyden1_doc'),
('broyden2', 'scipy.optimize._root._root_broyden2_doc'),
('anderson', 'scipy.optimize._root._root_anderson_doc'),
('diagbroyden', 'scipy.optimize._root._root_diagbroyden_doc'),
('excitingmixing', 'scipy.optimize._root._root_excitingmixing_doc'),
('linearmixing', 'scipy.optimize._root._root_linearmixing_doc'),
('krylov', 'scipy.optimize._root._root_krylov_doc'),
('df-sane', 'scipy.optimize._spectral._root_df_sane'),
),
'linprog': (
('simplex', 'scipy.optimize._linprog._linprog_simplex'),
),
'minimize_scalar': (
('brent', 'scipy.optimize.optimize._minimize_scalar_brent'),
('bounded', 'scipy.optimize.optimize._minimize_scalar_bounded'),
('golden', 'scipy.optimize.optimize._minimize_scalar_golden'),
),
}
if solver is None:
text = ["\n\n\n========\n", "minimize\n", "========\n"]
text.append(show_options('minimize', disp=False))
text.extend(["\n\n===============\n", "minimize_scalar\n",
"===============\n"])
text.append(show_options('minimize_scalar', disp=False))
text.extend(["\n\n\n====\n", "root\n",
"====\n"])
text.append(show_options('root', disp=False))
text.extend(['\n\n\n=======\n', 'linprog\n',
'=======\n'])
text.append(show_options('linprog', disp=False))
text = "".join(text)
else:
solver = solver.lower()
if solver not in doc_routines:
raise ValueError('Unknown solver %r' % (solver,))
if method is None:
text = []
for name, _ in doc_routines[solver]:
text.extend(["\n\n" + name, "\n" + "="*len(name) + "\n\n"])
text.append(show_options(solver, name, disp=False))
text = "".join(text)
else:
methods = dict(doc_routines[solver])
if method not in methods:
raise ValueError("Unknown method %r" % (method,))
name = methods[method]
# Import function object
parts = name.split('.')
mod_name = ".".join(parts[:-1])
__import__(mod_name)
obj = getattr(sys.modules[mod_name], parts[-1])
# Get doc
doc = obj.__doc__
if doc is not None:
text = textwrap.dedent(doc).strip()
else:
text = ""
if disp:
print(text)
return
else:
return text
def main():
import time
times = []
algor = []
x0 = [0.8, 1.2, 0.7]
print("Nelder-Mead Simplex")
print("===================")
start = time.time()
x = fmin(rosen, x0)
print(x)
times.append(time.time() - start)
algor.append('Nelder-Mead Simplex\t')
print()
print("Powell Direction Set Method")
print("===========================")
start = time.time()
x = fmin_powell(rosen, x0)
print(x)
times.append(time.time() - start)
algor.append('Powell Direction Set Method.')
print()
print("Nonlinear CG")
print("============")
start = time.time()
x = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200)
print(x)
times.append(time.time() - start)
algor.append('Nonlinear CG \t')
print()
print("BFGS Quasi-Newton")
print("=================")
start = time.time()
x = fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80)
print(x)
times.append(time.time() - start)
algor.append('BFGS Quasi-Newton\t')
print()
print("BFGS approximate gradient")
print("=========================")
start = time.time()
x = fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100)
print(x)
times.append(time.time() - start)
algor.append('BFGS without gradient\t')
print()
print("Newton-CG with Hessian product")
print("==============================")
start = time.time()
x = fmin_ncg(rosen, x0, rosen_der, fhess_p=rosen_hess_prod, maxiter=80)
print(x)
times.append(time.time() - start)
algor.append('Newton-CG with hessian product')
print()
print("Newton-CG with full Hessian")
print("===========================")
start = time.time()
x = fmin_ncg(rosen, x0, rosen_der, fhess=rosen_hess, maxiter=80)
print(x)
times.append(time.time() - start)
algor.append('Newton-CG with full hessian')
print()
print("\nMinimizing the Rosenbrock function of order 3\n")
print(" Algorithm \t\t\t Seconds")
print("===========\t\t\t =========")
for k in range(len(algor)):
print(algor[k], "\t -- ", times[k])
if __name__ == "__main__":
main()
|
andim/scipy
|
scipy/optimize/optimize.py
|
Python
|
bsd-3-clause
| 96,383
|
[
"Gaussian"
] |
8bcb8ac765e7ce76ddad3478928ad74164f5e7c5c67f637c076e95ba1df0bfda
|
#!/usr/bin/env python
"""
Cross-check energies of molecules from AlkEthOH set using SMIRNOFF xml file
versus energies from AMBER .prmtop and .crd files (parm@frosst params).
"""
import os
# datapath = './AlkEthOH_tripos/AlkEthOH_chain_filt1'
# datapath = './AlkEthOH_tripos/AlkEthOH_rings_filt1'
datapath = './AlkEthOH_tripos/AlkEthOH_test_filt1'
molname = 'AlkEthOH_r22'
mol_filepath = os.path.join(datapath, molname + '_tripos.mol2')
prmtop_filepath = os.path.join(datapath, molname + '.top')
inpcrd_filepath = os.path.join(datapath, molname + '.crd')
# Check if we have this data file; if not we have to extract the archive.
if not os.path.isdir(datapath):
print("Extracting archived molecule files.")
    # Extract the AlkEthOH dataset shipped with the toolkit (in data/molecules/) into the working directory.
from openff.toolkit.tests.utils import get_data_file_path
tarfile_path = os.path.join(get_data_file_path('molecules'), 'AlkEthOH_tripos.tar.gz')
import tarfile
with tarfile.open(tarfile_path, 'r:gz') as tar:
tar.extractall()
# Load molecule
from openff.toolkit.topology import Molecule
molecule = Molecule.from_file(mol_filepath)
# Load force field
from openff.toolkit.typing.engines.smirnoff import ForceField
forcefield = ForceField('test_forcefields/Frosst_AlkEthOH_parmAtFrosst.offxml')
# Compare energies
from openff.toolkit.tests.utils import compare_amber_smirnoff
# We ignore the charges as they are not included in the force field.
# TODO: Reactivate this check once charges can be loaded from the file.
energies = compare_amber_smirnoff(prmtop_filepath, inpcrd_filepath,
forcefield, molecule,
ignore_charges=True)
# Pretty-print the result.
from pprint import pprint
pprint(energies)
|
open-forcefield-group/openforcefield
|
examples/deprecated/SMIRNOFF_comparison/compare_molecule_energies.py
|
Python
|
mit
| 1,818
|
[
"Amber"
] |
24c47efcedef7b81736bd518ab93234f1cdf3869fe0d872983d664314d69a056
|
# coding: utf-8
__all__ = [
'__version__',
'name',
'description',
'long_description',
'license',
'__author__',
'author',
'author_email',
'url',
]
__version__ = '4.6.0'
name = "ElectronPhononCoupling"
description = "Python module to analyze electron-phonon related quantities."
long_description = """
Compute electron-phonon coupling related quantities, such as:
- zero-point renormalization
- temperature dependence of eigenvalues
- quasiparticle lifetime from the el-ph self-energy
- frequency-dependent self-energy
- spectral function
"""
license = 'GPL'
authors = {
'GA': ('Gabriel Antonius', 'gabriel.antonius at gmail.com'),
}
author = 'The ABINIT group'
author_email = authors['GA'][1]
url = 'https://github.com/GkAntonius/ElectronPhononCoupling'
__author__ = ''
for auth, email in authors.itervalues():
__author__ += auth + ' <' + email + '>\n'
del auth, email
|
abinit/abinit
|
scripts/post_processing/ElectronPhononCoupling/ElectronPhononCoupling/config/release.py
|
Python
|
gpl-3.0
| 989
|
[
"ABINIT"
] |
d1c84e3c012c6bdbc09c3db42d945c79e6ce79c386ae38a0e5460c6cdddc7968
|
# -*- coding: utf-8 -*-
'''Views of site pages'''
from django.shortcuts import render_to_response
from amber.mongo_db import MAIN_COLLECTION, SERVERS_COLLECTION, split_words
from django.conf import settings
from datetime import datetime
def sizeof_fmt(num):
'''Formatting file or folder size'''
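    # Illustrative examples (added; not in the original source):
    #   sizeof_fmt(2048)        -> '2.0 KB'
    #   sizeof_fmt(5 * 1024**3) -> '5.0 GB'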
for size in ['bytes', 'KB', 'MB', 'GB']:
if num < 1024.0:
return "%3.1f %s" % (num, size)
num /= 1024.0
return "%3.1f %s" % (num, 'TB')
def get_paths(servers, paths, is_orthodox):
    '''Return path links for the given paths'''
res = []
if is_orthodox:
url_prefix = 'smb://'
else:
url_prefix = 'file:////'
for server_id in paths:
server = servers[server_id]
for path in paths[server_id]:
res.append({
'is_active': server['is_active'],
'url': server['host'] + path,
'urlprefix': url_prefix,
'title': server['name' if 'name' in server else 'host'] + path,
})
return res
def get_servers(active_only=False):
    '''Return a dict of servers keyed by server id'''
res = {}
if active_only:
search_res = SERVERS_COLLECTION.find({'is_active': True})
else:
search_res = SERVERS_COLLECTION.find()
for server in search_res:
res[str(server['_id'])] = server
return res
def postprocess_entry(servers, entry, is_orthodox):
'''Postprocessing given entry, making usable names and formatted values'''
return {
'name': entry['n'],
'size': sizeof_fmt(entry['s']),
'paths': get_paths(servers, entry['p'], is_orthodox),
'is_file': entry['f'],
}
def get_entry_type_query(entry_type):
'''Return query for given entry type'''
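    # (illustrative note, added; not in the original source) Depending on how
    # the entry type is configured in settings.ENTRY_TYPES, this returns e.g.
    #   {'f': True}                          for a "files only" type, or
    #   {'e': {'$in': ['.avi', '.mkv']}}     for an extension-based type
    # (the extensions here are hypothetical), or {} for an unknown type so the
    # search matches everything.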
if entry_type not in settings.ENTRY_TYPES:
return {}
entry_filter = settings.ENTRY_TYPES[entry_type]
if 'is_file' in entry_filter:
return {'f': entry_filter['is_file']}
elif 'extensions' in entry_filter:
return {'e': {'$in': entry_filter['extensions']}}
return {}
def mainpage(request):
'''Main page. Interface for performing search queries and viewing results'''
servers = get_servers(active_only=True)
response_dict = {
'servers': servers,
'entry_types': settings.ENTRY_TYPES,
}
if request.method == 'GET' and 'q' in request.GET and request.GET['q']:
if 'windows' in request.META['HTTP_USER_AGENT'].lower():
is_orthodox = False
else:
is_orthodox = True
search_string = request.GET['q']
server_request = request.GET.get('server', '')
entry_type = request.GET.get('entry_type', '')
response_dict.update({
'search_string': search_string,
'server_request': server_request,
'entry_type': entry_type,
})
start_time = datetime.now()
# Basic search through name, type and server
search_dict = {'w': {'$all': split_words(search_string)}}
if server_request:
search_dict['p.' + server_request] = {'$exists': True}
if entry_type:
search_dict.update(get_entry_type_query(entry_type))
result = MAIN_COLLECTION.find(search_dict)
result = result.limit(settings.RESULT_NUM)
# Choosing how to sort
sort = {
'name': None,
'size': None,
'change': None,
}
if 'sort_name' in request.GET:
val = int(request.GET['sort_name'])
result = result.sort('n', val)
sort['name'] = val
elif 'sort_size' in request.GET:
val = int(request.GET['sort_size'])
result = result.sort('s', val)
sort['size'] = val
elif 'sort_change' in request.GET:
val = int(request.GET['sort_change'])
result = result.sort('c', val)
sort['change'] = val
else:
val = 1
result = result.sort('n', val)
sort['name'] = val
response_dict['sort'] = sort
# Setting output limit
#result = result.limit(RESULT_NUM)
# Looking into performance help if debugging
#if settings.DEBUG: response_dict['performance'] = result.explain()
response_dict['search_result'] = [
postprocess_entry(servers, entry, is_orthodox)
for entry in result
]
response_dict['search_time'] = datetime.now() - start_time
return render_to_response('main.html', response_dict)
def servers_page(request):
'''Page with list of scanned servers'''
active_only = request.method == 'GET' and 'active' in request.GET
servers = get_servers(active_only)
return render_to_response('servers.html', {'servers': servers})
|
yl3dy/amber
|
amber/views.py
|
Python
|
bsd-3-clause
| 4,799
|
[
"Amber"
] |
24ea947653a9092ac1bdd05377886e86c46412821642e7b19e6820946402a773
|
class mutation_stability(object):
'''
    Check mutations and format them so that they are compatible with the FoldX structures 1HA0 and 2YP7
'''
def __init__(self, mut, structure):
self.mut = mut # list of mutations
self.mut_set = set(mut) # set of mutations
self.mut_chain_info_set = set()
self.structure = structure # either 1HA0 or 2YP7
if self.structure not in ["1HA0", "2YP7"]:
raise ValueError("This program only works for pdb structures 1HA0 or 2YP7")
def __str__(self):
return ", ".join(self.mut_chain_info_set)
def site_range_valid(self, mutation):
'''
        protein structures (1HA0, 2YP7) are missing certain amino acid sites; this method checks that the mutation site lies within the structure
:param mutation: mutation in standard format
:return: true if site is in structure, false if site range is not in structure
'''
lowerRange = 9
upperRange = 502
missing_lower = 328
missing_upper = 333
site = int(mutation[1:len(mutation) - 1])
if missing_lower <= site <= missing_upper: # in missing middle section
return False
elif lowerRange <= site <= upperRange: # in range of protein structure besides middle section
return True
else:
return False
def include_chain_info(self, mutation):
'''
includes chain information for each mutation passed to function. HA is a trimer so need to specify chain for
foldx
:param mutation: mutation in standard format
'''
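        # Example (illustrative, not in the original source): for structure
        # 1HA0 the mutation "G158E" expands to "GA158E", "GM158E" and "GY158E",
        # one entry per chain of the HA trimer.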
set_with_chain_mutations = set()
if self.structure == "1HA0":
chains = ["A", "M", "Y"]
elif self.structure == "2YP7":
chains = ["A", "P", "E"]
site = mutation[1:len(mutation) - 1]
aa1 = mutation[0]
aa2 = mutation[len(mutation)-1]
for chain in chains:
self.mut_chain_info_set.add(aa1+chain+site+aa2)
def check_valid_mutation(self):
'''
checks each mutation in mut_set that it is a valid mutation for the structures 1HA0, 2YP7. Calls
include_chain_info, which adds each mutation with chain info to self.mut_chain_info_set.
'''
for mutation in self.mut_set:
site_valid = self.site_range_valid(mutation)
if site_valid:
self.include_chain_info(mutation)
def get_formatted_mutations(self):
self.check_valid_mutation()
return ';'.join(self.mut_chain_info_set)
|
blab/stability
|
augur/mutator/mutate_everything_2YP7/foldx_essentials/mutation_stability.py
|
Python
|
agpl-3.0
| 2,540
|
[
"FoldX"
] |
fbde5863cc982e3ce0a0f61795cee1971c9d4abbaefcf4942ca858b9e72399e7
|
# This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2009 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" Module with the always_comb function. """
import sys
import inspect
from types import FunctionType
import re
import ast
from myhdl import AlwaysCombError
from myhdl._Signal import _Signal, _isListOfSigs
from myhdl._util import _isGenFunc, _dedent
from myhdl._cell_deref import _cell_deref
from myhdl._Waiter import _Waiter, _SignalWaiter, _SignalTupleWaiter
from myhdl._instance import _Instantiator
class _error:
pass
_error.ArgType = "always_comb argument should be a classic function"
_error.NrOfArgs = "always_comb argument should be a function without arguments"
_error.Scope = "always_comb argument should be a local function"
_error.SignalAsInout = "signal (%s) used as inout in always_comb function argument"
_error.EmbeddedFunction = "embedded functions in always_comb function argument not supported"
_error.EmptySensitivityList = "sensitivity list is empty"
def always_comb(func):
if not isinstance( func, FunctionType):
raise AlwaysCombError(_error.ArgType)
if _isGenFunc(func):
raise AlwaysCombError(_error.ArgType)
if func.func_code.co_argcount > 0:
raise AlwaysCombError(_error.NrOfArgs)
varnames = func.func_code.co_varnames
symdict = {}
for n, v in func.func_globals.items():
if n not in varnames:
symdict[n] = v
# handle free variables
if func.func_code.co_freevars:
for n, c in zip(func.func_code.co_freevars, func.func_closure):
try:
obj = _cell_deref(c)
symdict[n] = obj
except NameError:
raise NameError(n)
c = _AlwaysComb(func, symdict)
return c
INPUT, OUTPUT, INOUT = range(3)
class _SigNameVisitor(ast.NodeVisitor):
def __init__(self, symdict):
self.inputs = set()
self.outputs = set()
self.toplevel = 1
self.symdict = symdict
self.context = INPUT
def visit_Module(self, node):
inputs = self.inputs
outputs = self.outputs
for n in node.body:
self.visit(n)
for n in inputs:
if n in outputs:
raise AlwaysCombError(_error.SignalAsInout % n)
def visit_FunctionDef(self, node):
if self.toplevel:
self.toplevel = 0 # skip embedded functions
for n in node.body:
self.visit(n)
else:
raise AlwaysCombError(_error.EmbeddedFunction)
def visit_If(self, node):
if not node.orelse:
if isinstance(node.test, ast.Name) and \
node.test.id == '__debug__':
return # skip
self.generic_visit(node)
def visit_Name(self, node):
id = node.id
if id not in self.symdict:
return
s = self.symdict[id]
if isinstance(s, _Signal) or _isListOfSigs(s):
if self.context == INPUT:
self.inputs.add(id)
elif self.context == OUTPUT:
self.outputs.add(id)
elif self.context == INOUT:
raise AlwaysCombError(_error.SignalAsInout % id)
else:
raise AssertionError("bug in always_comb")
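    # (descriptive note, added for clarity; not in the original source)
    # visit_Assign below visits assignment targets in OUTPUT context and the
    # right-hand side in INPUT context, so signals the function reads end up in
    # `inputs` (and hence in the sensitivity list built by _AlwaysComb),
    # signals it writes end up in `outputs`, and a signal appearing in both
    # triggers the SignalAsInout error raised in visit_Module.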
def visit_Assign(self, node):
self.context = OUTPUT
for n in node.targets:
self.visit(n)
self.context = INPUT
self.visit(node.value)
def visit_Attribute(self, node):
self.visit(node.value)
def visit_Subscript(self, node, access=INPUT):
self.visit(node.value)
self.context = INPUT
self.visit(node.slice)
def visit_AugAssign(self, node, access=INPUT):
self.context = INOUT
self.visit(node.target)
self.context = INPUT
self.visit(node.value)
def visit_ClassDef(self, node):
pass # skip
def visit_Exec(self, node):
pass # skip
def visit_Print(self, node):
pass # skip
class _AlwaysComb(_Instantiator):
# def __init__(self, func, symdict):
# self.func = func
# self.symdict = symdict
# s = inspect.getsource(func)
# # remove decorators
# s = re.sub(r"@.*", "", s)
# s = s.lstrip()
# tree = compiler.parse(s)
# v = _SigNameVisitor(symdict)
# compiler.walk(tree, v)
# self.inputs = v.inputs
# self.outputs = v.outputs
# senslist = []
# for n in self.inputs:
# s = self.symdict[n]
# if isinstance(s, Signal):
# senslist.append(s)
# else: # list of sigs
# senslist.extend(s)
# self.senslist = tuple(senslist)
# self.gen = self.genfunc()
# if len(self.senslist) == 0:
# raise AlwaysCombError(_error.EmptySensitivityList)
# if len(self.senslist) == 1:
# W = _SignalWaiter
# else:
# W = _SignalTupleWaiter
# self.waiter = W(self.gen)
def __init__(self, func, symdict):
self.func = func
self.symdict = symdict
s = inspect.getsource(func)
s = _dedent(s)
tree = ast.parse(s)
# print ast.dump(tree)
v = _SigNameVisitor(symdict)
v.visit(tree)
self.inputs = v.inputs
self.outputs = v.outputs
senslist = []
for n in self.inputs:
s = self.symdict[n]
if isinstance(s, _Signal):
senslist.append(s)
else: # list of sigs
senslist.extend(s)
self.senslist = tuple(senslist)
self.gen = self.genfunc()
if len(self.senslist) == 0:
raise AlwaysCombError(_error.EmptySensitivityList)
if len(self.senslist) == 1:
W = _SignalWaiter
else:
W = _SignalTupleWaiter
self.waiter = W(self.gen)
def genfunc(self):
senslist = self.senslist
if len(senslist) == 1:
senslist = senslist[0]
func = self.func
while 1:
func()
yield senslist
|
cordoval/myhdl-python
|
myhdl/_always_comb.py
|
Python
|
lgpl-2.1
| 6,955
|
[
"VisIt"
] |
83c585e3dbab48d0b160125b294dc3ebfa755920c328470901d6fcacccac711f
|
# -*- mode: python; -*-
##
## sashimi_plot
##
## Utility for visualizing RNA-Seq densities along gene models and
## for plotting MISO output
##
import os
import sys
import glob
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning) ### hides import warnings
import matplotlib
# Use PDF backend
try: matplotlib.use("pdf")
except Exception: pass
from scipy import *
from numpy import *
import pysam
import shelve
import misopy
import misopy.gff_utils as gff_utils
import misopy.pe_utils as pe_utils
from misopy.parse_csv import csv2dictlist_raw
from misopy.samples_utils import load_samples
from misopy.sashimi_plot.Sashimi import Sashimi
from misopy.sashimi_plot.plot_utils.samples_plotter import SamplesPlotter
from misopy.sashimi_plot.plot_utils.plotting import *
from misopy.sashimi_plot.plot_utils.plot_gene import plot_density_from_file
import matplotlib.pyplot as plt
from matplotlib import rc
def plot_bf_dist(bf_filename, settings_filename, output_dir,
max_bf=1e12):
"""
Plot a Bayes factor distribution from a .miso_bf file.
"""
if not bf_filename.endswith(".miso_bf"):
print "WARNING: %s does not end in .miso_bf, are you sure it is the " \
"output of a MISO samples comparison?" %(bf_filename)
# Load BF data
data, h = csv2dictlist_raw(bf_filename)
plot_name = os.path.basename(bf_filename)
sashimi_obj = Sashimi(plot_name, output_dir,
settings_filename=settings_filename)
settings = sashimi_obj.settings
# Setup the figure
sashimi_obj.setup_figure()
# Matrix of bayes factors and delta psi pairs
bfs_and_deltas = []
for event in data:
bf = event['bayes_factor']
delta_psi = event['diff']
if type(bf) == str and "," in bf:
print "WARNING: %s is a multi-isoform event, skipping..." \
%(event)
continue
else:
# Impose upper limit on Bayes factor
            bf = min(max_bf, float(bf))
delta_psi = float(delta_psi)
bfs_and_deltas.append([bf, delta_psi])
bfs_and_deltas = array(bfs_and_deltas)
num_events = len(bfs_and_deltas)
print "Loaded %d event comparisons." %(num_events)
output_filename = sashimi_obj.output_filename
print "Plotting Bayes factors distribution"
print " - Output filename: %s" %(output_filename)
bf_thresholds = settings["bf_thresholds"]
bar_color = settings["bar_color"]
min_bf_thresh = min(bf_thresholds)
num_events_used = sum(bfs_and_deltas[:, 0] >= min_bf_thresh)
for thresh in bf_thresholds:
if type(thresh) != int:
print "Error: BF thresholds must be integers."
#sys.exit(1)
print "Using BF thresholds: "
print bf_thresholds
print "Using bar color: %s" %(bar_color)
plot_cumulative_bars(bfs_and_deltas[:, 0],
bf_thresholds,
bar_color=bar_color,
logged=True)
plt.xticks(bf_thresholds)
c = 1
plt.xlim([bf_thresholds[0] - c, bf_thresholds[-1] + c])
plt.title("Bayes factor distributions\n(using %d/%d events)" \
%(num_events_used, num_events))
plt.xlabel("Bayes factor thresh.")
plt.ylabel("No. events")
sashimi_obj.save_plot()
def plot_event(event_name, pickle_dir, settings_filename,
output_dir,
no_posteriors=False,
plot_title=None,
plot_label=None):
"""
Visualize read densities across the exons and junctions
of a given MISO alternative RNA processing event.
Also plots MISO estimates and Psi values.
"""
if not os.path.isfile(settings_filename):
print "Error: settings filename %s not found." %(settings_filename)
#sys.exit(1)
if not os.path.isdir(pickle_dir):
print "Error: event pickle directory %s not found." %(pickle_dir)
#sys.exit(1)
# Retrieve the full pickle filename
genes_filename = os.path.join(pickle_dir,
"genes_to_filenames.shelve")
# Check that file basename exists
if len(glob.glob("%s*" %(genes_filename))) == 0:
raise Exception, "Cannot find file %s. Are you sure the events " \
"were indexed with the latest version of index_gff.py?" \
%(genes_filename)
event_to_filenames = shelve.open(genes_filename)
if event_name not in event_to_filenames:
raise Exception, "Event %s not found in pickled directory %s. " \
"Are you sure this is the right directory for the event?" \
%(event_name, pickle_dir)
pickle_filename = event_to_filenames[event_name]
if pickle_dir not in pickle_filename:
import string
pickle_filename = string.replace(pickle_filename,'\\','/')
pickle_filename = pickle_dir + string.split(pickle_filename,'sashimi_index')[1]
import string
#pickle_filename = string.replace(pickle_filename,' 1','')
if no_posteriors:
print "Asked to not plot MISO posteriors."
plot_density_from_file(settings_filename, pickle_filename, event_name,
output_dir,
no_posteriors=no_posteriors,
plot_title=plot_title,
plot_label=plot_label)
def plot_insert_len(insert_len_filename,
settings_filename,
output_dir):
"""
Plot insert length distribution.
"""
if not os.path.isfile(settings_filename):
print "Error: settings filename %s not found." %(settings_filename)
#sys.exit(1)
plot_name = os.path.basename(insert_len_filename)
sashimi_obj = Sashimi(plot_name, output_dir,
settings_filename=settings_filename)
settings = sashimi_obj.settings
num_bins = settings["insert_len_bins"]
output_filename = sashimi_obj.output_filename
sashimi_obj.setup_figure()
s = plt.subplot(1, 1, 1)
print "Plotting insert length distribution..."
print " - Distribution file: %s" %(insert_len_filename)
print " - Output plot: %s" %(output_filename)
insert_dist, params = pe_utils.load_insert_len(insert_len_filename)
mean, sdev, dispersion, num_pairs \
= pe_utils.compute_insert_len_stats(insert_dist)
print "min insert: %.1f" %(min(insert_dist))
print "max insert: %.1f" %(max(insert_dist))
plt.title("%s (%d read-pairs)" \
%(plot_name,
num_pairs),
fontsize=10)
plt.hist(insert_dist, bins=num_bins, color='k',
edgecolor="#ffffff", align='mid')
axes_square(s)
ymin, ymax = s.get_ylim()
plt.text(0.05, 0.95, "$\mu$: %.1f\n$\sigma$: %.1f\n$d$: %.1f" \
%(round(mean, 2),
round(sdev, 2),
round(dispersion, 2)),
horizontalalignment='left',
verticalalignment='top',
bbox=dict(edgecolor='k', facecolor="#ffffff",
alpha=0.5),
fontsize=10,
transform=s.transAxes)
plt.xlabel("Insert length (nt)")
plt.ylabel("No. read pairs")
sashimi_obj.save_plot()
def greeting():
print "Sashimi plot: Visualize spliced RNA-Seq reads along gene models. " \
"Part of the MISO (Mixture of Isoforms model) framework."
print "See --help for usage.\n"
print "Manual available at: http://genes.mit.edu/burgelab/miso/docs/sashimi.html\n"
def main():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--plot-insert-len", dest="plot_insert_len", nargs=2, default=None,
help="Plot the insert length distribution from a given insert length (*.insert_len) "
"filename. Second argument is a settings file name.")
parser.add_option("--plot-bf-dist", dest="plot_bf_dist", nargs=2, default=None,
                      help="Plot Bayes factor distribution. Takes the arguments: "
"(1) Bayes factor filename (*.miso_bf) filename, "
"(2) a settings filename.")
parser.add_option("--plot-event", dest="plot_event", nargs=3, default=None,
help="Plot read densities and MISO inferences for a given alternative event. "
"Takes the arguments: (1) event name (i.e. the ID= of the event based on MISO gff3 "
"annotation file, (2) directory where indexed GFF annotation is (output of "
"index_gff.py), (3) path to plotting settings file.")
parser.add_option("--no-posteriors", dest="no_posteriors", default=False, action="store_true",
help="If given this argument, MISO posterior estimates are not plotted.")
parser.add_option("--plot-title", dest="plot_title", default=None, nargs=1,
help="Title of plot: a string that will be displayed at top of plot. Example: " \
"--plot-title \"My favorite gene\".")
parser.add_option("--plot-label", dest="plot_label", default=None, nargs=1,
help="Plot label. If given, plot will be saved in the output directory as " \
"the plot label ending in the relevant extension, e.g. <plot_label>.pdf. " \
"Example: --plot-label my_gene")
parser.add_option("--output-dir", dest="output_dir", nargs=1, default=None,
help="Output directory.")
(options, args) = parser.parse_args()
if options.plot_event is None:
greeting()
#sys.exit(1)
if options.output_dir == None:
print "Error: need --output-dir"
#sys.exit(1)
output_dir = os.path.abspath(os.path.expanduser(options.output_dir))
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
no_posteriors = options.no_posteriors
plot_title = options.plot_title
plot_label = options.plot_label
if options.plot_insert_len != None:
insert_len_filename = os.path.abspath(os.path.expanduser(options.plot_insert_len[0]))
settings_filename = os.path.abspath(os.path.expanduser(options.plot_insert_len[1]))
plot_insert_len(insert_len_filename, settings_filename, output_dir)
if options.plot_bf_dist != None:
bf_filename = os.path.abspath(os.path.expanduser(options.plot_bf_dist[0]))
settings_filename = os.path.abspath(os.path.expanduser(options.plot_bf_dist[1]))
plot_bf_dist(bf_filename, settings_filename, output_dir)
if options.plot_event != None:
event_name = options.plot_event[0]
pickle_dir = os.path.abspath(os.path.expanduser(options.plot_event[1]))
settings_filename = os.path.abspath(os.path.expanduser(options.plot_event[2]))
plot_event(event_name, pickle_dir, settings_filename, output_dir,
no_posteriors=no_posteriors,
plot_title=plot_title,
plot_label=plot_label)
if __name__ == '__main__':
main()
|
wuxue/altanalyze
|
misopy/sashimi_plot/sashimi_plot.py
|
Python
|
apache-2.0
| 11,215
|
[
"pysam"
] |
5fe36acc350788cf14484b9167a7fb074e6c9d1fd84fadb7079f872b0aa8bf5e
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2017 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import os
import subprocess
from distutils.spawn import find_executable
import mdtraj as md
import numpy as np
import pytest
import scipy.sparse
from mdtraj.testing import eq
DSSP_MSG = "This test requires mkdssp to be installed, from http://swift.cmbi.ru.nl/gv/dssp/"
needs_dssp = pytest.mark.skipif(not find_executable('mkdssp'), reason=DSSP_MSG)
def test_hbonds(get_fn):
t = md.load(get_fn('2EQQ.pdb'))
ours = md.geometry.hbond.kabsch_sander(t)
@needs_dssp
def test_hbonds_against_dssp(get_fn, tmpdir):
t = md.load(get_fn('2EQQ.pdb'))[0]
pdb = os.path.join(tmpdir, 'f.pdb')
dssp = os.path.join(tmpdir, 'f.pdb.dssp')
t.save(pdb)
cmd = ['mkdssp', '-i', pdb, '-o', dssp]
subprocess.check_output(' '.join(cmd), shell=True)
energy = scipy.sparse.lil_matrix((t.n_residues, t.n_residues))
# read the dssp N-H-->O column from the output file
with open(dssp) as f:
# skip the lines until the description of each residue's hbonds
while not f.readline().startswith(' # RESIDUE AA STRUCTURE'):
continue
for i, line in enumerate(f):
line = line.rstrip()
offset0, e0 = map(float, line[39:50].split(','))
offset1, e1 = map(float, line[61:72].split(','))
if e0 <= -0.5:
energy[int(i + offset0), i] = e0
if e1 <= -0.5:
energy[int(i + offset1), i] = e1
dssp = energy.todense()
ours = md.geometry.hbond.kabsch_sander(t)[0].todense()
# There is tricky issues with the rounding right at the -0.5 cutoff,
# so lets just check for equality with DSSP at -0.6 or less
eq((dssp < -0.6), (ours < -0.6))
eq(dssp[dssp < -0.6], ours[ours < -0.6], decimal=1)
def test_baker_hubbard_0(get_fn):
t = md.load(get_fn('2EQQ.pdb'))
# print('to view the hbonds defined in 2EQQ by baker_hubbard()')
# print('put these commands into pymol on top of the pdb:\n')
# for e in md.geometry.hbond.baker_hubbard(t):
# print('distance RANK %d, RANK %d' % (e[1], e[2]))
# these are the results produced by the algorithm on this protein as
# of 11/26/13. This unit test basically just ensures that the method
    # runs and produces the same results it did then. It's no guarantee that
# these are the "TRUE" hydrogen bonds in this system.
ref = np.array([[0, 10, 8], [0, 11, 7], [69, 73, 54], [76, 82, 65],
[119, 131, 89], [140, 148, 265], [166, 177, 122],
[181, 188, 231], [209, 217, 215], [221, 225, 184],
[228, 239, 186], [235, 247, 216], [262, 271, 143],
[298, 305, 115], [186, 191, 215], [413, 419, 392]])
eq(ref, md.geometry.hbond.baker_hubbard(t))
def test_baker_hubbard_1(get_fn):
# no hydrogens in this file -> no hydrogen bonds
t = md.load(get_fn('1bpi.pdb'))
eq(np.zeros((0, 3), dtype=int), md.baker_hubbard(t))
def test_baker_hubbard_2(get_fn):
t = md.load(get_fn('1vii_sustiva_water.pdb'))
triplets = md.baker_hubbard(t)
N = 1000
rows = triplets[:, 0] * N * N + triplets[:, 1] * N + triplets[:, 2]
# ensure that there aren't any repeat rows
eq(len(np.unique(rows)), len(rows))
def test_wernet_nilsson_0(get_fn):
# no hydrogens in this file -> no hydrogen bonds
t0 = md.load(get_fn('1bpi.pdb'))
assert len(md.wernet_nilsson(t0)) == len(t0)
eq(np.zeros((0, 3), dtype=int), md.wernet_nilsson(t0)[0])
def test_wernet_nilsson_1(get_fn):
    # one of these files has PBCs and the other doesn't
for fn in ['2EQQ.pdb', '4ZUO.pdb']:
t = md.load(get_fn(fn))
result = md.wernet_nilsson(t)
assert len(result) == len(t)
assert isinstance(result, list)
assert all(isinstance(elem, np.ndarray) for elem in result)
assert all(elem.shape[1] == 3 for elem in result)
for frame, hbonds in enumerate(result):
for d_i, h_i, a_i in hbonds:
assert t.topology.atom(d_i).element.symbol in ['O', 'N']
assert t.topology.atom(h_i).element.symbol == 'H'
assert t.topology.atom(a_i).element.symbol in ['O', 'N']
# assert that the donor-acceptor distance is less than 0.5 nm, just
# to make sure the criterion is giving back totally implausible stuff
if len(hbonds) > 0:
assert np.all(md.compute_distances(t[frame], hbonds[:, [0, 2]]) < 0.5)
|
leeping/mdtraj
|
tests/test_hbonds.py
|
Python
|
lgpl-2.1
| 5,463
|
[
"MDTraj",
"PyMOL"
] |
acb832184755f24ae520ea8b38afd81fc4c3671948b4189de2d92467c55ec69b
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-classes-have-attributes
"""Keras layers that implement explicit (approximate) kernel feature maps."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util.tf_export import keras_export
_SUPPORTED_RBF_KERNEL_TYPES = ['gaussian', 'laplacian']
@keras_export('keras.layers.experimental.RandomFourierFeatures')
class RandomFourierFeatures(base_layer.Layer):
r"""Layer that projects its inputs into a random feature space.
This layer implements a mapping from input space to a space with `output_dim`
dimensions, which approximates shift-invariant kernels. A kernel function
`K(x, y)` is shift-invariant if `K(x, y) == k(x - y)` for some function `k`.
Many popular Radial Basis Functions (RBF), including Gaussian and
Laplacian kernels, are shift-invariant.
The implementation of this layer is based on the following paper:
["Random Features for Large-Scale Kernel Machines"](
https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf)
by Ali Rahimi and Ben Recht.
The distribution from which the parameters of the random features map (layer)
are sampled determines which shift-invariant kernel the layer approximates
(see paper for more details). You can use the distribution of your
choice. The layer supports out-of-the-box
  approximations of the following two RBF kernels:
- Gaussian: `K(x, y) == exp(- square(x - y) / (2 * square(scale)))`
  - Laplacian: `K(x, y) == exp(-abs(x - y) / scale)`
**Note:** Unlike what is described in the paper and unlike what is used in
the Scikit-Learn implementation, the output of this layer does not apply
the `sqrt(2 / D)` normalization factor.
**Usage:** Typically, this layer is used to "kernelize" linear models by
applying a non-linear transformation (this layer) to the input features and
then training a linear model on top of the transformed features. Depending on
the loss function of the linear model, the composition of this layer and the
  linear model results in models that are equivalent (up to approximation) to
kernel SVMs (for hinge loss), kernel logistic regression (for logistic loss),
kernel linear regression (for squared loss), etc.
Examples:
A kernel multinomial logistic regression model with Gaussian kernel for MNIST:
```python
model = keras.Sequential([
keras.Input(shape=(784,)),
RandomFourierFeatures(
output_dim=4096,
scale=10.,
kernel_initializer='gaussian'),
layers.Dense(units=10, activation='softmax'),
])
model.compile(
optimizer='adam',
loss='categorical_crossentropy',
metrics=['categorical_accuracy']
)
```
A quasi-SVM classifier for MNIST:
```python
model = keras.Sequential([
keras.Input(shape=(784,)),
RandomFourierFeatures(
output_dim=4096,
scale=10.,
kernel_initializer='gaussian'),
layers.Dense(units=10),
])
model.compile(
optimizer='adam',
loss='hinge',
metrics=['categorical_accuracy']
)
```
To use another kernel, just replace the layer creation line with:
```python
random_features_layer = RandomFourierFeatures(
output_dim=500,
kernel_initializer=<my_initializer>,
scale=...,
...)
```
Args:
output_dim: Positive integer, the dimension of the layer's output, i.e., the
number of random features used to approximate the kernel.
kernel_initializer: Determines the distribution of the parameters of the
random features map (and therefore the kernel approximated by the layer).
It can be either a string identifier or a Keras `Initializer` instance.
Currently only 'gaussian' and 'laplacian' are supported string
identifiers (case insensitive). Note that the kernel matrix is not
trainable.
scale: For Gaussian and Laplacian kernels, this corresponds to a scaling
factor of the corresponding kernel approximated by the layer (see concrete
definitions above). When provided, it should be a positive float. If None,
a default value is used: if the kernel initializer is set to "gaussian",
`scale` defaults to `sqrt(input_dim / 2)`, otherwise, it defaults to 1.0.
Both the approximation error of the kernel and the classification quality
are sensitive to this parameter. If `trainable` is set to `True`, this
parameter is learned end-to-end during training and the provided value
serves as the initial value.
**Note:** When features from this layer are fed to a linear model,
by making `scale` trainable, the resulting optimization problem is
no longer convex (even if the loss function used by the linear model
is convex).
trainable: Whether the scaling parameter of the layer should be trainable.
Defaults to `False`.
name: String, name to use for this layer.
"""
def __init__(self,
output_dim,
kernel_initializer='gaussian',
scale=None,
trainable=False,
name=None,
**kwargs):
if output_dim <= 0:
raise ValueError(
'`output_dim` should be a positive integer. Given: {}.'.format(
output_dim))
if isinstance(kernel_initializer, six.string_types):
if kernel_initializer.lower() not in _SUPPORTED_RBF_KERNEL_TYPES:
raise ValueError(
'Unsupported kernel type: \'{}\'. Supported kernel types: {}.'
.format(kernel_initializer, _SUPPORTED_RBF_KERNEL_TYPES))
if scale is not None and scale <= 0.0:
raise ValueError('When provided, `scale` should be a positive float. '
'Given: {}.'.format(scale))
super(RandomFourierFeatures, self).__init__(
trainable=trainable, name=name, **kwargs)
self.output_dim = output_dim
self.kernel_initializer = kernel_initializer
self.scale = scale
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
# TODO(sibyl-vie3Poto): Allow higher dimension inputs. Currently the input is expected
# to have shape [batch_size, dimension].
if input_shape.rank != 2:
raise ValueError(
'The rank of the input tensor should be 2. Got {} instead.'.format(
input_shape.ndims))
if input_shape.dims[1].value is None:
raise ValueError(
'The last dimension of the inputs to `RandomFourierFeatures` '
'should be defined. Found `None`.')
self.input_spec = input_spec.InputSpec(
ndim=2, axes={1: input_shape.dims[1].value})
input_dim = input_shape.dims[1].value
kernel_initializer = _get_random_features_initializer(
self.kernel_initializer, shape=(input_dim, self.output_dim))
self.unscaled_kernel = self.add_weight(
name='unscaled_kernel',
shape=(input_dim, self.output_dim),
dtype=dtypes.float32,
initializer=kernel_initializer,
trainable=False)
self.bias = self.add_weight(
name='bias',
shape=(self.output_dim,),
dtype=dtypes.float32,
initializer=init_ops.random_uniform_initializer(
minval=0.0, maxval=2 * np.pi, dtype=dtypes.float32),
trainable=False)
if self.scale is None:
self.scale = _get_default_scale(self.kernel_initializer, input_dim)
self.kernel_scale = self.add_weight(
name='kernel_scale',
shape=(1,),
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(self.scale),
trainable=True,
constraint='NonNeg')
super(RandomFourierFeatures, self).build(input_shape)
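  # (descriptive note, added for clarity; not in the original source) The
  # `call` method below computes cos(inputs @ (unscaled_kernel / kernel_scale)
  # + bias), i.e. the random Fourier feature map of Rahimi & Recht with the
  # kernel bandwidth folded into `kernel_scale`; as the class docstring notes,
  # the sqrt(2 / D) normalization factor is intentionally omitted.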
def call(self, inputs):
inputs = ops.convert_to_tensor_v2_with_dispatch(inputs, dtype=self.dtype)
inputs = math_ops.cast(inputs, dtypes.float32)
kernel = (1.0 / self.kernel_scale) * self.unscaled_kernel
outputs = gen_math_ops.MatMul(a=inputs, b=kernel)
outputs = nn.bias_add(outputs, self.bias)
return gen_math_ops.cos(outputs)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_shape = input_shape.with_rank(2)
if input_shape.dims[-1].value is None:
raise ValueError(
'The innermost dimension of input shape must be defined. Given: %s' %
input_shape)
return input_shape[:-1].concatenate(self.output_dim)
def get_config(self):
kernel_initializer = self.kernel_initializer
if not isinstance(kernel_initializer, six.string_types):
kernel_initializer = initializers.serialize(kernel_initializer)
config = {
'output_dim': self.output_dim,
'kernel_initializer': kernel_initializer,
'scale': self.scale,
}
base_config = super(RandomFourierFeatures, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _get_random_features_initializer(initializer, shape):
"""Returns Initializer object for random features."""
def _get_cauchy_samples(loc, scale, shape):
probs = np.random.uniform(low=0., high=1., size=shape)
return loc + scale * np.tan(np.pi * (probs - 0.5))
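  # (descriptive note, added for clarity; not in the original source) The
  # helper above draws Cauchy(loc, scale) samples via inverse-CDF sampling:
  # for u ~ Uniform(0, 1), loc + scale * tan(pi * (u - 0.5)) follows the
  # Cauchy distribution. Cauchy-distributed projection directions give random
  # features that approximate the Laplacian kernel.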
random_features_initializer = initializer
if isinstance(initializer, six.string_types):
if initializer.lower() == 'gaussian':
random_features_initializer = init_ops.random_normal_initializer(
stddev=1.0)
elif initializer.lower() == 'laplacian':
random_features_initializer = init_ops.constant_initializer(
_get_cauchy_samples(loc=0.0, scale=1.0, shape=shape))
else:
raise ValueError(
'Unsupported kernel type: \'{}\'. Supported kernel types: {}.'.format(
random_features_initializer, _SUPPORTED_RBF_KERNEL_TYPES))
return random_features_initializer
def _get_default_scale(initializer, input_dim):
if (isinstance(initializer, six.string_types) and
initializer.lower() == 'gaussian'):
return np.sqrt(input_dim / 2.0)
return 1.0
|
petewarden/tensorflow
|
tensorflow/python/keras/layers/kernelized.py
|
Python
|
apache-2.0
| 11,190
|
[
"Gaussian"
] |
8bd29219d121f90567ad2cbbaed36468b2fa29525cd72f1c174fd7eae1c3340a
|
import copy
import random
import logging
import numpy as np
import scipy.sparse as spp
from itertools import repeat, combinations
from collections import defaultdict
from indigo.operators import (
CompositeOperator, Product,
Eye, BlockDiag, Kron,
VStack, SpMatrix,
Adjoint, UnscaledFFT,
)
log = logging.getLogger(__name__)
class Transform(object):
"""
Visitor class for manipulating operator trees.
See Also
--------
`ast.NodeTransformer`
"""
def visit(self, node):
method_name = "visit_%s" % type(node).__name__
visitor_method = getattr(self, method_name, None)
if visitor_method:
node = visitor_method(node)
else:
node = self.generic_visit(node)
return node
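    # (descriptive note, added for clarity; not in the original source)
    # The dispatch above mirrors `ast.NodeTransformer`: visiting, say, a
    # Product node looks for a method named `visit_Product` and falls back to
    # `generic_visit`, which rebuilds a CompositeOperator from its visited
    # children and returns leaf nodes unchanged.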
def generic_visit(self, node):
if isinstance(node, CompositeOperator):
node._adopt( [self.visit(c) for c in node._children] )
return node
class Visitor(object):
"""
Visitor class for traversing operator trees.
See Also
--------
`ast.NodeVisitor`
"""
def visit(self, node):
self.generic_visit(node)
method_name = "visit_%s" % type(node).__name__
visitor_method = getattr(self, method_name, None)
if visitor_method:
visitor_method(node)
def generic_visit(self, node):
if isinstance(node, CompositeOperator):
for child in node._children:
self.visit(child)
class Optimize(Transform):
def __init__(self, recipe):
super(Transform, self).__init__()
self._recipe = recipe or []
def visit(self, node):
for Step in self._recipe:
log.info("running optimization step: %s" % Step.__name__)
node = Step().visit(node)
# reserve scratch space
shape = (node.memusage() // node.dtype.itemsize,)
b = node._backend
b._scratch = b.empty_array(shape, node.dtype)
b._scratch_pos = 0
return node
class RealizeMatrices(Transform):
"""
Converts CompositeOps into SpMatrix ops if all
children of the CompositeOp are SpMatrices.
"""
def visit_Product(self, node):
""" Product( SpMatrices+ ) => SpMatrix """
node = self.generic_visit(node)
left, right = node._children
if isinstance(left, SpMatrix) and isinstance(right, SpMatrix):
name = "{}*{}".format(left._name, right._name)
log.debug('realizing product %s * %s', left._name, right._name)
m = left._matrix @ right._matrix
return SpMatrix( node._backend, m, name=name )
else:
return node
def visit_VStack(self, node):
""" VStack( SpMatrices ) => SpMatrix """
node = self.generic_visit(node)
if all(isinstance(c, SpMatrix) for c in node._children):
name = "{}+".format(node._children[0]._name)
dtype = node._children[0].dtype
log.debug('realizing vstack %s', ', '.join(c._name for c in node._children))
m = spp.vstack( [c._matrix for c in node._children], dtype=dtype )
return SpMatrix( node._backend, m, name=name )
else:
return node
def visit_HStack(self, node):
""" HStack( SpMatrices ) => SpMatrix """
node = self.generic_visit(node)
if all(isinstance(c, SpMatrix) for c in node._children):
name = "{}+".format(node._children[0]._name)
dtype = node._children[0].dtype
log.debug('realizing hstack %s', ', '.join(c._name for c in node._children))
m = spp.hstack( [c._matrix for c in node._children], dtype=dtype )
return SpMatrix( node._backend, m, name=name )
else:
return node
def visit_BlockDiag(self, node):
""" BlockDiag( SpMatrices ) => SpMatrix """
node = self.generic_visit(node)
if all(isinstance(c, SpMatrix) for c in node.children):
name = "{}+".format(node._children[0]._name)
dtype = node._children[0].dtype
log.debug('realizing block_diag %s', ', '.join(c._name for c in node._children))
m = spp.block_diag( [c._matrix for c in node._children], dtype=dtype )
return SpMatrix( node._backend, m, name=name )
else:
return node
def visit_Kron(self, node):
""" Kron(I, SpMatrix) => SpMatrix """
node = self.generic_visit(node)
L, R = node.children
if isinstance(L, Eye):
L = L.realize()
if isinstance(L, SpMatrix) and isinstance(R, SpMatrix):
name = "({}(x){})".format(L._name, R._name)
log.debug('realizing kron %s x %s', L._name, R._name)
K = spp.kron(L._matrix, R._matrix)
return SpMatrix( node._backend, K, name=name )
else:
return node
def visit_Adjoint(self, node):
""" Adjoint(M) ==> M.H """
node = self.generic_visit(node)
child = node.child
if isinstance(child, SpMatrix):
log.debug('realizing adjoint %s', child._name)
m = child._matrix.getH()
name = "{}.H".format(child._name)
return SpMatrix( node._backend, m, name=name )
else:
return node
def visit_Eye(self, node):
node = self.generic_visit(node)
eye = spp.eye(node.shape[0], dtype=node.dtype)
return SpMatrix( node._backend, eye, name=node._name )
def visit_Scale(self, node):
node = self.generic_visit(node)
if isinstance(node.child, SpMatrix):
mat = node.child._matrix * node._val
return SpMatrix( node._backend, mat, name=node._name )
else:
return node
def visit_One(self, node):
one = spp.csr_matrix( np.ones(node.shape, dtype=node.dtype) )
return SpMatrix( node._backend, one, name=node._name)
class DistributeKroniOverProd(Transform):
""" Kron(I, A*B) ==> Kron(I, A) * Kron(I, B) """
def visit_Kron(self, node):
node = self.generic_visit(node)
L, R = node.children
if isinstance(L, Eye) and isinstance(R, Product):
kl = node._backend.Kron( L, R.left )
kr = node._backend.Kron( L, R.right )
return self.visit(kl * kr)
else:
return node
class DistributeAdjointOverProd(Transform):
""" Adjoint(A*B) ==> Adjoint(B) * Adjoint(A) """
def visit_Adjoint(self, node):
node = self.generic_visit(node)
if isinstance(node.child, Product):
l, r = node.child.children
return r.H * l.H
else:
return node
class LiftUnscaledFFTs(Transform):
def visit_Product(self, node):
node = self.generic_visit(node)
if isinstance(node, Product):
l, r = node.children
if isinstance(l, Product) and l.left.has(UnscaledFFT):
ll, lr = l.children
node = ll * self.visit(lr*r)
elif isinstance(r, Product) and r.right.has(UnscaledFFT):
rl, rr = r.children
node = self.visit(l*rl) * rr
return self.generic_visit(node)
def visit_Adjoint(self, node):
node = self.generic_visit(node)
if isinstance(node.child, Product):
node = node.child.right.H * node.child.left.H
return self.generic_visit(node)
def visit_Kron(self, node):
node = self.generic_visit(node)
L, R = node.children
if isinstance(L, Eye) and isinstance(R, Product):
node = node._backend.Kron(L, node.child.left) * \
node._backend.Kron(L, node.child.right)
return self.generic_visit(node)
class MakeRightLeaning(Transform):
def visit_Product(self, node):
node = self.generic_visit(node)
if isinstance(node, Product) and isinstance(node.left, Product):
l, r = node.children
ll, lr = l.children
return ll * self.visit(lr*r)
else:
return node
class GroupRightLeaningProducts(Transform):
def visit_Product(self, node):
node = self.generic_visit(node)
if isinstance(node, Product):
l, r = node.children
if isinstance(r, Product):
rl, rr = r.children
if isinstance(rl, SpMatrix):
node = (l*rl) * rr
return node
class SpyOut(Visitor):
    """ Saves a spy plot (PNG) and a MatrixMarket (.mtx) file for each SpMatrix visited. """
def visit_SpMatrix(self, node):
from matplotlib import pyplot as plt
m = node._matrix
fig, ax = plt.subplots(1, figsize=(16,16))
ax.spy(m, markersize=1)
fig.savefig('mat.%s.png' % node._name)
from scipy.io import mmwrite
mmwrite('mat.%s.mtx' % node._name, m)
| mbdriscoll/indigo | indigo/transforms.py | Python | bsd-3-clause | 8,789 | ["VisIt"] | f64909e9dd3c8291983e39f44ff351dc862e6619b6ff1308a1f25c614380a52c |
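A minimal sketch (not part of indigo; it bypasses the backend and operator API entirely and uses scipy.sparse directly, with names local to the example) illustrating the two algebraic identities that DistributeKroniOverProd and DistributeAdjointOverProd rely on:

import numpy as np
import scipy.sparse as spp

# Random real-valued sparse operands for checking the rewrite rules.
A = spp.random(4, 4, density=0.5, format='csr')
B = spp.random(4, 4, density=0.5, format='csr')
I = spp.eye(3, format='csr')

# DistributeKroniOverProd: Kron(I, A*B) == Kron(I, A) * Kron(I, B)
lhs = spp.kron(I, A @ B)
rhs = spp.kron(I, A) @ spp.kron(I, B)
assert np.allclose(lhs.toarray(), rhs.toarray())

# DistributeAdjointOverProd: Adjoint(A*B) == Adjoint(B) * Adjoint(A)
lhs = (A @ B).getH()
rhs = B.getH() @ A.getH()
assert np.allclose(lhs.toarray(), rhs.toarray())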
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Package for analysis of magnetic structures.
"""
from pymatgen.analysis.magnetism.analyzer import * # noqa
| gmatteo/pymatgen | pymatgen/analysis/magnetism/__init__.py | Python | mit | 223 | ["pymatgen"] | 5cc16a0bc4d38ed3a672395fa4b4766ef20479983d72b0621fae223099579f6d |
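Because of the star import above, the analyzer module's public classes become importable from the package itself; a one-line illustration (the analyzer module is not shown here, so this import is stated as an assumption about its contents):

# The star import re-exports pymatgen.analysis.magnetism.analyzer's public names,
# so they can be imported directly from the package.
from pymatgen.analysis.magnetism import CollinearMagneticStructureAnalyzer  # noqa: F401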
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import multiselectfield.db.fields
class Migration(migrations.Migration):
dependencies = [
('branch', '0044_auto_20141204_1126'),
]
operations = [
migrations.AlterField(
model_name='demand',
name='category',
field=multiselectfield.db.fields.MultiSelectField(max_length=21, choices=[('1', 'Visit home'), ('2', 'Companionship'), ('3', 'Transport by car'), ('4', 'Shopping'), ('5', 'House sitting'), ('6', 'Manual jobs'), ('7', 'Gardening'), ('8', 'Pet sitting'), ('9', 'Personal care'), ('a', 'Administrative'), ('b', 'Other')], verbose_name='Type of help'),
preserve_default=True,
),
migrations.AlterField(
model_name='offer',
name='category',
field=multiselectfield.db.fields.MultiSelectField(max_length=21, choices=[('1', 'Visit home'), ('2', 'Companionship'), ('3', 'Transport by car'), ('4', 'Shopping'), ('5', 'House sitting'), ('6', 'Manual jobs'), ('7', 'Gardening'), ('8', 'Pet sitting'), ('9', 'Personal care'), ('a', 'Administrative'), ('b', 'Other')], verbose_name='Type of help'),
preserve_default=True,
),
]
| MaximeBiset/care4care | branch/migrations/0045_auto_20141204_1708.py | Python | agpl-3.0 | 1,281 | ["VisIt"] | 558b45b0af964dbd85e9e2b9d879483a78947bc979c39b9bd213447db383d733 |
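A hedged sketch of the model field this migration alters; the model and choice constant below are assumptions reconstructed from the migration (the real care4care models are not shown), using django-multiselectfield's documented MultiSelectField:

# Hypothetical sketch of the model the migration targets; names are inferred
# from the migration, not copied from the care4care source.
from django.db import models
from multiselectfield import MultiSelectField

CATEGORY_CHOICES = [
    ('1', 'Visit home'), ('2', 'Companionship'), ('3', 'Transport by car'),
    ('4', 'Shopping'), ('5', 'House sitting'), ('6', 'Manual jobs'),
    ('7', 'Gardening'), ('8', 'Pet sitting'), ('9', 'Personal care'),
    ('a', 'Administrative'), ('b', 'Other'),
]

class Demand(models.Model):
    # MultiSelectField stores the selected keys as a comma-separated string;
    # max_length=21 covers all eleven one-character keys plus ten commas.
    category = MultiSelectField(
        choices=CATEGORY_CHOICES, max_length=21, verbose_name='Type of help'
    )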
# -*- coding: utf-8 -*-
'''auto ordering call chain test mixins'''
from inspect import ismodule
from twoq.support import port
class ARandomQMixin(object):
def test_choice(self):
self.assertEqual(len(list(self.qclass(1, 2, 3, 4, 5, 6).choice())), 1)
def test_sample(self):
self.assertEqual(len(self.qclass(1, 2, 3, 4, 5, 6).sample(3).end()), 3)
def test_shuffle(self):
self.assertEqual(
len(self.qclass(1, 2, 3, 4, 5, 6).shuffle()),
len([5, 4, 6, 3, 1, 2]),
)
class ACombineQMixin(object):
# def test_combinations(self):
# foo = self.qclass('ABCD').combinations(2).value(),
# self.assertEqual(
# foo[0],
# [('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'C'), ('B', 'D'),
# ('C', 'D')],
# foo,
# )
#
# def test_permutations(self):
# foo = self.qclass('ABCD').permutations(2).value()
# self.assertEqual(
# foo[0],
# [('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'A'), ('B', 'C'),
# ('B', 'D'), ('C', 'A'), ('C', 'B'), ('C', 'D'), ('D', 'A'),
# ('D', 'B'), ('D', 'C')],
# foo,
# )
def test_product(self):
foo = self.qclass('ABCD', 'xy').product().value()
self.assertEqual(
foo,
[('A', 'x'), ('A', 'y'), ('B', 'x'), ('B', 'y'), ('C', 'x'),
('C', 'y'), ('D', 'x'), ('D', 'y')],
foo,
)
class AOrderQMixin(ARandomQMixin, ACombineQMixin):
'''combination mixin'''
    def test_group(self):
from math import floor
self.assertEqual(
self.qclass(1.3, 2.1, 2.4).tap(lambda x: floor(x)).group().end(),
[[1.0, [1.3]], [2.0, [2.1, 2.4]]]
)
self.assertEqual(
self.qclass(1.3, 2.1, 2.4).group().end(),
[[1.3, [1.3]], [2.1, [2.1]], [2.4, [2.4]]],
)
def test_grouper(self):
self.assertEqual(
self.qclass(
'moe', 'larry', 'curly', 30, 40, 50, True
).grouper(2, 'x').end(),
[('moe', 'larry'), ('curly', 30), (40, 50), (True, 'x')]
)
def test_reversed(self):
self.assertEqual(
self.qclass(5, 4, 3, 2, 1).reverse().end(), [1, 2, 3, 4, 5],
)
def test_sort(self):
from math import sin
self.assertEqual(
self.qclass(1, 2, 3, 4, 5, 6).tap(
lambda x: sin(x)
).sort().end(),
[5, 4, 6, 3, 1, 2],
)
self.assertEqual(
self.qclass(4, 6, 65, 3, 63, 2, 4).sort().end(),
[2, 3, 4, 4, 6, 63, 65],
)
__all__ = sorted(name for name, obj in port.items(locals()) if not any([
name.startswith('_'), ismodule(obj), name in ['ismodule', 'port']
]))
del ismodule
| lcrees/twoq | twoq/tests/auto/ordering.py | Python | bsd-3-clause | 2,838 | ["MOE"] | 2dcf060e7bf5ea9c7477d81d62b1bb133524d969eb923875b50c3b5553014a26 |
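The grouper and product expectations above match the standard itertools recipes; a small plain-Python (3.x) sketch, independent of twoq, reproduces the same values:

# Plain-itertools sketch reproducing the expected results of test_grouper and
# test_product above; it does not use the twoq API.
from itertools import product, zip_longest

def grouper(iterable, n, fillvalue=None):
    # Collect items into fixed-length chunks, padding the last chunk.
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)

items = ['moe', 'larry', 'curly', 30, 40, 50, True]
assert list(grouper(items, 2, 'x')) == [
    ('moe', 'larry'), ('curly', 30), (40, 50), (True, 'x')
]

assert list(product('ABCD', 'xy')) == [
    ('A', 'x'), ('A', 'y'), ('B', 'x'), ('B', 'y'),
    ('C', 'x'), ('C', 'y'), ('D', 'x'), ('D', 'y')
]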