1621320
|
from heapq import heappush, heappop

# Heap sort: push every element onto a min-heap, then pop them back in sorted order.
def heap_sort(array):
    heap = []
    for element in array:
        heappush(heap, element)
    ordered = []
    # pop the smallest remaining element until the heap is empty
    while heap:
        ordered.append(heappop(heap))
    return ordered

# sample input
array = [14, 22, 15, 3, 27, 9, 16, 35, 18, 22, 1]
print('the result of the heap sort:')
print(heap_sort(array))
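
# A minimal alternative sketch (not part of the original snippet): heapq.heapify
# builds the heap in O(n), avoiding n separate pushes.
def heap_sort_via_heapify(array):
    from heapq import heapify
    heap = list(array)  # copy so the input list is left untouched
    heapify(heap)       # O(n) bottom-up heap construction
    return [heappop(heap) for _ in range(len(heap))]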
|
1621336
|
import logging
import sys
import subprocess
import json
import queue
import threading
import shutil
import pyaudio
import snowboydecoder
from speech_recognizer import AudioInputDevice
import avs
from audio_player import AudioDevice
class MplayerAudioDevice(AudioDevice):
def __init__(self, binary_path, options=None):
self._binary_path = binary_path
self._paused = False
self._options = options or []
def check_exists(self):
return shutil.which(self._binary_path)
def play_once(self, file, playlist=False):
try:
return subprocess.Popen([self._binary_path] + self._options + (['-playlist'] if playlist else []) + [file],
stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
except Exception:
logger.exception("Couldn't play audio")
def play_infinite(self, file):
try:
return subprocess.Popen(
[self._binary_path] + self._options + ["-loop", "0", file],
stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
        except Exception:
            logger.exception("Couldn't play audio")
    def stop(self, p):
        p.communicate(input=b'quit 0\n')
    # Popen.communicate() waits for the process to exit, so it is only suitable
    # for 'quit'; pause/resume write the slave-mode command to stdin directly.
    def pause(self, p):
        if not self._paused:
            p.stdin.write(b'pause\n')
            p.stdin.flush()
            self._paused = True
    def resume(self, p):
        if self._paused:
            p.stdin.write(b'pause\n')
            p.stdin.flush()
            self._paused = False
def ended(self, p):
return p.poll() is not None
class PyAudioInputDevice(AudioInputDevice):
def start_recording(self):
audio = pyaudio.PyAudio()
# start Recording
stream = audio.open(format=pyaudio.paInt16, channels=1, rate=16000, input=True, frames_per_buffer=1024)
logger.info("recording...")
self._audio = audio
self._stream = stream
self._stopped = False
self._event = threading.Event()
def read(self, size=-1):
if self._event.is_set():
logger.info("MIC STOP REQUESTED")
self._stopped = True
self._stream.stop_stream()
self._stream.close()
self._audio.terminate()
self._event.clear()
if self._stopped:
logger.warning("READING FROM MIC WHILE CLOSED")
return b''
try:
return self._stream.read(size, exception_on_overflow=False)
        except Exception:
            logger.exception("exception while reading from pyaudio stream")
self._stopped = True
try:
self._stream.stop_stream()
self._stream.close()
self._audio.terminate()
            except Exception:
                pass
return b''
def stop_recording(self):
self._event.set()
def hotword_detect(logger, q, mic_stopped):
interrupted = False
def hotword_detected_callback():
snowboydecoder.play_audio_file()
nonlocal interrupted
interrupted = True
def interrupt_check_callback():
return interrupted
detector = snowboydecoder.HotwordDetector('resources/alexa.umdl', sensitivity=0.5)
logger.info("waiting for hotword...")
detector.start(detected_callback=hotword_detected_callback,
interrupt_check=interrupt_check_callback,
sleep_time=0.03)
detector.terminate()
q.put(('hotword',))
def start_hotword_detection_thread(q):
hdt = threading.Thread(target=hotword_detect, name='Hotword Detection Thread', args=(logger, q, mic_stopped))
    hdt.daemon = False  # Thread.setDaemon() is deprecated in favor of the daemon attribute
hdt.start()
if __name__ == '__main__':
# clear root logger handlers
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
# set new root logger handlers
logging.basicConfig(stream=sys.stdout,
format='[%(asctime)s][%(threadName)s][%(levelname)-5.5s][%(pathname)s:%(lineno)d] %(message)s',
level=logging.WARNING)
# when we log below WARNING, these libraries are a bit too verbose for me
logging.getLogger('hpack').setLevel(logging.WARNING)
logging.getLogger('hyper').setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
logger.info("STARTING ALEXA APP")
tokens = json.load(open('tokens.txt'))
secrets = json.load(open('secrets.txt'))
q = queue.Queue()
audio_devices = [MplayerAudioDevice('mplayer', ["-ao", "alsa", "-really-quiet", "-noconsolecontrols", "-slave"]),
MplayerAudioDevice('/Applications/MPlayer OSX Extended.app/Contents/Resources/Binaries/mpextended.mpBinaries/Contents/MacOS/mplayer', ["-really-quiet", "-noconsolecontrols", "-slave"])]
a = avs.AVS('v20160207',
tokens.get('access_token'),
tokens.get('refresh_token'),
secrets.get('client_id'),
secrets.get('client_secret'),
next(audio_device for audio_device in audio_devices if audio_device.check_exists()),
PyAudioInputDevice(),
'NEAR_FIELD')
mic_stopped = threading.Event()
start_hotword_detection_thread(q)
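    # Main loop: poll the queue without blocking so a.run() keeps servicing the
    # AVS connection; each handled hotword spawns a fresh detection thread.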
while True:
try:
job = q.get(block=False)
if job[0] == 'hotword':
logger.info("STARTING RECOGNIZE SPEECH")
a.recognize_speech()
logger.info("FINISHED RECOGNIZE SPEECH")
start_hotword_detection_thread(q)
else:
logger.error("unknown command: {}".format(job))
except queue.Empty:
pass
a.run()
|
1621381
|
from .kitti import KittiDataset
from .nuscenes import NuScenesDataset
from .lyft import LyftDataset
dataset_factory = {
"KITTI": KittiDataset,
"NUSC": NuScenesDataset,
"LYFT": LyftDataset,
}
def get_dataset(dataset_name):
return dataset_factory[dataset_name]
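
# A minimal usage sketch (constructor arguments are hypothetical; the dataset
# classes are defined in their own modules):
# dataset_cls = get_dataset("KITTI")
# dataset = dataset_cls(root_path="/path/to/kitti")  # hypothetical args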
|
1621389
|
import os
import shutil
import stat
import pprint
import random
import sys
import logging
import time
import datetime
import functools
import platform
import re
import pbcommand.cli.utils as U
from pbcommand.models import ResourceTypes, TaskTypes
from pbcommand.common_options import add_log_debug_option
from pbcommand.cli import get_default_argparser
from pbcommand.models.report import Attribute, Report
from pbcommand.utils import which, nfs_exists_check
from pbcommand.validators import validate_file
from pbcommand.cli.utils import main_runner_default
from pbsmrtpipe.cluster import ClusterTemplateRender, ClusterTemplate
from pbsmrtpipe.cluster import Constants as ClusterConstants
from pbsmrtpipe.engine import run_command, backticks
from pbsmrtpipe.models import RunnableTask, TaskStates
import pbsmrtpipe.pb_io as IO
log = logging.getLogger(__name__)
slog = logging.getLogger('status.' + __name__)
__version__ = '1.0.1'
def _resolve_exe(exe):
"""
Try to resolve the abspath to the exe, default to the exe if not found
in path"""
x = which(exe)
return exe if x is None else os.path.abspath(x)
def validate_file_and_load_manifest(path):
rt = RunnableTask.from_manifest_json(validate_file(path))
# if we got here everything is valid
return path
def _add_manifest_json_option(p):
d = "Path to task-manifest.json"
p.add_argument('task_manifest', type=validate_file_and_load_manifest, help=d)
return p
def _add_stderr_file(p):
_d = "Stderr of exe'ed manifest task commands."
p.add_argument('--task-stderr', type=str, required=True, help=_d)
return p
def _add_stdout_file(p):
_d = "Stdout of exe'ed manifest task commands."
p.add_argument('--task-stdout', type=str, required=True, help=_d)
return p
def _add_base_options(p):
return _add_manifest_json_option(add_log_debug_option(p))
def _add_run_on_cluster_option(p):
p.add_argument('--cluster', action='store_true', default=False,
help="Submit tasks to cluster if the cluster env is defined and task type is 'distributed.'")
return p
def to_task_report(host, task_id, run_time_sec, exit_code, error_message, warning_message):
# Move this somewhere that makes sense
def to_a(idx, value):
return Attribute(idx, value)
datum = [('host', host),
('task_id', task_id),
('run_time', run_time_sec),
('exit_code', exit_code),
('error_msg', error_message),
('warning_msg', warning_message)]
attributes = [to_a(i, v) for i, v in datum]
r = Report("workflow_task", attributes=attributes)
return r
def write_task_report(job_resources, task_id, path_to_report, report_images):
"""
Copy image files to job html images dir, convert the task report to HTML
:type job_resources: JobResources
:param task_id:
:param job_resources:
:param path_to_report: abspath to the json pbreport
:return:
"""
report_html = os.path.join(job_resources.html, "{t}.html".format(t=task_id))
task_image_dir = os.path.join(job_resources.images, task_id)
if not os.path.exists(task_image_dir):
os.mkdir(task_image_dir)
shutil.copy(path_to_report, report_html)
for image in report_images:
shutil.copy(image, os.path.join(task_image_dir, os.path.basename(image)))
log.debug("Completed writing {t} report".format(t=task_id))
def _create_tmp_file_resource(path):
if not os.path.exists(path):
with open(path, 'a'):
os.utime(path, None)
log.debug("Created resource {r} {p}".format(r=ResourceTypes.TMP_FILE, p=path))
def _create_tmp_dir_resource(path):
if not os.path.exists(path):
os.makedirs(path)
log.debug("Created resource {r} {p}".format(r=ResourceTypes.TMP_DIR, p=path))
def create_tmp_resource(rtype, path):
if rtype == ResourceTypes.TMP_FILE:
_create_tmp_file_resource(path)
if rtype == ResourceTypes.TMP_DIR:
_create_tmp_dir_resource(path)
def create_tmp_resource_ignore_error(rtype, path):
try:
create_tmp_resource(rtype, path)
except Exception as e:
log.error("Failed to create resource type {t} -> '{p}'".format(t=rtype, p=path))
log.error(e)
def create_tmp_resources_ignore_error(resources):
for r in resources:
rtype = r['resource_type']
p = r['path']
        create_tmp_resource_ignore_error(rtype, p)
def _cleanup_resource_type(rtype, validation_func, remove_func, path):
if rtype not in ResourceTypes.ALL():
log.warn("Invalid resource type {x}. Ignoring resource {p}".format(x=rtype, p=path))
return False
    if ResourceTypes.is_tmp_resource(rtype):
if validation_func(path):
remove_func(path)
else:
log.warn("Unable to find resource type {t} -> {p}".format(t=rtype, p=path))
return True
cleanup_tmp_file = functools.partial(_cleanup_resource_type, ResourceTypes.TMP_FILE, os.path.isfile, os.remove)
cleanup_tmp_dir = functools.partial(_cleanup_resource_type, ResourceTypes.TMP_DIR, os.path.isdir, lambda x: shutil.rmtree(x, ignore_errors=True))
def cleanup_resource(rtype, path):
if rtype == ResourceTypes.TMP_FILE:
cleanup_tmp_file(path)
if rtype == ResourceTypes.TMP_DIR:
cleanup_tmp_dir(path)
return True
def cleanup_resources(runnable_task):
for resource in runnable_task.task.resources:
rtype = resource['resource_type']
path = resource['path']
try:
cleanup_resource(rtype, path)
except Exception as e:
log.error("Ignoring Error {e} during cleanup resource {r} -> {p}".format(r=rtype, p=path, e=e.message))
return True
def run_task(runnable_task, output_dir, task_stdout, task_stderr, debug_mode):
"""
Run a runnable task locally.
:param debug_mode: Enabling debug mode will not cleanup temp resources upon failure
:type debug_mode: bool
:param runnable_task: Runnable task instance
:type runnable_task: RunnableTask
:param output_dir: Path to output dir
:type output_dir: str
:param task_stderr: Absolute path to task stderr file
:type task_stderr: str
:param task_stdout: Absolute path to task stdout file
:type task_stdout: str
:return: (exit code, error message, run_time)
:rtype: (int, str, int)
"""
started_at = time.time()
def get_run_time():
return time.time() - started_at
# Default general catch all
rcode = 1
err_msg = ""
# host = socket.getfqdn()
host = platform.node()
ncmds = len(runnable_task.task.cmds)
# so core dumps are written to the job dir
os.chdir(output_dir)
env_json = os.path.join(output_dir, '.env.json')
IO.write_env_to_json(env_json)
with open(task_stdout, 'w') as stdout_fh:
with open(task_stderr, 'w') as stderr_fh:
stdout_fh.write(repr(runnable_task) + "\n")
stdout_fh.write("Created at {x} on {h}\n".format(x=datetime.datetime.now(), h=host))
stdout_fh.write("Running task in {o}\n".format(o=output_dir))
# Validate Inputs
for input_file in runnable_task.task.input_files:
if os.path.exists(input_file):
stdout_fh.write("Validated INPUT file '{i}\n".format(i=input_file))
else:
err_msg = "Unable to find INPUT file '{i}".format(i=input_file)
stderr_fh.write(err_msg + "\n")
log.error(err_msg)
break
# Create resources if necessary
#if runnable_task.task.resources:
# create_tmp_resources_ignore_error(runnable_task.task.resources)
stdout_fh.write("Starting to run {n} cmds.".format(n=len(runnable_task.task.cmds)))
stdout_fh.flush()
stderr_fh.flush()
for i, cmd in enumerate(runnable_task.task.cmds):
log.info("Running command \n" + cmd)
# see run_command API for future fixes
rcode, _, _, run_time = run_command(cmd, stdout_fh, stderr_fh, time_out=None)
if rcode != 0:
err_msg_ = "Failed task {i} exit code {r} in {s:.2f} sec (See file '{f}'.)".format(i=runnable_task.task.task_id, r=rcode, s=run_time, f=task_stderr)
                    stderr_fh.write(err_msg_ + "\n")
stderr_fh.flush()
t_error_msg = _extract_last_nlines(task_stderr)
err_msg = "\n".join([err_msg_, "Extracted from stderr", t_error_msg])
log.error(err_msg)
stdout_fh.write("breaking out. Unable to run remaining task commands.")
break
else:
stdout_fh.write("completed running cmd {i} of {n}. exit code {x} in {s:.2f} sec on host {h}\n".format(x=rcode, s=run_time, h=host, i=i + 1, n=ncmds))
smsg_ = "completed running commands. Exit code {i}".format(i=rcode)
log.debug(smsg_)
if rcode == 0:
log.info("Core RTC runner was successful. Validating output files.")
# Validate output files of a successful task.
for ix, output_file in enumerate(runnable_task.task.output_files):
if os.path.exists(output_file):
stdout_fh.write("Successfully validated {i} output file '{o}' on {h} \n".format(o=output_file, i=ix, h=host))
else:
rcode = 127
err_msg = "Unable to find {i} output file '{x}'. Marking task as failed. Setting exit code to {r}".format(x=output_file, i=ix, r=rcode)
stderr_fh.write(err_msg + "\n")
stdout_fh.write(err_msg + "\n")
log.error(err_msg)
# FIXME. There should be a better way to communicate warnings
warn_msg = ""
# Write the task summary to a pbcommand Report object
r = to_task_report(host, runnable_task.task.task_id, get_run_time(), rcode, err_msg, warn_msg)
task_report_path = os.path.join(output_dir, 'task-report.json')
msg = "Writing task id {i} task report to {r}".format(r=task_report_path, i=runnable_task.task.task_id)
log.info(msg)
stdout_fh.write(msg + "\n")
r.write_json(task_report_path)
stderr_fh.flush()
stdout_fh.flush()
# Cleanup resource files
if not debug_mode and runnable_task.task.resources:
try:
cleanup_resources(runnable_task)
log.debug("successfully cleaned up {n} resources.".format(n=len(runnable_task.task.resources)))
except Exception as e:
log.error(str(e))
log.error("failed to successfully cleanup resources. {f}".format(f=runnable_task.task.resources))
return rcode, err_msg, get_run_time()
def to_job_id(base_name, base_id):
return ''.join(['job.', base_name, str(base_id), str(random.randint(10000, 99999))])
def to_random_job_id(base_name):
return ''.join(['job.', str(random.randint(1000000, 10000000)), base_name])
def _extract_last_nlines(path, nlines=25):
"""Attempt to extract the last nlines from a file
If the file is not found or there's an error parsing the file,
an empty string is returned.
"""
try:
n = nlines + 1
nfs_exists_check(path)
with open(path, 'r') as f:
s = f.readlines()
return "".join(s[-n:])
except Exception as e:
log.warn("Unable to extract stderr from {p}. {e}".format(p=path, e=e))
return ""
def chmod_x(path_):
os.chmod(path_, os.stat(path_).st_mode | stat.S_IEXEC)
def run_task_on_cluster(runnable_task, task_manifest_path, output_dir, debug_mode):
"""
:param runnable_task:
:param output_dir:
:param debug_mode:
:return:
:type runnable_task: RunnableTask
"""
def _to_p(x_):
return os.path.join(output_dir, x_)
stdout_ = _to_p('stdout')
stderr_ = _to_p('stderr')
if runnable_task.task.is_distributed is False:
return run_task(runnable_task, output_dir, stdout_, stderr_, debug_mode)
if runnable_task.cluster is None:
log.warn("No cluster provided. Running task locally.")
return run_task(runnable_task, output_dir, stdout_, stderr_, debug_mode)
os.chdir(runnable_task.task.output_dir)
env_json = os.path.join(output_dir, '.cluster-env.json')
IO.write_env_to_json(env_json)
# sloppy API
if isinstance(runnable_task.cluster, ClusterTemplateRender):
render = runnable_task.cluster
else:
ctmpls = [ClusterTemplate(name, tmpl) for name, tmpl in runnable_task.cluster.iteritems()]
render = ClusterTemplateRender(ctmpls)
job_id = to_random_job_id(runnable_task.task.task_id)
log.debug("Using job id {i}".format(i=job_id))
qstdout = _to_p('cluster.stdout')
qstderr = _to_p('cluster.stderr')
qshell = _to_p('cluster.sh')
rcmd_shell = _to_p('run.sh')
# This needs to be flattened due to the new RTC layer
# Task Manifest Runner output
stdout = _to_p('stdout')
stderr = _to_p('stderr')
with open(qstdout, 'w+') as f:
f.write("Creating cluster stdout for Job {i} {r}\n".format(i=job_id, r=runnable_task))
debug_str = " --debug "
exe = _resolve_exe("pbtools-runner")
_d = dict(x=exe,
t=task_manifest_path,
o=stdout,
e=stderr,
d=debug_str,
m=stdout,
n=stderr,
r=output_dir)
# the quoting here is explicitly to handle spaces in paths
cmd = "{x} run {d} --output-dir=\"{r}\" --task-stderr=\"{e}\" --task-stdout=\"{o}\" \"{t}\" > \"{m}\" 2> \"{n}\"".format(**_d)
# write the pbtools-runner exe command
with open(rcmd_shell, 'w+') as x:
x.write(cmd + "\n")
chmod_x(rcmd_shell)
cluster_cmd = render.render(ClusterConstants.START, rcmd_shell, job_id, qstdout, qstderr, runnable_task.task.nproc)
log.info("Job submission command: " + cluster_cmd)
with open(qshell, 'w') as f:
f.write("#!/bin/bash\n")
f.write("set -o errexit\n")
f.write("set -o pipefail\n")
f.write("set -o nounset\n")
f.write(cluster_cmd.rstrip("\n") + " ${1+\"$@\"}\n")
f.write("exit $?")
chmod_x(qshell)
host = platform.node()
# so core dumps are written to the job dir
os.chdir(output_dir)
# print the underlying jms command if using runjmscmd
if re.search(r'/runjmscmd\b', cluster_cmd):
rcode, cstdout, cstderr, run_time = backticks("bash {q} --printcmd".format(q=qshell))
if rcode == 0:
log.info("Underlying JMS job submission command: " + "\n".join(cstdout))
# Blocking call
rcode, cstdout, cstderr, run_time = backticks("bash {q}".format(q=qshell))
log.info("Cluster command return code {r} in {s:.2f} sec".format(r=rcode, s=run_time))
msg_t = "{n} Completed running cluster command in {t:.2f} sec. Exit code {r} (task-type {i})"
msg_ = msg_t.format(r=rcode, t=run_time, i=runnable_task.task.task_type_id, n=datetime.datetime.now())
log.info(msg_)
# Append the bash cluster.sh stderr and stdout call to
# the cluster.stderr and cluster.stdout
with open(qstdout, 'a') as qf:
if cstdout:
qf.write("\n".join(cstdout) + "\n")
qf.write(msg_ + "\n")
with open(qstderr, 'a') as f:
if rcode != 0:
if cstderr:
f.write(str(cstderr) + "\n")
# fundamental output error str message of this func
err_msg = ""
warn_msg = ""
if rcode != 0:
p_err_msg = "task {i} failed (exit-code {x}) after {r:.2f} sec".format(i=runnable_task.task.task_id, r=run_time, x=rcode)
raw_stderr = _extract_last_nlines(stderr)
cluster_raw_stderr = _extract_last_nlines(qstderr)
err_msg = "\n".join([p_err_msg, raw_stderr, cluster_raw_stderr])
warn_msg = ""
# write the result status message to stderr if task failure
# doing this here to avoid having a double message
with open(qstderr, 'a') as f:
if rcode != 0:
if cstderr:
f.write(msg_ + "\n")
r = to_task_report(host, runnable_task.task.task_id, run_time, rcode, err_msg, warn_msg)
task_report_path = os.path.join(output_dir, 'task-report.json')
msg = "Writing task id {i} task report to {r}".format(r=task_report_path, i=runnable_task.task.task_id)
log.info(msg)
r.write_json(task_report_path)
return rcode, err_msg, run_time
def run_task_manifest(path):
output_dir = os.path.dirname(path)
os.chdir(output_dir)
stderr = os.path.join(output_dir, 'stderr')
stdout = os.path.join(output_dir, 'stdout')
try:
rt = RunnableTask.from_manifest_json(path)
except KeyError:
emsg = "Unable to deserialize RunnableTask from manifest {p}".format(p=path)
log.error(emsg)
raise
# blocking call
rcode, err_msg, run_time = run_task(rt, output_dir, stdout, stderr, True)
state = TaskStates.from_int(rcode)
return state, err_msg, run_time
def run_task_manifest_on_cluster(path):
"""
Run the Task on the queue (of possible)
:param path:
:return:
"""
output_dir = os.path.dirname(path)
os.chdir(output_dir)
rt = RunnableTask.from_manifest_json(path)
# this needs to be updated to have explicit paths to stderr, stdout
rcode, err_msg, run_time = run_task_on_cluster(rt, path, output_dir, True)
state = TaskStates.from_int(rcode)
return state, err_msg, run_time
def _args_run_task_manifest(args):
output_dir = os.getcwd() if args.output_dir is None else args.output_dir
task_manifest_path = args.task_manifest
log.info("Loading runnable-task from {f}".format(f=task_manifest_path))
rt = RunnableTask.from_manifest_json(task_manifest_path)
log.info("loaded runnable-task")
# (exit code, run_time_sec) =
rcode, err_msg, _ = run_task(rt, output_dir, args.task_stdout, args.task_stderr, args.debug)
return rcode
def _add_run_options(p):
_add_base_options(p)
U.add_output_dir_option(p)
_add_stdout_file(p)
_add_stderr_file(p)
return p
def run_to_cmd(runnable_task):
"""
Extract the cmds from the json and print them to stdout
:type runnable_task: RunnableTask
"""
print "\n".join(runnable_task.task.cmds)
return 0
def _args_to_cmd(args):
return run_to_cmd(RunnableTask.from_manifest_json(args.task_manifest))
def pprint_task_manifest(runnable_task):
print pprint.pformat(runnable_task.__dict__)
return 0
def _args_pprint_task_manifest(args):
return pprint_task_manifest(RunnableTask.from_manifest_json(args.task_manifest))
def get_main_parser():
"""
Returns an argparse Parser with all the commandline utils as
subparsers
"""
desc = "General tool used by run task-manifests.json files."
p = get_default_argparser(__version__, desc)
sp = p.add_subparsers(help='Subparser Commands')
def builder(sid_, help_, opt_func_, exe_func_):
return U.subparser_builder(sp, sid_, help_, opt_func_, exe_func_)
# Run command
    builder('run', "Run a task from a task-manifest.json file", _add_run_options, _args_run_task_manifest)
builder("to-cmds", "Extract the cmds from manifest.json", _add_manifest_json_option, _args_to_cmd)
builder("inspect", "Pretty-Print a summary of the task-manifestExtract the cmds from manifest.json",
_add_base_options, _args_pprint_task_manifest)
return p
def main(argv=None):
argv_ = sys.argv if argv is None else argv
parser = get_main_parser()
return main_runner_default(argv_[1:], parser, log)
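
# Example invocations (sketch; the exe name comes from _resolve_exe above and the
# flags from the command template in run_task_on_cluster):
#   pbtools-runner run --output-dir=/path/out --task-stdout=out.stdout --task-stderr=out.stderr task-manifest.json
#   pbtools-runner to-cmds task-manifest.json
#   pbtools-runner inspect task-manifest.json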
|
1621405
|
import tensorflow as tf
from tensorflow.keras.layers import Dot

class Siamese(tf.keras.Model):
    def __init__(self, shared_conv):
        super().__init__(name="siamese")
        self.conv = shared_conv
        self.dot = Dot(axes=-1, normalize=True)  # normalized dot product = cosine similarity
def call(self, inputs):
i1, i2 = inputs
x1 = self.conv(i1)
x2 = self.conv(i2)
return self.dot([x1, x2])
# shared_conv, contrastive_loss and accuracy_sim are assumed to be defined earlier.
model = Siamese(shared_conv)
model.compile(loss=contrastive_loss, optimizer='rmsprop', metrics=[accuracy_sim])
###### binary classification instead of cosine
#
# out = Lambda(lambda x: K.abs(x[0] - x[1]))([x1, x2])
# out = Dense(1, activation="sigmoid")(out)
#
# model = Model(inputs=[i1, i2], outputs=out)
# model.compile(loss="binary_crossentropy", optimizer="rmsprop", metrics=["accuracy"])
|
1621430
|
import falcon  # the resource class below follows falcon's on_get convention
class AddHeadersResource:
def addon_path(self):
return "additional_headers"
def __init__(self, additional_headers_addon):
self.additional_headers_addon = additional_headers_addon
def on_get(self, req, resp, method_name):
getattr(self, "on_" + method_name)(req, resp)
def on_add_headers(self, req, resp):
for k, v in req.params.items():
self.additional_headers_addon.headers[k] = v
def on_add_header(self, req, resp):
for k, v in req.params.items():
self.additional_headers_addon.headers[k] = v
def on_remove_header(self, req, resp):
self.additional_headers_addon.headers.pop(req.get_param('name'))
def on_remove_all_headers(self, req, resp):
self.additional_headers_addon.headers = {}
class AddHeadersAddOn:
def __init__(self):
self.num = 0
self.headers = {}
def get_resource(self):
return AddHeadersResource(self)
def request(self, flow):
for k, v in self.headers.items():
flow.request.headers[k] = v
addons = [
AddHeadersAddOn()
]
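
# Usage sketch (assumptions: this file is saved as add_headers.py; the
# surrounding project mounts get_resource() on a falcon API under addon_path()):
#   mitmdump -s add_headers.py
# Once mounted, e.g. GET .../additional_headers/add_header?X-Debug=1 would add
# an X-Debug header to every proxied request.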
|
1621518
|
from utils import *

class Counter:
    def __init__(self, data):
        self.__countResult = 0
        self.__data = data
        self.__parse()
    def __parse(self):
        self.__startCounting()
    def __startCounting(self):
        for g in groupeData(self.__data):
            self.__countYes(g)
    def __countYes(self, answersOfGroup):
        # count the distinct answers given by anyone in the group
        merged = "".join(answersOfGroup)
        count = len(set(merged))
        self.__countResult += count
    def getCountResult(self):
        return self.__countResult

with open("input.txt", "r") as f:
    data = f.readlines()
c = Counter(data)
print("Answer 1: {}".format(c.getCountResult()))
|
1621584
|
from gui.creator import load_bot_config
from gui.creator import load_building_pos
from gui.creator import write_building_pos, write_bot_config
from gui.creator import button
from tkinter import Label, Frame, Text, Scrollbar, Canvas, LabelFrame, Toplevel, Entry, Button
from tkinter import N, W, END, NSEW, INSERT, LEFT
from gui import bot_config_fns as atf
from bot_related.bot import Bot
from tasks.constants import BuildingNames
from PIL import ImageTk, Image
class SelectedDeviceFrame(Frame):
def __init__(self, windows, device, cnf={}, **kwargs):
Frame.__init__(self, windows, kwargs)
self.building_pos_window = None
self.bot = Bot(device)
self.device = device
self.bot_config = load_bot_config(device.serial.replace(':', "_"))
self.bot_building_pos = load_building_pos(device.serial.replace(':', "_"))
self.windows_size = [kwargs['width'], kwargs['height']]
display_frame, self.task_title, self.task_text = self.task_display_frame()
config_frame = self.config_frame()
bottom_frame = self.bottom_frame()
display_frame.grid(row=1, column=0, padx=10, pady=(0, 10), sticky=N + W)
config_frame.grid(row=2, column=0, padx=10, sticky=N + W)
bottom_frame.grid(row=3, column=0, padx=10, pady=(10, 10), sticky=N + W)
# def handle_focus(event):
# if event.widget == self.master.master.master and self.building_pos_window is not None:
# self.building_pos_window.attributes('-topmost', 1)
# self.building_pos_window.attributes('-topmost', 0)
# self.building_pos_window.focus_force()
#
# self.master.master.master.bind("<FocusIn>", handle_focus)
def task_display_frame(self):
width = self.windows_size[0] - 20
height = 210
frame = Frame(self, width=width, height=height)
frame.grid_propagate(False)
frame.columnconfigure(0, weight=width)
frame.rowconfigure(0, weight=5)
frame.rowconfigure(1, weight=5)
frame.rowconfigure(2, weight=height - 10)
dl = Label(frame, text=self.device.serial, width=width, height=5, bg='white')
title = Label(frame, text="Task: None", width=width, height=5)
text = Text(frame, width=width, height=height - 30)
title.config(bg='white', anchor=W, justify=LEFT)
dl.grid(row=0, column=0, pady=(10, 0), sticky=N + W)
title.grid(row=1, column=0, pady=10, sticky=N + W)
text.grid(row=2, column=0, sticky=N + W)
return frame, title, text
def config_frame(self):
frame_canvas = LabelFrame(self,
text='Config',
width=self.windows_size[0],
height=self.windows_size[1] - 100
)
frame_canvas.grid_rowconfigure(0, weight=1)
frame_canvas.grid_columnconfigure(0, weight=1)
frame_canvas.grid_propagate(False)
# Add a canvas in that frame
canvas = Canvas(frame_canvas)
canvas.grid(row=0, column=0, sticky=N + W)
# Link a scrollbar to the canvas
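        # event.delta is a multiple of 120 per wheel notch on Windows; divide and
        # negate so that wheel-up scrolls the canvas content upward.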
def on_mousewheel(event):
canvas.yview_scroll(int(-1 * (event.delta / 120)), "units")
def bound_to_mousewheel(event):
canvas.bind_all("<MouseWheel>", on_mousewheel)
def unbound_to_mousewheel(event):
canvas.unbind_all("<MouseWheel>")
y_scrollbar = Scrollbar(frame_canvas, orient="vertical", command=canvas.yview)
y_scrollbar.grid(row=0, column=1, sticky='ns')
canvas.configure(yscrollcommand=y_scrollbar.set)
inner_frame = Frame(canvas)
inner_frame.bind('<Enter>', bound_to_mousewheel)
inner_frame.bind('<Leave>', unbound_to_mousewheel)
canvas.create_window((0, 0), window=inner_frame, anchor='nw')
for i in range(len(atf.bot_config_title_fns)):
title_fns, sub_fns = atf.bot_config_title_fns[i]
check = section_frame(
self,
inner_frame,
title_fns,
sub_fns
)
check.grid(row=i, column=0, sticky=N + W)
inner_frame.update_idletasks()
frame_canvas.config(width=self.windows_size[0] - 20, height=self.windows_size[1] - 350)
canvas.config(width=self.windows_size[0] - 20, height=self.windows_size[1] - 350,
scrollregion=canvas.bbox("all"))
return frame_canvas
def start(self):
if self.bot_building_pos is None:
self.bot_config.hasBuildingPos = False
self.bot_building_pos = {}
self.bot.config = self.bot_config
self.bot.building_pos = self.bot_building_pos
self.bot.text_update_event = self.on_task_update
self.bot.building_pos_update_event = lambda **kw: write_building_pos(kw['building_pos'], kw['prefix'])
self.bot.config_update_event = lambda **kw: write_bot_config(kw['config'], kw['prefix'])
self.bot.start(self.bot.do_task)
def stop(self):
self.bot.stop()
self.task_title.config(text='Task: None')
self.task_text.delete(1.0, END)
def bottom_frame(self):
frame = Frame(self)
# start/stop
def on_start_or_stop_click(btn):
if btn.cget('text') == 'Start':
self.start()
btn.config(text='Stop')
elif btn.cget('text') == 'Stop':
self.stop()
btn.config(text='Start')
return
start_button = button(frame, on_start_or_stop_click, text='Start')
start_button.grid(row=0, column=0, padx=(0, 5), sticky=N + W)
#building position setting
def on_building_pos_click(btn):
if self.building_pos_window is None:
self.building_pos_window = building_pos_window(self)
building_pos_button = button(frame, on_building_pos_click, text='Building Pos')
building_pos_button.grid(row=0, column=1, sticky=N + W)
return frame
def on_task_update(self, text):
title, text_list = text['title'], text['text_list']
self.task_title.config(text="Task: " + title)
self.task_text.delete(1.0, END)
for t in text_list:
self.task_text.insert(INSERT, t + '\n')
def section_frame(app, parent, title_component_fn, sub_component_fns=(), start_row=0, start_column=0):
outer_frame = Frame(parent)
inner_frame = Frame(outer_frame)
def disable_when_false(checked):
if checked:
enableChildren(inner_frame)
else:
disableChildren(inner_frame)
title, variable = title_component_fn(app, outer_frame, disable_when_false)
title.grid(row=start_row, column=start_column, sticky=N + W)
inner_frame.grid(row=start_row + 1, column=0, padx=30, pady=0, sticky=N + W)
for row in range(len(sub_component_fns)):
component, _ = sub_component_fns[row](app, inner_frame)
component.grid(row=row, column=0, sticky=N + W)
if not variable.get():
disableChildren(inner_frame)
else:
enableChildren(inner_frame)
return outer_frame
def disableChildren(parent):
children = parent.winfo_children()
for child in children:
wtype = child.winfo_class()
if wtype not in ('Frame', 'Labelframe'):
child.configure(state='disable')
if wtype == 'Menubutton':
child.config(width=8, bg='#F0F0F0')
else:
disableChildren(child)
def enableChildren(parent):
for child in parent.winfo_children():
wtype = child.winfo_class()
if wtype not in ('Frame', 'Labelframe'):
child.configure(state='normal')
if wtype == 'Menubutton':
child.config(width=8, bg='white')
else:
enableChildren(child)
def building_pos_window(parent):
width = 940
height = 360
selected_building = {
'name': None,
'label': None,
'prev_pos': [-1, -1],
}
def building_name_xy_config_frame(master, row, name, pos):
name_label = Label(master, text=name.replace('_', ' ').title())
pos_label = Label(master, text='[{}, {}]'.format(pos[0], pos[1]), width=18)
def on_set_click(btn):
if selected_building['name'] is not None:
selected_building['label'].config(
text='[{}, {}]'.format(
selected_building['prev_pos'][0], selected_building['prev_pos'][1]
)
)
selected_building['name'] = name
selected_building['label'] = pos_label
selected_building['label'].config(text='click on building')
selected_building['prev_pos'] = pos
set_button = button(master, on_set_click, text='Edit', )
name_label.grid(row=row, column=0, pady=2, sticky=W)
pos_label.grid(row=row, column=1, pady=2, sticky=W)
set_button.grid(row=row, column=2, pady=2, sticky=NSEW)
def close_window():
parent.building_pos_window.grab_release()
parent.building_pos_window.destroy()
parent.building_pos_window = None
def image_frame(parent):
frame = Frame(parent.building_pos_window, width=640, height=360)
frame.grid_rowconfigure(0, weight=1)
frame.grid_columnconfigure(0, weight=1)
frame.grid_propagate(False)
frame.grid(row=0, column=0, sticky=N + W)
label = Label(frame, text='Loading city image, Please wait!', background='white')
label.grid(row=0, column=0, sticky=NSEW)
canvas = Canvas(frame, width=640, height=360)
def setBuildingCoords(event):
if selected_building['name'] is None:
return
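            # the canvas shows the screenshot at half size (640x360), so map the
            # click back to full-resolution coordinates (assumes a 1280x720 source)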
pos = [event.x * 2, event.y * 2]
if parent.bot_building_pos is None:
parent.bot_building_pos = {}
parent.bot_config.hasBuildingPos = True
parent.bot_building_pos[selected_building['name']] = [pos[0], pos[1]]
write_building_pos(
parent.bot_building_pos,
parent.device.serial.replace(':', "_")
)
selected_building['label'].config(text='[{}, {}]'.format(pos[0], pos[1]))
selected_building['name'] = None
selected_building['label'] = None
canvas.bind("<Button 1>", setBuildingCoords)
def after_image_load():
image = parent.bot.get_city_image().resize((640, 360), Image.ANTIALIAS)
frame.image = image = ImageTk.PhotoImage(image)
canvas.create_image((0, 0), image=image, anchor='nw')
parent.bot.curr_thread = None
label.grid_forget()
canvas.grid(row=0, column=0, sticky=N + W)
parent.bot.start(after_image_load)
return frame
def right_frame(parent):
rf_width = 300
rf_height = 360
# right side frame
frame_right = LabelFrame(parent.building_pos_window,
text="Building Position",
width=rf_width - 10,
height=rf_height - 10,
)
frame_right.grid_rowconfigure(0, weight=1)
frame_right.grid_columnconfigure(0, weight=1)
frame_right.grid_propagate(False)
canvas_right = Canvas(frame_right)
canvas_right.grid(row=0, column=0, sticky=N + W)
# Link a scrollbar to the canvas
def on_mousewheel(event):
canvas_right.yview_scroll(int(-1 * (event.delta / 120)), "units")
def bound_to_mousewheel(event):
canvas_right.bind_all("<MouseWheel>", on_mousewheel)
def unbound_to_mousewheel(event):
canvas_right.unbind_all("<MouseWheel>")
y_scrollbar = Scrollbar(frame_right, orient="vertical", command=canvas_right.yview)
y_scrollbar.grid(row=0, column=1, sticky='ns')
canvas_right.configure(yscrollcommand=y_scrollbar.set)
inner_frame_right = Frame(canvas_right)
inner_frame_right.bind('<Enter>', bound_to_mousewheel)
inner_frame_right.bind('<Leave>', unbound_to_mousewheel)
canvas_right.create_window((0, 0), window=inner_frame_right, anchor='nw')
idx = 0
for e_name in BuildingNames:
building_name_xy_config_frame(
inner_frame_right,
idx,
e_name.value,
parent.bot_building_pos.get(e_name.value, [-1, -1]) if parent.bot_building_pos is not None else [-1, -1]
)
idx = idx + 1
inner_frame_right.update_idletasks()
frame_right.config(width=rf_width - 10, height=360 - 10)
canvas_right.config(width=rf_width - 10, height=360 - 10, scrollregion=canvas_right.bbox("all"))
frame_right.grid(row=0, column=1, padx=5, pady=5, sticky=N + W)
return inner_frame_right
def set_focus(event):
parent.building_pos_window.attributes('-topmost', 1)
parent.building_pos_window.attributes('-topmost', 0)
parent.building_pos_window.focus_force()
parent.building_pos_window = Toplevel(parent.master.master.master)
parent.building_pos_window.resizable(0, 0)
parent.building_pos_window.grab_set()
parent.building_pos_window.title("{} Building Position".format(parent.device.serial.replace(':', "_")))
parent.building_pos_window.geometry("{}x{}".format(width, height))
parent.building_pos_window.protocol("WM_DELETE_WINDOW", close_window)
image_frame(parent)
rf = right_frame(parent)
set_focus(None)
return parent.building_pos_window
|
1621591
|
import angr
import binascii
def main():
p = angr.Project("fake", auto_load_libs=False)
state = p.factory.blank_state(addr=0x4004AC)
inp = state.solver.BVS('inp', 8*8)
state.regs.rax = inp
simgr= p.factory.simulation_manager(state)
simgr.explore(find=0x400684)
found = simgr.found[0]
# We know the flag starts with "ASIS{"
flag_addr = found.regs.rdi
found.add_constraints(found.memory.load(flag_addr, 5) == int(binascii.hexlify(b"ASIS{"), 16))
# More constraints: the whole flag should be printable
flag = found.memory.load(flag_addr, 40)
for i in range(5, 5+32):
cond_0 = flag.get_byte(i) >= ord('0')
cond_1 = flag.get_byte(i) <= ord('9')
cond_2 = flag.get_byte(i) >= ord('a')
cond_3 = flag.get_byte(i) <= ord('f')
cond_4 = found.solver.And(cond_0, cond_1)
cond_5 = found.solver.And(cond_2, cond_3)
found.add_constraints(found.solver.Or(cond_4, cond_5))
# And it ends with a '}'
found.add_constraints(flag.get_byte(32+5) == ord('}'))
    # In fact, adding fewer constraints (for example, constraining only the
    # first several characters) is enough to recover the flag, and Z3 runs much
    # faster with fewer constraints. All constraints are added here just to
    # stay on the safe side.
flag_str = found.solver.eval(flag, cast_to=bytes)
return flag_str.rstrip(b'\0')
#print("The number to input: ", found.solver.eval(inp))
#print("Flag:", flag)
# The number to input: 25313971399
# Flag: ASIS{f5f7af556bd6973bd6f2687280a243d9}
def test():
a = main()
assert a == b'ASIS{f5f7af556bd6973bd6f2687280a243d9}'
if __name__ == '__main__':
import logging
logging.getLogger('angr.sim_manager').setLevel(logging.DEBUG)
print(main())
|
1621631
|
import gzip
import json
import os
import random
import shutil
import pandas as pd
def generate_sample_data():
random.seed(42)
files = {}
for month in range(1, 13):
lines = []
for suffix_id in range(1000):
lines.append({
'id': month * 1000 + suffix_id,
'duration': random.randint(1, 100),
'month': month
})
files[month] = lines
return files
def generate_dask_bag_files():
base_path = 'db_data'
if os.path.exists(base_path):
print('Removing old data')
shutil.rmtree(base_path)
print('Generating dask bag data sample...')
os.mkdir(base_path)
files = generate_sample_data()
for month, lines in files.items():
filename = os.path.join(base_path, 'month_{}.json.gz'.format(month))
        with gzip.open(filename, 'wt') as f:
            f.write('\n'.join(map(json.dumps, lines)))
def generate_dask_dataframe_files():
base_path = 'dd_data'
if os.path.exists(base_path):
print('Removing old data')
shutil.rmtree(base_path)
print('Generating dask dataframe data sample...')
os.mkdir(base_path)
files = generate_sample_data()
for month, lines in files.items():
filename = os.path.join(base_path, 'month_{}.csv.gz'.format(month))
df = pd.DataFrame(lines)
df.to_csv(filename, compression='gzip', index=False)
def generate_presentation_data():
generate_dask_dataframe_files()
generate_dask_bag_files()
if __name__ == '__main__':
generate_presentation_data()
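
# A short reading sketch (not in the original): loading the generated files back
# with dask. Assumes dask[bag,dataframe] is installed.
#
# import dask.bag as db
# import dask.dataframe as dd
# bag = db.read_text('db_data/month_*.json.gz').map(json.loads)
# ddf = dd.read_csv('dd_data/month_*.csv.gz', compression='gzip', blocksize=None)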
|
1621652
|
expected_output = {'bridge-domain': 1000,
'peers': {1: {'peer-address': '192.168.255.3', 's': 'Y', 'vc-id': 1000},
2: {'peer-address': '192.168.255.1', 's': 'Y', 'vc-id': 1000}},
'signaling': 'LDP',
'state': 'up',
'type': 'multipoint',
'vfi-name': 'vpls',
'vpn-id': 1000}
|
1621657
|
import os
import scaper
import jams
os.chdir('..')
# FIXTURES
# Paths to files for testing
FG_PATH = 'tests/data/audio/foreground'
BG_PATH = 'tests/data/audio/background'
ALT_FG_PATH = 'tests/data/audio_alt_path/foreground'
ALT_BG_PATH = 'tests/data/audio_alt_path/background'
REG_NAME = 'soundscape_20200501'
# REG_NAME = 'soundscape_20190326_22050'
# REG_WAV_PATH = 'tests/data/regression/soundscape_20170928.wav'
# REG_JAM_PATH = 'tests/data/regression/soundscape_20170928.jams'
# REG_TXT_PATH = 'tests/data/regression/soundscape_20170928.txt'
REG_BGONLY_NAME = 'bgonly_soundscape_20200501'
# REG_BGONLY_NAME = 'bgonly_soundscape_20190326_22050'
# REG_BGONLY_WAV_PATH = 'tests/data/regression/bgonly_soundscape_20170928.wav'
# REG_BGONLY_JAM_PATH = 'tests/data/regression/bgonly_soundscape_20170928.jams'
# REG_BGONLY_TXT_PATH = 'tests/data/regression/bgonly_soundscape_20170928.txt'
REG_REVERB_NAME = 'reverb_soundscape_20200501'
# REG_REVERB_NAME = 'reverb_soundscape_20190326_22050'
# REG_REVERB_WAV_PATH = 'tests/data/regression/reverb_soundscape_20170928.wav'
# REG_REVERB_JAM_PATH = 'tests/data/regression/reverb_soundscape_20170928.jams'
# REG_REVERB_TXT_PATH = 'tests/data/regression/reverb_soundscape_20170928.txt'
# fg and bg labels for testing
FB_LABELS = ['car_horn', 'human_voice', 'siren']
BG_LABELS = ['park', 'restaurant', 'street']
SAMPLE_RATES = [22050, 44100]
def test_names(name, rate, exts=('wav', 'jams', 'txt')):
return [os.path.join('tests/data/regression', '{}_{}.{}'.format(name, rate, ext)) for ext in exts]
for rate in SAMPLE_RATES:
    test_names(REG_NAME, rate)  # note: return value unused here; the code below uses the last 'rate' from this loop
print("==========USING BELOW FOR TESTS==============")
VAR_NAMES_PARTIAL = ('REG', 'REG_BGONLY', 'REG_REVERB')
FILE_BASENAMES = (REG_NAME, REG_BGONLY_NAME, REG_REVERB_NAME)
FILE_TYPES = ('WAV', 'JAM', 'TXT')
for var, name in zip(VAR_NAMES_PARTIAL, FILE_BASENAMES):
for type, path in zip(FILE_TYPES, test_names(name, rate)):
print("{}_{}_PATH = '{}'".format(var, type, path))
print()
print("==========USING ABOVE FOR TESTS==============")
sc = scaper.Scaper(10.0, fg_path=FG_PATH, bg_path=BG_PATH)
sc.ref_db = -50
sc.sr = rate
# background
sc.add_background(
label=('const', 'park'),
source_file=(
'const',
'tests/data/audio/background/park/'
'268903__yonts__city-park-tel-aviv-israel.wav'),
source_time=('const', 0))
# foreground events
sc.add_event(
label=('const', 'siren'),
source_file=('const',
'tests/data/audio/foreground/'
'siren/69-Siren-1.wav'),
source_time=('const', 5),
event_time=('const', 2),
event_duration=('const', 5),
snr=('const', 5),
pitch_shift=None,
time_stretch=None)
sc.add_event(
label=('const', 'car_horn'),
source_file=('const',
'tests/data/audio/foreground/'
'car_horn/17-CAR-Rolls-Royce-Horn.wav'),
source_time=('const', 0),
event_time=('const', 5),
event_duration=('const', 2),
snr=('const', 20),
pitch_shift=('const', 1),
time_stretch=None)
sc.add_event(
label=('const', 'human_voice'),
source_file=('const',
'tests/data/audio/foreground/'
'human_voice/42-Human-Vocal-Voice-taxi-2_edit.wav'),
source_time=('const', 0),
event_time=('const', 7),
event_duration=('const', 2),
snr=('const', 10),
pitch_shift=None,
time_stretch=('const', 1.2))
wav_file, jam_file, txt_file = test_names(REG_NAME, rate)
sc.generate(wav_file, jam_file, txt_path=txt_file, disable_instantiation_warnings=True)
print('Wrote:', wav_file, jam_file, txt_file)
wav_file, jam_file, txt_file = test_names(REG_REVERB_NAME, rate)
sc.generate(wav_file, jam_file, txt_path=txt_file, reverb=0.2, disable_instantiation_warnings=True)
print('Wrote:', wav_file, jam_file, txt_file)
jams.load(jam_file)
# soundscape with only one event will use transformer (regression test)
sc = scaper.Scaper(10.0, fg_path=FG_PATH, bg_path=BG_PATH)
sc.ref_db = -20
sc.sr = rate
# background
sc.add_background(
label=('const', 'park'),
source_file=('const',
'tests/data/audio/background/park/'
'268903__yonts__city-park-tel-aviv-israel.wav'),
source_time=('const', 0))
wav_file, jam_file, txt_file = test_names(REG_BGONLY_NAME, rate)
sc.generate(wav_file, jam_file, txt_path=txt_file, reverb=0.2, disable_instantiation_warnings=True)
print('Wrote:', wav_file, jam_file, txt_file)
|
1621691
|
import uuid
import json
from gbdxtools.vector_styles import CircleStyle, LineStyle, FillStyle
class VectorLayer(object):
""" Abstract constructor for a vector layer knowing how to render itself as javascript.
Args:
styles (list): list of styles for which to create layers
"""
def __init__(self, styles=None, **kwargs):
self.source_id = uuid.uuid4().hex
if styles is not None:
self.styles = styles
else:
# nothing defined, so give them defaults
self.styles = [CircleStyle(**kwargs), LineStyle(**kwargs), FillStyle(**kwargs)]
def _layer_def(self, style):
""" Constructs a layer def with the proper fields
- implemented in subclasses
Returns:
layer (dict): a layer json dict used for adding to maps
"""
raise NotImplementedError()
def _datasource_def(self):
""" Constructs a datasource def appropriate for the layer type
- implemented in subclasses
Returns
datasource (dict): a datasource json dict used for adding data to maps
"""
raise NotImplementedError()
@property
def datasource(self):
""" Renders the datasource to add to the map, referenced by the layers
created by this layer instance.
Returns:
datasource (dict): a datasource json dict used for adding data to maps
"""
return {'id': self.source_id, 'data': self._datasource_def()}
@property
def layers(self):
""" Renders the list of layers to add to the map.
Returns:
layers (list): list of layer entries suitable for use in mapbox-gl 'map.addLayer()' call
"""
layers = [self._layer_def(style) for style in self.styles]
return layers
class VectorGeojsonLayer(VectorLayer):
""" Represents a vector layer created from a geojson source
Args:
geojson (dict): a list of geojson features to render
styles (list): A list of style objects to be applied to the layer
"""
def __init__(self, geojson, **kwargs):
super(VectorGeojsonLayer, self).__init__(**kwargs)
self.geojson = geojson
def _datasource_def(self):
return {'type': 'geojson', 'data': self.geojson}
def _layer_def(self, style):
return {
'id': type(style).__name__, # TODO - make this unique in the various styles
'type': style.type,
'source': self.source_id,
'paint': style.paint()
}
class VectorTileLayer(VectorLayer):
""" Represents a vector tile layer in a tile map
Args:
url (str): a vector tile url template
source_name (str): the name of the source layer in the vector tiles
styles (list): A list of style objects to be applied to the layer
"""
def __init__(self, url=None, source_layer_name='GBDX_Task_Output', **kwargs):
super(VectorTileLayer, self).__init__(**kwargs)
self.url = url
self.source_layer_name = source_layer_name
def _datasource_def(self):
return {'type': 'vector', 'tiles': [self.url]}
def _layer_def(self, style):
return {
'id': type(style).__name__, # TODO - make this unique in the various styles
'type': style.type,
'source': self.source_id,
'source-layer': self.source_layer_name,
'paint': style.paint()
}
class ImageLayer(object):
    """ A layer for rendering images and image arrays to slippy maps
        Args:
            image (str): an image url (or data URI) to place on the map
            coordinates: the coordinate bounds (list of polygon corners) for placing the image
        Returns:
            A JSON string of the layer definition
    """
    def __new__(cls, image, coordinates):
return json.dumps({
"id": uuid.uuid4().hex,
"type": "raster",
"source": {
"type": "image",
"url": image,
"coordinates": coordinates
}
})
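
# A brief usage sketch (hypothetical data; styles fall back to the defaults):
#
# layer = VectorGeojsonLayer({"type": "FeatureCollection", "features": []})
# layer.datasource  # -> {'id': <hex>, 'data': {'type': 'geojson', 'data': {...}}}
# layer.layers      # -> one layer def per style (circle, line, fill)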
|
1621746
|
import math
import os
import re
from typing import Union

import numpy as np
import trimesh
def save_mesh(mesh, save_path):
if isinstance(mesh.visual, trimesh.visual.texture.TextureVisuals):
save_path = os.path.join(os.path.dirname(save_path),
os.path.basename(os.path.splitext(save_path)[0]),
os.path.basename(save_path))
os.makedirs(os.path.dirname(save_path), exist_ok=True)
trimesh.exchange.export.export_mesh(mesh, save_path)
def load_mesh(path, mesh_only=False):
mesh = trimesh.load_mesh(path)
if mesh_only:
mesh = trimesh.Trimesh(vertices=mesh.vertices, faces=mesh.faces)
return mesh
class MeshExtractor:
def extract_mesh(self, *args, **kwargs):
raise NotImplementedError
class MeshIO(dict):
def __init__(self, meshes=None):
if meshes is None:
meshes = {}
self.mesh_path = {}
super().__init__(meshes)
@classmethod
    def from_file(cls, key_path_pair: Union[dict, list]):
mesh_io = cls()
if isinstance(key_path_pair, list):
key_path_pair = {i: p for i, p in enumerate(key_path_pair)}
mesh_io.mesh_path = key_path_pair
return mesh_io
def __getitem__(self, item):
if item not in super().keys():
mesh = load_mesh(self.mesh_path[item], mesh_only=True)
super().__setitem__(item, mesh)
return super().__getitem__(item)
def load(self):
for k in self.mesh_path.keys():
self.__getitem__(k)
return self
def merge(self):
return sum([m for m in self.values()]) if self else trimesh.Trimesh()
def save(self, folder):
os.makedirs(folder, exist_ok=True)
for k, v in self.items():
save_mesh(v, os.path.join(folder, f"{k}.obj"))
#cross product of vectors a and b
def cross(a, b):
x = a[1] * b[2] - a[2] * b[1]
y = a[2] * b[0] - a[0] * b[2]
z = a[0] * b[1] - a[1] * b[0]
return (x, y, z)
# determinant of matrix a
def det(a):
return a[0][0]*a[1][1]*a[2][2] + a[0][1]*a[1][2]*a[2][0] + a[0][2]*a[1][0]*a[2][1] - a[0][2]*a[1][1]*a[2][0] - a[0][1]*a[1][0]*a[2][2] - a[0][0]*a[1][2]*a[2][1]
# unit normal vector of plane defined by points a, b, and c
def unit_normal(a, b, c):
x = det([[1,a[1],a[2]],
[1,b[1],b[2]],
[1,c[1],c[2]]])
y = det([[a[0],1,a[2]],
[b[0],1,b[2]],
[c[0],1,c[2]]])
z = det([[a[0],a[1],1],
[b[0],b[1],1],
[c[0],c[1],1]])
magnitude = (x**2 + y**2 + z**2)**.5
if magnitude == 0.:
return (0., 0., 0.)
else:
return (x/magnitude, y/magnitude, z/magnitude)
#dot product of vectors a and b
def dot(a, b):
return a[0]*b[0] + a[1]*b[1] + a[2]*b[2]
#area of polygon poly
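# (sums edge cross products and projects onto the unit normal -- the 3D
# generalization of the shoelace formula; assumes the polygon is planar)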
def get_area(poly):
if len(poly) < 3: # not a plane - no area
return 0
total = [0, 0, 0]
for i in range(len(poly)):
vi1 = poly[i]
        if i == len(poly) - 1:  # 'is' compared object identity; '==' is the intended wrap-around check
vi2 = poly[0]
else:
vi2 = poly[i+1]
prod = cross(vi1, vi2)
total[0] += prod[0]
total[1] += prod[1]
total[2] += prod[2]
result = dot(total, unit_normal(poly[0], poly[1], poly[2]))
return abs(result/2)
def calculate_face_area(data):
face_areas = []
for face in data['f']:
vid_in_face = [int(item.split('/')[0]) for item in face]
face_area = get_area(data['v'][np.array(vid_in_face) - 1,:3].tolist())
face_areas.append(face_area)
return face_areas
def sample_pnts_from_obj(data, n_pnts = 5000, mode = 'uniform'):
# sample points on each object mesh.
flags = data.keys()
all_pnts = data['v'][:,:3]
area_list = np.array(calculate_face_area(data))
distribution = area_list/np.sum(area_list)
# sample points the probability depends on the face area
new_pnts = []
if mode == 'random':
random_face_ids = np.random.choice(len(data['f']), n_pnts, replace=True, p=distribution)
random_face_ids, sample_counts = np.unique(random_face_ids, return_counts=True)
for face_id, sample_count in zip(random_face_ids, sample_counts):
face = data['f'][face_id]
vid_in_face = [int(item.split('/')[0]) for item in face]
weights = np.diff(np.sort(np.vstack(
[np.zeros((1, sample_count)), np.random.uniform(0, 1, size=(len(vid_in_face) - 1, sample_count)),
np.ones((1, sample_count))]), axis=0), axis=0)
new_pnt = all_pnts[np.array(vid_in_face) - 1].T.dot(weights)
if 'vn' in flags:
nid_in_face = [int(item.split('/')[2]) for item in face]
new_normal = data['vn'][np.array(nid_in_face)-1].T.dot(weights)
new_pnt = np.hstack([new_pnt, new_normal])
new_pnts.append(new_pnt.T)
random_pnts = np.vstack(new_pnts)
else:
for face_idx, face in enumerate(data['f']):
vid_in_face = [int(item.split('/')[0]) for item in face]
n_pnts_on_face = distribution[face_idx] * n_pnts
if n_pnts_on_face < 1:
continue
dim = len(vid_in_face)
            npnts_dim = (math.factorial(dim - 1) * n_pnts_on_face) ** (1 / (dim - 1))
npnts_dim = int(npnts_dim)
weights = np.stack(np.meshgrid(*[np.linspace(0, 1, npnts_dim) for _ in range(dim - 1)]), 0)
weights = weights.reshape(dim - 1, -1)
last_column = 1 - weights.sum(0)
weights = np.vstack([weights, last_column])
weights = weights[:, last_column >= 0]
new_pnt = (all_pnts[np.array(vid_in_face) - 1].T.dot(weights)).T
if 'vn' in flags:
nid_in_face = [int(item.split('/')[2]) for item in face]
new_normal = data['vn'][np.array(nid_in_face) - 1].T.dot(weights)
new_pnt = np.hstack([new_pnt, new_normal])
new_pnts.append(new_pnt)
random_pnts = np.vstack(new_pnts)
return random_pnts
def normalize_to_unit_square(points, keep_ratio=True):
centre = (points.max(0) + points.min(0)) / 2.
point_shapenet = points - centre
if keep_ratio:
scale = point_shapenet.max()
else:
scale = point_shapenet.max(0)
point_shapenet = point_shapenet / scale
return point_shapenet, centre, scale
def read_obj(model_path, flags=('v',)):  # ('v') was a plain string, not a one-element tuple
fid = open(model_path, 'r')
data = {}
for head in flags:
data[head] = []
for line in fid:
# line = line.strip().split(' ')
        line = re.split(r'\s+', line.strip())
if line[0] in flags:
data[line[0]].append(line[1:])
fid.close()
    if 'v' in data.keys():
        data['v'] = np.array(data['v']).astype(float)  # np.float was removed in numpy >= 1.24
    if 'vt' in data.keys():
        data['vt'] = np.array(data['vt']).astype(float)
    if 'vn' in data.keys():
        data['vn'] = np.array(data['vn']).astype(float)
return data
def write_obj(objfile, data):
with open(objfile, 'w+') as file:
for item in data['v']:
file.write('v' + ' %f' * len(item) % tuple(item) + '\n')
for item in data['f']:
file.write('f' + ' %s' * len(item) % tuple(item) + '\n')
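
# A short usage sketch (hypothetical paths; 'f' must be requested explicitly
# since faces are needed for sampling):
#
# data = read_obj('model.obj', flags=('v', 'f'))
# pnts = sample_pnts_from_obj(data, n_pnts=5000, mode='random')
# pnts, centre, scale = normalize_to_unit_square(pnts[:, :3])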
|
1621786
|
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, preprocessing
from cpca import CPCA
from ccpca import CCPCA
dataset = datasets.load_wine()
X = dataset.data
y = dataset.target
X = preprocessing.scale(X)
cpca = CPCA()
# manual alpha selection
# cpca.fit(fg=X[y == 0], bg=X[y != 0], alpha=2.15)
# auto alpha selection
cpca.fit(fg=X[y == 0], bg=X[y != 0])
X_r = cpca.transform(X)
plt.figure()
colors = ['navy', 'turquoise', 'darkorange']
lw = 2
for color, i, target_name in zip(colors, [0, 1, 2], [0, 1, 2]):
plt.scatter(
X_r[y == i, 0],
X_r[y == i, 1],
color=color,
alpha=.8,
lw=lw,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title(f'cPCA of Wine dataset (alpha={cpca.get_best_alpha()})')
plt.show()
ccpca = CCPCA()
# apply fit and transform separately
# ccpca.fit(X[y == 0], X[y != 0], var_thres_ratio=0.5, max_log_alpha=0.5)
# X_r2 = ccpca.transform(X)
# apply fit and transform at the same time
X_r2 = ccpca.fit_transform(
X[y == 0], X[y != 0], var_thres_ratio=0.5, max_log_alpha=0.5)
plt.figure()
for color, i, target_name in zip(colors, [0, 1, 2], [0, 1, 2]):
plt.scatter(
X_r2[y == i, 0],
X_r2[y == i, 1],
color=color,
alpha=.8,
lw=lw,
label=target_name)
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title(f'ccPCA of Wine dataset (alpha={ccpca.get_best_alpha()})')
plt.show()
|
1621804
|
import torch
import torch.nn as nn
from torch.nn import init
class SupervisedGraphSage(nn.Module):
def __init__(self, num_classes, enc):
super(SupervisedGraphSage, self).__init__()
self.enc = enc
self.xent = nn.CrossEntropyLoss()
self.weight = nn.Parameter(torch.FloatTensor(num_classes, enc.embed_dim))
        init.xavier_uniform_(self.weight)  # in-place variant; plain xavier_uniform is deprecated
def forward(self, nodes, full_nodes):
embeds = self.enc(nodes, full_nodes)
scores = self.weight.mm(embeds)
return scores.t()
def loss(self, nodes, full_nodes, labels):
scores = self.forward(nodes, full_nodes)
return self.xent(scores, labels.squeeze())
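
# A minimal training sketch (hypothetical 'enc' -- any encoder exposing
# embed_dim and returning [embed_dim, num_nodes] embeddings, per mm/t() above):
#
# graphsage = SupervisedGraphSage(num_classes=7, enc=enc)
# optimizer = torch.optim.SGD(graphsage.parameters(), lr=0.7)
# optimizer.zero_grad()
# loss = graphsage.loss(batch_nodes, full_nodes, labels)
# loss.backward()
# optimizer.step()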
|
1621826
|
from typing import Dict, Tuple, Union
from tensorflow.keras.layers import Concatenate, Dense, Dropout, GlobalAveragePooling1D, Input
from tensorflow.keras.models import Model
from tensorflow.python.keras.losses import Loss
from tensorflow.python.keras.optimizers import Optimizer
from marketml.models.time import Time2Vector
from marketml.models.transformer import TransformerEncoder
class PricePredictor:
"""Predict future stock prices with time series/sequence models."""
def __init__(
self,
sequence_shape: Tuple[int, int],
):
self.sequence_length, self.sequence_width = sequence_shape
self.model = None
def build_transformer(self, transformer: Dict, compile: bool = True):
"""Build a transformer model according to provided parameters."""
time_embedding = Time2Vector(self.sequence_length)
num_attention_layers = transformer["num_attention_layers"]
num_attention_heads = transformer["num_attention_heads"]
attention_key_size, attention_value_size, attention_dense_size = transformer[
"attention_sizes"
]
output_dense_sizes = transformer["output_dense_sizes"]
dropout = transformer["dropout"]
attention_layers = [
TransformerEncoder(
attention_key_size, attention_value_size, num_attention_heads, attention_dense_size
)
for _ in range(num_attention_layers)
]
input_sequence = Input(shape=(self.sequence_length, self.sequence_width))
x = time_embedding(input_sequence)
x = Concatenate(axis=-1)([input_sequence, x])
for idx in range(num_attention_layers):
x = attention_layers[idx]((x, x, x))
x = GlobalAveragePooling1D(data_format="channels_first")(x)
        for layer_size in output_dense_sizes:  # iterate over the sizes themselves, not their indices
            if dropout is not None:
                x = Dropout(dropout)(x)
            x = Dense(layer_size, activation="relu")(x)
if dropout is not None:
x = Dropout(dropout)(x)
y = Dense(1, activation="linear")(x)
model = Model(inputs=input_sequence, outputs=y)
if compile:
# TODO: Add optimizer, loss params
model = self.compile_model(model, None, None)
self.model = model
return model
@staticmethod
def compile_model(model: Model, optimizer, loss):
optimizer = "adam" if optimizer is None else optimizer
loss = "mse" if loss is None else loss
model.compile(loss=loss, optimizer=optimizer, metrics=["mae", "mape"])
return model
def build(self):
pass
def train(self):
pass
def predict(self):
pass
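
# A brief configuration sketch (hypothetical values; the keys match the dict
# accesses in build_transformer above):
#
# predictor = PricePredictor(sequence_shape=(128, 5))
# model = predictor.build_transformer({
#     "num_attention_layers": 3,
#     "num_attention_heads": 12,
#     "attention_sizes": (256, 256, 256),   # key, value, dense
#     "output_dense_sizes": (64, 32),
#     "dropout": 0.1,
# })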
|
1621872
|
from .header import *
from .test import TestAgent
'''
Pre-training and fine-tuning the GPT2.
In the pre-training stage: GPT2 model + lm_head
In the fine-tuning stage: GPT2 model + Transformer_matrix + lm_head
'''
class PFGPT2(nn.Module):
def __init__(self, vocab_size, unk_id, sep_id, topk, topp,
config_path='data/config/model_config_dialogue_small.json'):
super(PFGPT2, self).__init__()
self.model_config = GPT2Config.from_json_file(config_path)
self.model = GPT2LMHeadModel(config=self.model_config)
self.model.resize_token_embeddings(vocab_size)
self.n_ctx = self.model.config.to_dict().get('n_ctx')
self.topk, self.topp = topk, topp
self.unk_id = unk_id
self.sep_id = sep_id
def forward(self, inpt_ids):
# inpt_ids: [batch, seq]
attn_mask = generate_attention_mask(inpt_ids)
outputs = self.model(
input_ids=inpt_ids,
attention_mask=attn_mask)
output = outputs[0] # [batch, seq, vocab]
return output
def predict(self, inpt_ids, max_len):
'''
batch_size is 1
inpt_ids: [seq]
return a list of ids (generated)
no pad, do not need attention_mask
'''
with torch.no_grad():
generated = []
for _ in range(max_len):
outputs = self.model(input_ids=inpt_ids)
next_token_logits = outputs[0][-1, :] # [vocab]
# ignore the [UNK] token
next_token_logits[self.unk_id] = -np.inf
filtered_logits = top_k_top_p_filtering(
next_token_logits,
top_k=self.topk,
top_p=self.topp)
next_token = torch.multinomial(
F.softmax(filtered_logits, dim=-1),
num_samples=1)
if next_token == self.sep_id:
break
generated.append(next_token.item())
inpt_ids = torch.cat((inpt_ids, next_token), dim=0)
# remember to cut off
inpt_ids = inpt_ids[-self.n_ctx:]
return generated
@torch.no_grad()
def predict_batch(self, inpt_ids, max_len):
'''
inpt_ids: [batch, seq]
'''
        # inpt_ids is already [batch, seq]; generate token-by-token with the cached past
generated = []
prev, past = inpt_ids, None
for _ in range(max_len):
outputs = self.model(input_ids=prev, past=past) # [batch, seq, vocab]
output, past = outputs[:2]
next_token_logits = output[:, -1, :] # [batch, vocab]
next_token_logits[:, self.unk_id] = -np.inf
filtered_logits = top_k_top_p_filtering_batch(
next_token_logits,
top_k=self.topk,
top_p=self.topp)
next_token = torch.multinomial(
F.softmax(filtered_logits, dim=-1),
num_samples=1) # [batch, 1]
generated.append([token.item() for token in next_token.squeeze(1)])
prev = next_token
# transpose
ng, batch_size = [], len(generated[0])
for i in range(batch_size):
ng.append([g[i] for g in generated])
return ng
class PFGPT2Agent(BaseAgent):
    '''
    run_mode:
    1. train: directly fine-tune the LM model
    2. train_trs: fine-tune with the semantic transformer matrix
    '''
def __init__(self, total_steps, multi_gpu, vocab_file='data/vocab/vocab_small', run_mode='train', lang='zh'):
super(PFGPT2Agent, self).__init__()
# hyperparameters
try:
# self.gpu_ids = [int(i) for i in multi_gpu.split(',')]
self.gpu_ids = list(range(len(multi_gpu.split(','))))
except:
raise Exception(f'[!] multi gpu ids are needed, but got: {multi_gpu}')
        assert run_mode in ['train', 'test', 'train_trs', 'rerank', 'rerank_ir'], f'[!] run_mode must be one of train/test/train_trs/rerank/rerank_ir, but got {run_mode}'
vocab_file = 'data/vocab/vocab_small' if lang == 'zh' else 'data/vocab/vocab_english'
self.args = {
'lr': 1e-5,
'grad_clip': 1.0,
'pad': 0,
'tgt_len_size': 50,
'lr_gamma': 0.5,
'patience': 5,
'min_lr': 1e-5,
'warmup_steps': 2000,
'total_steps': total_steps,
'topk': 20,
'topp': 1.0,
'config_path': 'data/config/model_lm_small.json',
'pretrain_model': 'ckpt/LM/gpt2lm/best.pt',
'multi_gpu': self.gpu_ids,
'run_mode': run_mode,
'vocab_file': vocab_file,
'lang': lang,
'topic_transfer': {'音乐': 'music', '体育': 'sport', '数码电子': 'electric', '美食': 'food', '电影': 'movie'}
}
# hyperparameters
self.vocab = BertTokenizer(vocab_file=self.args['vocab_file'])
self.vocab_size = len(self.vocab)
self.unk = self.vocab.convert_tokens_to_ids('[UNK]')
self.sep = self.vocab.convert_tokens_to_ids('[SEP]')
self.cls = self.vocab.convert_tokens_to_ids('[CLS]')
self.model = PFGPT2(
self.vocab_size,
self.unk,
self.sep,
self.args['topk'],
self.args['topp'],
config_path=self.args['config_path']
)
self.criterion = nn.CrossEntropyLoss(ignore_index=self.args['pad'], reduction='sum')
self.optimizer = transformers.AdamW(
self.model.parameters(),
lr=self.args['lr'],
correct_bias=True)
# need to obtain the whole iter
self.warmup_scheduler = transformers.get_linear_schedule_with_warmup(
self.optimizer,
num_warmup_steps=self.args['warmup_steps'],
num_training_steps=self.args['total_steps'])
if torch.cuda.is_available():
self.model.cuda()
# train: DataParallel; test: no DataParallel
if self.args['run_mode'] == 'train':
self.model = DataParallel(self.model, device_ids=self.gpu_ids)
# run_mode == 'chatbot', use the bertretrieval for reranking
if run_mode in ['rerank', 'rerank_ir']:
from multiview import MultiView
            print('[!] MultiView reranker model will be initialized')
self.reranker = MultiView(
topic=True,
logic=False,
nli=False,
coherence=True,
fluency=False,
coherence_path='ckpt/train_retrieval/bertretrieval/best.pt',
topic_path='ckpt/fasttext/model.bin',
)
print(f'[!] load multiview model over')
if run_mode == 'rerank_ir':
self.ir_agent = TestAgent()
self.show_parameters(self.args)
# load the LM pretrain model
self.load_model(self.args['pretrain_model'])
def train_model_transformer(self, train_iter, mode='train'):
pass
def train_model(self, train_iter, mode='train'):
if self.args['run_mode'] == 'train':
self.train_model_directly(train_iter)
elif self.args['run_mode'] == 'train_trs':
self.train_model_transformer(train_iter)
else:
            raise Exception(f'[!] unknown training mode: {self.args["run_mode"]}')
def train_model_directly(self, train_iter, mode='train'):
self.model.train()
total_loss, total_acc, batch_num = 0, [], 0
pbar = tqdm(train_iter)
for idx, batch in enumerate(pbar):
cid = batch
self.optimizer.zero_grad()
logits = self.model(cid) # [batch, seq, vocab]
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = cid[..., 1:].contiguous()
loss = self.criterion(
shift_logits.view(-1, shift_logits.size(-1)),
shift_labels.view(-1))
_, preds = shift_logits.max(dim=-1) # [batch, seq]
# ignore the pad
not_ignore = shift_labels.ne(self.args['pad']) # pad is 0 or 1
num_targets = not_ignore.long().sum().item() # the number of not pad tokens
correct = (shift_labels == preds) & not_ignore
correct = correct.float().sum()
# loss and token accuracy
accuracy = correct / num_targets
total_acc.append(accuracy)
loss = loss / num_targets
if mode == 'train':
loss.backward()
clip_grad_norm_(self.model.parameters(), self.args['grad_clip'])
self.optimizer.step()
self.warmup_scheduler.step()
total_loss += loss.item()
batch_num += 1
pbar.set_description(f'[!] lr: {self.args["lr"]}, train loss: {round(loss.item(), 4)}, token acc: {round(accuracy.item(), 4)}')
return round(total_loss/batch_num, 4)
def test_model(self, test_iter, path):
'''
Generate the test dataset and measure the performance
'''
def filter(x):
return x.replace('[PAD]', '')
self.model.eval()
pbar = tqdm(test_iter)
with open(path, 'w') as f:
for batch in pbar:
c, r = batch
max_size = max(len(r), self.args['tgt_len_size'])
tgt = self.model.predict(c, max_size)
text = self.vocab.convert_ids_to_tokens(tgt)
tgt = ''.join(text)
ctx = self.vocab.convert_ids_to_tokens(c)
ctx = filter(''.join(ctx))
ref = self.vocab.convert_ids_to_tokens(r)
ref = filter(''.join(ref))
f.write(f'CTX: {ctx}\n')
f.write(f'REF: {ref}\n')
f.write(f'TGT: {tgt}\n\n')
print(f'[!] translate test dataset over, write into {path}')
# measure the performance
(b1, b2, b3, b4), ((r_max_l, r_min_l, r_avg_l), (c_max_l, c_min_l, c_avg_l)), (dist1, dist2, rdist1, rdist2), (average, extrema, greedy) = cal_generative_metric(path, lang=self.args['lang'])
print(f'[TEST] BLEU: {b1}/{b2}/{b3}/{b4}; Length(max, min, avg): {c_max_l}/{c_min_l}/{c_avg_l}|{r_max_l}/{r_min_l}/{r_avg_l}; Dist: {dist1}/{dist2}|{rdist1}/{rdist2}; Embedding(average/extrema/greedy): {average}/{extrema}/{greedy}')
@torch.no_grad()
def talk(self, topic, msgs, maxlen=30, batch_size=16):
'''
topic, msgs: msgs is a string which split with the [SEP] token
batch size is 1
'''
# tokenizer
if self.args['run_mode'] == 'test':
msgs = torch.LongTensor(self.vocab.encode(msgs))
msgs = to_cuda(msgs)
tgt = self.model.predict(msgs, maxlen)
tgt = self.vocab.convert_ids_to_tokens(tgt)
tgt = ''.join(tgt)
return tgt
elif self.args['run_mode'] in ['rerank', 'rerank_ir']:
# ========== predict_batch ==========
msgs_ = self.vocab.encode(msgs)
msgs_ = [deepcopy(msgs_) for _ in range(batch_size)]
msgs_ = torch.LongTensor(msgs_) # [batch, seq]
msgs_ = to_cuda(msgs_)
tgt = self.model.predict_batch(msgs_, maxlen)
tgt = [self.vocab.convert_ids_to_tokens(i) for i in tgt]
# cut from the first [SEP] token
n_tgt = []
for i in tgt:
if '[SEP]' in i:
i = i[:i.index('[SEP]')]
n_tgt.append(''.join(i))
# multiview scores
# rerank_ir also use the fast retrieval model
if self.args['run_mode'] == 'rerank_ir':
retrieval_rest = self.ir_agent.model.search(topic, msgs, samples=batch_size)
retrieval_rest = [i['response'] for i in retrieval_rest]
n_tgt.extend(retrieval_rest)
contexts = [msgs] * len(n_tgt)
topic = [self.args['topic_transfer'][topic]] * len(n_tgt)
scores = self.reranker(contexts, n_tgt, topic=topic)[0]
index = np.argmax(scores)
response = n_tgt[index]
return response
else:
raise Exception(f'[!] error in gpt2 model `talk` function')
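# For reference: `top_k_top_p_filtering` used above comes from `.header`
# (torch, F and np are assumed to be re-exported by it, as elsewhere in this
# file). The sketch below is deliberately renamed so it does not shadow the
# import; it shows the standard top-k / nucleus filtering such a helper is
# commonly assumed to implement for a single [vocab]-shaped logits tensor.
def _top_k_top_p_filtering_sketch(logits, top_k=0, top_p=1.0, filter_value=-float('inf')):
    if top_k > 0:
        # keep only the k highest-scoring tokens
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
        logits[indices_to_remove] = filter_value
    if top_p < 1.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        # drop tokens once the cumulative probability exceeds top_p,
        # always keeping at least the single most likely token
        sorted_indices_to_remove = cumulative_probs > top_p
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0
        indices_to_remove = sorted_indices[sorted_indices_to_remove]
        logits[indices_to_remove] = filter_value
    return logits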
|
1621917
|
from bs4 import BeautifulSoup
import re
import mechanize
def infibeam(isbn):
i_link = "http://www.infibeam.com/Books/search?q="+isbn
br = mechanize.Browser()
#br.set_all_readonly(False) # allow everything to be written to
br.set_handle_robots(False) # ignore robots
br.set_handle_refresh(False) # can sometimes hang without this
br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1')] # [('User-agent', 'Firefox')]
try:
response = br.open(i_link)
    except Exception:
        return {'price': 'NA', 'url': i_link}
    i_soup = BeautifulSoup(response.read(), 'html.parser')
    i_class = i_soup.findAll('span', class_='final-price')
    if not i_class:
        return {'price': 'NA', 'url': i_link}
    try:
        i_price = re.findall(r'</span> (.*?)</span>', str(i_class))[0]
    except Exception:
        return {'price': 'NA', 'url': i_link}
    return {'price': i_price, 'url': i_link}
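if __name__ == '__main__':
    # illustrative only; the ISBN below is arbitrary
    result = infibeam('9780131103627')
    print('price: {price}, url: {url}'.format(**result))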
|
1621961
|
import glob
import json
import os
import sys
from copy import deepcopy
from distutils.version import LooseVersion
from typing import Optional, Union
import click
import networkx as nx
from requests import RequestException
from demisto_sdk.commands.common import constants
from demisto_sdk.commands.common.constants import GENERIC_COMMANDS_NAMES
from demisto_sdk.commands.common.tools import (get_content_id_set,
is_external_repository,
print_error, print_warning)
from demisto_sdk.commands.common.update_id_set import merge_id_sets
from demisto_sdk.commands.create_id_set.create_id_set import IDSetCreator
MINIMUM_DEPENDENCY_VERSION = LooseVersion('6.0.0')
COMMON_TYPES_PACK = 'CommonTypes'
def parse_for_pack_metadata(dependency_graph: nx.DiGraph, graph_root: str, verbose: bool = False,
complete_data: bool = False, id_set_data=None) -> tuple:
"""
    Parses the calculated dependency graph and returns the parsed first-level dependencies,
    plus the list of all-level dependent pack ids (used for displaying the packs' images).
Args:
dependency_graph (DiGraph): dependency direct graph.
graph_root (str): graph root pack id.
verbose(bool): Whether to print the log to the console.
complete_data (bool): whether to update complete data on the dependent packs.
id_set_data (dict): id set data.
Returns:
dict: first level dependencies parsed data.
list: all level pack dependencies ids (is used for displaying dependencies images).
"""
if id_set_data is None:
id_set_data = {}
first_level_dependencies = {}
parsed_dependency_graph = [(k, v) for k, v in dependency_graph.nodes(data=True) if
dependency_graph.has_edge(graph_root, k)]
for dependency_id, additional_data in parsed_dependency_graph:
pack_name = find_pack_display_name(dependency_id)
if not complete_data:
additional_data['display_name'] = pack_name
else:
dependency_data = id_set_data.get('Packs', {}).get(dependency_id)
if dependency_data:
additional_data['name'] = dependency_data['name']
additional_data['author'] = dependency_data['author']
additional_data['minVersion'] = dependency_data['current_version']
additional_data['certification'] = dependency_data['certification']
else:
additional_data['display_name'] = pack_name
first_level_dependencies[dependency_id] = additional_data
all_level_dependencies = [n for n in dependency_graph.nodes if dependency_graph.in_degree(n) > 0]
if verbose:
click.secho(f'All level dependencies are: {all_level_dependencies}', fg='white')
return first_level_dependencies, all_level_dependencies
def find_pack_path(pack_folder_name: str) -> list:
"""
Find pack path matching from content repo root directory.
Args:
pack_folder_name (str): pack folder name.
Returns:
list: pack metadata json path.
"""
pack_metadata_path = os.path.join(constants.PACKS_DIR, pack_folder_name, constants.PACKS_PACK_META_FILE_NAME)
found_path_results = glob.glob(pack_metadata_path)
return found_path_results
def find_pack_display_name(pack_folder_name: str) -> str:
"""
Returns pack display name from pack_metadata.json file.
Args:
pack_folder_name (str): pack folder name.
Returns:
        str: pack display name from the pack metadata.
"""
found_path_results = find_pack_path(pack_folder_name)
if not found_path_results:
return pack_folder_name
pack_metadata_path = found_path_results[0]
with open(pack_metadata_path, 'r') as pack_metadata_file:
pack_metadata = json.load(pack_metadata_file)
pack_display_name = pack_metadata.get('name') if pack_metadata.get('name') else pack_folder_name
return pack_display_name
def update_pack_metadata_with_dependencies(pack_folder_name: str, first_level_dependencies: dict) -> None:
"""
Updates pack metadata with found parsed dependencies results.
Args:
pack_folder_name (str): pack folder name.
first_level_dependencies (dict): first level dependencies data.
"""
found_path_results = find_pack_path(pack_folder_name)
if not found_path_results:
print_error(f"{pack_folder_name} {constants.PACKS_PACK_META_FILE_NAME} was not found")
sys.exit(1)
pack_metadata_path = found_path_results[0]
with open(pack_metadata_path, 'r+') as pack_metadata_file:
pack_metadata = json.load(pack_metadata_file)
pack_metadata = {} if not isinstance(pack_metadata, dict) else pack_metadata
pack_metadata['dependencies'] = first_level_dependencies
pack_metadata['displayedImages'] = list(first_level_dependencies.keys())
pack_metadata_file.seek(0)
json.dump(pack_metadata, pack_metadata_file, indent=4)
pack_metadata_file.truncate()
def get_merged_official_and_local_id_set(local_id_set: dict, silent_mode: bool = False) -> dict:
"""Merging local idset with content id_set
Args:
local_id_set: The local ID set (when running in a local repo)
silent_mode: When True, will not print logs. False will print logs.
Returns:
A unified id_set from local and official content
"""
try:
official_id_set = get_content_id_set()
except RequestException as exception:
raise RequestException(
f'Could not download official content from {constants.OFFICIAL_CONTENT_ID_SET_PATH}\n'
f'Stopping execution.'
) from exception
unified_id_set, duplicates = merge_id_sets(
official_id_set,
local_id_set,
print_logs=not silent_mode
)
return unified_id_set.get_dict()
class PackDependencies:
"""
Pack dependencies calculation class with relevant static methods.
"""
@staticmethod
def _search_for_pack_items(pack_id: str, items_list: list) -> list:
"""
Filtering of content items that belong to specific pack.
Args:
pack_id (str): pack id.
items_list (list): specific section of id set.
Returns:
list: collection of content pack items.
"""
return list(filter(lambda s: next(iter(s.values())).get('pack') == pack_id, items_list))
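    # Illustrative example (hypothetical id_set entries):
    #   items = [{'script-a': {'name': 'ScriptA', 'pack': 'PackA'}},
    #            {'script-b': {'name': 'ScriptB', 'pack': 'PackB'}}]
    #   PackDependencies._search_for_pack_items('PackA', items)
    #   -> [{'script-a': {'name': 'ScriptA', 'pack': 'PackA'}}]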
@staticmethod
def _search_packs_by_items_names(items_names: Union[str, list],
items_list: list,
exclude_ignored_dependencies: bool = True) -> set:
"""
        Searches for the packs implementing the given scripts/integrations/playbooks.
Args:
items_names (str or list): items names to search.
items_list (list): specific section of id set.
exclude_ignored_dependencies (bool): Determines whether to include unsupported dependencies or not.
Returns:
set: found pack ids.
"""
if not isinstance(items_names, list):
items_names = [items_names]
pack_names = set()
for item in items_list:
item_details = list(item.values())[0]
if item_details.get('name', '') in items_names and 'pack' in item_details and \
LooseVersion(item_details.get('toversion', '99.99.99')) >= MINIMUM_DEPENDENCY_VERSION:
pack_names.add(item_details.get('pack'))
if not exclude_ignored_dependencies:
return set(pack_names)
return {p for p in pack_names if p not in constants.IGNORED_DEPENDENCY_CALCULATION}
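    # Illustrative example (hypothetical id_set entries; an item matches when its
    # 'name' is searched, it has a 'pack', and its toversion is >= 6.0.0):
    #   items = [{'script-a': {'name': 'ScriptA', 'pack': 'PackA'}}]
    #   PackDependencies._search_packs_by_items_names('ScriptA', items) -> {'PackA'}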
@staticmethod
def _search_packs_by_items_names_or_ids(items_names: Union[str, list],
items_list: list,
exclude_ignored_dependencies: bool = True,
incident_or_indicator: Optional[str] = 'Both') -> set:
"""
        Searches for the packs implementing the given items.
Args:
items_names (str or list): items names to search.
items_list (list): specific section of id set.
exclude_ignored_dependencies (bool): Determines whether to include unsupported dependencies or not.
incident_or_indicator (str):
'Indicator' to search packs with indicator fields,
'Incident' to search packs with incident fields,
'Both' to search packs with indicator fields and incident fields.
Returns:
set: found pack ids.
"""
packs = set()
if not isinstance(items_names, list):
items_names = [items_names]
for item_name in items_names:
if incident_or_indicator == 'Incident':
item_possible_ids = [item_name, f'incident_{item_name}', f'{item_name}-mapper']
elif incident_or_indicator == 'Indicator':
item_possible_ids = [item_name, f'indicator_{item_name}', f'{item_name}-mapper']
elif incident_or_indicator == 'Generic':
item_possible_ids = [item_name, f'generic_{item_name}', f'{item_name}-mapper']
            else:  # 'Both' (default); also guards against unexpected values
item_possible_ids = [item_name, f'incident_{item_name}', f'indicator_{item_name}',
f'{item_name}-mapper']
for item_from_id_set in items_list:
machine_name = list(item_from_id_set.keys())[0]
item_details = list(item_from_id_set.values())[0]
if (machine_name in item_possible_ids or item_name == item_details.get('name')) \
and item_details.get('pack') \
and LooseVersion(item_details.get('toversion', '99.99.99')) >= MINIMUM_DEPENDENCY_VERSION \
and (item_details['pack'] not in constants.IGNORED_DEPENDENCY_CALCULATION or
not exclude_ignored_dependencies):
packs.add(item_details.get('pack'))
return packs
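    # For example, searching for item_name 'City' with incident_or_indicator='Incident'
    # matches any id_set entry whose machine name is one of
    # ['City', 'incident_City', 'City-mapper'] or whose display name is 'City'.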
@staticmethod
def _search_packs_by_integration_command(command: str,
id_set: dict,
exclude_ignored_dependencies: bool = True) -> set:
"""
Filters packs by implementing integration commands.
Args:
command (str): integration command.
id_set (dict): id set json.
exclude_ignored_dependencies (bool): Determines whether to include unsupported dependencies or not.
Returns:
set: pack id without ignored packs.
"""
pack_names = set()
for item in id_set['integrations']:
item_details = list(item.values())[0]
if command in item_details.get('commands', []) and 'pack' in item_details and \
LooseVersion(item_details.get('toversion', '99.99.99')) >= MINIMUM_DEPENDENCY_VERSION:
pack_names.add(item_details.get('pack'))
if not exclude_ignored_dependencies:
return set(pack_names)
return {p for p in pack_names if p not in constants.IGNORED_DEPENDENCY_CALCULATION}
@staticmethod
def _detect_generic_commands_dependencies(pack_ids: set) -> list:
"""
        Detects whether a dependency is mandatory or not. In case more than one pack
        implements the same command, mandatory is set to False.
        Args:
            pack_ids (set): pack ids list.
        Returns:
            list: collection of (pack id, mandatory) pairs; mandatory is False when more than one pack was found.
"""
return [(p, False) if len(pack_ids) > 1 else (p, True) for p in pack_ids]
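    # e.g. (order of the resulting list is not guaranteed):
    #   _detect_generic_commands_dependencies({'PackA'}) -> [('PackA', True)]
    #   _detect_generic_commands_dependencies({'PackA', 'PackB'}) -> [('PackA', False), ('PackB', False)]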
@staticmethod
def _label_as_mandatory(pack_ids: set) -> list:
"""
Sets pack as mandatory.
Args:
pack_ids (set): collection of pack ids to set as mandatory.
Returns:
            list: collection of (pack id, mandatory flag) pairs.
"""
return [(p, True) for p in pack_ids]
@staticmethod
def _label_as_optional(pack_ids: set) -> list:
"""
Sets pack as optional.
Args:
pack_ids (set): collection of pack ids to set as optional.
Returns:
            list: collection of (pack id, mandatory flag) pairs.
"""
return [(p, False) for p in pack_ids]
@staticmethod
def _update_optional_commontypes_pack_dependencies(packs_found_from_incident_fields_or_types: set) -> list:
"""
Updates pack_dependencies_data for optional dependencies, excluding the CommonTypes pack.
        The reason being that when releasing a new pack with, e.g., incident fields in the CommonTypes pack,
        only a mandatory dependency will force users to update it and get the necessary content entities.
Args:
packs_found_from_incident_fields_or_types (set): pack names found by a dependency to an incident field,
indicator field or an incident type.
Returns:
pack_dependencies_data (list): representing the dependencies.
"""
common_types_pack_dependency = False
if COMMON_TYPES_PACK in packs_found_from_incident_fields_or_types:
packs_found_from_incident_fields_or_types.remove(COMMON_TYPES_PACK)
common_types_pack_dependency = True
pack_dependencies_data = PackDependencies._label_as_optional(packs_found_from_incident_fields_or_types)
if common_types_pack_dependency:
pack_dependencies_data.extend(PackDependencies._label_as_mandatory({COMMON_TYPES_PACK}))
return pack_dependencies_data
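    # e.g. _update_optional_commontypes_pack_dependencies({'PackA', 'CommonTypes'})
    #   -> [('PackA', False), ('CommonTypes', True)]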
@staticmethod
def _collect_scripts_dependencies(pack_scripts: list,
id_set: dict,
verbose: bool,
exclude_ignored_dependencies: bool = True) -> set:
"""
Collects script pack dependencies.
Args:
pack_scripts (list): pack scripts collection.
id_set (dict): id set json.
verbose (bool): Whether to log the dependencies to the console.
exclude_ignored_dependencies (bool): Determines whether to include unsupported dependencies or not.
Returns:
set: dependencies data that includes pack id and whether is mandatory or not.
"""
dependencies_packs = set()
if verbose:
click.secho('### Scripts', fg='white')
for script_mapping in pack_scripts:
script = next(iter(script_mapping.values()))
script_dependencies = set()
# depends on list can have both scripts and integration commands
depends_on = script.get('depends_on', [])
command_to_integration = list(script.get('command_to_integration', {}).keys())
script_executions = script.get('script_executions', [])
all_dependencies_commands = list(set(depends_on + command_to_integration + script_executions))
dependencies_commands = list(filter(lambda cmd: cmd not in GENERIC_COMMANDS_NAMES,
all_dependencies_commands)) # filter out generic commands
for command in dependencies_commands:
# try to search dependency by scripts first
pack_name = PackDependencies._search_packs_by_items_names(command, id_set['scripts'],
exclude_ignored_dependencies)
if pack_name: # found script dependency implementing pack name
pack_dependencies_data = PackDependencies._label_as_mandatory(pack_name)
script_dependencies.update(pack_dependencies_data) # set found script as mandatory
continue # found dependency in script section, skipping to next depends on element
                # try to search dependency by integration command
pack_names = PackDependencies._search_packs_by_integration_command(
command, id_set, exclude_ignored_dependencies)
if pack_names: # found integration dependency implementing pack name
pack_dependencies_data = PackDependencies._detect_generic_commands_dependencies(pack_names)
script_dependencies.update(pack_dependencies_data)
if verbose:
click.secho(f'{os.path.basename(script.get("file_path", ""))} depends on: {script_dependencies}',
fg='white')
dependencies_packs.update(script_dependencies)
return dependencies_packs
@staticmethod
def _differentiate_playbook_implementing_objects(implementing_objects: list,
skippable_tasks: set,
id_set_section: list,
exclude_ignored_dependencies: bool = True) -> set:
"""
Differentiate implementing objects by skippable.
Args:
implementing_objects (list): playbook object collection.
skippable_tasks (set): playbook skippable tasks.
id_set_section (list): id set section corresponds to implementing_objects (scripts or playbooks).
exclude_ignored_dependencies (bool): Determines whether to include unsupported dependencies or not.
Returns:
set: dependencies data that includes pack id and whether is mandatory or not.
"""
dependencies = set()
mandatory_scripts = set(implementing_objects) - skippable_tasks
optional_scripts = set(implementing_objects) - mandatory_scripts
optional_script_packs = PackDependencies._search_packs_by_items_names(
list(optional_scripts), id_set_section, exclude_ignored_dependencies)
if optional_script_packs: # found packs of optional objects
pack_dependencies_data = PackDependencies._label_as_optional(optional_script_packs)
dependencies.update(pack_dependencies_data)
mandatory_script_packs = PackDependencies._search_packs_by_items_names(
list(mandatory_scripts), id_set_section, exclude_ignored_dependencies)
if mandatory_script_packs: # found packs of mandatory objects
pack_dependencies_data = PackDependencies._label_as_mandatory(mandatory_script_packs)
dependencies.update(pack_dependencies_data)
return dependencies
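    # e.g. with implementing_objects=['ScriptA', 'ScriptB'] and skippable_tasks={'ScriptB'},
    # packs implementing ScriptA are labeled mandatory and packs implementing ScriptB optional.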
@staticmethod
def _collect_playbooks_dependencies(pack_playbooks: list, id_set: dict, verbose: bool,
exclude_ignored_dependencies: bool = True) -> set:
"""
Collects playbook pack dependencies.
Args:
pack_playbooks (list): collection of pack playbooks data.
id_set (dict): id set json.
verbose (bool): Whether to log the dependencies to the console.
exclude_ignored_dependencies (bool): Determines whether to include unsupported dependencies or not.
Returns:
set: dependencies data that includes pack id and whether is mandatory or not.
"""
dependencies_packs = set()
if verbose:
click.secho('### Playbooks', fg='white')
for playbook in pack_playbooks:
playbook_data = next(iter(playbook.values()))
playbook_dependencies = set()
skippable_tasks = set(playbook_data.get('skippable_tasks', []))
# searching for packs of implementing integrations
implementing_commands_and_integrations = playbook_data.get('command_to_integration', {})
for command, integration_name in implementing_commands_and_integrations.items():
packs_found_from_integration = set()
if integration_name:
packs_found_from_integration = PackDependencies._search_packs_by_items_names(
integration_name, id_set['integrations'], exclude_ignored_dependencies)
                elif command not in GENERIC_COMMANDS_NAMES:  # do not collect dependencies on generic commands in playbooks
packs_found_from_integration = PackDependencies._search_packs_by_integration_command(
command, id_set, exclude_ignored_dependencies)
if packs_found_from_integration:
if command in skippable_tasks:
pack_dependencies_data = PackDependencies._label_as_optional(packs_found_from_integration)
else:
pack_dependencies_data = PackDependencies._detect_generic_commands_dependencies(
packs_found_from_integration)
playbook_dependencies.update(pack_dependencies_data)
implementing_scripts = playbook_data.get('implementing_scripts', []) + \
playbook_data.get('filters', []) + \
playbook_data.get('transformers', [])
# searching for packs of implementing scripts
playbook_dependencies.update(PackDependencies._differentiate_playbook_implementing_objects(
implementing_scripts,
skippable_tasks,
id_set['scripts'],
exclude_ignored_dependencies
))
# searching for packs of implementing playbooks
playbook_dependencies.update(PackDependencies._differentiate_playbook_implementing_objects(
playbook_data.get('implementing_playbooks', []),
skippable_tasks,
id_set['playbooks'],
exclude_ignored_dependencies
))
# ---- incident fields packs ----
# playbook dependencies from incident fields should be marked as optional unless CommonTypes pack,
# as customers do not have to use the OOTB inputs.
incident_fields = playbook_data.get('incident_fields', [])
packs_found_from_incident_fields = PackDependencies._search_packs_by_items_names_or_ids(
incident_fields, id_set['IncidentFields'], exclude_ignored_dependencies)
if packs_found_from_incident_fields:
pack_dependencies_data = PackDependencies._update_optional_commontypes_pack_dependencies(
packs_found_from_incident_fields)
playbook_dependencies.update(pack_dependencies_data)
# ---- indicator fields packs ----
# playbook dependencies from indicator fields should be marked as optional unless CommonTypes pack,
# as customers do not have to use the OOTB inputs.
indicator_fields = playbook_data.get('indicator_fields', [])
packs_found_from_indicator_fields = PackDependencies._search_packs_by_items_names_or_ids(
indicator_fields, id_set['IndicatorFields'], exclude_ignored_dependencies)
if packs_found_from_indicator_fields:
pack_dependencies_data = PackDependencies._update_optional_commontypes_pack_dependencies(
packs_found_from_indicator_fields)
playbook_dependencies.update(pack_dependencies_data)
if playbook_dependencies:
# do not trim spaces from the end of the string, they are required for the MD structure.
if verbose:
click.secho(
f'{os.path.basename(playbook_data.get("file_path", ""))} depends on: {playbook_dependencies}',
fg='white'
)
dependencies_packs.update(playbook_dependencies)
return dependencies_packs
@staticmethod
def _collect_layouts_dependencies(pack_layouts: list,
id_set: dict,
verbose: bool,
exclude_ignored_dependencies: bool = True) -> set:
"""
Collects layouts pack dependencies.
Args:
pack_layouts (list): collection of pack layouts data.
id_set (dict): id set json.
verbose (bool): Whether to log the dependencies to the console.
exclude_ignored_dependencies (bool): Determines whether to include unsupported dependencies or not.
Returns:
set: dependencies data that includes pack id and whether is mandatory or not.
"""
dependencies_packs = set()
if verbose:
click.secho('### Layouts', fg='white')
for layout in pack_layouts:
layout_data = next(iter(layout.values()))
layout_dependencies = set()
if layout_data.get('definitionId') and layout_data.get('definitionId') not in ['incident', 'indicator']:
layout_type = "Generic"
elif layout_data.get('group') == 'indicator' or layout_data.get('kind') == 'indicatorsDetails':
layout_type = 'Indicator'
else:
layout_type = 'Incident'
if layout_type in ["Incident", "Indicator"]:
related_types = layout_data.get('incident_and_indicator_types', [])
packs_found_from_incident_indicator_types = PackDependencies._search_packs_by_items_names(
related_types, id_set[f'{layout_type}Types'],
exclude_ignored_dependencies)
if packs_found_from_incident_indicator_types:
pack_dependencies_data = PackDependencies. \
_label_as_mandatory(packs_found_from_incident_indicator_types)
layout_dependencies.update(pack_dependencies_data)
related_fields = layout_data.get('incident_and_indicator_fields', [])
packs_found_from_incident_indicator_fields = PackDependencies._search_packs_by_items_names_or_ids(
related_fields, id_set[f'{layout_type}Fields'],
exclude_ignored_dependencies, layout_type)
if packs_found_from_incident_indicator_fields:
pack_dependencies_data = PackDependencies. \
_label_as_mandatory(packs_found_from_incident_indicator_fields)
layout_dependencies.update(pack_dependencies_data)
if layout_dependencies:
# do not trim spaces from the end of the string, they are required for the MD structure.
if verbose:
click.secho(
f'{os.path.basename(layout_data.get("file_path", ""))} depends on: {layout_dependencies}',
fg='white'
)
dependencies_packs.update(layout_dependencies)
return dependencies_packs
@staticmethod
def _collect_incidents_fields_dependencies(pack_incidents_fields: list, id_set: dict, verbose: bool,
exclude_ignored_dependencies: bool = True) -> set:
"""
        Collects incident fields dependencies.
Args:
pack_incidents_fields (list): collection of pack incidents fields data.
id_set (dict): id set json.
verbose (bool): Whether to log the dependencies to the console.
exclude_ignored_dependencies (bool): Determines whether to include unsupported dependencies or not.
Returns:
set: dependencies data that includes pack id and whether is mandatory or not.
"""
dependencies_packs = set()
if verbose:
click.secho('### Incident Fields', fg='white')
for incident_field in pack_incidents_fields:
incident_field_data = next(iter(incident_field.values()))
incident_field_dependencies = set()
            # If an incident field is used in a specific incident type, then the field does not depend on the type.
# e.g:
# 1. deviceid in CommonTypes pack is being used in the Zimperium pack.
# The CommonTypes pack is not dependent on the Zimperium Pack, but vice versa.
# 2. emailfrom in the Phishing pack is being used in the EWS pack.
# Phishing pack does not depend on EWS but vice versa.
# The opposite dependencies are calculated in: _collect_playbook_dependencies, _collect_mappers_dependencies
related_scripts = incident_field_data.get('scripts', [])
packs_found_from_scripts = PackDependencies._search_packs_by_items_names(
related_scripts, id_set['scripts'], exclude_ignored_dependencies)
if packs_found_from_scripts:
pack_dependencies_data = PackDependencies. \
_label_as_mandatory(packs_found_from_scripts)
incident_field_dependencies.update(pack_dependencies_data)
if incident_field_dependencies:
# do not trim spaces from the end of the string, they are required for the MD structure.
if verbose:
click.secho(
f'{os.path.basename(incident_field_data.get("file_path", ""))} '
f'depends on: {incident_field_dependencies}', fg='white')
dependencies_packs.update(incident_field_dependencies)
return dependencies_packs
@staticmethod
def _collect_indicators_types_dependencies(pack_indicators_types: list, id_set: dict, verbose: bool,
exclude_ignored_dependencies: bool = True) -> set:
"""
        Collects indicator types dependencies.
Args:
pack_indicators_types (list): collection of pack indicators types data.
id_set (dict): id set json.
verbose (bool): Whether to log the dependencies to the console.
exclude_ignored_dependencies (bool): Determines whether to include unsupported dependencies or not.
Returns:
set: dependencies data that includes pack id and whether is mandatory or not.
"""
dependencies_packs = set()
if verbose:
click.secho('### Indicator Types', fg='white')
for indicator_type in pack_indicators_types:
indicator_type_data = next(iter(indicator_type.values()))
indicator_type_dependencies = set()
#########################################################################################################
# Do not collect integrations implementing reputation commands to not clutter CommonTypes and other packs
            # that have an indicator type using e.g. the `ip` command with all the reputation integrations.
# this might be an issue if an indicator field is added to an indicator in Common Types
# but not in the pack that implements it.
#########################################################################################################
# related_integrations = indicator_type_data.get('integrations', [])
# packs_found_from_integrations = PackDependencies._search_packs_by_items_names(
# related_integrations, id_set['integrations'], exclude_ignored_dependencies)
#
# if packs_found_from_integrations:
# pack_dependencies_data = PackDependencies. \
# _label_as_optional(packs_found_from_integrations)
# indicator_type_dependencies.update(pack_dependencies_data)
related_scripts = indicator_type_data.get('scripts', [])
packs_found_from_scripts = PackDependencies._search_packs_by_items_names(
related_scripts, id_set['scripts'], exclude_ignored_dependencies)
if packs_found_from_scripts:
pack_dependencies_data = PackDependencies. \
_label_as_optional(packs_found_from_scripts)
indicator_type_dependencies.update(pack_dependencies_data)
if indicator_type_dependencies:
# do not trim spaces from the end of the string, they are required for the MD structure.
if verbose:
click.secho(
f'{os.path.basename(indicator_type_data.get("file_path", ""))} depends on: {indicator_type_dependencies}',
fg='white')
dependencies_packs.update(indicator_type_dependencies)
return dependencies_packs
@staticmethod
def _collect_integrations_dependencies(pack_integrations: list, id_set: dict, verbose: bool,
exclude_ignored_dependencies: bool = True) -> set:
"""
Collects integrations dependencies.
Args:
pack_integrations (list): collection of pack integrations data.
id_set (dict): id set json.
verbose (bool): Whether to log the dependencies to the console.
exclude_ignored_dependencies (bool): Determines whether to include unsupported dependencies or not.
Returns:
set: dependencies data that includes pack id and whether is mandatory or not.
"""
dependencies_packs = set()
if verbose:
click.secho('### Integrations', fg='white')
for integration in pack_integrations:
integration_data = next(iter(integration.values()))
integration_dependencies: set = set()
related_classifiers = integration_data.get('classifiers', [])
packs_found_from_classifiers = PackDependencies._search_packs_by_items_names_or_ids(
related_classifiers, id_set['Classifiers'], exclude_ignored_dependencies)
if packs_found_from_classifiers:
pack_dependencies_data = PackDependencies. \
_label_as_mandatory(packs_found_from_classifiers)
                # accumulate per-integration so the verbose log below reflects these dependencies
                integration_dependencies.update(pack_dependencies_data)
related_mappers = integration_data.get('mappers', [])
packs_found_from_mappers = PackDependencies._search_packs_by_items_names_or_ids(
related_mappers, id_set['Mappers'], exclude_ignored_dependencies)
if packs_found_from_mappers:
pack_dependencies_data = PackDependencies. \
_label_as_mandatory(packs_found_from_mappers)
                integration_dependencies.update(pack_dependencies_data)
related_incident_types = integration_data.get('incident_types', [])
packs_found_from_incident_types = PackDependencies._search_packs_by_items_names(
related_incident_types, id_set['IncidentTypes'], exclude_ignored_dependencies)
if packs_found_from_incident_types:
pack_dependencies_data = PackDependencies. \
_label_as_mandatory(packs_found_from_incident_types)
                integration_dependencies.update(pack_dependencies_data)
related_indicator_fields = integration_data.get('indicator_fields')
if related_indicator_fields:
pack_dependencies_data = PackDependencies. \
_label_as_mandatory({related_indicator_fields})
                integration_dependencies.update(pack_dependencies_data)
if integration_dependencies:
# do not trim spaces from the end of the string, they are required for the MD structure.
if verbose:
click.secho(
f'{os.path.basename(integration_data.get("file_path", ""))} depends on: {integration_dependencies}',
fg='white')
dependencies_packs.update(integration_dependencies)
return dependencies_packs
@staticmethod
def _collect_incidents_types_dependencies(pack_incidents_types: list, id_set: dict, verbose: bool,
exclude_ignored_dependencies: bool = True) -> set:
"""
        Collects incident types dependencies.
Args:
pack_incidents_types (list): collection of pack incidents types data.
id_set (dict): id set json.
verbose (bool): Whether to log the dependencies to the console.
exclude_ignored_dependencies (bool): Determines whether to include unsupported dependencies or not.
Returns:
set: dependencies data that includes pack id and whether is mandatory or not.
"""
dependencies_packs = set()
if verbose:
click.secho('### Incident Types', fg='white')
for incident_type in pack_incidents_types:
incident_type_data = next(iter(incident_type.values()))
incident_type_dependencies = set()
related_playbooks = incident_type_data.get('playbooks', [])
packs_found_from_playbooks = PackDependencies._search_packs_by_items_names(
related_playbooks, id_set['playbooks'], exclude_ignored_dependencies)
if packs_found_from_playbooks:
pack_dependencies_data = PackDependencies. \
_label_as_mandatory(packs_found_from_playbooks)
incident_type_dependencies.update(pack_dependencies_data)
related_scripts = incident_type_data.get('scripts', [])
packs_found_from_scripts = PackDependencies._search_packs_by_items_names(
related_scripts, id_set['scripts'], exclude_ignored_dependencies)
if packs_found_from_scripts:
pack_dependencies_data = PackDependencies. \
_label_as_mandatory(packs_found_from_scripts)
incident_type_dependencies.update(pack_dependencies_data)
if incident_type_dependencies:
# do not trim spaces from the end of the string, they are required for the MD structure.
if verbose:
click.secho(
f'{os.path.basename(incident_type_data.get("file_path", ""))} depends on: {incident_type_dependencies}',
fg='white')
dependencies_packs.update(incident_type_dependencies)
return dependencies_packs
@staticmethod
def _collect_classifiers_dependencies(pack_classifiers: list, id_set: dict, verbose: bool,
exclude_ignored_dependencies: bool = True) -> set:
"""
        Collects classifiers dependencies.
Args:
pack_classifiers (list): collection of pack classifiers data.
id_set (dict): id set json.
verbose (bool): Whether to log the dependencies to the console.
exclude_ignored_dependencies (bool): Determines whether to include unsupported dependencies or not.
Returns:
set: dependencies data that includes pack id and whether is mandatory or not.
"""
dependencies_packs = set()
if verbose:
click.secho('### Classifiers', fg='white')
for classifier in pack_classifiers:
classifier_data = next(iter(classifier.values()))
classifier_dependencies = set()
related_types = classifier_data.get('incident_types', [])
if classifier_data.get('definitionId') and classifier_data.get('definitionId') not in ['incident',
'indicator']:
packs_found_from_generic_types = PackDependencies._search_packs_by_items_names_or_ids(
related_types, id_set['GenericTypes'], exclude_ignored_dependencies, "Generic")
if packs_found_from_generic_types:
pack_dependencies_data = PackDependencies._label_as_mandatory(
packs_found_from_generic_types)
classifier_dependencies.update(pack_dependencies_data)
else:
packs_found_from_incident_types = PackDependencies._search_packs_by_items_names(
related_types, id_set['IncidentTypes'], exclude_ignored_dependencies)
# classifiers dependencies from incident types should be marked as optional unless CommonTypes pack,
# as customers do not have to use the OOTB mapping.
if packs_found_from_incident_types:
pack_dependencies_data = PackDependencies._update_optional_commontypes_pack_dependencies(
packs_found_from_incident_types)
classifier_dependencies.update(pack_dependencies_data)
# collect pack dependencies from transformers and filters
related_scripts = classifier_data.get('filters', []) + classifier_data.get('transformers', [])
packs_found_from_scripts = PackDependencies._search_packs_by_items_names_or_ids(
related_scripts, id_set['scripts'], exclude_ignored_dependencies)
if packs_found_from_scripts:
pack_dependencies_data = PackDependencies._label_as_mandatory(packs_found_from_scripts)
classifier_dependencies.update(pack_dependencies_data)
if classifier_dependencies:
# do not trim spaces from the end of the string, they are required for the MD structure.
if verbose:
click.secho(
f'{os.path.basename(classifier_data.get("file_path", ""))} depends on: {classifier_dependencies}',
fg='white')
dependencies_packs.update(classifier_dependencies)
return dependencies_packs
@staticmethod
def _collect_mappers_dependencies(pack_mappers: list, id_set: dict, verbose: bool,
exclude_ignored_dependencies: bool = True) -> set:
"""
        Collects mappers dependencies.
Args:
pack_mappers (list): collection of pack mappers data.
id_set (dict): id set json.
verbose (bool): Whether to log the dependencies to the console.
exclude_ignored_dependencies (bool): Determines whether to include unsupported dependencies or not.
Returns:
set: dependencies data that includes pack id and whether is mandatory or not.
"""
dependencies_packs = set()
if verbose:
click.secho('### Mappers', fg='white')
for mapper in pack_mappers:
mapper_data = next(iter(mapper.values()))
mapper_dependencies = set()
related_types = mapper_data.get('incident_types', [])
if mapper_data.get('definitionId') and mapper_data.get('definitionId') not in ['incident', 'indicator']:
packs_found_from_generic_types = PackDependencies._search_packs_by_items_names(
related_types, id_set['GenericTypes'], exclude_ignored_dependencies)
if packs_found_from_generic_types:
pack_dependencies_data = PackDependencies._label_as_mandatory(
packs_found_from_generic_types)
mapper_dependencies.update(pack_dependencies_data)
packs_found_from_generic_fields = PackDependencies._search_packs_by_items_names(
related_types, id_set['GenericFields'], exclude_ignored_dependencies)
if packs_found_from_generic_fields:
pack_dependencies_data = PackDependencies._label_as_mandatory(
packs_found_from_generic_fields)
mapper_dependencies.update(pack_dependencies_data)
else:
packs_found_from_incident_types = PackDependencies._search_packs_by_items_names(
related_types, id_set['IncidentTypes'], exclude_ignored_dependencies)
# mappers dependencies from incident types should be marked as optional unless CommonTypes Pack,
# as customers do not have to use the OOTB mapping.
if packs_found_from_incident_types:
pack_dependencies_data = PackDependencies._update_optional_commontypes_pack_dependencies(
packs_found_from_incident_types)
mapper_dependencies.update(pack_dependencies_data)
related_fields = mapper_data.get('incident_fields', [])
packs_found_from_incident_fields = PackDependencies._search_packs_by_items_names_or_ids(
related_fields, id_set['IncidentFields'], exclude_ignored_dependencies)
# mappers dependencies from incident fields should be marked as optional unless CommonTypes pack,
# as customers do not have to use the OOTB mapping.
if packs_found_from_incident_fields:
pack_dependencies_data = PackDependencies._update_optional_commontypes_pack_dependencies(
packs_found_from_incident_fields)
mapper_dependencies.update(pack_dependencies_data)
# collect pack dependencies from transformers and filters
related_scripts = mapper_data.get('filters', []) + mapper_data.get('transformers', [])
packs_found_from_scripts = PackDependencies._search_packs_by_items_names_or_ids(
related_scripts, id_set['scripts'], exclude_ignored_dependencies)
if packs_found_from_scripts:
pack_dependencies_data = PackDependencies._label_as_mandatory(packs_found_from_scripts)
mapper_dependencies.update(pack_dependencies_data)
if mapper_dependencies:
# do not trim spaces from the end of the string, they are required for the MD structure.
if verbose:
click.secho(
f'{os.path.basename(mapper_data.get("file_path", ""))} depends on: {mapper_dependencies}',
fg='white')
dependencies_packs.update(mapper_dependencies)
return dependencies_packs
@staticmethod
def _collect_widget_dependencies(pack_widgets: list, id_set: dict, verbose: bool,
exclude_ignored_dependencies: bool = True, header: str = "Widgets") -> set:
"""
Collects widget dependencies.
Args:
pack_widgets (list): collection of pack widget data.
id_set (dict): id set json.
verbose (bool): Whether to log the dependencies to the console.
            exclude_ignored_dependencies (bool): Determines whether to include unsupported dependencies or not.
            header (str): section title to print when verbose ('Widgets', 'Dashboards' or 'Reports').
Returns:
set: dependencies data that includes pack id and whether is mandatory or not.
"""
dependencies_packs = set()
if verbose:
click.secho(f'### {header}', fg='white')
for widget in pack_widgets:
widget_data = next(iter(widget.values()))
widget_dependencies = set()
related_scripts = widget_data.get('scripts', [])
packs_found_from_scripts = PackDependencies._search_packs_by_items_names(
related_scripts, id_set['scripts'], exclude_ignored_dependencies)
if packs_found_from_scripts:
pack_dependencies_data = PackDependencies. \
_label_as_mandatory(packs_found_from_scripts)
widget_dependencies.update(pack_dependencies_data)
if widget_dependencies:
# do not trim spaces from the end of the string, they are required for the MD structure.
if verbose:
click.secho(
f'{os.path.basename(widget_data.get("file_path", ""))} depends on: {widget_dependencies}',
fg='white')
dependencies_packs.update(widget_dependencies)
return dependencies_packs
@staticmethod
def _collect_generic_types_dependencies(pack_generic_types: list, id_set: dict, verbose: bool,
exclude_ignored_dependencies: bool = True) -> set:
"""
Collects generic types dependencies.
Args:
            pack_generic_types (list): collection of pack generic types data.
id_set (dict): id set json.
verbose (bool): Whether to log the dependencies to the console.
exclude_ignored_dependencies (bool): Determines whether to include unsupported dependencies or not.
Returns:
set: dependencies data that includes pack id and whether is mandatory or not.
"""
dependencies_packs = set()
if verbose:
click.secho('### Generic Types', fg='white')
for generic_type in pack_generic_types:
generic_type_data = next(iter(generic_type.values()))
generic_type_dependencies = set()
related_scripts = generic_type_data.get('scripts', [])
packs_found_from_scripts = PackDependencies._search_packs_by_items_names(
related_scripts, id_set['scripts'], exclude_ignored_dependencies)
if packs_found_from_scripts:
pack_dependencies_data = PackDependencies. \
_label_as_mandatory(packs_found_from_scripts)
generic_type_dependencies.update(pack_dependencies_data)
related_definitions = generic_type_data.get('definitionId')
packs_found_from_definitions = PackDependencies._search_packs_by_items_names_or_ids(
related_definitions, id_set['GenericDefinitions'], exclude_ignored_dependencies)
if packs_found_from_definitions:
pack_dependencies_data = PackDependencies. \
_label_as_mandatory(packs_found_from_definitions)
generic_type_dependencies.update(pack_dependencies_data)
related_layout = generic_type_data.get('layout')
packs_found_from_layout = PackDependencies._search_packs_by_items_names_or_ids(
related_layout, id_set['Layouts'], exclude_ignored_dependencies)
            if packs_found_from_layout:
pack_dependencies_data = PackDependencies. \
_label_as_mandatory(packs_found_from_layout)
generic_type_dependencies.update(pack_dependencies_data)
if generic_type_dependencies:
# do not trim spaces from the end of the string, they are required for the MD structure.
if verbose:
click.secho(
f'{os.path.basename(generic_type_data.get("file_path", ""))} depends on: {generic_type_dependencies}',
fg='white')
dependencies_packs.update(generic_type_dependencies)
return dependencies_packs
@staticmethod
def _collect_generic_fields_dependencies(pack_generic_fields: list, id_set: dict, verbose: bool,
exclude_ignored_dependencies: bool = True) -> set:
"""
        Collects generic fields dependencies.
Args:
            pack_generic_fields (list): collection of pack generic fields data.
id_set (dict): id set json.
verbose (bool): Whether to log the dependencies to the console.
exclude_ignored_dependencies (bool): Determines whether to include unsupported dependencies or not.
Returns:
set: dependencies data that includes pack id and whether is mandatory or not.
"""
dependencies_packs = set()
if verbose:
click.secho('### Generic Fields', fg='white')
for generic_field in pack_generic_fields:
generic_field_data = next(iter(generic_field.values()))
generic_field_dependencies = set()
related_scripts = generic_field_data.get('scripts', [])
packs_found_from_scripts = PackDependencies._search_packs_by_items_names(
related_scripts, id_set['scripts'], exclude_ignored_dependencies)
if packs_found_from_scripts:
pack_dependencies_data = PackDependencies. \
_label_as_mandatory(packs_found_from_scripts)
generic_field_dependencies.update(pack_dependencies_data)
related_definitions = generic_field_data.get('definitionId')
packs_found_from_definitions = PackDependencies._search_packs_by_items_names_or_ids(
related_definitions, id_set['GenericDefinitions'], exclude_ignored_dependencies)
if packs_found_from_definitions:
pack_dependencies_data = PackDependencies. \
_label_as_mandatory(packs_found_from_definitions)
generic_field_dependencies.update(pack_dependencies_data)
related_types = generic_field_data.get('generic_types')
packs_found_from_types = PackDependencies._search_packs_by_items_names_or_ids(
related_types, id_set['GenericTypes'], exclude_ignored_dependencies)
if packs_found_from_types:
pack_dependencies_data = PackDependencies. \
_label_as_mandatory(packs_found_from_types)
generic_field_dependencies.update(pack_dependencies_data)
if generic_field_dependencies:
# do not trim spaces from the end of the string, they are required for the MD structure.
if verbose:
click.secho(
f'{os.path.basename(generic_field_data.get("file_path", ""))} '
f'depends on: {generic_field_dependencies}', fg='white')
dependencies_packs.update(generic_field_dependencies)
return dependencies_packs
@staticmethod
def _collect_generic_modules_dependencies(pack_generic_modules: list, id_set: dict, verbose: bool,
exclude_ignored_dependencies: bool = True) -> set:
"""
        Collects generic modules dependencies.
        Args:
            pack_generic_modules (list): collection of pack generic modules data.
id_set (dict): id set json.
verbose (bool): Whether to log the dependencies to the console.
exclude_ignored_dependencies (bool): Determines whether to include unsupported dependencies or not.
Returns:
set: dependencies data that includes pack id and whether is mandatory or not.
"""
dependencies_packs = set()
if verbose:
click.secho('### Generic Modules', fg='white')
for generic_module in pack_generic_modules:
generic_module_data = next(iter(generic_module.values()))
generic_module_dependencies = set()
related_definitions = generic_module_data.get('definitionIds')
packs_found_from_definitions = PackDependencies._search_packs_by_items_names_or_ids(
related_definitions, id_set['GenericDefinitions'], exclude_ignored_dependencies)
if packs_found_from_definitions:
pack_dependencies_data = PackDependencies. \
_label_as_mandatory(packs_found_from_definitions)
generic_module_dependencies.update(pack_dependencies_data)
related_views = generic_module_data.get('views', {})
for view in related_views:
related_dashboards = related_views.get(view, {}).get('dashboards', [])
packs_found_from_dashboards = PackDependencies._search_packs_by_items_names_or_ids(
related_dashboards, id_set['Dashboards'], exclude_ignored_dependencies)
if packs_found_from_dashboards:
pack_dependencies_data = PackDependencies. \
_label_as_mandatory(packs_found_from_dashboards)
generic_module_dependencies.update(pack_dependencies_data)
if generic_module_dependencies:
# do not trim spaces from the end of the string, they are required for the MD structure.
if verbose:
click.secho(
f'{os.path.basename(generic_module_data.get("file_path", ""))} depends on: {generic_module_dependencies}',
fg='white')
dependencies_packs.update(generic_module_dependencies)
return dependencies_packs
@staticmethod
def _collect_pack_items(pack_id: str, id_set: dict) -> dict:
"""
        Collects the content items (scripts, playbooks, layouts, fields, etc.) inside a specific pack.
Args:
pack_id (str): pack id, currently pack folder name is in use.
id_set (dict): id set json.
Returns:
            dict: mapping from content type to the pack's items of that type.
"""
pack_items = dict()
pack_items['scripts'] = PackDependencies._search_for_pack_items(pack_id, id_set['scripts'])
pack_items['playbooks'] = PackDependencies._search_for_pack_items(pack_id, id_set['playbooks'])
pack_items['layouts'] = PackDependencies._search_for_pack_items(pack_id, id_set['Layouts'])
pack_items['incidents_fields'] = PackDependencies._search_for_pack_items(pack_id, id_set['IncidentFields'])
pack_items['indicators_fields'] = PackDependencies._search_for_pack_items(pack_id, id_set['IndicatorFields'])
pack_items['indicators_types'] = PackDependencies._search_for_pack_items(pack_id, id_set['IndicatorTypes'])
pack_items['integrations'] = PackDependencies._search_for_pack_items(pack_id, id_set['integrations'])
pack_items['incidents_types'] = PackDependencies._search_for_pack_items(pack_id, id_set['IncidentTypes'])
pack_items['classifiers'] = PackDependencies._search_for_pack_items(pack_id, id_set['Classifiers'])
pack_items['mappers'] = PackDependencies._search_for_pack_items(pack_id, id_set['Mappers'])
pack_items['widgets'] = PackDependencies._search_for_pack_items(pack_id, id_set['Widgets'])
pack_items['dashboards'] = PackDependencies._search_for_pack_items(pack_id, id_set['Dashboards'])
pack_items['reports'] = PackDependencies._search_for_pack_items(pack_id, id_set['Reports'])
pack_items['generic_types'] = PackDependencies._search_for_pack_items(pack_id, id_set['GenericTypes'])
pack_items['generic_fields'] = PackDependencies._search_for_pack_items(pack_id, id_set['GenericFields'])
pack_items['generic_modules'] = PackDependencies._search_for_pack_items(pack_id, id_set['GenericModules'])
pack_items['generic_definitions'] = PackDependencies._search_for_pack_items(pack_id,
id_set['GenericDefinitions'])
if not sum(pack_items.values(), []):
click.secho(f"Couldn't find any items for pack '{pack_id}'. Please make sure:\n"
f"1 - The spelling is correct.\n"
f"2 - The id_set.json file is up to date. Delete the file by running: `rm -rf "
f"Tests/id_set.json` and rerun the command.", fg='yellow')
return pack_items
@staticmethod
def _find_pack_dependencies(pack_id: str, id_set: dict, verbose: bool,
exclude_ignored_dependencies: bool = True) -> set:
"""
Searches for specific pack dependencies.
Args:
pack_id (str): pack id, currently pack folder name is in use.
id_set (dict): id set json.
verbose (bool): Whether to log the dependencies to the console.
exclude_ignored_dependencies (bool): Determines whether to include unsupported dependencies or not.
Returns:
set: dependencies data that includes pack id and whether is mandatory or not.
"""
if verbose:
click.secho(f'\n# Pack ID: {pack_id}', fg='white')
pack_items = PackDependencies._collect_pack_items(pack_id, id_set)
scripts_dependencies = PackDependencies._collect_scripts_dependencies(
pack_items['scripts'],
id_set,
verbose,
exclude_ignored_dependencies
)
playbooks_dependencies = PackDependencies._collect_playbooks_dependencies(
pack_items['playbooks'],
id_set,
verbose,
exclude_ignored_dependencies
)
layouts_dependencies = PackDependencies._collect_layouts_dependencies(
pack_items['layouts'],
id_set,
verbose,
exclude_ignored_dependencies
)
incidents_fields_dependencies = PackDependencies._collect_incidents_fields_dependencies(
pack_items['incidents_fields'],
id_set,
verbose,
exclude_ignored_dependencies
)
indicators_types_dependencies = PackDependencies._collect_indicators_types_dependencies(
pack_items['indicators_types'],
id_set,
verbose,
exclude_ignored_dependencies
)
integrations_dependencies = PackDependencies._collect_integrations_dependencies(
pack_items['integrations'],
id_set,
verbose,
exclude_ignored_dependencies
)
incidents_types_dependencies = PackDependencies._collect_incidents_types_dependencies(
pack_items['incidents_types'],
id_set,
verbose,
exclude_ignored_dependencies
)
classifiers_dependencies = PackDependencies._collect_classifiers_dependencies(
pack_items['classifiers'],
id_set,
verbose,
exclude_ignored_dependencies
)
mappers_dependencies = PackDependencies._collect_mappers_dependencies(
pack_items['mappers'],
id_set,
verbose,
exclude_ignored_dependencies
)
widget_dependencies = PackDependencies._collect_widget_dependencies(
pack_items['widgets'],
id_set,
verbose,
exclude_ignored_dependencies
)
dashboards_dependencies = PackDependencies._collect_widget_dependencies(
pack_items['dashboards'],
id_set,
verbose,
exclude_ignored_dependencies,
header='Dashboards'
)
reports_dependencies = PackDependencies._collect_widget_dependencies(
pack_items['reports'],
id_set,
verbose,
exclude_ignored_dependencies,
header='Reports'
)
generic_types_dependencies = PackDependencies._collect_generic_types_dependencies(
pack_items['generic_types'],
id_set,
verbose,
exclude_ignored_dependencies,
)
generic_fields_dependencies = PackDependencies._collect_generic_fields_dependencies(
pack_items['generic_fields'],
id_set,
verbose,
exclude_ignored_dependencies,
)
generic_modules_dependencies = PackDependencies._collect_generic_modules_dependencies(
pack_items['generic_modules'],
id_set,
verbose,
exclude_ignored_dependencies,
)
pack_dependencies = (
scripts_dependencies | playbooks_dependencies | layouts_dependencies | incidents_fields_dependencies |
indicators_types_dependencies | integrations_dependencies | incidents_types_dependencies |
classifiers_dependencies | mappers_dependencies | widget_dependencies | dashboards_dependencies |
reports_dependencies | generic_types_dependencies | generic_modules_dependencies | generic_fields_dependencies
)
return pack_dependencies
@staticmethod
def build_all_dependencies_graph(
pack_ids: list,
id_set: dict,
verbose: bool = False,
exclude_ignored_dependencies: bool = True
) -> nx.DiGraph:
"""
Builds all level of dependencies and returns dependency graph for all packs
Args:
pack_ids (list): pack ids, currently pack folder names is in use.
id_set (dict): id set json.
verbose (bool): Whether to log the dependencies to the console.
exclude_ignored_dependencies (bool): Determines whether to include unsupported dependencies or not.
Returns:
DiGraph: all dependencies of given packs.
"""
dependency_graph = nx.DiGraph()
for pack in pack_ids:
dependency_graph.add_node(pack, mandatory_for_packs=[])
for pack in pack_ids:
dependencies = PackDependencies._find_pack_dependencies(
pack, id_set, verbose=verbose, exclude_ignored_dependencies=exclude_ignored_dependencies)
for dependency_name, is_mandatory in dependencies:
if dependency_name == pack:
continue
if dependency_name not in dependency_graph:
dependency_graph.add_node(dependency_name, mandatory_for_packs=[])
dependency_graph.add_edge(pack, dependency_name)
if is_mandatory:
dependency_graph.nodes()[dependency_name]['mandatory_for_packs'].append(pack)
return dependency_graph
@staticmethod
def get_dependencies_subgraph_by_dfs(dependencies_graph: nx.DiGraph, source_pack: str) -> nx.DiGraph:
"""
Generates a copy of the graph using DFS that starts with source_pack as source
Args:
dependencies_graph (DiGraph): A graph that represents the dependencies of all packs
source_pack (str): The name of the pack that should be considered as source for the DFS algorithm
Returns:
DiGraph: The DFS sub graph with source_pack as source
"""
dfs_edges = list(nx.edge_dfs(dependencies_graph, source_pack))
subgraph_from_edges = dependencies_graph.edge_subgraph(dfs_edges)
# We need to copy the graph so that we can modify its content without modifying the original graph
return deepcopy(subgraph_from_edges)
@staticmethod
def build_dependency_graph(pack_id: str, id_set: dict, verbose: bool,
exclude_ignored_dependencies: bool = True) -> nx.DiGraph:
"""
Builds all level of dependencies and returns dependency graph.
Args:
pack_id (str): pack id, currently pack folder name is in use.
id_set (dict): id set json.
verbose (bool): Whether to log the dependencies to the console.
exclude_ignored_dependencies (bool): Determines whether to include unsupported dependencies or not.
Returns:
DiGraph: all level dependencies of given pack.
"""
graph = nx.DiGraph()
graph.add_node(pack_id) # add pack id as root of the direct graph
found_new_dependencies = True
while found_new_dependencies:
current_number_of_nodes = graph.number_of_nodes()
leaf_nodes = [n for n in graph.nodes() if graph.out_degree(n) == 0]
for leaf in leaf_nodes:
leaf_dependencies = PackDependencies._find_pack_dependencies(
leaf, id_set, verbose=verbose, exclude_ignored_dependencies=exclude_ignored_dependencies)
if leaf_dependencies:
for dependency_name, is_mandatory in leaf_dependencies:
if dependency_name not in graph.nodes():
graph.add_node(dependency_name, mandatory=is_mandatory)
graph.add_edge(leaf, dependency_name)
found_new_dependencies = graph.number_of_nodes() > current_number_of_nodes
return graph
@staticmethod
def find_dependencies(
pack_name: str,
id_set_path: str = '',
exclude_ignored_dependencies: bool = True,
update_pack_metadata: bool = True,
silent_mode: bool = False,
verbose: bool = False,
debug_file_path: str = '',
skip_id_set_creation: bool = False,
use_pack_metadata: bool = False,
complete_data: bool = False
) -> dict:
"""
Main function for dependencies search and pack metadata update.
Args:
pack_name (str): pack id, currently pack folder name is in use.
id_set_path (str): path to the id set json file.
exclude_ignored_dependencies (bool): Determines whether to include unsupported dependencies or not.
update_pack_metadata (bool): Determines whether to update the pack metadata or not.
silent_mode (bool): Determines whether to echo the dependencies or not.
verbose (bool): Whether to print the log to the console.
debug_file_path (str): path to a debug log file, if logging to a file is desired.
skip_id_set_creation (bool): Whether to skip id_set.json file creation.
use_pack_metadata (bool): Whether to also apply manual dependencies from the pack metadata.
complete_data (bool): Whether to update complete data on the dependent packs.
Returns:
Dict: first level dependencies of a given pack.
"""
if not id_set_path or not os.path.isfile(id_set_path):
if not skip_id_set_creation:
id_set = IDSetCreator(print_logs=False).create_id_set()
else:
return {}
else:
with open(id_set_path, 'r') as id_set_file:
id_set = json.load(id_set_file)
if is_external_repository():
print_warning('Running in a private repository, will download the id set from official content')
id_set = get_merged_official_and_local_id_set(id_set, silent_mode=silent_mode)
dependency_graph = PackDependencies.build_dependency_graph(
pack_id=pack_name,
id_set=id_set,
verbose=verbose,
exclude_ignored_dependencies=exclude_ignored_dependencies
)
first_level_dependencies, _ = parse_for_pack_metadata(
dependency_graph,
pack_name,
verbose,
complete_data=complete_data,
id_set_data=id_set,
)
if update_pack_metadata:
update_pack_metadata_with_dependencies(pack_name, first_level_dependencies)
if not silent_mode:
# print the found pack dependency results
click.echo(click.style(f"Found dependencies result for {pack_name} pack:", bold=True))
dependency_result = json.dumps(first_level_dependencies, indent=4)
click.echo(click.style(dependency_result, bold=True))
if use_pack_metadata:
first_level_dependencies = PackDependencies.update_dependencies_from_pack_metadata(pack_name,
first_level_dependencies)
return first_level_dependencies
@staticmethod
def update_dependencies_from_pack_metadata(pack_name, first_level_dependencies):
"""
Update the dependencies by the pack metadata.
Args:
pack_name (str): the pack name to take the metadata from.
first_level_dependencies (list): the given dependencies from the id set.
Returns:
A list of the updated dependencies.
"""
pack_meta_file_content = PackDependencies.get_metadata_from_pack(pack_name)
manual_dependencies = pack_meta_file_content.get('dependencies', {})
first_level_dependencies.update(manual_dependencies)
return first_level_dependencies
@staticmethod
def get_metadata_from_pack(pack_name):
"""
Returns the pack metadata content of a given pack name.
Args:
pack_name (str): the pack name.
Return:
The pack metadata content.
"""
with open(find_pack_path(pack_name)[0], "r") as pack_metadata:
pack_meta_file_content = json.loads(pack_metadata.read())
return pack_meta_file_content
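# --- Usage sketch (illustrative, not part of the module) ---
# Assuming a content repository checkout and an up-to-date id set file,
# first-level dependencies for a pack can be computed like this (the pack
# name and id set path below are placeholders):
#
#     dependencies = PackDependencies.find_dependencies(
#         pack_name='MyPack',
#         id_set_path='Tests/id_set.json',
#         update_pack_metadata=False,
#         verbose=True,
#     )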
|
1621962
|
from circus.controller import Controller as _Controller
from circus.green.sighandler import SysHandler
from zmq.green.eventloop import ioloop, zmqstream
class Controller(_Controller):
def _init_syshandler(self):
self.sys_hdl = SysHandler(self)
def _init_stream(self):
self.stream = zmqstream.ZMQStream(self.ctrl_socket, self.loop)
self.stream.on_recv(self.handle_message)
def start(self):
self.loop.make_current()
self.initialize()
self.caller = ioloop.PeriodicCallback(self.arbiter.manage_watchers,
self.check_delay)
self.caller.start()
|
1621967
|
import pyclesperanto_prototype as cle
import numpy as np
def test_touch_matrix_to_mesh():
gpu_touch_matrix = cle.push(np.asarray([
[0, 0, 0],
[0, 0, 0],
[0, 1, 0]
]))
gpu_point_list = cle.push(np.asarray([
[1, 4],
[2, 5]
]))
gpu_output = cle.create([5, 5])
cle.set(gpu_output, 0)
gpu_reference = cle.push(np.asarray([
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0]
]).T)
cle.touch_matrix_to_mesh(gpu_point_list, gpu_touch_matrix, gpu_output)
a = cle.pull(gpu_output)
b = cle.pull(gpu_reference)
print(a)
print(b)
assert (np.array_equal(a, b))
|
1622129
|
from OpenGL.GL import *
from OpenGL.error import NullFunctionError
class Framebuffer:
def __init__(self):
self.FBO = glGenFramebuffers(1)
def checkComplete(self):
if glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE:
raise RuntimeError('Error when creating Framebuffer.')
self.unbind()
def getId(self):
return self.FBO
def bind(self):
glBindFramebuffer(GL_FRAMEBUFFER, self.FBO)
def unbind(self):
glBindFramebuffer(GL_FRAMEBUFFER, 0)
def __del__(self):
self.delete()
def delete(self):
try:
glDeleteFramebuffers(1, self.FBO)
self.FBO = 0
except (NullFunctionError, TypeError):
pass
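# --- Usage sketch (illustrative; assumes an active OpenGL context, e.g. created
# via GLFW or pygame, since glGenFramebuffers fails without one) ---
#
#     fbo = Framebuffer()
#     fbo.bind()
#     # ... attach a texture or renderbuffer here, e.g. with glFramebufferTexture2D ...
#     fbo.checkComplete()  # raises RuntimeError if incomplete, then unbinds
#     fbo.bind()           # re-bind before off-screen rendering
#     fbo.unbind()         # back to the default framebuffer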
|
1622144
|
import yaml
import os
import sys
dir = sys.argv[1]
files = os.listdir(dir)
def process(filename):
print("Processing %s" % filename)
(base, ext) = os.path.splitext(filename)
ICONV='iconv -f iso8859-1 -t utf-8 "%(base)s%(ext)s" > "%(base)s.utf8%(ext)s"' % vars()
print("Running",ICONV)
os.system(ICONV)
filename = "%(base)s.utf8%(ext)s" % vars()
lines = []
with open(filename) as infile:
id = infile.readline()
print("Doc ID %s" % id)
lines.append( ('id', id) )
for line in infile:
kwline = line.strip()
if len(kwline) == 0:
continue
if kwline.startswith('#""'):
lines.append( ('bibkey', kwline[3:]))
elif kwline.startswith('#'):
lines.append( ('tag', kwline[1:]))
else:
lines.append( ('text', line) )
doc = { 'id': "", 'tags': [], 'bibkey' : [], 'text' : [] }
for (code, text) in lines:
if code == 'id':
doc['id'] = text.strip()
elif code == 'tag':
doc['tags'].append(text.strip())
elif code == 'bibkey':
doc['bibkey'].append(text.strip())
elif code == 'text':
doc['text'].append(text)
doc['text'] = "\n".join(doc['text'])
(basename, extension) = os.path.splitext(filename)
outfilename = basename + '.yaml'
with open(outfilename, "w") as outfile:
outfile.write(doc['id'] + '\n')
outfile.write('---\n')
del(doc['id'])
outfile.write(yaml.dump(doc))
for f in files:
if f.endswith('.md'):
if not f.endswith('.utf8.md'):
process( os.path.join(dir, f) )
else:
print("Skipping generated UTF-8 file", f)
|
1622155
|
import os
import openpyxl
import pytest
from openpyxl import load_workbook
from viadot.tasks.open_apis.uk_carbon_intensity import StatsToCSV, StatsToExcel
TEST_FILE_PATH = "/home/viadot/tests/uk_carbon_intensity_test.csv"
TEST_FILE_PATH_EXCEL = "/home/viadot/tests/uk_carbon_intensity_test.xlsx"
@pytest.fixture(scope="session")
def ukci_task():
ukci_task = StatsToCSV()
yield ukci_task
os.remove(TEST_FILE_PATH)
@pytest.fixture(scope="session")
def ukci_task_excel():
ukci_task_excel = StatsToExcel()
yield ukci_task_excel
os.remove(TEST_FILE_PATH_EXCEL)
# def test_uk_carbon_intensity_to_csv(ukci_task):
# ukci_task.run(path=TEST_FILE_PATH)
# if_exist = os.path.isfile(TEST_FILE_PATH)
# assert if_exist == True
# def test_uk_carbon_intensity_to_excel(ukci_task_excel):
# ukci_task_excel.run(path=TEST_FILE_PATH_EXCEL)
# if_exist = os.path.isfile(TEST_FILE_PATH_EXCEL)
# assert if_exist == True
# def test_uk_carbon_intensity_to_excel_contain(ukci_task_excel):
# ukci_task_excel.run(path=TEST_FILE_PATH_EXCEL)
# excel_file = load_workbook(TEST_FILE_PATH_EXCEL)
# value = excel_file["A1"].value
# assert value == "from"
|
1622158
|
import asyncio
import json
from pathlib import Path
import pytest
from fastapi import APIRouter, FastAPI
from httpx import AsyncClient
from meilisearch_python_async import Client
from meilisearch_python_async.task import wait_for_task
from meilisearch_fastapi._config import get_config
from meilisearch_fastapi.routes import (
document_routes,
index_routes,
meilisearch_routes,
search_routes,
settings_routes,
)
ROOT_PATH = Path().absolute()
SMALL_MOVIES_PATH = ROOT_PATH / "tests" / "assets" / "small_movies.json"
MASTER_KEY = "masterKey"
MEILISEARCH_URL = "localhost:7700"
INDEX_UID = "indexUID"
INDEX_UID2 = "indexUID2"
INDEX_UID3 = "indexUID3"
INDEX_UID4 = "indexUID4"
INDEX_FIXTURE = [
{"uid": INDEX_UID},
{"uid": INDEX_UID2, "primary_key": "book_id"},
]
@pytest.fixture(autouse=True)
def env_vars(monkeypatch):
monkeypatch.setenv("MEILI_HTTP_ADDR", MEILISEARCH_URL)
monkeypatch.setenv("MEILI_MASTER_KEY", MASTER_KEY)
yield
monkeypatch.delenv("MEILI_HTTP_ADDR", raising=False)
monkeypatch.delenv("MEILI_MASTER_KEY", raising=False)
@pytest.fixture
def meilisearch_url():
return MEILISEARCH_URL
@pytest.fixture
def master_key():
return MASTER_KEY
@pytest.fixture(scope="session", autouse=True)
def event_loop():
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
yield loop
loop.close()
@pytest.fixture(scope="session")
async def test_client():
app = FastAPI()
api_router = APIRouter()
api_router.include_router(document_routes.router, prefix="/documents")
api_router.include_router(index_routes.router, prefix="/indexes")
api_router.include_router(meilisearch_routes.router, prefix="/meilisearch")
api_router.include_router(search_routes.router, prefix="/search")
api_router.include_router(settings_routes.router, prefix="/settings")
app.include_router(api_router)
async with AsyncClient(app=app, base_url="http://test/", follow_redirects=True) as ac:
yield ac
@pytest.fixture
@pytest.mark.asyncio
async def raw_client():
async with Client(f"http://{MEILISEARCH_URL}", MASTER_KEY) as client:
yield client
@pytest.mark.asyncio
@pytest.fixture(autouse=True)
async def clear_indexes():
"""Auto-clears the indexes after each test function run.
Makes all the test functions independent.
"""
yield
async with Client(f"http://{MEILISEARCH_URL}", MASTER_KEY) as client:
indexes = await client.get_indexes()
if indexes:
for index in indexes:
response = await client.index(index.uid).delete()
await wait_for_task(client.http_client, response.uid)
@pytest.fixture(autouse=True)
def clear_config_cache():
yield
get_config.cache_clear()
@pytest.fixture
def index_uid():
return INDEX_UID
@pytest.fixture
def index_uid2():
return INDEX_UID2
@pytest.fixture
def index_uid3():
return INDEX_UID3
@pytest.fixture
def index_uid4():
return INDEX_UID4
@pytest.mark.asyncio
@pytest.fixture
async def empty_index():
async with Client(f"http://{MEILISEARCH_URL}", MASTER_KEY) as client:
index = await client.create_index(uid=INDEX_UID)
yield INDEX_UID, index
@pytest.fixture(scope="session")
def small_movies():
"""Runs once per session. Provides the content of small_movies.json"""
with open(SMALL_MOVIES_PATH, "r") as movie_file:
yield json.loads(movie_file.read())
@pytest.mark.asyncio
@pytest.fixture
async def index_with_documents(empty_index, small_movies):
uid, index = empty_index
response = await index.add_documents(small_movies)
await wait_for_task(index.http_client, response.uid)
yield uid, index
@pytest.mark.asyncio
@pytest.fixture
async def indexes_sample():
async with Client(f"http://{MEILISEARCH_URL}", MASTER_KEY) as client:
indexes = []
for index_args in INDEX_FIXTURE:
index = await client.create_index(**index_args)
indexes.append(index)
yield indexes
|
1622172
|
import os, sys
os.system('pkg install tor')
os.system('pip2 install torrequest')
os.system('pip2 install urllib')
os.system('pip2 install bs4')
os.system('pip2 install requests')
os.system('python2 blog_pack.py')
|
1622213
|
import socket
import re
def validateipv4ip(address):
try:
socket.inet_aton(address)
except socket.error:
print ("wrong IPv4 IP",address)
def validateipv6ip(address):
### for IPv6 IP address validation
try:
socket.inet_pton(socket.AF_INET6,address)
except socket.error:
print ("wrong IPv6 IP", address)
sampletext="""
ip tacacs server 10.10.10.10
int fa0/1
ip address 25.25.25.298 255.255.255.255
no shut
ip name-server 192.168.3.11
int fa0/0
ipv6 address 2001:0db8:85a3:0000:0000:8a2e:0370:7334
ip logging host 192.168.3.11
int te0/2
ipv6 address fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b
no shut
exit
"""
sampletext=sampletext.split("\n")
for line in sampletext:
if ("ipv6" in line):
ipaddress=re.search("(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))",line)
validateipv6ip(ipaddress.group(0))
    elif re.search(r"\d+\.\d+\.\d+\.\d+", line):
        ipaddress = re.search(r"\d+\.\d+\.\d+\.\d+", line)
validateipv4ip(ipaddress.group(0))
|
1622289
|
from nes_py import NESEnv
import tqdm
env = NESEnv('./nes_py/tests/games/super-mario-bros-1.nes')
done = True
try:
for i in tqdm.tqdm(range(5000)):
if done:
state = env.reset()
done = False
else:
state, reward, done, info = env.step(env.action_space.sample())
if (i + 1) % 12 == 0:
env._backup()
if (i + 1) % 27 == 0:
env._restore()
except KeyboardInterrupt:
pass
|
1622300
|
from adventofcode.util.helpers import grid_to_string
from adventofcode.year_2021.day_25_2021 import part_one, get_values, do_step
test_input = [
'v...>>.vv>',
'.vv>>.vv..',
'>>.>v>...v',
'>>v>>.>.v.',
'v>v.vv.v..',
'>.>>..v...',
'.vv..>.>v.',
'v.v..>>v.v',
'....v..v.>',
]
step_1 = [
'....>.>v.>',
'v.v>.>v.v.',
'>v>>..>v..',
'>>v>v>.>.v',
'.>v.v...v.',
'v>>.>vvv..',
'..v...>>..',
'vv...>>vv.',
'>.v.v..v.v',
]
def test_do_step():
grid = get_values(test_input)
do_step(grid)
print()
print(grid_to_string(grid))
print()
print(grid_to_string(get_values(step_1)))
assert grid == get_values(step_1)
def test_part_one():
assert part_one(test_input) == 58
# def test_part_two():
# assert part_two(test_input) == 'x'
|
1622317
|
from PIL import Image
import pytest
from yoga.image.encoders import webp
from yoga.image.encoders import webp_lossless
class Test_is_lossless_webp(object):
def test_with_lossy_webp(self):
image_bytes = open("test/images/alpha.lossy.webp", "rb").read()
assert webp_lossless.is_lossless_webp(image_bytes) is False
def test_with_lossless_webp(self):
image_bytes = open("test/images/alpha.lossless.webp", "rb").read()
assert webp_lossless.is_lossless_webp(image_bytes) is True
def test_with_png(self):
image_bytes = open("test/images/alpha.png", "rb").read()
assert webp_lossless.is_lossless_webp(image_bytes) is False
class Test_encode_lossless_webp(object):
@pytest.mark.parametrize(
"image_path",
[
"test/images/image1.jpg",
"test/images/indexed.png",
"test/images/grayscale.png",
],
)
def test_no_alpha(self, image_path):
input_image = Image.open(image_path)
output_image_bytes = webp_lossless.optimize_lossless_webp(input_image)
riff = webp.get_riff_structure(output_image_bytes)
# Checks there is only wanted chunks in the file
for chunk in riff["chunks"]:
assert chunk["type"] in ["VP8L"]
def test_unused_alpha(self):
input_image = Image.open("test/images/unused-alpha.png")
output_image_bytes = webp_lossless.optimize_lossless_webp(input_image)
riff = webp.get_riff_structure(output_image_bytes)
# Checks there is only wanted chunks in the file
for chunk in riff["chunks"]:
assert chunk["type"] in ["VP8L"]
@pytest.mark.parametrize(
"image_path",
[
"test/images/alpha.png",
"test/images/threshold.png",
],
)
def test_alpha(self, image_path):
input_image = Image.open(image_path)
output_image_bytes = webp_lossless.optimize_lossless_webp(input_image)
riff = webp.get_riff_structure(output_image_bytes)
# Checks there is only wanted chunks in the file
for chunk in riff["chunks"]:
assert chunk["type"] in ["VP8L"]
|
1622342
|
from pudzu.charts import *
from pudzu.sandbox.bamboo import *
from PIL import ImageEnhance
df = pd.read_csv("datasets/euchristmas.csv").set_index("country")
FONT = sans
LFONT = lambda c: FONT  # assumed per-country font selector (undefined in this snippet); falls back to the default font
PALETTE = {
"D25": VegaPalette10.BLUE,
"J7": VegaPalette10.GREEN,
"J6": VegaPalette10.ORANGE,
"N": VegaPalette10.RED
}
DESCRIPTIONS = [
"**25 DECEMBER** (Gregorian & Revised Julian date)",
"**7 JANUARY** (Old Style Julian date)",
"**6 JANUARY** (Gregorian Epiphany date)",
"**NOT A PUBLIC HOLIDAY**",
"**DATE DEPENDS ON LOCATION**",
"**TWO PUBLIC HOLIDAYS**",
]
FOOTER = None
def colorfn(c):
if c in ['Sea', 'Borders']: return "white"
elif c not in df.index: return "grey"
elif "&" in df.group.get(c):
colors = [PALETTE[i] for i in df.group[c].split("&")]
return Stripe(20, *colors)
elif "|" in df.group.get(c):
return VegaPalette10.BROWN
else: return PALETTE[df.group.get(c)]
def labelfn(c, w, h):
if c not in df.index: return None
label = df.word[c].replace("\\n", "\n")
return Image.from_text_bounded(label, (w, h), 24, papply(LFONT(c), bold=True), align="center", padding=(0,0,0,2))
map = map_chart("maps/Europe2.png", colorfn, None)
boxes = list(PALETTE.values()) + [VegaPalette10.BROWN, Image.from_pattern(Stripe(20, PALETTE["D25"], PALETTE["J7"]), (40,40))]
legend = generate_legend(boxes, DESCRIPTIONS, box_sizes=(40), header="Christmas Day holiday falls on...".upper(), footer=FOOTER, font_family=partial(FONT, 16))
chart = map.place(legend, align=(1,0), padding=50)
title = Image.from_column([
Image.from_text("DATE OF THE CHRISTMAS DAY PUBLIC HOLIDAY", FONT(48, bold=True)),
#Image.from_text("date of Christmas as public holiday", FONT(36))
],
bg="white")
img = Image.from_column([title, chart], bg="white", padding=2)
img = ImageEnhance.Color(img).enhance(0.8)
img.place(Image.from_text("/u/Udzu", FONT(16), fg="black", bg="white", padding=5).pad((1,1,0,0), "black"), align=1, padding=10, copy=False)
img.save("output/euchristmas.png")
|
1622354
|
from data_reader import OneWorldAllEntityinKBIterateLoader
from utils import oneworld_entiredataset_loader_for_encoding_entities, KBIndexerWithFaiss, BiEncoderTopXRetriever
from utils import parse_duidx2encoded_emb_2_dui2emb
from model import WrappedModel_for_entityencoding
from encoders import InKBAllEntitiesEncoder, BiEncoderForOnlyMentionOutput
import pdb
class HardNegativesSearcherForEachEpochStart:
def __init__(self, args, world_name, reader, embedder, mention_encoder, entity_encoder,
vocab, berttokenizer, bertindexer):
self.args = args
self.world_name = world_name
self.reader = reader
self.mention_encoder = mention_encoder
self.entity_encoder = entity_encoder
self.vocab = vocab
self.tokenizer = berttokenizer
self.tokenindexer = bertindexer
self.embedder = embedder
# load one world dataset
self.oneworld_loader()
self.entity_loader = OneWorldAllEntityinKBIterateLoader(args=self.args, idx2dui=self.idx2dui,
dui2title=self.dui2title, dui2desc=self.dui2desc,
textfield_embedder=self.embedder,
pretrained_tokenizer=self.tokenizer,
token_indexer=self.tokenindexer)
self.entity_encoder_wrapping_model = WrappedModel_for_entityencoding(args=args,
entity_encoder=self.entity_encoder,
vocab=vocab)
self.encodeAllEntitiesEncoder = InKBAllEntitiesEncoder(args=args,
entity_loader_datasetreaderclass=self.entity_loader,
entity_encoder_wrapping_model=self.entity_encoder_wrapping_model,
vocab=vocab)
def hardNegativesSearcherandSetter(self, howManySampleSearch=20):
dui2encoded_emb, duidx2encoded_emb = self.dui2EncoderEntityEmbReturner()
forstoring_encoded_entities_to_faiss = self.encodedEmbFaissAdder(dui2EncodedEmb=dui2encoded_emb)
biencoderOnlyForMentionOutput = BiEncoderForOnlyMentionOutput(args=self.args,
mention_encoder=self.mention_encoder,
vocab=self.vocab)
biencoderOnlyForMentionOutput.cuda(), biencoderOnlyForMentionOutput.eval()
retriever = self.topXRetieverLoader(biencoderOnlyForMentionOutputClass=biencoderOnlyForMentionOutput,
forstoring_encoded_entities_to_faissAdderclass=forstoring_encoded_entities_to_faiss,
duidx2encoded_emb=duidx2encoded_emb)
mentionId2GoldDUIDX, mentionId2HardNegativeDUIDX = {}, {}
print("\n########\nHARD NEGATIVE MININGS started\n########\n")
for faiss_search_candidate_result_duidxs, mention_uniq_ids, gold_duidxs in retriever.biencoder_tophits_retrievaler(train_or_dev_or_test_flag='train',
how_many_top_hits_preserved=howManySampleSearch):
for one_mention_search_result, mention_uniq_id, gold_duidx in zip(faiss_search_candidate_result_duidxs, mention_uniq_ids, gold_duidxs):
hard_negatives = one_mention_search_result[one_mention_search_result != gold_duidx][:self.args.hard_negatives_num].tolist()
mention_uniq_id = int(mention_uniq_id)
gold_duidx = int(gold_duidx)
mentionId2GoldDUIDX.update({mention_uniq_id:gold_duidx})
mentionId2HardNegativeDUIDX.update({mention_uniq_id:hard_negatives})
self.reader.hardNegativesUpdater(mentionId2GoldDUIDX=mentionId2GoldDUIDX, mentionId2HardNegativeDUIDX=mentionId2HardNegativeDUIDX)
def topXRetieverLoader(self, biencoderOnlyForMentionOutputClass, forstoring_encoded_entities_to_faissAdderclass,
duidx2encoded_emb):
return BiEncoderTopXRetriever(args=self.args,vocab=self.vocab,
biencoder_onlyfor_encodingmentions=biencoderOnlyForMentionOutputClass,
faiss_stored_kb=forstoring_encoded_entities_to_faissAdderclass.indexed_faiss_returner(),
reader_for_mentions=self.reader,
duidx2encoded_emb=duidx2encoded_emb)
def dui2EncoderEntityEmbReturner(self):
duidx2encoded_emb = self.encodeAllEntitiesEncoder.encoding_all_entities()
dui2encoded_emb = parse_duidx2encoded_emb_2_dui2emb(duidx2encoded_emb=duidx2encoded_emb, original_dui2idx=self.dui2idx)
return dui2encoded_emb, duidx2encoded_emb
def encodedEmbFaissAdder(self, dui2EncodedEmb):
return KBIndexerWithFaiss(args=self.args, input_dui2idx=self.dui2idx, input_idx2dui=self.idx2dui,
input_dui2emb=dui2EncodedEmb, search_method_for_faiss=self.args.search_method,
entity_emb_dim=self.entityEmbDimReturner())
def oneworld_loader(self):
'''
load self.dui2idx, self.idx2dui, self.dui2title, self.dui2desc
:return:
'''
self.dui2idx, self.idx2dui, self.dui2title, self.dui2desc = oneworld_entiredataset_loader_for_encoding_entities(args=self.args,
world_name=self.world_name)
def entityEmbDimReturner(self):
if self.args.dimentionReduction:
return self.args.dimentionReductionToThisDim
else:
return 768
|
1622357
|
import base64
import hashlib
from django import template
register = template.Library()
@register.simple_tag
def ssh_to_fingerprint(line):
try:
key = base64.b64decode(line.strip().split()[1].encode('ascii'))
fp_plain = hashlib.md5(key).hexdigest()
return ':'.join(a + b for a, b in zip(fp_plain[::2], fp_plain[1::2]))
except Exception:
return 'Invalid key'
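# --- Usage sketch (illustrative) ---
# In a Django template, after loading this tag library (the library name below
# is hypothetical and depends on this file's location):
#
#     {% load ssh_tags %}
#     {% ssh_to_fingerprint user.ssh_public_key %}
#
# which renders a colon-separated MD5 fingerprint such as "ab:cd:ef:...".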
|
1622358
|
from bs4 import BeautifulSoup
import requests
source = requests.get('http://www.example.com/').text
soup = BeautifulSoup(source, 'lxml') # Using lxml => Run pip install lxml
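# Note: example.com is only a placeholder URL; the selectors below
# ('entry_content', 'youtube-player') assume a blog-like page and will not
# match anything on example.com itself.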
# Find with <div> tag
article = soup.find('div')
print(article.prettify())
# Grab a Class
summary = article.find('div', class_='entry_content').p.text
# Grab Video
vid_src = article.find('iframe', class_='youtube-player')['src']
for div in soup.find_all('div'):
headline = div.h1.text if div.h1 else None  # guard: not every <div> contains an <h1>
print(headline)
# Select Method + getText()
p = soup.select('p')
p_text = p[0].getText()
|
1622364
|
import json
import time
import random
import paho.mqtt.client as mqtt
from . import Adaptor
class MQTTAdaptor(Adaptor):
""" Provide pubsub events over MQTT """
key = 'mqtt'
connected = False
loop_started = False
callbacks = {}
def connect(self):
""" Make mqtt connection and setup broker """
def on_conn(client, userdata, flags, rc):
if rc == 0:
self.connected = True
host = self.config.get('host', "localhost")
# port = self.config.get('port', 1883)
self.connection = mqtt.Client(f'mudpi-{random.randint(0, 100)}')
self.connection.on_connect = on_conn
username = self.config.get('username')
password = self.config.get('password')
if all([username, password]):
self.connection.username_pw_set(username, password)
self.connection.connect(host)
while not self.connected:
self.get_message()
time.sleep(0.1)
return True
def disconnect(self):
""" Close active connections and cleanup subscribers """
self.connection.loop_stop()
self.connection.disconnect()
return True
def subscribe(self, topic, callback):
""" Listen on a topic and pass event data to callback """
if topic not in self.callbacks:
self.callbacks[topic] = [callback]
else:
if callback not in self.callbacks[topic]:
self.callbacks[topic].append(callback)
def callback_handler(client, userdata, message):
# log = f"{message.payload.decode()} {message.topic}"
if message.topic in self.callbacks:
for callbk in self.callbacks[message.topic]:
callbk(message.payload)
self.connection.on_message = callback_handler
return self.connection.subscribe(topic)
def unsubscribe(self, topic):
""" Stop listening for events on a topic """
del self.callbacks[topic]
return self.connection.unsubscribe(topic)
def publish(self, topic, data=None):
""" Publish an event on the topic """
if data:
return self.connection.publish(topic, json.dumps(data))
return self.connection.publish(topic)
def get_message(self):
""" Check for new messages waiting """
if not self.loop_started:
self.connection.loop_start()
self.loop_started = True
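# --- Usage sketch (illustrative; assumes the base Adaptor stores a config
# dict on self.config, which connect() reads the host/username/password from) ---
#
#     adaptor = MQTTAdaptor(config={'host': 'localhost'})
#     adaptor.connect()
#     adaptor.subscribe('mudpi/events', lambda payload: print(payload))
#     adaptor.publish('mudpi/events', {'state': 'on'})
#     adaptor.get_message()  # starts the background network loop on first call
#     adaptor.disconnect()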
|
1622408
|
version_info = (6, 3, 4, 'dev0')
__version__ = '.'.join(map(str, version_info))
kernel_protocol_version_info = (5, 1)
kernel_protocol_version = '%s.%s' % kernel_protocol_version_info
|
1622451
|
from gym.spaces import Box
from ray.rllib.agents.dqn.distributional_q_tf_model import \
DistributionalQTFModel
from ray.rllib.agents.dqn.dqn_torch_model import \
DQNTorchModel
from ray.rllib.models.tf.fcnet import FullyConnectedNetwork
from ray.rllib.models.torch.fcnet import FullyConnectedNetwork as TorchFC
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.torch_utils import FLOAT_MIN, FLOAT_MAX
tf1, tf, tfv = try_import_tf()
torch, nn = try_import_torch()
class ParametricActionsModel(DistributionalQTFModel):
"""Parametric action model that handles the dot product and masking.
This assumes the outputs are logits for a single Categorical action dist.
Getting this to work with a more complex output (e.g., if the action space
is a tuple of several distributions) is also possible but left as an
exercise to the reader.
"""
def __init__(self,
obs_space,
action_space,
num_outputs,
model_config,
name,
true_obs_shape=(4, ),
action_embed_size=2,
**kw):
super(ParametricActionsModel, self).__init__(
obs_space, action_space, num_outputs, model_config, name, **kw)
self.action_embed_model = FullyConnectedNetwork(
Box(-1, 1, shape=true_obs_shape), action_space, action_embed_size,
model_config, name + "_action_embed")
def forward(self, input_dict, state, seq_lens):
# Extract the available actions tensor from the observation.
avail_actions = input_dict["obs"]["avail_actions"]
action_mask = input_dict["obs"]["action_mask"]
# Compute the predicted action embedding
action_embed, _ = self.action_embed_model({
"obs": input_dict["obs"]["cart"]
})
# Expand the model output to [BATCH, 1, EMBED_SIZE]. Note that the
# avail actions tensor is of shape [BATCH, MAX_ACTIONS, EMBED_SIZE].
intent_vector = tf.expand_dims(action_embed, 1)
# Batch dot product => shape of logits is [BATCH, MAX_ACTIONS].
action_logits = tf.reduce_sum(avail_actions * intent_vector, axis=2)
# Mask out invalid actions (use tf.float32.min for stability)
inf_mask = tf.maximum(tf.math.log(action_mask), tf.float32.min)
return action_logits + inf_mask, state
def value_function(self):
return self.action_embed_model.value_function()
class TorchParametricActionsModel(DQNTorchModel):
"""PyTorch version of above ParametricActionsModel."""
def __init__(self,
obs_space,
action_space,
num_outputs,
model_config,
name,
true_obs_shape=(4, ),
action_embed_size=2,
**kw):
DQNTorchModel.__init__(self, obs_space, action_space, num_outputs,
model_config, name, **kw)
self.action_embed_model = TorchFC(
Box(-1, 1, shape=true_obs_shape), action_space, action_embed_size,
model_config, name + "_action_embed")
def forward(self, input_dict, state, seq_lens):
# Extract the available actions tensor from the observation.
avail_actions = input_dict["obs"]["avail_actions"]
action_mask = input_dict["obs"]["action_mask"]
# Compute the predicted action embedding
action_embed, _ = self.action_embed_model({
"obs": input_dict["obs"]["cart"]
})
# Expand the model output to [BATCH, 1, EMBED_SIZE]. Note that the
# avail actions tensor is of shape [BATCH, MAX_ACTIONS, EMBED_SIZE].
intent_vector = torch.unsqueeze(action_embed, 1)
# Batch dot product => shape of logits is [BATCH, MAX_ACTIONS].
action_logits = torch.sum(avail_actions * intent_vector, dim=2)
# Mask out invalid actions (use -inf to tag invalid).
# These are then recognized by the EpsilonGreedy exploration component
# as invalid actions that are not to be chosen.
inf_mask = torch.clamp(torch.log(action_mask), FLOAT_MIN, FLOAT_MAX)
return action_logits + inf_mask, state
def value_function(self):
return self.action_embed_model.value_function()
class ParametricActionsModelThatLearnsEmbeddings(DistributionalQTFModel):
"""Same as the above ParametricActionsModel.
However, this version also learns the action embeddings.
"""
def __init__(self,
obs_space,
action_space,
num_outputs,
model_config,
name,
true_obs_shape=(4, ),
action_embed_size=2,
**kw):
super(ParametricActionsModelThatLearnsEmbeddings, self).__init__(
obs_space, action_space, num_outputs, model_config, name, **kw)
action_ids_shifted = tf.constant(
list(range(1, num_outputs + 1)), dtype=tf.float32)
obs_cart = tf.keras.layers.Input(shape=true_obs_shape, name="obs_cart")
valid_avail_actions_mask = tf.keras.layers.Input(
shape=(num_outputs, ), name="valid_avail_actions_mask")
self.pred_action_embed_model = FullyConnectedNetwork(
Box(-1, 1, shape=true_obs_shape), action_space, action_embed_size,
model_config, name + "_pred_action_embed")
# Compute the predicted action embedding
pred_action_embed, _ = self.pred_action_embed_model({"obs": obs_cart})
_value_out = self.pred_action_embed_model.value_function()
# Expand the model output to [BATCH, 1, EMBED_SIZE]. Note that the
# avail actions tensor is of shape [BATCH, MAX_ACTIONS, EMBED_SIZE].
intent_vector = tf.expand_dims(pred_action_embed, 1)
valid_avail_actions = action_ids_shifted * valid_avail_actions_mask
# Embedding for valid available actions which will be learned.
# Embedding vector for 0 is an invalid embedding (a "dummy embedding").
valid_avail_actions_embed = tf.keras.layers.Embedding(
input_dim=num_outputs + 1,
output_dim=action_embed_size,
name="action_embed_matrix")(valid_avail_actions)
# Batch dot product => shape of logits is [BATCH, MAX_ACTIONS].
action_logits = tf.reduce_sum(
valid_avail_actions_embed * intent_vector, axis=2)
# Mask out invalid actions (use tf.float32.min for stability)
inf_mask = tf.maximum(
tf.math.log(valid_avail_actions_mask), tf.float32.min)
action_logits = action_logits + inf_mask
self.param_actions_model = tf.keras.Model(
inputs=[obs_cart, valid_avail_actions_mask],
outputs=[action_logits, _value_out])
self.param_actions_model.summary()
def forward(self, input_dict, state, seq_lens):
# Extract the available actions mask tensor from the observation.
valid_avail_actions_mask = input_dict["obs"][
"valid_avail_actions_mask"]
action_logits, self._value_out = self.param_actions_model(
[input_dict["obs"]["cart"], valid_avail_actions_mask])
return action_logits, state
def value_function(self):
return self._value_out
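# --- Usage sketch (illustrative) ---
# These custom models are made available to RLlib by registering them with the
# ModelCatalog and referencing them by name in the trainer's model config:
#
#     from ray.rllib.models import ModelCatalog
#     ModelCatalog.register_custom_model("pa_model", ParametricActionsModel)
#     config = {"model": {"custom_model": "pa_model"}}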
|
1622480
|
import math
from fairness.metrics.Metric import Metric
class Diff(Metric):
def __init__(self, metric1, metric2):
Metric.__init__(self)
self.metric1 = metric1
self.metric2 = metric2
self.name = "diff:" + self.metric1.get_name() + 'to' + self.metric2.get_name()
def calc(self, actual, predicted, dict_of_sensitive_lists, single_sensitive_name,
unprotected_vals, positive_pred):
m1 = self.metric1.calc(actual, predicted, dict_of_sensitive_lists, single_sensitive_name,
unprotected_vals, positive_pred)
m2 = self.metric2.calc(actual, predicted, dict_of_sensitive_lists,
single_sensitive_name, unprotected_vals, positive_pred)
if m1 is None or m2 is None:
return None
diff = m1 - m2
return 1.0 - diff
def is_better_than(self, val1, val2):
"""
Assumes that 1.0 is the goal value.
"""
dist1 = math.fabs(1.0 - val1)
dist2 = math.fabs(1.0 - val2)
return dist1 <= dist2
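# --- Usage sketch (illustrative; metric1/metric2 stand for any two Metric
# instances with the calc() interface used above) ---
#
#     diff = Diff(metric1, metric2)
#     score = diff.calc(actual, predicted, dict_of_sensitive_lists,
#                       single_sensitive_name, unprotected_vals, positive_pred)
#     # score == 1.0 iff both metrics agree; is_better_than prefers values
#     # closer to 1.0.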
|
1622483
|
import os
import sys
import mock
import pytest
from program_synthesis.karel import arguments
from program_synthesis.karel.dataset import dataset
from program_synthesis.karel.dataset import edit_data_loader
from program_synthesis.karel.models import karel_edit_model
@pytest.fixture
def args():
with mock.patch('sys.argv', [
'test', '--num_placeholders', '0', '--karel-merge-io', 'setlstm'
]):
args = arguments.parse('', 'train')
args.word_vocab = os.path.abspath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../../data/karel/word.vocab'))
return args
def test_get_batch_sync(args):
karel_dataset = dataset.KarelTorchDataset(
dataset.relpath('../../data/karel/train.pkl'))
batch_size = 32
m = karel_edit_model.KarelStepEditModel(args)
batch_processor = m.batch_processor(for_eval=False)
loader = edit_data_loader.SynchronousKarelEditDataLoader(
karel_dataset,
batch_size,
batch_processor)
batch = next(iter(loader))
def test_exhaustion_sync(args):
karel_dataset = dataset.KarelTorchDataset(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'testdata',
'test-cs106a-line1.pkl'))
batch_size = 2
m = karel_edit_model.KarelStepEditModel(args)
batch_processor = m.batch_processor(for_eval=True)
loader = edit_data_loader.SynchronousKarelEditDataLoader(
karel_dataset,
batch_size,
batch_processor,
beam_size=None,
shuffle=False)
all_batches = list(loader)
assert sum(len(b.orig_examples) for b in all_batches) == 16
# Run again
all_batches2 = list(loader)
assert sum(len(b.orig_examples) for b in all_batches2) == 16
# Test that all of the outputs are unique
all_codes = [e.cur_code for b in all_batches for e in b.orig_examples]
assert len(set(all_codes)) == len(all_codes)
|
1622497
|
import numpy as np
import matplotlib.pyplot as plt
def insert_zeros(trace, tt=None):
"""Insert zero locations in data trace and tt vector based on linear fit"""
if tt is None:
tt = np.arange(len(trace))
# Find zeros
zc_idx = np.where(np.diff(np.signbit(trace)))[0]
x1 = tt[zc_idx]
x2 = tt[zc_idx + 1]
y1 = trace[zc_idx]
y2 = trace[zc_idx + 1]
a = (y2 - y1) / (x2 - x1)
tt_zero = x1 - y1 / a
# split tt and trace
tt_split = np.split(tt, zc_idx + 1)
trace_split = np.split(trace, zc_idx + 1)
tt_zi = tt_split[0]
trace_zi = trace_split[0]
# insert zeros in tt and trace
for i in range(len(tt_zero)):
tt_zi = np.hstack(
(tt_zi, np.array([tt_zero[i]]), tt_split[i + 1]))
trace_zi = np.hstack(
(trace_zi, np.zeros(1), trace_split[i + 1]))
return trace_zi, tt_zi
def wiggle_input_check(data, tt, xx, sf, verbose):
''' Helper function for wiggle() and traces() to check input
'''
# Input check for verbose
if not isinstance(verbose, bool):
raise TypeError("verbose must be a bool")
# Input check for data
if type(data).__module__ != np.__name__:
raise TypeError("data must be a numpy array")
if len(data.shape) != 2:
raise ValueError("data must be a 2D array")
# Input check for tt
if tt is None:
tt = np.arange(data.shape[0])
if verbose:
print("tt is automatically generated.")
print(tt)
else:
if type(tt).__module__ != np.__name__:
raise TypeError("tt must be a numpy array")
if len(tt.shape) != 1:
raise ValueError("tt must be a 1D array")
if tt.shape[0] != data.shape[0]:
raise ValueError("tt must have same as data's rows")
# Input check for xx
if xx is None:
xx = np.arange(data.shape[1])
if verbose:
print("xx is automatically generated.")
print(xx)
else:
if type(xx).__module__ != np.__name__:
raise TypeError("xx must be a numpy array")
if len(xx.shape) != 1:
raise ValueError("xx must be a 1D array")
if xx.shape[0] != data.shape[1]:
raise ValueError("xx must have the same length as data's columns")
if verbose:
print(xx)
# Input check for stretch factor (sf)
if not isinstance(sf, (int, float)):
raise TypeError("Stretch factor (sf) must be a number")
# Compute trace horizontal spacing
ts = np.min(np.diff(xx))
# Rescale data by trace_spacing and stretch_factor
data_max_std = np.max(np.std(data, axis=0))
data = data / data_max_std * ts * sf
return data, tt, xx, ts
def wiggle(data, tt=None, xx=None, color='k', sf=0.15, verbose=False):
'''Wiggle plot of a seismic data section
Syntax examples:
wiggle(data)
wiggle(data, tt)
wiggle(data, tt, xx)
wiggle(data, tt, xx, color)
fi = wiggle(data, tt, xx, color, sf, verbose)
Use column-major (Fortran) order for the array for optimal performance.
The following color abbreviations are supported:
========== ========
character color
========== ========
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
========== ========
'''
# Input check
data, tt, xx, ts = wiggle_input_check(data, tt, xx, sf, verbose)
# Plot data using matplotlib.pyplot
Ntr = data.shape[1]
ax = plt.gca()
for ntr in range(Ntr):
trace = data[:, ntr]
offset = xx[ntr]
if verbose:
print(offset)
trace_zi, tt_zi = insert_zeros(trace, tt)
ax.fill_betweenx(tt_zi, offset, trace_zi + offset,
where=trace_zi >= 0,
facecolor=color)
ax.plot(trace_zi + offset, tt_zi, color)
ax.set_xlim(xx[0] - ts, xx[-1] + ts)
ax.set_ylim(tt[0], tt[-1])
ax.invert_yaxis()
if __name__ == '__main__':
data = np.random.randn(1000, 100)
wiggle(data)
plt.show()
|
1622528
|
from django.test import TestCase
from django.test.client import Client
from core.group.models import Group
from mozdns.tests.utils import create_fake_zone
from core.registration.static.models import StaticReg
from systems.tests.utils import create_fake_host
class KVApiTests(TestCase):
def setUp(self):
self.c = Client()
create_fake_zone('10.in-addr.arpa', suffix='')
root_domain = create_fake_zone('foobar.mozilla.com', suffix='')
system = create_fake_host(hostname="asdf.mozilla.com")
sreg = StaticReg.objects.create(
label='foo', domain=root_domain, system=system,
ip_type='4', ip_str='10.0.0.0'
)
g = Group.objects.create(name="foo")
self.test_objs = (
('groupkeyvalue', g),
('staticregkeyvalue', sreg),
('keyvalue', system),
)
def testCRUD(self):
for obj_class, o in self.test_objs:
self.do_stuff(obj_class, o)
def do_stuff(self, obj_class, o):
key = 'foo'
value = 'bar'
create = '/en-US/core/keyvalue/api/{kv_class}/{obj_pk}/create/'.format(
kv_class=obj_class, obj_pk=o.pk
)
detail = '/en-US/core/keyvalue/api/{kv_class}/{obj_pk}/list/'.format(
kv_class=obj_class, obj_pk=o.pk
)
resp1 = self.c.post(create, {'key': key, 'value': value})
self.assertEqual(resp1.status_code, 201)
resp2 = self.c.post(create, {'key': key, 'value': value})
self.assertEqual(resp2.status_code, 400)
resp3 = self.c.get(detail)
self.assertEqual(resp3.status_code, 200)
resp4 = self.c.get(detail)
self.assertEqual(resp4.status_code, 200)
self.assertEqual(1, len(o.keyvalue_set.all()))
kv = o.keyvalue_set.all()[0]
update = '/en-US/core/keyvalue/api/{kv_class}/{kv_pk}/update/'.format(
kv_class=obj_class, kv_pk=kv.pk
)
new_value = "happy magic"
resp5 = self.c.post(update, {'key': key, 'value': new_value})
self.assertEqual(resp5.status_code, 200)
kv = o.keyvalue_set.get(pk=kv.pk)
self.assertEqual(kv.value, new_value)
# Does bad update do what it's supposed to?
resp6 = self.c.post(update, {'key': key, 'value': ''})
self.assertEqual(resp6.status_code, 400)
kv = o.keyvalue_set.get(pk=kv.pk)
self.assertEqual(kv.value, new_value) # Should be no change
delete = '/en-US/core/keyvalue/api/{kv_class}/{kv_pk}/delete/'.format(
kv_class=obj_class, kv_pk=kv.pk
)
resp7 = self.c.post(delete, {'key': key, 'value': new_value})
self.assertEqual(resp7.status_code, 204)
self.assertEqual(0, len(o.keyvalue_set.all()))
class TestCaseUtils(object):
def localize_url(self, url):
if 'en-US' not in url:
url = url.replace('mozdns', 'en-US/mozdns')
return url
|
1622650
|
import spacy
# Import the Matcher
from spacy.____ import ____
nlp = spacy.load("fr_core_news_sm")
doc = nlp("Le constructeur Citröen présente la e-Méhari Courrèges au public.")
# Initialize the matcher with the shared vocabulary
matcher = ____(____.____)
# Create a pattern that matches the two tokens: "e-Méhari" and "Courrèges"
pattern = [____]
# Add the pattern to the matcher
____.____("MEHARI_PATTERN", None, ____)
# Use the matcher on the doc
matches = ____
print("Résultats :", [doc[start:end].text for match_id, start, end in matches])
|
1622711
|
import subprocess
import sys
from distutils.version import LooseVersion
reqs = subprocess.check_output([sys.executable, '-m', 'pip', 'freeze'])
installed_packages = [r.decode().split('==')[0] for r in reqs.split()]
if 'torch' in installed_packages:
from rlcard.agents.dqn_agent import DQNAgent as DQNAgent
from rlcard.agents.nfsp_agent import NFSPAgent as NFSPAgent
from rlcard.agents.cfr_agent import CFRAgent
from rlcard.agents.human_agents.limit_holdem_human_agent import HumanAgent as LimitholdemHumanAgent
from rlcard.agents.human_agents.nolimit_holdem_human_agent import HumanAgent as NolimitholdemHumanAgent
from rlcard.agents.human_agents.leduc_holdem_human_agent import HumanAgent as LeducholdemHumanAgent
from rlcard.agents.human_agents.blackjack_human_agent import HumanAgent as BlackjackHumanAgent
from rlcard.agents.human_agents.uno_human_agent import HumanAgent as UnoHumanAgent
from rlcard.agents.random_agent import RandomAgent
|
1622716
|
import cv2
import numpy as np
import caffe
def get_landmark_net():
net_file = './landmark.prototxt'
caffe_model = './landmark.caffemodel'
caffe.set_mode_gpu()
net = caffe.Net(net_file, caffe_model, caffe.TEST)
return net
def get_pose_landmarks(net , img):
halfheight = img.shape[0] * 0.5
halfwidth = img.shape[1] * 0.5
img = cv2.resize(img, (48, 48), interpolation=cv2.INTER_LINEAR)
img = (img - 127.5) * 0.0078125
x = np.array(img)
x = np.transpose(x, (2, 0, 1))
net.blobs['data'].data[...][0] = x
out = net.forward()
pose = out['fc2'][0][0:3]
landmark = out['fc2'][0][3:]
pose = pose * 90.0
landmarks = []
for i in range(21):
point = []
point.append(landmark[i * 2 + 0] * halfwidth + halfwidth)
point.append(landmark[i * 2 + 1] * halfheight + halfheight)
landmarks.append(point)
landmarks = np.array(landmarks, dtype=np.int32)
return pose, landmarks
if __name__ == '__main__':
landmark_net = get_landmark_net()
img = cv2.imread('./test.png')
pose, facelandmarks = get_pose_landmarks(landmark_net, img)
for point in facelandmarks:
cv2.circle(img, (point[0], point[1]), 0, (0, 0, 255), 2)
print('pose: ', pose)
cv2.imshow('', img)
cv2.waitKey(0)
|
1622730
|
import numpy as np
from toolkit.methods.pnpl import CvxPnPL, DLT, EPnPL, OPnPL
from toolkit.suites import parse_arguments, PnPLReal
from toolkit.datasets import Linemod, Occlusion
# reproducibility is a great thing
np.random.seed(42)
# parse console arguments
args = parse_arguments()
# Just a loading data scenario
if args.load:
session = PnPLReal.load(args.load)
session.print(args.print_mode)
quit()
# run something
session = PnPLReal(methods=[CvxPnPL, DLT, EPnPL, OPnPL])
session.run(data=[Linemod(args.datasets_prefix), Occlusion(args.datasets_prefix)])
# session.run(data=[Linemod(args.datasets_prefix)])
if args.save:
session.save(args.save)
session.print()
|
1622757
|
from collections import Counter, defaultdict
from typing import Dict
from allennlp.data.instance import Instance
from allennlp.data.fields import Field, TextField, LabelField, SequenceLabelField
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.vocabulary import Vocabulary
|
1622760
|
import json
import os
import pytz
def _bool_convert(value):
truthy = {"t", "true", "on", "y", "yes", "1", 1, 1.0, True}
falsy = {"f", "false", "off", "n", "no", "0", 0, 0.0, False}
if isinstance(value, str):
value = value.lower()
if value in truthy:
return True
if value in falsy:
return False
return bool(value)
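# Examples (based on the sets above): _bool_convert("Yes") -> True,
# _bool_convert("off") -> False, _bool_convert(2) -> True (falls through to bool()).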
class Db:
if os.environ.get('DB_USERNAME') is not None:
db_username = os.environ.get('DB_USERNAME')
db_password = os.environ.get('DB_PASSWORD')
db_port = os.environ.get('DB_PORT', '5432')
db_host = os.environ.get('DB_HOST', 'localhost')
db_database = os.environ.get('DB_DATABASE', 'termine')
url = f"postgresql://{db_username}:{db_password}@{db_host}:{db_port}/{db_database}"
else:
url = os.environ.get(
"DB_URL", 'postgresql://postgres:example@localhost:5432/termine')
class Settings:
claim_timeout_min = int(os.environ.get("CLAIM_TIMEOUT_MIN", 5))
num_display_slots = int(os.environ.get("DISPLAY_SLOTS_COUNT", 150))
tz = pytz.timezone(os.environ.get("TERMINE_TIME_ZONE", 'Europe/Berlin'))
disable_auth_for_booking = _bool_convert(
os.environ.get("DISABLE_AUTH", False))
use_ldap = _bool_convert(os.environ.get("USE_LDAP", False))
jwt_key = os.environ.get("JWT_SECRET_KEY", "")
class Ldap:
url = os.environ.get("LDAP_URL", "")
user_dn = os.environ.get("LDAP_SYSTEM_DN", "")
user_pw = os.environ.get("LDAP_SYSTEM_USER_PW", "")
user_coupon_number = int(os.environ.get("LDAP_USER_COUPONS", 3))
search_base = os.environ.get("LDAP_SEARCH_BASE", "")
search_filter = os.environ.get("LDAP_SEARCH_FILTER", "")
search_attribute = os.environ.get("LDAP_ATTRIBUTE", "")
use_tls = _bool_convert(os.environ.get("LDAP_USE_TLS", False))
port = int(os.environ.get("LDAP_PORT", 389))
tls_port = int(os.environ.get("LDAP_TLS_PORT", 636))
class FrontendSettings:
_inst = None
@classmethod
def by_env(cls):
env_name = os.environ.get("ENVIRONMENT", "local")
with open(os.path.join("config", 'by_env', f'{env_name}.json')) as file:
frontend_conf = json.load(file)
return frontend_conf
@classmethod
def instance_by_env(cls):
if not cls._inst:
cls._inst = cls.by_env()
return cls._inst
@classmethod
def json_by_env(cls):
return json.dumps(cls.instance_by_env())
seed = os.environ.get("PASSWORD_HASH_SEED_DO_NOT_CHANGE", 'Wir sind SEEED')
|
1622761
|
import numpy as np
fwhm_m = 2 * np.sqrt(2 * np.log(2))
def fwhm(sigma):
"""
Get full width at half maximum (FWHM) for a provided sigma /
standard deviation, assuming a Gaussian distribution.
"""
return fwhm_m * sigma
def gaussian(x_mean, x_std, shape):
return np.random.normal(x_mean, x_std, shape)
def truncated_gaussian(x_mean, x_std, x_min, shape):
"""
Sample from a normal distribution, but enforces a minimum value.
"""
return np.maximum(gaussian(x_mean, x_std, shape), x_min)
def chi2(x_mean, chi2_df, shape):
"""
Chi-squared distribution centered at a specific mean.
Parameters
----------
x_mean : float
chi2_df : int
Degrees of freedom for chi-squared
shape : list
Shape of output noise array
Returns
-------
dist : ndarray
Array of chi-squared noise
"""
return np.random.chisquare(df=chi2_df, size=shape) * x_mean / chi2_df
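# --- Usage sketch (illustrative) ---
#
#     sigma = 2.0
#     fwhm(sigma)                                   # ~4.71, i.e. 2*sqrt(2*ln 2)*sigma
#     truncated_gaussian(10.0, 3.0, 0.0, (1000,))   # normal samples clipped below at 0
#     chi2(x_mean=5.0, chi2_df=4, shape=(1000,))    # chi-squared noise with mean ~5.0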
|
1622781
|
import unittest
import asyncio
import dns.resolver
import os
COLLECTOR_USER = os.getenv('COLLECTOR_USER')
if COLLECTOR_USER is None:
COLLECTOR_USER = "root"
my_resolver = dns.resolver.Resolver(configure=False)
my_resolver.nameservers = ['127.0.0.1']
my_resolver.port = 5553
my_resolver.timeout = 20
my_resolver.lifetime = 20
class ProcessProtocol(asyncio.SubprocessProtocol):
def __init__(self, is_ready, is_clientresponse):
self.is_ready = is_ready
self.is_clientresponse = is_clientresponse
self.transport = None
self.proc = None
def connection_made(self, transport):
self.transport = transport
self.proc = transport.get_extra_info('subprocess')
def pipe_data_received(self, fd, data):
print(data.decode(), end="")
if b"collector dnstap - receiver framestream initialized" in data:
self.is_ready.set_result(True)
if not self.is_clientresponse.done():
if b"CLIENT_RESPONSE NOERROR" in data:
self.is_clientresponse.set_result(True)
self.kill()
def kill(self):
try:
self.proc.kill()
except ProcessLookupError: pass
class TestDnstap(unittest.TestCase):
def setUp(self):
self.loop = asyncio.get_event_loop()
def test_stdout_recv(self):
"""test to receive dnstap response in stdout"""
async def run():
# run collector
is_ready = asyncio.Future()
is_clientresponse = asyncio.Future()
args = ( "sudo", "-u", COLLECTOR_USER, "-s", "./go-dnscollector", "-config", "./testsdata/config_stdout_dnstapunix.yml",)
transport_collector, protocol_collector = await self.loop.subprocess_exec(lambda: ProcessProtocol(is_ready, is_clientresponse),
*args, stdout=asyncio.subprocess.PIPE)
# make some dns queries to force the dns server to connect to the collector
# in some products (dnsdist), connection is after incoming dns traffic
for i in range(20):
try:
my_resolver.resolve('www.github.com', 'a')
except: pass
# waiting for connection between collector and dns server is ok
try:
await asyncio.wait_for(is_ready, timeout=30.0)
except asyncio.TimeoutError:
protocol_collector.kill()
transport_collector.close()
self.fail("collector framestream timeout")
# make some dns queries again
for i in range(20):
try:
my_resolver.resolve('www.github.com', 'a')
except: pass
# wait client response on collector
try:
await asyncio.wait_for(is_clientresponse, timeout=30.0)
except asyncio.TimeoutError:
protocol_collector.kill()
transport_collector.close()
self.fail("dnstap client response expected")
# Shutdown all
protocol_collector.kill()
transport_collector.close()
self.loop.run_until_complete(run())
|
1622791
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
__all__ = ['get_drug_target_data', 'LincsClient', 'load_lincs_csv']
import os
import sys
import json
import logging
import requests
from io import StringIO, BytesIO
from indra.util import read_unicode_csv_fileobj
from indra.databases.identifiers import ensure_chembl_prefix
logger = logging.getLogger(__name__)
LINCS_URL = 'http://lincs.hms.harvard.edu/db'
resources = os.path.join(os.path.abspath(os.path.dirname(__file__)),
os.path.pardir, 'resources')
lincs_sm = os.path.join(resources, 'lincs_small_molecules.json')
lincs_prot = os.path.join(resources, 'lincs_proteins.json')
class LincsClient(object):
"""Client for querying LINCS small molecules and proteins."""
def __init__(self):
with open(lincs_sm, 'r') as fh:
self._sm_data = json.load(fh)
extra_sm_data = load_lincs_extras()
self._sm_data.update(extra_sm_data)
with open(lincs_prot, 'r') as fh:
self._prot_data = json.load(fh)
def get_small_molecule_name(self, hms_lincs_id):
"""Get the name of a small molecule from the LINCS sm metadata.
Parameters
----------
hms_lincs_id : str
The HMS LINCS ID of the small molecule.
Returns
-------
str
The name of the small molecule.
"""
entry = self._get_entry_by_id(self._sm_data, hms_lincs_id)
if not entry:
return None
name = entry['Name']
return name
def get_small_molecule_refs(self, hms_lincs_id):
"""Get the id refs of a small molecule from the LINCS sm metadata.
Parameters
----------
hms_lincs_id : str
The HMS LINCS ID of the small molecule.
Returns
-------
dict
A dictionary of references.
"""
refs = {'HMS-LINCS': hms_lincs_id}
entry = self._get_entry_by_id(self._sm_data, hms_lincs_id)
# If there is no entry for this ID
if not entry:
return refs
# If there is an entry then fill up the refs with existing values
mappings = dict(chembl='ChEMBL ID', chebi='ChEBI ID',
pubchem='PubChem CID', lincs='LINCS ID')
for k, v in mappings.items():
if entry.get(v):
key = k.upper()
value = entry[v]
# Swap in primary PubChem IDs where there is an outdated one
if key == 'PUBCHEM' and value in pc_to_primary_mappings:
value = pc_to_primary_mappings[value]
# Fix CHEMBL IDs
if key == 'CHEMBL':
value = ensure_chembl_prefix(value)
refs[key] = value
return refs
def get_protein_refs(self, hms_lincs_id):
"""Get the refs for a protein from the LINCs protein metadata.
Parameters
----------
hms_lincs_id : str
            The HMS LINCS ID for the protein.
Returns
-------
dict
A dictionary of protein references.
"""
# TODO: We could get phosphorylation states from the protein data.
refs = {'HMS-LINCS': hms_lincs_id}
entry = self._get_entry_by_id(self._prot_data, hms_lincs_id)
# If there is no entry for this ID
if not entry:
return refs
mappings = dict(egid='Gene ID', up='UniProt ID')
for k, v in mappings.items():
if entry.get(v):
refs[k.upper()] = entry.get(v)
return refs
def _get_entry_by_id(self, resource, hms_lincs_id):
# This means it's a short ID
if '-' not in hms_lincs_id:
keys = [k for k in resource.keys() if k.startswith(hms_lincs_id)]
if not keys:
logger.debug('Couldn\'t find entry for %s' % hms_lincs_id)
return None
entry = resource[keys[0]]
# This means it's a full ID
else:
entry = resource.get(hms_lincs_id)
if not entry:
logger.debug('Couldn\'t find entry for %s' % hms_lincs_id)
return None
return entry
def get_drug_target_data():
"""Load the csv into a list of dicts containing the LINCS drug target data.
Returns
-------
data : list[dict]
A list of dicts, each keyed based on the header of the csv, with values
as the corresponding column values.
"""
url = LINCS_URL + '/datasets/20000/results'
return load_lincs_csv(url)
def _build_db_refs(lincs_id, data, **mappings):
db_refs = {'HMS-LINCS': lincs_id}
for db_ref, key in mappings.items():
if data[key]:
db_refs[db_ref.upper()] = data[key]
return db_refs
def load_lincs_csv(url):
"""Helper function to turn csv rows into dicts."""
resp = requests.get(url, params={'output_type': '.csv'}, timeout=120)
resp.raise_for_status()
if sys.version_info[0] < 3:
csv_io = BytesIO(resp.content)
else:
csv_io = StringIO(resp.text)
data_rows = list(read_unicode_csv_fileobj(csv_io, delimiter=','))
headers = data_rows[0]
return [{header: val for header, val in zip(headers, line_elements)}
for line_elements in data_rows[1:]]
def load_lincs_extras():
fname = os.path.join(resources, 'hms_lincs_extra.tsv')
with open(fname, 'r') as fh:
rows = [line.strip('\n').split('\t') for line in fh.readlines()]
return {r[0]: {'HMS LINCS ID': r[0],
'Name': r[1],
'ChEMBL ID': r[2] if r[2] else ''}
for r in rows[1:]}
# This is a set of mappings specific to HMS-LINCS that map outdated compound
# IDs appearing in HMS-LINCS to preferred compound IDs. This can be obtained
# more generally via indra.databases.pubchem_client, but this is a pre-compiled
# version here for fast lookups in this client.
pc_to_primary_mappings = \
{'23624255': '135564985',
'10451420': '135465539',
'10196499': '135398501',
'57899889': '135564632',
'53239990': '135564599',
'71433937': '136240579',
'53401173': '135539077',
'71543332': '135398499',
'5353940': '5169',
'49830557': '135398510',
'11258443': '135451019',
'68925359': '135440466',
'16750408': '135565545',
'57347681': '135565635',
'5357795': '92577',
'56965966': '135398516',
'24906282': '448949',
'66524294': '135398492',
'11696609': '135398495',
'9549301': '135473382',
'56965894': '135423438',
}
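
# Usage sketch (hedged, illustrative only): how the client defined above is
# typically queried. The HMS LINCS IDs below are placeholders and are not
# guaranteed to exist in the bundled resource files.
if __name__ == '__main__':
    client = LincsClient()
    # A short ID (no dash) is prefix-matched against the resource keys;
    # a full ID such as '10001-101' is looked up directly.
    print(client.get_small_molecule_name('10001'))
    print(client.get_small_molecule_refs('10001'))
    print(client.get_protein_refs('200001'))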
|
1622851
|
from django.urls import reverse
from django.conf import settings
from django.contrib.auth.hashers import check_password
from django.core.mail import EmailMultiAlternatives
from rest_framework import status
from restapi import models
from .base import APITestCaseExtended
from ..utils import decrypt_with_db_secret, get_static_bcrypt_hash_from_email
from mock import patch
import binascii
import random
import string
import os
import bcrypt
class RegistrationTests(APITestCaseExtended):
def test_get_authentication_register(self):
"""
Tests GET method on authentication_register
"""
url = reverse('authentication_register')
data = {}
response = self.client.get(url, data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_put_authentication_register(self):
"""
Tests PUT method on authentication_register
"""
url = reverse('authentication_register')
data = {}
response = self.client.put(url, data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_delete_authentication_register(self):
"""
Tests DELETE method on authentication_register
"""
url = reverse('authentication_register')
data = {}
response = self.client.delete(url, data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_create_account(self):
"""
Ensure we can create a new account object.
"""
url = reverse('authentication_register')
email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@example.com'
username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@' + settings.ALLOWED_DOMAINS[0]
authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
user_sauce = 'd25e29d812386431ec8f75ce4dce44464b57a9b742e7caeea78c9d984297c8f1'
data = {
'username': username,
'email': email,
'authkey': authkey,
'public_key': public_key,
'private_key': private_key,
'private_key_nonce': private_key_nonce,
'secret_key': secret_key,
'secret_key_nonce': secret_key_nonce,
'user_sauce': user_sauce,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(models.User.objects.count(), 1)
user = models.User.objects.get()
email_bcrypt = get_static_bcrypt_hash_from_email(email)
self.assertEqual(decrypt_with_db_secret(user.email), email)
self.assertEqual(user.email_bcrypt, email_bcrypt)
self.assertTrue(check_password(authkey, user.authkey))
self.assertEqual(user.public_key, public_key)
self.assertEqual(user.private_key, private_key)
self.assertEqual(user.private_key_nonce, private_key_nonce)
self.assertEqual(user.secret_key, secret_key)
self.assertEqual(user.secret_key_nonce, secret_key_nonce)
self.assertEqual(user.user_sauce, user_sauce)
self.assertTrue(user.is_active)
self.assertFalse(user.is_email_active)
def test_not_same_email(self):
"""
Ensure we can not create an account with the same email address twice
"""
url = reverse('authentication_register')
email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@example.com'
username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@' + settings.ALLOWED_DOMAINS[0]
authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
user_sauce = 'bbd90b581b9c956e9077a8c71f61ecd9bf9355bd1aac3590bd995028ed224ae0'
data = {
'username': username,
'email': email,
'authkey': authkey,
'public_key': public_key,
'private_key': private_key,
'private_key_nonce': private_key_nonce,
'secret_key': secret_key,
'secret_key_nonce': secret_key_nonce,
'user_sauce': user_sauce,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(models.User.objects.count(), 1)
user = models.User.objects.get()
self.assertEqual(decrypt_with_db_secret(user.email), email)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(models.User.objects.count(), 1)
self.assertTrue(response.data.get('email', False),
'E-Mail in error message does not exist in registration response')
def test_no_authoritative_email(self):
"""
Ensure we can not create an account with an authoritative email address
"""
url = reverse('authentication_register')
email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@example.com'
username = 'admin@' + settings.ALLOWED_DOMAINS[0]
authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
user_sauce = '05aa27037cf893e2a4113ddbe8836e1bf395556669904902643670fbf3841338'
data = {
'username': username,
'email': email,
'authkey': authkey,
'public_key': public_key,
'private_key': private_key,
'private_key_nonce': private_key_nonce,
'secret_key': secret_key,
'secret_key_nonce': secret_key_nonce,
'user_sauce': user_sauce,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_account_email_no_email_syntax(self):
"""
Test to register with a email without email syntax
"""
url = reverse('authentication_register')
email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10))
username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@' + settings.ALLOWED_DOMAINS[0]
authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
user_sauce = 'd25e29d812386431ec8f75ce4dce44464b57a9b742e7caeea78c9d984297c8f1'
data = {
'username': username,
'email': email,
'authkey': authkey,
'public_key': public_key,
'private_key': private_key,
'private_key_nonce': private_key_nonce,
'secret_key': secret_key,
'secret_key_nonce': secret_key_nonce,
'user_sauce': user_sauce,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('email'), [u'INVALID_EMAIL_FORMAT'])
def test_create_account_username_no_email_syntax(self):
"""
Test to register with a username without email syntax
"""
url = reverse('authentication_register')
email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@example.com'
username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10))
authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
user_sauce = 'd25e29d812386431ec8f75ce4dce44464b57a9b742e7caeea78c9d984297c8f1'
data = {
'username': username,
'email': email,
'authkey': authkey,
'public_key': public_key,
'private_key': private_key,
'private_key_nonce': private_key_nonce,
'secret_key': secret_key,
'secret_key_nonce': secret_key_nonce,
'user_sauce': user_sauce,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('username'), [u'INVALID_USERNAME_FORMAT'])
def test_create_account_username_not_in_allowed_domains(self):
"""
Test to register with a username that is not in allowed domains
"""
url = reverse('authentication_register')
email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@example.com'
username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@nugrsiojuhsgd.com'
authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
user_sauce = 'd25e29d812386431ec8f75ce4dce44464b57a9b742e7caeea78c9d984297c8f1'
data = {
'username': username,
'email': email,
'authkey': authkey,
'public_key': public_key,
'private_key': private_key,
'private_key_nonce': private_key_nonce,
'secret_key': secret_key,
'secret_key_nonce': secret_key_nonce,
'user_sauce': user_sauce,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('username'), [u'The provided domain in your username is not allowed for the registration on this server.'])
def test_create_account_username_with_not_allowed_chars(self):
"""
Test to register with a username that contains not allowed characters
"""
url = reverse('authentication_register')
email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@example.com'
username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '!@' + settings.ALLOWED_DOMAINS[0]
authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
user_sauce = 'd25e29d812386431ec8f75ce4dce44464b57a9b742e7caeea78c9d984297c8f1'
data = {
'username': username,
'email': email,
'authkey': authkey,
'public_key': public_key,
'private_key': private_key,
'private_key_nonce': private_key_nonce,
'secret_key': secret_key,
'secret_key_nonce': secret_key_nonce,
'user_sauce': user_sauce,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('username'), [u'Usernames may only contain letters, numbers, periods and dashes.'])
def test_create_account_username_start_with_a_period(self):
"""
Test to register with a username that starts with a period
"""
url = reverse('authentication_register')
email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@example.com'
username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@' + settings.ALLOWED_DOMAINS[0]
username = '.' + username
authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
user_sauce = 'd25e29d812386431ec8f75ce4dce44464b57a9b742e7caeea78c9d984297c8f1'
data = {
'username': username,
'email': email,
'authkey': authkey,
'public_key': public_key,
'private_key': private_key,
'private_key_nonce': private_key_nonce,
'secret_key': secret_key,
'secret_key_nonce': secret_key_nonce,
'user_sauce': user_sauce,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('username'), [u'INVALID_USERNAME_FORMAT'])
def test_create_account_username_start_with_a_dash(self):
"""
Test to register with a username that starts with a dash
"""
url = reverse('authentication_register')
email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@example.com'
username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@' + settings.ALLOWED_DOMAINS[0]
username = '-' + username
authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
user_sauce = 'd25e29d812386431ec8f75ce4dce44464b57a9b742e7caeea78c9d984297c8f1'
data = {
'username': username,
'email': email,
'authkey': authkey,
'public_key': public_key,
'private_key': private_key,
'private_key_nonce': private_key_nonce,
'secret_key': secret_key,
'secret_key_nonce': secret_key_nonce,
'user_sauce': user_sauce,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('username'), [u'Usernames may not start with a dash.'])
def test_create_account_username_end_with_a_period(self):
"""
Test to register with a username that ends with a period
"""
url = reverse('authentication_register')
email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@example.com'
username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '.@' + settings.ALLOWED_DOMAINS[0]
authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
user_sauce = 'd25e29d812386431ec8f75ce4dce44464b57a9b742e7caeea78c9d984297c8f1'
data = {
'username': username,
'email': email,
'authkey': authkey,
'public_key': public_key,
'private_key': private_key,
'private_key_nonce': private_key_nonce,
'secret_key': secret_key,
'secret_key_nonce': secret_key_nonce,
'user_sauce': user_sauce,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('username'), [u'INVALID_USERNAME_FORMAT'])
def test_create_account_username_not_contain_consecutive_periods(self):
"""
Test to register with a username that contains consecutive periods
"""
url = reverse('authentication_register')
email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@example.com'
username = 'njfgdopnsrgipojr..threhtr@' + settings.ALLOWED_DOMAINS[0]
authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
user_sauce = 'd25e29d812386431ec8f75ce4dce44464b57a9b742e7caeea78c9d984297c8f1'
data = {
'username': username,
'email': email,
'authkey': authkey,
'public_key': public_key,
'private_key': private_key,
'private_key_nonce': private_key_nonce,
'secret_key': secret_key,
'secret_key_nonce': secret_key_nonce,
'user_sauce': user_sauce,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('username'), [u'INVALID_USERNAME_FORMAT'])
def test_create_account_username_not_contain_consecutive_dashes(self):
"""
Test to register with a username that contains consecutive dashes
"""
url = reverse('authentication_register')
email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@example.com'
username = 'njfgdopnsrgipojr--threhtr@' + settings.ALLOWED_DOMAINS[0]
authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
user_sauce = 'd25e29d812386431ec8f75ce4dce44464b57a9b742e7caeea78c9d984297c8f1'
data = {
'username': username,
'email': email,
'authkey': authkey,
'public_key': public_key,
'private_key': private_key,
'private_key_nonce': private_key_nonce,
'secret_key': secret_key,
'secret_key_nonce': secret_key_nonce,
'user_sauce': user_sauce,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('username'), [u'Usernames may not contain consecutive dashes.'])
def test_create_account_username_periods_following_dashes(self):
"""
Test to register with a username that contains periods following dashes
"""
url = reverse('authentication_register')
email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@example.com'
username = 'njfgdopnsrgipojr-.threhtr@' + settings.ALLOWED_DOMAINS[0]
authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
user_sauce = 'd25e29d812386431ec8f75ce4dce44464b57a9b742e7caeea78c9d984297c8f1'
data = {
'username': username,
'email': email,
'authkey': authkey,
'public_key': public_key,
'private_key': private_key,
'private_key_nonce': private_key_nonce,
'secret_key': secret_key,
'secret_key_nonce': secret_key_nonce,
'user_sauce': user_sauce,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('username'), [u'Usernames may not contain dashes followed by periods.'])
def test_create_account_username_dashes_following_periods(self):
"""
Test to register with a username that contains dashes following periods
"""
url = reverse('authentication_register')
email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@example.com'
username = 'njfgdopnsrgipojr.-threhtr@' + settings.ALLOWED_DOMAINS[0]
authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
user_sauce = 'd25e29d812386431ec8f75ce4dce44464b57a9b742e7caeea78c9d984297c8f1'
data = {
'username': username,
'email': email,
'authkey': authkey,
'public_key': public_key,
'private_key': private_key,
'private_key_nonce': private_key_nonce,
'secret_key': secret_key,
'secret_key_nonce': secret_key_nonce,
'user_sauce': user_sauce,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('username'), [u'Usernames may not contain periods followed by dashes.'])
def test_create_account_username_end_with_a_dash(self):
"""
Test to register with a username that ends with a dash
"""
url = reverse('authentication_register')
email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@example.com'
username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '-@' + settings.ALLOWED_DOMAINS[0]
authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
user_sauce = 'd25e29d812386431ec8f75ce4dce44464b57a9b742e7caeea78c9d984297c8f1'
data = {
'username': username,
'email': email,
'authkey': authkey,
'public_key': public_key,
'private_key': private_key,
'private_key_nonce': private_key_nonce,
'secret_key': secret_key,
'secret_key_nonce': secret_key_nonce,
'user_sauce': user_sauce,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('username'), [u'Usernames may not end with a dash.'])
def test_create_account_username_with_only_one_chars(self):
"""
Test to register with a username that only has 1 char
"""
url = reverse('authentication_register')
email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@example.com'
username = ''.join(random.choice(string.ascii_lowercase) for _ in range(1)) + '@' + settings.ALLOWED_DOMAINS[0]
authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
user_sauce = 'd25e29d812386431ec8f75ce4dce44464b57a9b742e7caeea78c9d984297c8f1'
data = {
'username': username,
'email': email,
'authkey': authkey,
'public_key': public_key,
'private_key': private_key,
'private_key_nonce': private_key_nonce,
'secret_key': secret_key,
'secret_key_nonce': secret_key_nonce,
'user_sauce': user_sauce,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('username'), [u'Usernames may not be shorter than 2 chars.'])
@patch('restapi.views.register.settings', ALLOW_REGISTRATION=False)
def test_create_account_with_disabled_registration(self, patched_allow_registration):
"""
Ensure we cannot create a new account object while registration is disabled.
"""
url = reverse('authentication_register')
email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@example.com'
username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@' + settings.ALLOWED_DOMAINS[0]
authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
user_sauce = 'd25e29d812386431ec8f75ce4dce44464b57a9b742e7caeea78c9d984297c8f1'
data = {
'username': username,
'email': email,
'authkey': authkey,
'public_key': public_key,
'private_key': private_key,
'private_key_nonce': private_key_nonce,
'secret_key': secret_key,
'secret_key_nonce': secret_key_nonce,
'user_sauce': user_sauce,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@patch('restapi.views.register.settings', ENFORCE_MATCHING_USERNAME_AND_EMAIL=True)
    def test_create_account_with_enforced_matching_username_and_email(self, patched_force_matched_username_and_email):
"""
Ensure we cannot create a new account object with mismatching username and email if ENFORCE_MATCHING_USERNAME_AND_EMAIL is set
"""
url = reverse('authentication_register')
email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@example.com'
username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@' + settings.ALLOWED_DOMAINS[0]
authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
user_sauce = 'd25e29d812386431ec8f75ce4dce44464b57a9b742e7caeea78c9d984297c8f1'
data = {
'username': username,
'email': email,
'authkey': authkey,
'public_key': public_key,
'private_key': private_key,
'private_key_nonce': private_key_nonce,
'secret_key': secret_key,
'secret_key_nonce': secret_key_nonce,
'user_sauce': user_sauce,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@patch.object(EmailMultiAlternatives, 'send')
def test_create_account_sends_mail(self, mocked_send):
"""
Ensure a mail is sent if a new account is created
"""
url = reverse('authentication_register')
email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@example.com'
username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '@' + settings.ALLOWED_DOMAINS[0]
authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
user_sauce = 'd25e29d812386431ec8f75ce4dce44464b57a9b742e7caeea78c9d984297c8f1'
data = {
'username': username,
'email': email,
'authkey': authkey,
'public_key': public_key,
'private_key': private_key,
'private_key_nonce': private_key_nonce,
'secret_key': secret_key,
'secret_key_nonce': secret_key_nonce,
'user_sauce': user_sauce,
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
mocked_send.assert_called_once()
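
# Hedged refactoring sketch (hypothetical, not part of the original suite):
# the registration payload above is rebuilt verbatim in every test. A module-
# level helper like this would remove the duplication; a test would then call
# data = make_registration_data() and override individual fields as needed.
def make_registration_data(
        username=None, email=None,
        user_sauce='d25e29d812386431ec8f75ce4dce44464b57a9b742e7caeea78c9d984297c8f1'):
    def rand_str(n):
        return ''.join(random.choice(string.ascii_lowercase) for _ in range(n))
    def rand_hex(n_bytes):
        return binascii.hexlify(os.urandom(n_bytes)).decode()
    return {
        'username': username or rand_str(10) + '@' + settings.ALLOWED_DOMAINS[0],
        'email': email or rand_str(10) + '@example.com',
        'authkey': rand_hex(settings.AUTH_KEY_LENGTH_BYTES),
        'public_key': rand_hex(settings.USER_PUBLIC_KEY_LENGTH_BYTES),
        'private_key': rand_hex(settings.USER_PRIVATE_KEY_LENGTH_BYTES),
        'private_key_nonce': rand_hex(settings.NONCE_LENGTH_BYTES),
        'secret_key': rand_hex(settings.USER_SECRET_KEY_LENGTH_BYTES),
        'secret_key_nonce': rand_hex(settings.NONCE_LENGTH_BYTES),
        'user_sauce': user_sauce,
    }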
|
1622878
|
import os
import numpy as np
import matplotlib.pyplot as plt
from . import helper_generic as hlp
from . import helper_site_response as sr
from . import helper_signal_processing as sig
from PySeismoSoil.class_frequency_spectrum import Frequency_Spectrum as FS
from PySeismoSoil.class_Vs_profile import Vs_Profile
class Ground_Motion:
"""
Class implementation of an earthquake ground motion.
Parameters
----------
data : str or numpy.ndarray
If str: the full file name on the hard drive containing the data.
If np.ndarray: the numpy array containing the motion data.
The motion data can be acceleration, velocity, or displacement.
The data can have one column (which contains the motion) or two
columns (1st column: time; 2nd column: motion). If only one column
is supplied, another input parameter ``dt`` must also be supplied.
unit : str
Valid values include:
['m', 'cm',
'm/s', 'cm/s',
'm/s/s', 'cm/s/s', 'gal', 'g']
motion_type : {'accel', 'veloc', 'displ'}
Specifying what type of motion "data" contains. It needs to be
consistent with "unit". For example, if motion_type is "accel" and
unit is "m/s", an exception will be raised.
dt : float
Recording time interval of the ground motion. If ``data`` has only one
column, this parameter must be supplied. If ``data`` has two columns,
this parameter is ignored.
sep : str
Delimiter character for reading the text file. If ``data`` is supplied as
a numpy array, this parameter is ignored.
**kwargs_to_genfromtxt :
Any extra keyword arguments will be passed to ``numpy.genfromtxt()``
function for loading the data from the hard drive (if applicable).
Attributes
----------
dt : float
Recording time interval of the motion.
time : numpy.ndarray
1D numpy array: the time points in seconds.
accel : numpy.ndarray
A numpy array of two columns, whose first column is identical to "time",
and second column is the acceleration in SI unit.
veloc : numpy.ndarray
A numpy array of two columns, whose first column is identical to "time",
and second column is the velocity in SI unit.
displ : numpy.ndarray
A numpy array of two columns, whose first column is identical to "time",
and second column is the displacement in SI unit.
pga, pgv, pgd : float
Peak ground acceleration, velocity, and displacement in SI unit.
    pga_in_gal, pga_in_g, pgv_in_cm_s, pgd_in_cm : float
PGA, PGV, and PGD in other common units.
Arias_Intensity : numpy.ndarray
A numpy array of two columns, whose first column is identical to "time",
and second column is the Arias intensity.
Arias_Intensity_normalized : numpy.ndarray
A numpy array of two columns, whose first column is identical to "time",
and second column is the normalized Arias intensity.
peak_Arias_Intensity : float
The last element of the second column of Arias_Intensity.
T5_95 : float
The time interval (in seconds) between 5% of peak Arias intensity
to 95% of peak Arias intensity.
rms_accel, rms_veloc, rms_displ : float
Root-mean-square acceleration, velocity, and displacement of the motion.
_path_name, _file_name : str
Names of the directory and file of the input data, if a file name.
"""
def __init__(
self, data, *, unit, motion_type='accel', dt=None, sep='\t',
**kwargs_to_genfromtxt,
):
if isinstance(data, str): # a file name
self._path_name, self._file_name = os.path.split(data)
else:
self._path_name, self._file_name = None, None
data_, dt = hlp.read_two_column_stuff(data, delta=dt, sep=sep)
valid_unit_name = ['m', 'cm', 'm/s', 'cm/s', 'm/s/s', 'cm/s/s', 'gal', 'g']
if unit not in valid_unit_name:
if 's^2' in unit:
raise ValueError("Please use '/s/s' instead of 's^2' in `unit`.")
else:
raise ValueError(
"Invalid `unit` name. Valid names are: %s" % valid_unit_name
)
if motion_type not in ['accel', 'veloc', 'displ']:
raise ValueError("`motion_type` must be in {'accel', 'veloc', 'displ'}")
if (unit == 'g' or unit == 'gal') and motion_type != 'accel':
raise ValueError(
"If unit is 'g' or 'gal', then `motion_type` must be 'accel'."
)
if unit in ['cm', 'cm/s', 'cm/s/s', 'gal']:
data_[:, 1] = data_[:, 1] / 100.0 # cm --> m
elif unit == 'g':
data_[:, 1] = data_[:, 1] * 9.81 # g --> m/s/s
self.dt = float(dt) # float; unit: sec
self.npts = len(data_[:, 0]) # int; how many time points
self.time = np.linspace(0, self.dt*(self.npts-1), num=self.npts)
if motion_type == 'accel':
self.accel = data_ # numpy array, with length unit 'm'
self.veloc, self.displ = sr.num_int(self.accel)
elif motion_type == 'veloc':
self.accel = sr.num_diff(data_)
self.veloc = data_
self.displ = sr.num_int(data_)[0]
else: # displ
self.veloc = sr.num_diff(data_)
self.accel = sr.num_diff(self.veloc)
self.displ = data_
self.pga = float(np.max(np.abs(self.accel[:, 1])))
self.pgv = float(np.max(np.abs(self.veloc[:, 1])))
self.pgd = float(np.max(np.abs(self.displ[:, 1])))
self.pga_in_gal = self.pga * 100.0
self.pga_in_g = self.pga / 9.81
self.pgv_in_cm_s = self.pgv * 100.0
self.pgd_in_cm = self.pgd * 100.0
arias_result = self.__calc_Arias()
self.Arias_Intensity = arias_result[0]
self.Arias_Intensity_normalized = arias_result[1]
self.peak_Arias_Intensity = arias_result[2]
self.T5_95 = arias_result[3]
self.rms_accel, self.rms_veloc, self.rms_displ = self.__calc_RMS()
def __repr__(self):
"""
Basic information of a ground motion.
"""
text = 'n_pts=%d, dt=%.4gs, PGA=%.3gg=%.3ggal, PGV=%.3gcm/s, PGD=%.3gcm, T5_95=%.3gs'\
% (self.npts, self.dt, self.pga_in_g, self.pga_in_gal,
self.pgv_in_cm_s, self.pgd_in_cm, self.T5_95)
return text
def summary(self):
"""
Show a brief summary of the ground motion.
"""
print(self)
self.plot()
def get_Fourier_spectrum(
self, real_val=True, double_sided=False, show_fig=False,
):
"""
Get Fourier spectrum of the ground motion.
Parameters
----------
real_val : bool
Whether to return the amplitude (or "magnitude") of the complex
numbers.
double_sided : bool
Whether to return the second half of the spectrum (i.e. beyond the
Nyquist frequency).
show_fig : bool
Whether to show figures of the spectrum.
        Returns
        -------
        fs : PySeismoSoil.class_frequency_spectrum.Frequency_Spectrum
A frequency spectrum object.
"""
x = sig.fourier_transform(
self.accel, real_val=real_val, double_sided=double_sided, show_fig=show_fig,
)
fs = FS(x)
return fs
def get_response_spectra(
self, T_min=0.01, T_max=10, n_pts=60, damping=0.05, show_fig=True,
parallel=False, n_cores=None, subsample_interval=1,
):
"""
Get elastic response spectra of the ground motion, using the "exact"
solution to the equation of motion (Section 5.2, Dynamics of Structures,
Second Edition, by <NAME>).
Parameters
----------
T_min : float
Minimum period value to calculate the response spectra. Unit: sec.
T_max : float
Maximum period value to calculate the response spectra. Unit: sec.
n_pts : int
Number of points you want for the response spectra. A high number
increases computation time.
        damping : float
            Damping ratio of the dashpots, expressed as a fraction (e.g.,
            0.05), not as a percentage.
show_fig : bool
Whether to show a figure of the response spectra.
parallel : bool
Whether to perform the calculation in parallel.
n_cores : int or ``None``
Number of cores to use in parallel. Not necessary if not ``parallel``.
subsample_interval : int
The interval at which to subsample the input acceleration in the
time domain. A higher number reduces computation time, but could
lead to less accurate results.
Returns
-------
(Tn, SA, PSA, SV, PSV, SD, fn) : tuple of 1D numpy.ndarray
Periods, spectral acceleration, pseudo spectral acceleration,
spectral velocity, pseudo spectral velocity, spectral displacement,
and frequencies, respectively. Units: SI.
"""
rs = sr.response_spectra(
self.accel, damping=damping, T_min=T_min,
T_max=T_max, n_pts=n_pts, show_fig=show_fig,
parallel=parallel, n_cores=n_cores,
subsample_interval=subsample_interval,
)
return rs
def plot(self, show_as_unit='m', fig=None, ax=None, figsize=(5,6), dpi=100):
"""
Plots acceleration, velocity, and displacement waveforms together.
Parameters
----------
show_as_unit : str
What unit to convert the ground motion into, when plotting.
fig : matplotlib.figure.Figure or ``None``
Figure object. If None, a new figure will be created.
ax : matplotlib.axes._subplots.AxesSubplot or ``None``
Axes object. If None, a new axes will be created.
figsize: (float, float)
Figure size in inches, as a tuple of two numbers. The figure
size of ``fig`` (if not ``None``) will override this parameter.
dpi : float
Figure resolution. The dpi of ``fig`` (if not ``None``) will override
this parameter.
Returns
-------
fig : matplotlib.figure.Figure
The figure object being created or being passed into this function.
ax : matplotlib.axes._subplots.AxesSubplot
The axes object being created or being passed into this function.
"""
if self._file_name:
title = self._file_name
else:
title = ''
if show_as_unit == 'm':
accel_ = self.accel
elif show_as_unit == 'cm':
accel_ = self._unit_convert(unit='cm/s/s')
elif show_as_unit == 'g':
accel_ = self._unit_convert(unit='g')
else:
raise ValueError("`show_as_unit` can only be 'm', 'cm', or 'g'.")
fig, ax = sr.plot_motion(
accel_, unit=show_as_unit, title=title,
fig=fig, ax=ax, figsize=figsize, dpi=dpi,
)
return fig, ax
def _unit_convert(self, unit='m/s/s'):
"""
Convert the unit of acceleration. "In-place" conversion is not allowed,
because ground motions are always stored in SI units internally.
Parameters
----------
unit : {'m/s/s', 'cm/s/s', 'gal', 'g'}
What unit to convert the acceleration into.
Returns
-------
accel : numpy.ndarray
Acceleration time history with the desired unit. It is a 2D numpy
            array with two columns (time and acceleration).
"""
accel = self.accel.copy()
if unit == 'm/s/s':
pass
elif unit in ['cm/s/s', 'gal']:
accel[:, 1] *= 100 # m/s/s --> cm/s/s
elif unit == 'g':
accel[:, 1] /= 9.81 # m/s/s --> g
else:
raise ValueError('Unrecognized `unit`. Must be an acceleration unit.')
return accel
def __calc_RMS(self):
"""
Private method.
Returns RMS acceleration, velocity, and displacement. Unit: SI.
"""
acc = self.accel
vel, dis = sr.num_int(acc)
rms_accel = np.sqrt(np.mean(acc[:, 1] ** 2.0))
rms_veloc = np.sqrt(np.mean(vel[:, 1] ** 2.0))
rms_displ = np.sqrt(np.mean(dis[:, 1] ** 2.0))
return rms_accel, rms_veloc, rms_displ
def __arias_time_bounds(self, t, Ia_normalized, low_lim, high_lim):
"""
Private method.
Calculate lower and upper time bounds corresponding to two given
normalized Arias intensity percentages (e.g., [0.05, 0.95])
"""
if low_lim >= high_lim:
raise ValueError('low_lim must be smaller than high_lim.')
if t is None:
t = self.accel[:, 0]
if Ia_normalized is None:
Ia_normalized = self.Arias_Intensity_normalized[:, 1]
if len(t) != len(Ia_normalized):
raise ValueError('Ia_normalized and t must have the same length.')
n = len(t)
        t_low = 0.0  # fallback, in case the low_lim threshold is crossed at t[0]
        t_high = t[-1]  # fallback, in case high_lim is never reached before max(t)
prev = Ia_normalized[0]
for i in range(n):
if Ia_normalized[i] >= low_lim and prev < low_lim:
t_low = t[i]
if Ia_normalized[i] >= high_lim and prev < high_lim:
t_high = t[i]
prev = Ia_normalized[i]
return t_low, t_high
def __calc_Arias(self, motion='accel', show_fig=False):
"""
Private method.
Calculate Arias intensity. Returns the intensity time series, peak
intensity, and T5_95 (time interval from 5% Arias intensity to 95%
Arias intensity).
"""
g = 9.81
if motion == 'accel':
t = self.accel[:, 0]
a = self.accel[:, 1]
elif motion == 'veloc':
t = self.veloc[:, 0]
a = self.veloc[:, 1]
elif motion == 'displ':
t = self.displ[:, 0]
a = self.displ[:, 1]
n = len(a)
dt = t[1] - t[0]
Ia_1col = np.zeros(n)
a_sq = a ** 2.0
for i in range(1,n):
Ia_1col[i] = Ia_1col[i - 1] + np.pi / (2 * g) * a_sq[i - 1] * dt
Ia_peak = float(Ia_1col[-1])
Ia = np.column_stack((t,Ia_1col))
Ia_norm_1col = Ia_1col / Ia_peak # normalized
Ia_norm = np.column_stack((t,Ia_norm_1col))
t_low, t_high = self.__arias_time_bounds(t, Ia_norm_1col, 0.05, 0.95)
T5_95 = t_high - t_low
if show_fig:
plt.figure()
ax = plt.axes()
            ax.plot(t, Ia_1col)  # plot the intensity column, not the 2-column array
ax.grid(ls=':')
ax.set_xlabel('Time [sec]')
ax.set_ylabel('Arias intensity')
y_low, y_high = ax.get_ylim()
plt.plot([t_low, t_low], [y_low, y_high], lw=0.75, ls='--', c='r')
plt.plot([t_high, t_high], [y_low, y_high], lw=0.75, ls='--', c='r')
return Ia, Ia_norm, Ia_peak, T5_95
def scale_motion(self, factor=1.0, target_PGA_in_g=None):
"""
Scale ground motion, either by specifying a factor, or specifying a
target PGA level.
Parameters
----------
factor : float
            The factor by which to multiply the original acceleration (whose
            unit is m/s/s).
target_PGA_in_g : float
The target PGA (in g). If ``target_PGA_in_g`` is not None, it
overrides ``factor``.
Returns
-------
scaled_motion : Ground_Motion
The scaled motion
"""
        if target_PGA_in_g is not None:  # target PGA overrides `factor`
            factor = target_PGA_in_g / self.pga_in_g
        # otherwise, use `factor` as provided
time = self.accel[:, 0]
acc = self.accel[:, 1]
acc_scaled = acc * factor
return Ground_Motion(np.column_stack((time, acc_scaled)), unit='m')
def truncate(self, limit, arias=True, extend=[0, 0], show_fig=False):
"""
Truncate ground motion, removing data points in the head and/or tail.
Parameters
----------
limit : (float, float) or [float, float]
The lower/upper bounds of time (e.g., [2, 95]) or normalized Arias
intensity (e.g., [0.05, 0.95]).
arias : bool
If ``True``, ``limit`` means the normalized Arias intensity.
Otherwise, ``limit`` means the actual time.
extend : tuple or list of two floats
How many seconds to extend before and after the original truncated
            time limits. For example, if extend is [5, 5] sec and the original
            time limits are [3, 50] sec, the actual time limits become
            [0, 55] sec (3 - 5 = -2 is clipped to 0).
show_fig : bool
Whether or not to show the waveforms before and after truncation.
Returns
-------
truncated_accel : Ground_Motion
Truncated ground motion.
fig : matplotlib.figure.Figure
The figure object being created or being passed into this function.
ax : matplotlib.axes._subplots.AxesSubplot
The axes object being created or being passed into this function.
(n1, n2) : tuple<int>
The indices at which signal is truncated. In other words,
truncated_accel = original_accel[n1 : n2].
"""
if not isinstance(limit, (tuple, list)):
raise TypeError('"limit" must be a list/tuple of two elements.')
if len(limit) != 2:
raise ValueError('Length of "limit" must be 2.')
if not isinstance(extend, (tuple, list)):
raise TypeError('"extend" must be a list/tuple of two elements.')
if len(extend) != 2:
raise ValueError('Length of "extend" must be 2.')
if extend[0] < 0 or extend[1] < 0:
            raise ValueError('"extend" values must be non-negative.')
lim1, lim2 = limit
if lim1 >= lim2:
raise ValueError('"limit" must be in ascending order.')
if not arias: # "limit" represents actual time limits
t1, t2 = lim1, lim2
else: # "limit" represents bounds of normalized Arias instensity
t1, t2 = self.__arias_time_bounds(None, None, lim1, lim2)
t1 -= extend[0]
t2 += extend[1]
n1 = int(t1 / self.dt)
n2 = int(t2 / self.dt)
        n1 = max(n1, 0)
        n2 = min(n2, self.npts)
time_trunc = self.accel[:n2-n1, 0]
accel_trunc = self.accel[n1:n2, 1]
truncated = np.column_stack((time_trunc, accel_trunc))
if show_fig:
ax = [None] * 3
fig = plt.figure(figsize=(5,6))
fig.subplots_adjust(left=0.2)
ax[0] = fig.add_subplot(3,1,1)
ax[0].plot(self.time, self.accel[:,1], 'gray', lw=1.75, label='original')
ax[0].plot(self.time[n1:n2], truncated[:,1], 'm', lw=1., label='truncated')
ax[0].grid(ls=':')
ax[0].set_ylabel('Accel. [m/s/s]')
ax[0].legend(loc='best')
ax[1] = fig.add_subplot(3,1,2)
ax[1].plot(self.time, self.veloc[:,1], 'gray', lw=1.75)
ax[1].plot(self.time[n1:n2], sr.num_int(truncated)[0][:,1], 'm', lw=1.)
ax[1].grid(ls=':')
ax[1].set_ylabel('Veloc. [m/s]')
ax[2] = fig.add_subplot(3,1,3)
ax[2].plot(self.time, self.displ[:,1], 'gray', lw=1.75)
ax[2].plot(self.time[n1:n2], sr.num_int(truncated)[1][:,1], 'm', lw=1.)
ax[2].grid(ls=':')
ax[2].set_ylabel('Displ. [m]')
ax[2].set_xlabel('Time [sec]')
fig.tight_layout(pad=0.3)
else:
fig, ax = None, None
return Ground_Motion(truncated, unit='m'), fig, ax, (n1, n2)
def amplify_by_tf(
self, transfer_function, taper=False, extrap_tf=True,
deconv=False, show_fig=False, dpi=100, return_fig_obj=False,
):
"""
Amplify (or de-amplify) ground motions in the frequency domain. The
mathematical process behind this function is as follows:
(1) INPUT = fft(input)
(2) OUTPUT = INPUT * TRANS_FUNC
(3) output = ifft(OUTPUT)
Parameters
----------
transfer_function : PySeismoSoil.class_frequency_spectrum.Frequency_Spectrum
The transfer function to apply to the ground motion. It only needs
to be "single-sided" (see notes below).
taper : bool
Whether to taper the input acceleration (using Tukey taper)
extrap_tf : bool
Whether to extrapolate the transfer function if its frequency range
does not reach the frequency range implied by the input motion
deconv : bool
If ``False``, a regular amplification is performed; otherwise, the
transfer function is "deducted" from the input motion ("deconvolution").
show_fig : bool
Whether or not to show an illustration of how the calculation is
carried out.
dpi : int
Desired DPI for the figures; only effective when ``show_fig`` is
``True``.
return_fig_obj : bool
Whether or not to return figure and axis objects to the caller.
Returns
-------
output_motion : Ground_Motion
The resultant ground motion in time domain
fig : matplotlib.figure.Figure, *optional*
The figure object being created or being passed into this function.
ax : matplotlib.axes._subplots.AxesSubplot, *optional*
The axes object being created or being passed into this function.
Notes
-----
"Single sided":
For example, the sampling time interval of ``input_motion`` is 0.01
sec, then the Nyquist frequency is 50 Hz. Therefore, the transfer
function needs to contain information at least up to the Nyquist
frequency, i.e., at least 0-50 Hz, and anything above 50 Hz will
not affect the input motion at all.
"""
if not isinstance(transfer_function, FS):
raise TypeError(
'`transfer_function` needs to be of type '
'`Frequency_Spectrum` (or its subclass).'
)
freq = transfer_function.freq
tf_1col = transfer_function.spectrum
transfer_function_single_sided = (freq, tf_1col)
result = sr.amplify_motion(
self.accel,
transfer_function_single_sided,
taper=taper,
extrap_tf=extrap_tf,
deconv=deconv,
show_fig=show_fig,
dpi=dpi,
return_fig_obj=return_fig_obj,
)
if return_fig_obj:
output_accel, fig, ax = result
return Ground_Motion(output_accel, unit='m'), fig, ax
else:
output_accel = result
return Ground_Motion(output_accel, unit='m')
def amplify(self, soil_profile, boundary='elastic', show_fig=False):
"""
Amplify the ground motion via a 1D soil profile, using linear site
amplification method.
Parameters
----------
soil_profile : PySeismoSoil.class_Vs_profile.Vs_Profile
            The soil profile through which to amplify the ground motion.
boundary : {'elastic', 'rigid'}
The type of boundary of the bottom of the soil profile.
show_fig : bool
            Whether or not to show a figure that illustrates the amplification
            process.
Returns
-------
output_motion : Ground_Motion
The amplified ground motion.
"""
if not isinstance(soil_profile, Vs_Profile):
raise TypeError('`soil_profile` must be of type `Vs_Profile`.')
vs_profile = soil_profile.vs_profile
surface_motion = self.accel # note: unit is SI
response = sr.linear_site_resp(
vs_profile, surface_motion, deconv=False,
boundary=boundary, show_fig=show_fig,
)[0]
output_motion = Ground_Motion(response, unit='m')
return output_motion
def compare(
self, another_ground_motion, this_ground_motion_as_input=True,
smooth=True, input_accel_label='Input', output_accel_label='Output',
):
"""
Compare with another ground motion: plot comparison figures showing
two time histories and the transfer function between them.
Parameters
----------
another_ground_motion : Ground_Motion
Another ground motion object.
this_ground_motion_as_input : bool
If ``True``, this ground motion is treated as the input ground
motion. Otherwise, the other ground motion is treated as the input.
smooth : bool
In the comparison plot, whether or not to also show the smoothed
amplification factor.
input_accel_label : str
The text label for the input acceleration in the figure legend.
output_accel_label : str
The text label for the output acceleration in the figure legend.
Returns
-------
fig : matplotlib.figure.Figure
The figure object created in this function.
ax : matplotlib.axes._subplots.AxesSubplot
The axes object created in this function.
"""
if not isinstance(another_ground_motion, Ground_Motion):
raise TypeError('`another_ground_motion` must be a `Ground_Motion`.')
# END IF
if this_ground_motion_as_input:
accel_in = self.accel
accel_out = another_ground_motion.accel
else:
accel_in = another_ground_motion.accel
accel_out = self.accel
# END IF-ELSE
amp_ylabel = f'Amplification\n({input_accel_label} ➡ {output_accel_label})'
phs_ylabel = f'Phase shift [rad]\n({input_accel_label} ➡ {output_accel_label})'
fig, ax = sr.compare_two_accel(
accel_in,
accel_out,
smooth=smooth,
input_accel_label=input_accel_label,
output_accel_label=output_accel_label,
amplification_ylabel=amp_ylabel,
phase_shift_ylabel=phs_ylabel,
)
return fig, ax
def deconvolve(self, soil_profile, boundary='elastic', show_fig=False):
"""
Deconvolve the ground motion, i.e., propagate the motion downwards to
get the borehole motion (rigid boundary) or the "rock outcrop" motion
(elastic boundary).
Parameters
----------
soil_profile : PySeismoSoil.class_Vs_profile.Vs_Profile
            The soil profile through which to deconvolve the ground motion.
boundary : {'elastic', 'rigid'}
The type of boundary of the bottom of the soil profile.
show_fig : bool
Whether or not to show a figure that illustrates the deconvolution
process.
Returns
-------
deconv_motion : Ground_Motion
The deconvolved motion on the rock outcrop or in a borehole.
"""
if not isinstance(soil_profile, Vs_Profile):
raise TypeError('`soil_profile` must be of type `Vs_Profile`.')
vs_profile = soil_profile.vs_profile
surface_motion = self.accel # note: unit is SI
response = sr.linear_site_resp(
vs_profile, surface_motion, deconv=True,
boundary=boundary, show_fig=show_fig,
)[0]
deconv_motion = Ground_Motion(response, unit='m')
return deconv_motion
def baseline_correct(self, cutoff_freq=0.20, show_fig=False):
"""
Baseline-correct the acceleration (via zero-phase-shift high-pass
method).
Parameters
----------
cutoff_freq : float
The frequency (unit: Hz) for high passing. Energies below this
frequency are filtered out.
show_fig : bool
Whether or not to show figures comparing before and after.
Returns
-------
corrected : Ground_Motion
The baseline-corrected ground motion, with SI units.
"""
accel_ = sig.baseline(self.accel, show_fig=show_fig, cutoff_freq=cutoff_freq)
return Ground_Motion(accel_, unit='m')
def lowpass(self, cutoff_freq, show_fig=False, filter_order=4, padlen=150):
"""
Zero-phase-shift low-pass filtering.
Parameters
----------
cutoff_freq : float
Cut-off frequency (unit: Hz).
filter_order : int
Filter order.
padlen : int
Pad length (the number of elements by which to extend x at both ends
of axis before applying the filter). If None, use the default value
(https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html).
Returns
-------
filtered : Ground_Motion
Filtered signal.
"""
accel_ = sig.lowpass(
self.accel, cutoff_freq, show_fig=show_fig,
filter_order=filter_order, padlen=padlen,
)
return Ground_Motion(accel_, unit='m')
def highpass(self, cutoff_freq, show_fig=False, filter_order=4, padlen=150):
"""
Zero-phase-shift high-pass filtering.
        Parameters
        ----------
cutoff_freq : float
Cut-off frequency (unit: Hz).
filter_order : int
Filter order.
padlen : int
Pad length (the number of elements by which to extend x at both ends
of axis before applying the filter). If None, use the default value
(https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html).
Returns
-------
filtered : Ground_Motion
Filtered signal.
"""
accel_ = sig.highpass(
self.accel, cutoff_freq, show_fig=show_fig,
filter_order=filter_order, padlen=padlen,
)
return Ground_Motion(accel_, unit='m')
def bandpass(self, cutoff_freq, show_fig=False, filter_order=4, padlen=150):
"""
Zero-phase-shift band-pass filtering.
        Parameters
        ----------
        cutoff_freq : [float, float]
            Cut-off frequencies (in Hz), from low to high.
        show_fig : bool
            Whether to show a figure comparing the signal before and after
            filtering.
filter_order : int
Filter order.
padlen : int
Pad length (the number of elements by which to extend x at both ends
of axis before applying the filter). If None, use the default value
(https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html).
Returns
-------
filtered : Ground_Motion
            Filtered signal.
"""
accel_ = sig.bandpass(
self.accel, cutoff_freq, show_fig=show_fig,
filter_order=filter_order, padlen=padlen,
)
return Ground_Motion(accel_, unit='m')
def bandstop(self, cutoff_freq, show_fig=False, filter_order=4, padlen=150):
"""
Zero-phase-shift band-stop filtering.
        Parameters
        ----------
        cutoff_freq : [float, float]
            Cut-off frequencies (in Hz), from low to high.
        show_fig : bool
            Whether to show a figure comparing the signal before and after
            filtering.
        filter_order : int
            Filter order.
        padlen : int
Pad length (the number of elements by which to extend x at both ends
of axis before applying the filter). If None, use the default value
(https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html).
Returns
-------
filtered : Ground_Motion
            Filtered signal.
"""
accel_ = sig.bandstop(
self.accel, cutoff_freq, show_fig=show_fig,
filter_order=filter_order, padlen=padlen,
)
return Ground_Motion(accel_, unit='m')
def save_accel(
self, fname, sep='\t', t_prec='%.5g', motion_prec='%.5g', unit='m/s/s',
):
"""
        Save the acceleration as a text file.
Parameters
----------
fname : str
File name (including path).
sep : str
Delimiter.
t_prec : str
The precision specifier for the "time" column.
motion_prec : str
The precision specifier for the "motion" column.
unit : str
            The unit in which to export the acceleration. Valid options are
            'm/s/s' (no conversion), 'g', 'gal', and 'cm/s/s'.
"""
fmt = [t_prec, motion_prec]
        data = self.accel.copy()  # copy so that unit conversion does not mutate self.accel
        if unit == 'm/s/s':
            pass
        elif unit == 'g':
            data[:, 1] = data[:, 1] / 9.81
        elif unit in ['gal', 'cm/s/s']:
            data[:, 1] = data[:, 1] * 100.0
        else:
            raise ValueError(f'Unrecognized `unit`: {unit}')
        np.savetxt(fname, data, fmt=fmt, delimiter=sep)
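if __name__ == '__main__':
    # Hedged demo, not part of the library: build a synthetic two-column
    # (time, acceleration) record, band-pass it, and export it in gal.
    # The `Ground_Motion(array, unit='m')` construction mirrors how this
    # module builds motions internally; treat the constructor signature
    # as an assumption.
    _t = np.linspace(0, 10, 1001)
    _a = 0.1 * np.sin(2 * np.pi * 1.0 * _t) + 0.02 * np.sin(2 * np.pi * 20.0 * _t)
    _gm = Ground_Motion(np.column_stack([_t, _a]), unit='m')
    _filtered = _gm.bandpass([0.5, 10.0])  # keep only the 0.5-10 Hz band
    _filtered.save_accel('demo_accel.txt', unit='gal')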
|
1622880
|
import random
from tree import AVLTree, SDPTree, GraphicalTree
avl_tree = AVLTree()
# sdp_tree = SDPTree()
random.seed(0)
# arr = random.sample(range(100), 100)
arr = range(10)
for i in arr:
avl_tree.add(10 - i)
# sdp_tree.add_sdp_rec(i)
g_tree = GraphicalTree(avl_tree, "Mega_tree", 1920, 1080)
print("n=100 Размер Контр. сумма Высота Средн.высота")
# print("СДП ", sdp_tree.size(), " ", sdp_tree.check_sum(), " ",
# sdp_tree.height(), " ", sdp_tree.medium_height())
print("АВЛ ", avl_tree.size(), " ", avl_tree.check_sum(), " ",
avl_tree.height(), " ", avl_tree.medium_height())
g_tree.start()
|
1622895
|
import os
import tempfile
import unittest
from pyepw.epw import Comments1, EPW
class TestComments1(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_comments_1(self):
obj = Comments1()
var_comments_1 = "comments_1"
obj.comments_1 = var_comments_1
epw = EPW(comments_1=obj)
epw.save(self.path, check=False)
epw2 = EPW()
epw2.read(self.path)
self.assertEqual(epw2.comments_1.comments_1, var_comments_1)
|
1622914
|
import torch
import itertools
from .cut_semantic_mask_model import CUTSemanticMaskModel
from . import networks
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
from .modules import loss
from util.iter_calculator import IterCalculator
from util.network_group import NetworkGroup
class ReCUTSemanticMaskModel(CUTSemanticMaskModel):
    @staticmethod
    def modify_commandline_options(parser, is_train=True):
parser = CUTSemanticMaskModel.modify_commandline_options(parser,is_train)
parser.add_argument('--adversarial_loss_p',action='store_true',help='if True, also train the prediction model with an adversarial loss')
parser.add_argument('--nuplet_size', type=int, default=3,help='Number of frames loaded')
parser.add_argument('--netP', type=str, default='unet_128', help='specify P architecture [resnet_9blocks | resnet_6blocks | resnet_attn | unet_256 | unet_128]')
        parser.add_argument('--no_train_P_fake_images',action='store_true',help="if True, P won't be trained over fake images projections")
        parser.add_argument('--projection_threshold',default=1.0,type=float,help='threshold of the real images projection loss below which the fake projection and fake reconstruction losses are applied')
parser.add_argument('--P_lr', type=float, default=0.0002, help='initial learning rate for P networks')
return parser
def __init__(self, opt):
super().__init__(opt)
if self.opt.adversarial_loss_p:
self.loss_names_G += ["proj_fake_B_adversarial"]
self.loss_names_G += ["recut"]
self.loss_names_P = ["proj_real_B"]
if self.opt.adversarial_loss_p:
self.loss_names_P += ["proj_real_A_adversarial","proj_real_B_adversarial"]
self.loss_names = self.loss_names_G + self.loss_names_f_s + self.loss_names_D + self.loss_names_P
if self.opt.iter_size > 1 :
self.iter_calculator = IterCalculator(self.loss_names)
for i,cur_loss in enumerate(self.loss_names):
self.loss_names[i] = cur_loss + '_avg'
setattr(self, "loss_" + self.loss_names[i], 0)
self.visual_names += [["real_A_last","proj_fake_B"],["real_B_last","proj_real_B"]]
self.netP_B = networks.define_G((self.opt.nuplet_size-1) * opt.input_nc, opt.output_nc,opt.ngf, opt.netP, opt.norm, not opt.no_dropout, opt.G_spectral, opt.init_type, opt.init_gain,self.gpu_ids,padding_type=opt.G_padding_type,opt=self.opt)
self.model_names += ["P_B"]
self.optimizer_P = torch.optim.Adam(itertools.chain(self.netP_B.parameters()),lr=opt.P_lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_P)
if self.opt.no_train_P_fake_images:
self.group_P = NetworkGroup(networks_to_optimize=["P_B"],forward_functions=["forward_P"],backward_functions=["compute_P_loss"],loss_names_list=["loss_names_P"],optimizer=["optimizer_P"],loss_backward=["loss_P"])
self.networks_groups.insert(1,self.group_P)
        else: # P and G networks will be trained at the same time
self.group_G = NetworkGroup(networks_to_optimize=["G","P_B"],forward_functions=["forward","forward_P"],backward_functions=["compute_G_loss","compute_P_loss"],loss_names_list=["loss_names_G","loss_names_P"],optimizer=["optimizer_G","optimizer_P"],loss_backward=["loss_G","loss_P"])
self.networks_groups[0] = self.group_G
self.criterionCycle = torch.nn.L1Loss()
def set_input(self, input):
super().set_input(input)
batch_size,nuplet,channels,h,w=self.real_A.shape
self.shape_fake=[batch_size,(nuplet-1)*channels,h,w]
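        # Split off the last frame of each nuplet as the prediction target,
        # then flatten the remaining (nuplet - 1) frames into the batch
        # dimension so the networks can process them frame by frame.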
self.real_A_last = self.real_A[:,-1]
self.real_B_last = self.real_B[:,-1]
self.input_A_label_last = self.input_A_label[:,-1]
self.input_B_label_last = self.input_B_label[:,-1]
self.real_A =self.real_A[:,:-1]
self.real_B =self.real_B[:,:-1]
self.input_A_label = self.input_A_label[:,:-1]
self.input_B_label = self.input_B_label[:,:-1]
self.real_A = torch.flatten(self.real_A,start_dim=0,end_dim=1)
self.real_B = torch.flatten(self.real_B,start_dim=0,end_dim=1)
self.input_A_label = torch.flatten(self.input_A_label,start_dim=0,end_dim=2)
self.input_B_label = torch.flatten(self.input_B_label,start_dim=0,end_dim=2)
def data_dependent_initialize(self, data):
self.set_input(data)
bs_per_gpu = self.real_A.size(0) // max(len(self.opt.gpu_ids), 1)
self.shape_fake[0]=bs_per_gpu
self.real_A = self.real_A[:bs_per_gpu]
self.real_B = self.real_B[:bs_per_gpu]
self.input_A_label=self.input_A_label[:bs_per_gpu]
if hasattr(self,'input_B_label'):
self.input_B_label=self.input_B_label[:bs_per_gpu]
self.forward() # compute fake images: G(A)
self.forward_P()
if self.opt.isTrain:
self.compute_P_loss()
self.compute_D_loss()
self.compute_f_s_loss()
self.compute_G_loss()
self.loss_D.backward()# calculate gradients for D
self.loss_f_s.backward()# calculate gradients for f_s
self.loss_G.backward()# calculate gradients for G
self.loss_P.backward()# calculate gradients for P
if self.opt.lambda_NCE > 0.0:
self.optimizer_F = torch.optim.Adam(self.netF.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, self.opt.beta2))
self.optimizers.append(self.optimizer_F)
for optimizer in self.optimizers:
optimizer.zero_grad()
visual_names_seg_A = ['input_A_label','gt_pred_A','pfB_max']
if hasattr(self,'input_B_label'):
visual_names_seg_B = ['input_B_label']
else:
visual_names_seg_B = []
visual_names_seg_B += ['gt_pred_B']
self.visual_names += [visual_names_seg_A,visual_names_seg_B]
if self.opt.out_mask and self.isTrain:
visual_names_out_mask_A = ['real_A_out_mask','fake_B_out_mask']
self.visual_names += [visual_names_out_mask_A]
def forward(self):
#self.shape_fake=[self.get_current_batch_size(),(self.opt.nuplet_size-1)*channels,self.opt.crop_size,self.opt.crop_size]
super().forward()
## Projection next time step over fake images
self.proj_fake_B = self.netP_B(self.fake_B.reshape(self.shape_fake))
def compute_G_loss(self):
super().compute_G_loss()
## GAN loss over fake images projection for G (and P if no_train_P_fake_images is False)
if self.opt.adversarial_loss_p:
self.loss_proj_fake_B_adversarial = self.compute_G_loss_GAN_generic(self.netD,"B",self.D_loss,fake_name="proj_fake_B")
else:
self.loss_proj_fake_B_adversarial = 0
        ## ReCUT (NCE) loss between the fake-image projection and the last real A frame
        if not hasattr(self, 'loss_proj_real_B') or self.loss_proj_real_B > self.opt.projection_threshold: # if the P network isn't accurate enough on real images yet, we don't use it on fake images:
self.loss_recut = 0
self.loss_proj_fake_B_adversarial = 0
else:
self.loss_recut = self.calculate_NCE_loss(self.real_A_last, self.proj_fake_B)
self.loss_G += self.loss_proj_fake_B_adversarial + self.loss_recut
def forward_P(self):
## Real images projection
self.proj_real_B = self.netP_B(self.real_B.reshape(self.shape_fake))
def compute_P_loss(self):
## Pixel to pixel loss between real images projection and ground truth
lambda_P=10
self.loss_proj_real_B = self.criterionCycle(self.proj_real_B, self.real_B_last) * lambda_P
## GAN loss over real images projection for P
if self.opt.adversarial_loss_p:
self.loss_proj_real_B_adversarial=self.compute_G_loss_GAN_generic(self.netD,"B",self.D_loss,fake_name="proj_real_B")
else:
self.loss_proj_real_B_adversarial = 0
self.loss_P = self.loss_proj_real_B + self.loss_proj_real_B_adversarial
def compute_D_loss(self):
super().compute_D_loss()
## GAN loss over fake images projections for D
self.loss_D += self.compute_D_loss_generic(self.netD,"B",self.D_loss,real_name="real_B_last",fake_name="proj_fake_B")
## GAN loss over real images projections for D
self.loss_D += self.compute_D_loss_generic(self.netD,"B",self.D_loss,real_name="real_B_last",fake_name="proj_real_B")
if self.opt.netD_global != "none":
## GAN loss over fake images projections for D_global
self.loss_D_global += self.compute_D_loss_generic(self.netD_global,"B",self.D_global_loss,real_name="real_B_last",fake_name="proj_fake_B")
## GAN loss over real images projections for D_global
self.loss_D_global += self.compute_D_loss_generic(self.netD_global,"B",self.D_global_loss,real_name="real_B_last",fake_name="proj_real_B")
self.loss_D = self.loss_D + self.loss_D_global
|
1622962
|
class WorkflowTranslator(object):
"""Base class for translators."""
def __init__(self):
pass
def translate(self, wf):
raise NotImplementedError("Needs implementation in derived classes.")
@staticmethod
def get_translator(service_name):
from popper.translators.translator_drone import DroneTranslator
from popper.translators.translator_task import TaskTranslator
if service_name == "drone":
return DroneTranslator()
elif service_name == "task":
return TaskTranslator()
else:
raise Exception(f"Unknown service {service_name}")
|
1622963
|
from google.cloud import storage
import gcsfs
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.externals import joblib
import pandas as pd
import numpy as np
from time import time
import re
cleanup_re = re.compile('[^a-z]+')
def cleanup(sentence):
sentence = sentence.lower()
sentence = cleanup_re.sub(' ', sentence).strip()
return sentence
def download_blob(blob, download_path):
blob.download_to_filename(download_path)
print('Blob {} downloaded to {}.'.format(
blob.name,
download_path))
def function_handler(request):
request_json = request.get_json(silent=True)
x = request_json['input']
dataset_bucket = request_json['dataset_bucket']
dataset_blob_name = request_json['dataset_blob_name']
model_bucket = request_json['model_bucket']
model_blob_name = request_json['model_blob_name']
storage_client = storage.Client()
model_file_path = "/tmp/" + model_blob_name
m_bucket = storage_client.get_bucket(model_bucket)
m_blob = m_bucket.blob(model_blob_name)
download_blob(m_blob, model_file_path)
fs = gcsfs.GCSFileSystem(project='Serverless-faas-workbench')
with fs.open(dataset_bucket+'/'+dataset_blob_name) as f:
df = pd.read_csv(f)
start = time()
df['train'] = df['Text'].apply(cleanup)
tfidf_vect = TfidfVectorizer(min_df=100).fit(df['train'])
df_input = pd.DataFrame()
df_input['x'] = [x]
df_input['x'] = df_input['x'].apply(cleanup)
X = tfidf_vect.transform(df_input['x'])
model = joblib.load(model_file_path)
y = model.predict(X)
print(y)
latency = time() - start
print(latency)
return "latency : " + str(latency)
|
1622976
|
from django.conf.urls import url
from experiments import views
urlpatterns = [
url(r'^goal/(?P<goal_name>[^/]+)/(?P<cache_buster>[^/]+)?$', views.record_experiment_goal, name="experiment_goal"),
url(r'^confirm_human/$', views.confirm_human, name="experiment_confirm_human"),
url(r'^change_alternative/(?P<experiment_name>[a-zA-Z0-9-_]+)/(?P<alternative_name>[a-zA-Z0-9-_]+)/$', views.change_alternative, name="experiment_change_alternative"),
]
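# Hedged examples of paths these patterns match (all names are hypothetical):
#   goal/signup-completed/12345                  -> record_experiment_goal
#   confirm_human/                               -> confirm_human
#   change_alternative/homepage-test/variant-b/  -> change_alternative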
|
1623017
|
from unittest import TestCase
from collections import namedtuple
import rx
from cyclotron import Component
from cyclotron.rx import run
class RunnerTestCase(TestCase):
def test_run_1snk(self):
''' Creates a cycle with one sink driver.
'''
MainDrivers = namedtuple('MainDrivers', ['drv1'])
MainSource = namedtuple('MainSource', [])
MainSink = namedtuple('MainSink', ['drv1'])
test_values = []
def drv1(sink):
sink.values.subscribe(lambda i: test_values.append(i))
return None
Drv1Sink = namedtuple('Drv1Sink', ['values'])
Drv1Driver = Component(call=drv1, input=Drv1Sink)
def main(sources):
val = rx.from_([1, 2, 3])
return MainSink(drv1=Drv1Sink(values=val))
drivers = MainDrivers(drv1=Drv1Driver)
dispose = run(Component(call=main, input=MainSource), drivers)
dispose()
self.assertEqual(3, len(test_values))
self.assertEqual(1, test_values[0])
self.assertEqual(2, test_values[1])
self.assertEqual(3, test_values[2])
def test_run_1srcsnk(self):
''' Creates a cycle with one sink/source driver.
'''
MainDrivers = namedtuple('MainDrivers', ['drv1'])
MainSource = namedtuple('MainSource', ['drv1'])
MainSink = namedtuple('MainSink', ['drv1'])
test_values = []
Drv1Source = namedtuple('Drv1Source', ['counter'])
Drv1Sink = namedtuple('Drv1Sink', ['values'])
def drv1(sink):
sink.values.subscribe(lambda i: test_values.append(i))
counter_stream = rx.from_([1, 2, 3])
return Drv1Source(counter=counter_stream)
Drv1Driver = Component(call=drv1, input=Drv1Sink)
def main(sources):
val = sources.drv1.counter
return MainSink(drv1=Drv1Sink(values=val))
drivers = MainDrivers(drv1=Drv1Driver)
dispose = run(Component(call=main, input=MainSource), drivers)
dispose()
self.assertEqual(3, len(test_values))
self.assertEqual(1, test_values[0])
self.assertEqual(2, test_values[1])
self.assertEqual(3, test_values[2])
def test_run_1src_1snk(self):
''' Creates a cycle with one sink driver and one source driver.
'''
test_values = []
Drv1Sink = namedtuple('Drv1Sink', ['values'])
def drv1(sink):
sink.values.subscribe(lambda i: test_values.append(i))
return None
Drv1Driver = Component(call=drv1, input=Drv1Sink)
Drv2Source = namedtuple('Drv2Source', ['counter'])
def drv2():
counter_stream = rx.from_([1, 2, 3])
return Drv2Source(counter=counter_stream)
Drv2Driver = Component(call=drv2, input=None)
MainDrivers = namedtuple('MainDrivers', ['drv1', 'drv2'])
MainSource = namedtuple('MainSource', ['drv2'])
MainSink = namedtuple('MainSink', ['drv1'])
def main(sources):
val = sources.drv2.counter
return MainSink(drv1=Drv1Sink(values=val))
drivers = MainDrivers(drv1=Drv1Driver, drv2=Drv2Driver)
dispose = run(Component(call=main, input=MainSource), drivers)
dispose()
self.assertEqual(3, len(test_values))
self.assertEqual(1, test_values[0])
self.assertEqual(2, test_values[1])
self.assertEqual(3, test_values[2])
|
1623053
|
import typing
import numpy as np
import pytest
from ebonite.core.objects import Model, ModelWrapper
from ebonite.core.objects.artifacts import Blobs
from ebonite.core.objects.core import EvaluationResults
from ebonite.core.objects.metric import Metric
from ebonite.core.objects.wrapper import PickleModelIO
class EvalModelWrapper(ModelWrapper):
def _exposed_methods_mapping(self) -> typing.Dict[str, str]:
return {'predict1': 'predict1', 'predict2': 'predict2', 'predict3': 'predict3'}
def predict1(self, data):
return np.mean(data, axis=1)
def predict2(self, data):
return np.mean(data, axis=1)
def predict3(self, data):
return np.mean(data, axis=1) >= 0.5
@pytest.fixture
def float_data():
return np.ones((5, 10)) * 0.3
@pytest.fixture
def float_target(float_data):
return np.mean(float_data, axis=1)
@pytest.fixture
def float_target2(float_target):
return 1. - float_target
@pytest.fixture
def bool_target(float_target):
return float_target >= .5
@pytest.fixture
def eval_model(float_data):
return Model('model', EvalModelWrapper(PickleModelIO()).bind_model('None', input_data=float_data), Blobs({}))
@pytest.fixture
def eval_model_saved(eval_model, task_saved):
return task_saved.push_model(eval_model)
@pytest.fixture
def eval_pipeline(eval_model_saved: Model):
pipeline = eval_model_saved.as_pipeline('predict1')
pipeline.name = 'pipeline'
return pipeline
class AccMetric(Metric):
def evaluate(self, truth, prediction):
return np.sum(truth == prediction) / len(truth)
@pytest.fixture
def accuracy_metric():
return AccMetric()
class MaeMetric(Metric):
def evaluate(self, truth: np.ndarray, prediction):
return np.mean(np.abs(truth.astype(float) - prediction.astype(float)))
@pytest.fixture
def mae_metric():
return MaeMetric()
@pytest.fixture
def task_with_evals(task_saved, eval_model_saved, eval_pipeline, float_data, float_target, float_target2, bool_target,
accuracy_metric, mae_metric):
task_saved.add_pipeline(eval_pipeline)
task_saved.add_metric('accuracy_score', accuracy_metric)
task_saved.add_metric('mean_absolute_error', mae_metric)
task_saved.add_evaluation('test_float1', float_data, float_target, ['accuracy_score', 'mean_absolute_error'])
task_saved.add_evaluation('test_float2', float_data, float_target2, ['accuracy_score', 'mean_absolute_error'])
task_saved.add_evaluation('test_bool', float_data, bool_target, ['accuracy_score', 'mean_absolute_error'])
task_saved.save()
return task_saved
def _check_float_eval(result: EvaluationResults, name: str, good):
assert name in result
    evaluation = result[name]  # avoid shadowing the built-in `eval`
    assert len(evaluation.results) == 1
    scores1 = evaluation.latest.scores
assert 'accuracy_score' in scores1
assert scores1['accuracy_score'] == (1 if good else 0)
assert 'mean_absolute_error' in scores1
if good:
assert scores1['mean_absolute_error'] == 0
else:
assert scores1['mean_absolute_error'] > 0
def test_task_evaluation(task_with_evals):
task_with_evals.evaluate_all()
pipeline = task_with_evals._meta.get_pipeline_by_name('pipeline', task_with_evals)
_check_float_eval(pipeline.evaluations, 'test_float1', True)
_check_float_eval(pipeline.evaluations, 'test_float2', False)
assert 'test_bool' not in pipeline.evaluations
model = task_with_evals._meta.get_model_by_name('model', task_with_evals)
assert 'predict1' in model.evaluations
predict1 = model.evaluations['predict1']
_check_float_eval(predict1, 'test_float1', True)
_check_float_eval(predict1, 'test_float2', False)
assert 'test_bool' not in predict1
assert 'predict2' in model.evaluations
predict2 = model.evaluations['predict2']
_check_float_eval(predict2, 'test_float1', True)
_check_float_eval(predict2, 'test_float2', False)
assert 'test_bool' not in predict2
assert 'predict3' in model.evaluations
predict3 = model.evaluations['predict3']
assert 'test_float1' not in predict3
assert 'test_float2' not in predict3
_check_float_eval(predict3, 'test_bool', True)
def test_evaluation_no_save(task_with_evals):
task_with_evals.evaluate_all(save_result=False)
pipeline = task_with_evals._meta.get_pipeline_by_name('pipeline', task_with_evals)
assert len(pipeline.evaluations) == 0
model = task_with_evals._meta.get_model_by_name('model', task_with_evals)
assert len(model.evaluations) == 0
def test_reevaluation(task_with_evals):
task_with_evals.evaluate_all()
task_with_evals.evaluate_all()
pipeline = task_with_evals._meta.get_pipeline_by_name('pipeline', task_with_evals)
assert len(pipeline.evaluations['test_float1'].results) == 1
model = task_with_evals._meta.get_model_by_name('model', task_with_evals)
assert len(model.evaluations['predict1']['test_float1'].results) == 1
def test_reevaluation_force(task_with_evals):
task_with_evals.evaluate_all()
task_with_evals.evaluate_all(force=True)
pipeline = task_with_evals._meta.get_pipeline_by_name('pipeline', task_with_evals)
assert len(pipeline.evaluations['test_float1'].results) == 2
model = task_with_evals._meta.get_model_by_name('model', task_with_evals)
assert len(model.evaluations['predict1']['test_float1'].results) == 2
def test_wrong_evaluation_raise(task_with_evals):
pipeline = task_with_evals._meta.get_pipeline_by_name('pipeline', task_with_evals)
with pytest.raises(ValueError):
pipeline.evaluate_set('aaa')
with pytest.raises(ValueError):
pipeline.evaluate_set('test_bool', raise_on_error=True)
model = task_with_evals._meta.get_model_by_name('model', task_with_evals)
with pytest.raises(ValueError):
model.evaluate_set('aaa')
with pytest.raises(ValueError):
model.evaluate_set('test_bool', method_name='predict1', raise_on_error=True)
|
1623064
|
from ..factory import Method
class setCustomLanguagePack(Method):
info = None # type: "languagePackInfo"
strings = None # type: "vector<languagePackString>"
|
1623066
|
import redis
from board import Board
class RedisBoard(Board):
"""This will create a message board that is backed by Redis."""
    def __init__(self, *args, **kwargs):
        """Creates the Redis connection."""
        self.redis = redis.Redis(*args, **kwargs)
        self.owner = None
        self.key = None  # lazily computed by _key() once the owner is set
def set_owner(self, owner):
self.owner = owner
    def post_message(self, message):
        """Append the message to the owner's list (FIFO via RPUSH)."""
        self.redis.rpush(self._key(), message)
    def get_message(self):
        """Pop the oldest message off the owner's list (LPOP)."""
        return self.redis.lpop(self._key())
def _key(self):
if not self.key:
self.key = "%s-queue" % self.owner
return self.key
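if __name__ == '__main__':
    # Hedged demo; assumes a Redis server is reachable on localhost:6379.
    board = RedisBoard(host='localhost', port=6379)
    board.set_owner('alice')
    board.post_message('hello')
    print(board.get_message())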
|
1623078
|
import numpy as np
from rllab.core.serializable import Serializable
from rllab.exploration_strategies.base import ExplorationStrategy
from sandbox.rocky.tf.spaces.box import Box
from sandbox.gkahn.gcg.utils import schedules
class GaussianStrategy(ExplorationStrategy, Serializable):
"""
Add gaussian noise
"""
def __init__(self, env_spec, endpoints, outside_value):
assert isinstance(env_spec.action_space, Box)
Serializable.quick_init(self, locals())
self._env_spec = env_spec
self.schedule = schedules.PiecewiseSchedule(endpoints=endpoints, outside_value=outside_value)
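        # Note: `endpoints` is assumed to follow the baselines-style
        # PiecewiseSchedule convention of (timestep, value) pairs that are
        # linearly interpolated, with `outside_value` returned beyond the
        # listed range.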
def reset(self):
pass
def add_exploration(self, t, action):
return np.clip(action + np.random.normal(size=len(action)) * self.schedule.value(t),
self._env_spec.action_space.low, self._env_spec.action_space.high)
|
1623090
|
import os
import json
import luigi
from . import check_components as component_tasks
from ..cluster_tasks import WorkflowBase
from ..paintera import unique_block_labels as unique_tasks
from ..paintera import label_block_mapping as mapping_tasks
from ..utils import volume_utils as vu
# TODO fail if check does not pass
class CheckWsWorkflow(WorkflowBase):
""" Check that watershed only has single connected
component per label. Currently, does NOT work for
two pass watershed.
"""
ws_path = luigi.Parameter()
ws_key = luigi.Parameter()
debug_path = luigi.Parameter()
def requires(self):
unique_task = getattr(unique_tasks, self._get_task_name('UniqueBlockLabels'))
mapping_task = getattr(mapping_tasks, self._get_task_name('LabelBlockMapping'))
component_task = getattr(component_tasks, self._get_task_name('CheckComponents'))
with vu.file_reader(self.ws_path, 'r') as f:
max_id = f[self.ws_key].attrs['maxId']
chunks = list(f[self.ws_key].chunks)
dep = unique_task(tmp_folder=self.tmp_folder, max_jobs=self.max_jobs,
config_dir=self.config_dir,
input_path=self.ws_path, output_path=self.debug_path,
input_key=self.ws_key, output_key='unique-labels',
dependency=self.dependency, prefix='debug_ws')
dep = mapping_task(tmp_folder=self.tmp_folder, max_jobs=self.max_jobs,
config_dir=self.config_dir,
input_path=self.debug_path, output_path=self.debug_path,
input_key='unique-labels', output_key='label-block-mapping',
number_of_labels=max_id + 1, dependency=dep,
prefix='debug_ws')
dep = component_task(input_path=self.debug_path, input_key='label-block-mapping',
output_path=self.debug_path, output_key='violating-fragment-ids',
number_of_labels=max_id + 1,
chunks=chunks, dependency=dep,
tmp_folder=self.tmp_folder,
max_jobs=self.max_jobs,
config_dir=self.config_dir)
return dep
@staticmethod
def get_config():
configs = super(CheckWsWorkflow, CheckWsWorkflow).get_config()
configs.update({'unique_block_labels': unique_tasks.UniqueBlockLabelsLocal.default_task_config(),
'label_block_mapping': mapping_tasks.LabelBlockMappingLocal.default_task_config(),
'check_components': component_tasks.CheckComponentsLocal.default_task_config()})
return configs
|
1623099
|
import sys,os
import pandas as pd
import fnmatch
import pyarrow as pa
import pyarrow.parquet as pq
import datetime
defaultcols=['store_code_uc','upc','units','price','feature','display','dma_code','retailer_code','upc_ver_uc','week_end']
# This is the main interface
# This pre-processes files directly downloaded from Kilts and understands the Kilts-Nielsen directory structure
# The processed results are saved by DMA code in a parquet file for fast reading and processing
#
# Arguments:
# - read_dir: base directory of Kilts-Nielsen files to search through
# - outfile: stub of file name for processed output, creates two files:
# - .parquet: with price and quantity data
# - .hdf: with stores and product tables (named "stores", and "prods")
# Filtering the data
# Optional Arguments:
# - statelist (list of two letter state abbreviations: eg ['CT','NY','NJ'])
# - dmalist (list of dma codes eg: [603, 506])
# - module_code a single module code (eg. 1344 for Cereal, etc.)
# - channel_filter: a list of channels (e.g ['F','D'] for food and drug stores)
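# Hedged usage sketch (the directory, output stub, and filter values below
# are hypothetical):
#   read_all_data_new('/data/kilts', 'cereal_northeast',
#                     statelist=['CT', 'NY', 'NJ'], module_code=1344,
#                     channel_filter=['F'])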
def read_all_data_new(read_dir,outfile,statelist=None,dmalist=None,module_code=None,channel_filter=['F','M'],cols=defaultcols):
# list of price-quantity files
fns=get_files(read_dir,'Movement')
# filter numeric part of module code out of filename
if module_code:
fns=[s for s in fns if module_code ==int(os.path.split(s)[1].split('_')[0])]
# this does all of the work
df=pd.concat([read_single_file_new(fn,read_dir,statelist,dmalist,channel_filter) for fn in fns],ignore_index=True)
# some cleaning up to reduce space
df.feature.fillna(-1,inplace=True)
df.display.fillna(-1,inplace=True)
df['display']=df['display'].astype('int8')
df['feature']=df['feature'].astype('int8')
df['panel_year']=df['panel_year'].astype('int16')
df['store_zip3']=df.store_zip3.astype('int16')
df['retailer_code']=df['retailer_code'].astype('int16')
df['dma_code']=df['dma_code'].astype('int16')
df['prmult']=df['prmult'].astype('int8')
# fix 2 for $5.00 as $2.50
df.loc[df.prmult>1,'price']=df.loc[df.prmult>1,'price']/df.loc[df.prmult>1,'prmult']
# Read the products (matching only)
prods=pd.merge(pd.read_table(get_files(read_dir,'products.tsv')[0]),df[['upc','upc_ver_uc']].drop_duplicates(),on=['upc','upc_ver_uc'])
print("Number of Products: ",str(len(prods)))
# Read the stores (matching only)
stores=pd.concat([get_stores(read_dir,statelist=None,my_year=my_year,dmalist=dmalist) for my_year in range(2006,2015+1)]).groupby(level=0).last()
# Use python dates not Nielsen dates
df=fix_dates(df)
    # Write to a parquet file (too big for other formats!)
write_by_dma(df[cols],outfile+'.parquet')
# Write the rest as HDF5 tables
stores.to_hdf(outfile+'.hdf','stores',table=False,append=False)
prods.drop_duplicates().to_hdf(outfile+'.hdf','prods',table=False,append=False)
# This reads a single movement file
def read_single_file_new(fn,read_dir,statelist=None,dmalist=None,channel_filter=None):
print ("Processing ",fn)
my_year=int(fn.split('_')[-1].split('.')[0])
rms=pd.read_table(filter_year(get_files(read_dir,'rms_version'),my_year))
all_stores = get_stores(read_dir,statelist,my_year,storelist=None,dmalist=dmalist)[['store_zip3','dma_code','channel_code','retailer_code']]
if channel_filter:
our_stores=all_stores[all_stores.channel_code.isin(list(channel_filter))]
else:
our_stores=all_stores
return pd.merge(pd.merge(pd.read_table(fn),our_stores,left_on='store_code_uc',right_index=True),rms,on='upc')
# This fixes nielsen dates to python dates
def fix_dates(df):
a=df.week_end.unique()
x=pd.Series(a,index=a,name='week')
return pd.merge(df,pd.DataFrame(x.apply(lambda z :split_date(z))),left_on='week_end',right_index=True).drop(columns='week_end').rename(columns={'week':'week_end'})
def split_date(x):
y=str(x)
return datetime.datetime(int(y[0:4]), int(y[4:6]), int(y[6:8]))
# Some file utilities
def get_files(mydir,myfilter):
matches = []
for root, dirnames, filenames in os.walk(mydir):
for filename in fnmatch.filter(filenames, '*.tsv'):
matches.append(os.path.join(root, filename))
return [s for s in matches if myfilter in s]
def filter_year(fns,year):
return [x for x in fns if str(year) in x][0]
def get_stores(read_dir,statelist,my_year,storelist=None,dmalist=None):
fns=[s for s in get_files(read_dir,'stores') if str(my_year) in s]
stores=pd.read_table(fns[0],index_col='store_code_uc')
stores.fips_state_descr.value_counts()
stores['channel_code']=stores.channel_code.astype('category')
stores['retailer_code']=stores.retailer_code.combine_first(stores.parent_code)
if statelist:
stores=stores[stores.fips_state_descr.isin(statelist)]
if dmalist:
stores=stores[stores.dma_code.isin(dmalist)]
stores.dma_descr.value_counts()
if storelist:
stores=stores[stores.index.isin(storelist)]
return stores
##
# Utilities to read and write parquet files
##
# can pass a wrapper that processes each group and list of columns
def read_parquet_groups(pq_file,read_func=pd.DataFrame,col_list=None):
parquet_file = pq.ParquetFile(pq_file)
super_df =[]
for i in range(0,parquet_file.num_row_groups):
super_df.append(read_func(parquet_file.read_row_group(i,nthreads=4,columns=col_list,use_pandas_metadata=True).to_pandas(nthreads=4)))
return pd.concat(super_df, axis=0)
def write_by_dma(super_df,filename):
    # Write our data to a parquet file -- each row group is a DMA
arrow_Table=pa.Table.from_pandas(super_df)
writer = pq.ParquetWriter(filename,arrow_Table.schema,compression='brotli')
for x in super_df.dma_code.unique():
writer.write_table(pa.Table.from_pandas(super_df[super_df.dma_code==x]))
if writer:
writer.close()
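# Hedged usage sketch: read back only the price/quantity columns, one DMA
# row group at a time (the file name and column list are hypothetical):
#   df = read_parquet_groups('cereal.parquet',
#                            col_list=['dma_code', 'upc', 'price', 'units'])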
|
1623105
|
import ipaddress
import re
from django.contrib import messages
from django.contrib.admin.options import IncorrectLookupParameters
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from ralph.admin.filters import (
ChoicesListFilter,
SEARCH_OR_SEPARATORS_REGEX,
TextListFilter
)
PRIVATE_NETWORK_CIDRS = [
'10.0.0.0/8', '172.16.0.0/12', '192.168.0.0/16',
]
def get_private_network_filter():
filter_ = Q()
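    # A network counts as private when it lies entirely inside one of the
    # private CIDR blocks, i.e. both its lowest and highest addresses fall
    # within the block's integer address range.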
for private_cidr in PRIVATE_NETWORK_CIDRS:
network = ipaddress.ip_network(private_cidr)
min_ip = int(network.network_address)
max_ip = int(network.broadcast_address)
filter_ |= Q(min_ip__gte=min_ip, max_ip__lte=max_ip)
return filter_
PRIVATE_NETWORK_FILTER = get_private_network_filter()
def _add_incorrect_value_message(request, label):
messages.warning(
request, _('Incorrect value in "%(field_name)s" filter') % {
'field_name': label
}
)
class IPRangeFilter(TextListFilter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.title = _('IP range')
def queryset(self, request, queryset):
if self.value():
try:
network = ipaddress.ip_network(self.value())
except ValueError:
return queryset
min_ip = int(network.network_address)
max_ip = int(network.broadcast_address)
queryset = queryset.filter(number__gte=min_ip, number__lte=max_ip)
return queryset
class NetworkRangeFilter(TextListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
self.model_admin = model_admin
super().__init__(
field, request, params, model, model_admin, field_path
)
self.title = _('Network range')
def queryset(self, request, queryset):
if self.value():
try:
network = ipaddress.ip_network(self.value())
except ValueError:
return queryset
min_ip = int(network.network_address)
max_ip = int(network.broadcast_address)
queryset = queryset.filter(min_ip__gte=min_ip, max_ip__lte=max_ip)
return queryset
class NetworkClassFilter(ChoicesListFilter):
_choices_list = [
('private', _('Private')),
('public', _('Public')),
]
def queryset(self, request, queryset):
if not self.value():
return queryset
if self.value().lower() == 'private':
queryset = queryset.filter(PRIVATE_NETWORK_FILTER)
elif self.value().lower() == 'public':
queryset = queryset.exclude(PRIVATE_NETWORK_FILTER)
return queryset
class ContainsIPAddressFilter(TextListFilter):
title = _('Contains IP address')
parameter_name = 'contains_ip'
def __init__(self, field, request, params, model, model_admin, field_path):
super(ContainsIPAddressFilter, self).__init__(
field, request, params, model, model_admin, field_path
)
self.title = _('Contains IP address')
def queryset(self, request, queryset):
if not self.value():
return queryset
filter_query = Q()
for str_addr in re.split(SEARCH_OR_SEPARATORS_REGEX, self.value()):
try:
address = int(ipaddress.ip_address(str_addr))
except ValueError:
_add_incorrect_value_message(request, self.title)
raise IncorrectLookupParameters()
filter_query = filter_query | Q(
min_ip__lte=address,
max_ip__gte=address
)
queryset = queryset.filter(filter_query)
return queryset
|
1623135
|
import FWCore.ParameterSet.Config as cms
dtTTrigResidualCorrection = cms.EDAnalyzer("DTTTrigCorrection",
dbLabel = cms.untracked.string(''),
correctionAlgo = cms.string('DTTTrigResidualCorrection'),
correctionAlgoConfig = cms.PSet(
residualsRootFile = cms.string(''),
#rootBaseDir = cms.untracked.string('/DQMData/DT/DTCalibValidation'),
rootBaseDir = cms.untracked.string('DTResiduals'),
dbLabel = cms.untracked.string(''),
useFitToResiduals = cms.bool(True)
)
)
|
1623158
|
import logging
from typing import Optional, Union
from redbot.core import commands, bank
from redbot.core.i18n import Translator
from redbot.core.commands import Context
from .converter import RoleHierarchyConverter
from .abc import RoleToolsMixin, roletools
log = logging.getLogger("red.Trusty-cogs.RoleTools")
_ = Translator("RoleTools", __file__)
class RoleToolsSettings(RoleToolsMixin):
"""This class handles setting the roletools role settings."""
@roletools.command()
@commands.admin_or_permissions(manage_roles=True)
async def selfadd(
self, ctx: Context, true_or_false: Optional[bool] = None, *, role: RoleHierarchyConverter
) -> None:
"""
Set whether or not a user can apply the role to themselves.
`[true_or_false]` optional boolean of what to set the setting to.
If not provided the current setting will be shown instead.
`<role>` The role you want to set.
"""
cur_setting = await self.config.role(role).selfassignable()
if true_or_false is None:
if cur_setting:
await ctx.send(_("The {role} role is self assignable.").format(role=role))
else:
command = f"`{ctx.clean_prefix}roletools selfadd yes {role.name}`"
await ctx.send(
_(
"The {role} role is not self assignable. Run the command "
"{command} to make it self assignable."
).format(role=role.mention, command=command)
)
return
if true_or_false is True:
await self.config.role(role).selfassignable.set(True)
await ctx.send(_("The {role} role is now self assignable.").format(role=role.mention))
if true_or_false is False:
await self.config.role(role).selfassignable.set(False)
await ctx.send(
_("The {role} role is no longer self assignable.").format(role=role.mention)
)
@roletools.command()
@commands.admin_or_permissions(manage_roles=True)
async def selfrem(
self, ctx: Context, true_or_false: Optional[bool] = None, *, role: RoleHierarchyConverter
) -> None:
"""
Set whether or not a user can remove the role from themselves.
`[true_or_false]` optional boolean of what to set the setting to.
If not provided the current setting will be shown instead.
`<role>` The role you want to set.
"""
cur_setting = await self.config.role(role).selfremovable()
if true_or_false is None:
if cur_setting:
await ctx.send(_("The {role} role is self removeable.").format(role=role.mention))
else:
command = f"`{ctx.clean_prefix}roletools selfrem yes {role.name}`"
await ctx.send(
_(
"The {role} role is not self removable. Run the command "
"{command} to make it self removeable."
).format(role=role.mention, command=command)
)
return
if true_or_false is True:
await self.config.role(role).selfremovable.set(True)
await ctx.send(_("The {role} role is now self removeable.").format(role=role.mention))
if true_or_false is False:
await self.config.role(role).selfremovable.set(False)
await ctx.send(
_("The {role} role is no longer self removeable.").format(role=role.mention)
)
@roletools.command()
@commands.admin_or_permissions(manage_roles=True)
async def atomic(self, ctx: Context, true_or_false: Optional[Union[bool, str]] = None) -> None:
"""
Set the atomicity of role assignment.
What this means is that when this is `True` roles will be
        applied individually and not cause any errors. When this
is set to `False` roles will be grouped together into one call.
This can cause race conditions if you have other methods of applying
roles setup when set to `False`.
`[true_or_false]` optional boolean of what to set the setting to.
To reset back to the default global rules use `clear`.
If not provided the current setting will be shown instead.
"""
cur_setting = await self.config.guild(ctx.guild).atomic()
if true_or_false is None or true_or_false not in ["clear", True, False]:
if cur_setting is True:
msg = _("This server is currently using atomic role assignment")
elif cur_setting is False:
msg = _("This server is not currently using atomic role assignment.")
else:
msg = _(
"This server currently using the global atomic "
"role assignment setting `{current_global}`."
).format(current_global=await self.config.atomic())
command = f"`{ctx.clean_prefix}roletools atomic yes`"
cmd_msg = _("Do {command} to atomically assign roles.").format(command=command)
await ctx.send(f"{msg} {cmd_msg}")
return
elif true_or_false is True:
await self.config.guild(ctx.guild).atomic.set(True)
await ctx.send(_("RoleTools will now atomically assign roles."))
elif true_or_false is False:
await self.config.guild(ctx.guild).atomic.set(False)
await ctx.send(_("RoleTools will no longer atomically assign roles."))
else:
await self.config.guild(ctx.guild).atomic.clear()
await ctx.send(_("RoleTools will now default to the global atomic setting."))
@roletools.command()
@commands.is_owner()
async def globalatomic(self, ctx: Context, true_or_false: Optional[bool] = None) -> None:
"""
Set the atomicity of role assignment.
What this means is that when this is `True` roles will be
        applied individually and not cause any errors. When this
is set to `False` roles will be grouped together into one call.
This can cause race conditions if you have other methods of applying
roles setup when set to `False`.
`[true_or_false]` optional boolean of what to set the setting to.
If not provided the current setting will be shown instead.
"""
cur_setting = await self.config.atomic()
if true_or_false is None:
if cur_setting:
await ctx.send(_("I am currently using atomic role assignment"))
else:
command = f"`{ctx.clean_prefix}roletools globalatomic yes`"
await ctx.send(
_(
"I am not currently using atomic role assignment. Do "
"{command} to atomically assign roles."
).format(command=command)
)
return
if true_or_false is True:
await self.config.atomic.clear()
await ctx.send(_("RoleTools will now atomically assign roles."))
if true_or_false is False:
await self.config.atomic.set(False)
await ctx.send(_("RoleTools will no longer atomically assign roles."))
@roletools.command()
@commands.admin_or_permissions(manage_roles=True)
async def cost(
self, ctx: Context, cost: Optional[int] = None, *, role: RoleHierarchyConverter
) -> None:
"""
        Set the cost (in bot credits) required to acquire the role.
`[cost]` The price you want to set the role at in bot credits.
Setting this to 0 or lower will remove the cost.
If not provided the current setting will be shown instead.
`<role>` The role you want to set.
"""
if await bank.is_global() and not await self.bot.is_owner(ctx.author):
await ctx.send(
_("This command is locked to bot owner only while the bank is set to global.")
)
return
if cost is not None and cost >= await bank.get_max_balance(ctx.guild):
await ctx.send(_("You cannot set a cost higher than the maximum credits balance."))
return
cur_setting = await self.config.role(role).cost()
currency_name = await bank.get_currency_name(ctx.guild)
if cost is None:
if cur_setting:
await ctx.send(
_("The role {role} currently costs {cost} {currency_name}.").format(
role=role, cost=cost, currency_name=currency_name
)
)
else:
command = f"`{ctx.clean_prefix} roletools cost SOME_NUMBER {role.name}`"
await ctx.send(
_(
"The role {role} does not currently cost any {currency_name}. "
"Run the command {command} to allow this role to require credits."
).format(role=role.mention, command=command, currency_name=currency_name)
)
return
else:
if cost <= 0:
await self.config.role(role).cost.clear()
await ctx.send(
_("The {role} will not require any {currency_name} to acquire.").format(
role=role.mention, currency_name=currency_name
)
)
return
else:
await self.config.role(role).cost.set(cost)
await ctx.send(
_("The {role} will now cost {cost} {currency_name} to acquire.").format(
role=role.mention, cost=cost, currency_name=currency_name
)
)
@roletools.command()
@commands.admin_or_permissions(manage_roles=True)
async def sticky(
self, ctx: Context, true_or_false: Optional[bool] = None, *, role: RoleHierarchyConverter
) -> None:
"""
Set whether or not a role will be re-applied when a user leaves and rejoins the server.
`[true_or_false]` optional boolean of what to set the setting to.
If not provided the current setting will be shown instead.
`<role>` The role you want to set.
"""
cur_setting = await self.config.role(role).sticky()
if true_or_false is None:
if cur_setting:
await ctx.send(_("The {role} role is sticky.").format(role=role.mention))
else:
command = f"{ctx.clean_prefix}roletools sticky yes {role.name}"
await ctx.send(
_(
"The {role} role is not sticky. Run the command "
"{command} to make it sticky."
).format(role=role.mention, command=command)
)
return
if true_or_false is True:
await self.config.role(role).sticky.set(True)
await ctx.send(_("The {role} role is now sticky.").format(role=role.mention))
if true_or_false is False:
await self.config.role(role).sticky.set(False)
await ctx.send(_("The {role} role is no longer sticky.").format(role=role.mention))
@roletools.command(aliases=["auto"])
@commands.admin_or_permissions(manage_roles=True)
async def autorole(
self, ctx: Context, true_or_false: Optional[bool] = None, *, role: RoleHierarchyConverter
) -> None:
"""
Set a role to be automatically applied when a user joins the server.
`[true_or_false]` optional boolean of what to set the setting to.
If not provided the current setting will be shown instead.
`<role>` The role you want to set.
"""
cur_setting = await self.config.role(role).auto()
if true_or_false is None:
if cur_setting:
await ctx.send(
_("The role {role} is automatically applied on joining.").format(role=role)
)
else:
command = f"`{ctx.clean_prefix}roletools auto yes {role.name}`"
await ctx.send(
_(
"The {role} role is not automatically applied "
"when a member joins this server. Run the command "
"{command} to make it automatically apply when a user joins."
).format(role=role.mention, command=command)
)
return
if true_or_false is True:
async with self.config.guild(ctx.guild).auto_roles() as current_roles:
if role.id not in current_roles:
current_roles.append(role.id)
if ctx.guild.id not in self.settings:
self.settings[ctx.guild.id] = await self.config.guild(ctx.guild).all()
if role.id not in self.settings[ctx.guild.id]["auto_roles"]:
self.settings[ctx.guild.id]["auto_roles"].append(role.id)
await self.config.role(role).auto.set(True)
await ctx.send(
_("The {role} role will now automatically be applied when a user joins.").format(
role=role.mention
)
)
if true_or_false is False:
async with self.config.guild(ctx.guild).auto_roles() as current_roles:
if role.id in current_roles:
current_roles.remove(role.id)
if (
ctx.guild.id in self.settings
and role.id in self.settings[ctx.guild.id]["auto_roles"]
):
self.settings[ctx.guild.id]["auto_roles"].remove(role.id)
await self.config.role(role).auto.set(False)
await ctx.send(
_("The {role} role will not automatically be applied when a user joins.").format(
role=role.mention
)
)
|
1623160
|
import sys
from .helper import PillowTestCase
from PIL import Image
X = 255
class TestLibPack(PillowTestCase):
def assert_pack(self, mode, rawmode, data, *pixels):
"""
data - either raw bytes with data or just number of bytes in rawmode.
"""
im = Image.new(mode, (len(pixels), 1))
for x, pixel in enumerate(pixels):
im.putpixel((x, 0), pixel)
if isinstance(data, int):
data_len = data * len(pixels)
data = bytes(bytearray(range(1, data_len + 1)))
self.assertEqual(data, im.tobytes("raw", rawmode))
def test_1(self):
self.assert_pack("1", "1", b'\x01', 0, 0, 0, 0, 0, 0, 0, X)
self.assert_pack("1", "1;I", b'\x01', X, X, X, X, X, X, X, 0)
self.assert_pack("1", "1;R", b'\x01', X, 0, 0, 0, 0, 0, 0, 0)
self.assert_pack("1", "1;IR", b'\x01', 0, X, X, X, X, X, X, X)
self.assert_pack("1", "1", b'\xaa', X, 0, X, 0, X, 0, X, 0)
self.assert_pack("1", "1;I", b'\xaa', 0, X, 0, X, 0, X, 0, X)
self.assert_pack("1", "1;R", b'\xaa', 0, X, 0, X, 0, X, 0, X)
self.assert_pack("1", "1;IR", b'\xaa', X, 0, X, 0, X, 0, X, 0)
self.assert_pack(
"1", "L", b'\xff\x00\x00\xff\x00\x00', X, 0, 0, X, 0, 0)
def test_L(self):
self.assert_pack("L", "L", 1, 1, 2, 3, 4)
self.assert_pack("L", "L;16", b'\x00\xc6\x00\xaf', 198, 175)
self.assert_pack("L", "L;16B", b'\xc6\x00\xaf\x00', 198, 175)
def test_LA(self):
self.assert_pack("LA", "LA", 2, (1, 2), (3, 4), (5, 6))
self.assert_pack("LA", "LA;L", 2, (1, 4), (2, 5), (3, 6))
def test_P(self):
self.assert_pack("P", "P;1", b'\xe4', 1, 1, 1, 0, 0, 255, 0, 0)
self.assert_pack("P", "P;2", b'\xe4', 3, 2, 1, 0)
self.assert_pack("P", "P;4", b'\x02\xef', 0, 2, 14, 15)
self.assert_pack("P", "P", 1, 1, 2, 3, 4)
def test_PA(self):
self.assert_pack("PA", "PA", 2, (1, 2), (3, 4), (5, 6))
self.assert_pack("PA", "PA;L", 2, (1, 4), (2, 5), (3, 6))
def test_RGB(self):
self.assert_pack("RGB", "RGB", 3, (1, 2, 3), (4, 5, 6), (7, 8, 9))
self.assert_pack(
"RGB", "RGBX",
b'\x01\x02\x03\xff\x05\x06\x07\xff', (1, 2, 3), (5, 6, 7))
self.assert_pack(
"RGB", "XRGB",
b'\x00\x02\x03\x04\x00\x06\x07\x08', (2, 3, 4), (6, 7, 8))
self.assert_pack("RGB", "BGR", 3, (3, 2, 1), (6, 5, 4), (9, 8, 7))
self.assert_pack(
"RGB", "BGRX",
b'\x01\x02\x03\x00\x05\x06\x07\x00', (3, 2, 1), (7, 6, 5))
self.assert_pack(
"RGB", "XBGR",
b'\x00\x02\x03\x04\x00\x06\x07\x08', (4, 3, 2), (8, 7, 6))
self.assert_pack("RGB", "RGB;L", 3, (1, 4, 7), (2, 5, 8), (3, 6, 9))
self.assert_pack("RGB", "R", 1, (1, 9, 9), (2, 9, 9), (3, 9, 9))
self.assert_pack("RGB", "G", 1, (9, 1, 9), (9, 2, 9), (9, 3, 9))
self.assert_pack("RGB", "B", 1, (9, 9, 1), (9, 9, 2), (9, 9, 3))
def test_RGBA(self):
self.assert_pack(
"RGBA", "RGBA", 4, (1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12))
self.assert_pack(
"RGBA", "RGBA;L", 4, (1, 4, 7, 10), (2, 5, 8, 11), (3, 6, 9, 12))
self.assert_pack(
"RGBA", "RGB", 3, (1, 2, 3, 14), (4, 5, 6, 15), (7, 8, 9, 16))
self.assert_pack(
"RGBA", "BGR", 3, (3, 2, 1, 14), (6, 5, 4, 15), (9, 8, 7, 16))
self.assert_pack(
"RGBA", "BGRA", 4,
(3, 2, 1, 4), (7, 6, 5, 8), (11, 10, 9, 12))
self.assert_pack(
"RGBA", "ABGR", 4, (4, 3, 2, 1), (8, 7, 6, 5), (12, 11, 10, 9))
self.assert_pack(
"RGBA", "BGRa", 4,
(191, 127, 63, 4), (223, 191, 159, 8), (233, 212, 191, 12))
self.assert_pack(
"RGBA", "R", 1, (1, 0, 8, 9), (2, 0, 8, 9), (3, 0, 8, 0))
self.assert_pack(
"RGBA", "G", 1, (6, 1, 8, 9), (6, 2, 8, 9), (6, 3, 8, 9))
self.assert_pack(
"RGBA", "B", 1, (6, 7, 1, 9), (6, 7, 2, 0), (6, 7, 3, 9))
self.assert_pack(
"RGBA", "A", 1, (6, 7, 0, 1), (6, 7, 0, 2), (0, 7, 0, 3))
def test_RGBa(self):
self.assert_pack(
"RGBa", "RGBa", 4, (1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12))
self.assert_pack(
"RGBa", "BGRa", 4, (3, 2, 1, 4), (7, 6, 5, 8), (11, 10, 9, 12))
self.assert_pack(
"RGBa", "aBGR", 4, (4, 3, 2, 1), (8, 7, 6, 5), (12, 11, 10, 9))
def test_RGBX(self):
self.assert_pack(
"RGBX", "RGBX", 4, (1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12))
self.assert_pack(
"RGBX", "RGBX;L", 4, (1, 4, 7, 10), (2, 5, 8, 11), (3, 6, 9, 12))
self.assert_pack(
"RGBX", "RGB", 3, (1, 2, 3, X), (4, 5, 6, X), (7, 8, 9, X))
self.assert_pack(
"RGBX", "BGR", 3, (3, 2, 1, X), (6, 5, 4, X), (9, 8, 7, X))
self.assert_pack(
"RGBX", "BGRX",
b'\x01\x02\x03\x00\x05\x06\x07\x00\t\n\x0b\x00',
(3, 2, 1, X), (7, 6, 5, X), (11, 10, 9, X))
self.assert_pack(
"RGBX", "XBGR",
b'\x00\x02\x03\x04\x00\x06\x07\x08\x00\n\x0b\x0c',
(4, 3, 2, X), (8, 7, 6, X), (12, 11, 10, X))
self.assert_pack("RGBX", "R", 1,
(1, 0, 8, 9), (2, 0, 8, 9), (3, 0, 8, 0))
self.assert_pack("RGBX", "G", 1,
(6, 1, 8, 9), (6, 2, 8, 9), (6, 3, 8, 9))
self.assert_pack("RGBX", "B", 1,
(6, 7, 1, 9), (6, 7, 2, 0), (6, 7, 3, 9))
self.assert_pack("RGBX", "X", 1,
(6, 7, 0, 1), (6, 7, 0, 2), (0, 7, 0, 3))
def test_CMYK(self):
self.assert_pack("CMYK", "CMYK", 4,
(1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12))
self.assert_pack(
"CMYK", "CMYK;I", 4,
(254, 253, 252, 251), (250, 249, 248, 247), (246, 245, 244, 243))
self.assert_pack(
"CMYK", "CMYK;L", 4, (1, 4, 7, 10), (2, 5, 8, 11), (3, 6, 9, 12))
self.assert_pack("CMYK", "K", 1,
(6, 7, 0, 1), (6, 7, 0, 2), (0, 7, 0, 3))
def test_YCbCr(self):
self.assert_pack("YCbCr", "YCbCr", 3, (1, 2, 3), (4, 5, 6), (7, 8, 9))
self.assert_pack("YCbCr", "YCbCr;L", 3,
(1, 4, 7), (2, 5, 8), (3, 6, 9))
self.assert_pack(
"YCbCr", "YCbCrX",
b'\x01\x02\x03\xff\x05\x06\x07\xff\t\n\x0b\xff',
(1, 2, 3), (5, 6, 7), (9, 10, 11))
self.assert_pack(
"YCbCr", "YCbCrK",
b'\x01\x02\x03\xff\x05\x06\x07\xff\t\n\x0b\xff',
(1, 2, 3), (5, 6, 7), (9, 10, 11))
self.assert_pack("YCbCr", "Y", 1,
(1, 0, 8, 9), (2, 0, 8, 9), (3, 0, 8, 0))
self.assert_pack("YCbCr", "Cb", 1,
(6, 1, 8, 9), (6, 2, 8, 9), (6, 3, 8, 9))
self.assert_pack("YCbCr", "Cr", 1,
(6, 7, 1, 9), (6, 7, 2, 0), (6, 7, 3, 9))
def test_LAB(self):
self.assert_pack(
"LAB", "LAB", 3, (1, 130, 131), (4, 133, 134), (7, 136, 137))
self.assert_pack("LAB", "L", 1, (1, 9, 9), (2, 9, 9), (3, 9, 9))
self.assert_pack("LAB", "A", 1, (9, 1, 9), (9, 2, 9), (9, 3, 9))
self.assert_pack("LAB", "B", 1, (9, 9, 1), (9, 9, 2), (9, 9, 3))
def test_HSV(self):
self.assert_pack("HSV", "HSV", 3, (1, 2, 3), (4, 5, 6), (7, 8, 9))
self.assert_pack("HSV", "H", 1, (1, 9, 9), (2, 9, 9), (3, 9, 9))
self.assert_pack("HSV", "S", 1, (9, 1, 9), (9, 2, 9), (9, 3, 9))
self.assert_pack("HSV", "V", 1, (9, 9, 1), (9, 9, 2), (9, 9, 3))
def test_I(self):
self.assert_pack("I", "I;16B", 2, 0x0102, 0x0304)
self.assert_pack(
"I", "I;32S",
b'\x83\x00\x00\x01\x01\x00\x00\x83', 0x01000083, -2097151999)
if sys.byteorder == 'little':
self.assert_pack("I", "I", 4, 0x04030201, 0x08070605)
self.assert_pack(
"I", "I;32NS",
b'\x83\x00\x00\x01\x01\x00\x00\x83', 0x01000083, -2097151999)
else:
self.assert_pack("I", "I", 4, 0x01020304, 0x05060708)
self.assert_pack(
"I", "I;32NS",
b'\x83\x00\x00\x01\x01\x00\x00\x83', -2097151999, 0x01000083)
def test_F_float(self):
self.assert_pack(
"F", "F;32F", 4, 1.539989614439558e-36, 4.063216068939723e-34)
if sys.byteorder == 'little':
self.assert_pack(
"F", "F", 4, 1.539989614439558e-36, 4.063216068939723e-34)
self.assert_pack(
"F", "F;32NF", 4, 1.539989614439558e-36, 4.063216068939723e-34)
else:
self.assert_pack(
"F", "F", 4, 2.387939260590663e-38, 6.301941157072183e-36)
self.assert_pack(
"F", "F;32NF", 4, 2.387939260590663e-38, 6.301941157072183e-36)
class TestLibUnpack(PillowTestCase):
def assert_unpack(self, mode, rawmode, data, *pixels):
"""
data - either raw bytes with data or just number of bytes in rawmode.
"""
if isinstance(data, int):
data_len = data * len(pixels)
data = bytes(bytearray(range(1, data_len + 1)))
im = Image.frombytes(mode, (len(pixels), 1), data,
"raw", rawmode, 0, 1)
for x, pixel in enumerate(pixels):
self.assertEqual(pixel, im.getpixel((x, 0)))
def test_1(self):
self.assert_unpack("1", "1", b'\x01', 0, 0, 0, 0, 0, 0, 0, X)
self.assert_unpack("1", "1;I", b'\x01', X, X, X, X, X, X, X, 0)
self.assert_unpack("1", "1;R", b'\x01', X, 0, 0, 0, 0, 0, 0, 0)
self.assert_unpack("1", "1;IR", b'\x01', 0, X, X, X, X, X, X, X)
self.assert_unpack("1", "1", b'\xaa', X, 0, X, 0, X, 0, X, 0)
self.assert_unpack("1", "1;I", b'\xaa', 0, X, 0, X, 0, X, 0, X)
self.assert_unpack("1", "1;R", b'\xaa', 0, X, 0, X, 0, X, 0, X)
self.assert_unpack("1", "1;IR", b'\xaa', X, 0, X, 0, X, 0, X, 0)
self.assert_unpack("1", "1;8", b'\x00\x01\x02\xff', 0, X, X, X)
def test_L(self):
self.assert_unpack("L", "L;2", b'\xe4', 255, 170, 85, 0)
self.assert_unpack("L", "L;2I", b'\xe4', 0, 85, 170, 255)
self.assert_unpack("L", "L;2R", b'\xe4', 0, 170, 85, 255)
self.assert_unpack("L", "L;2IR", b'\xe4', 255, 85, 170, 0)
self.assert_unpack("L", "L;4", b'\x02\xef', 0, 34, 238, 255)
self.assert_unpack("L", "L;4I", b'\x02\xef', 255, 221, 17, 0)
self.assert_unpack("L", "L;4R", b'\x02\xef', 68, 0, 255, 119)
self.assert_unpack("L", "L;4IR", b'\x02\xef', 187, 255, 0, 136)
self.assert_unpack("L", "L", 1, 1, 2, 3, 4)
self.assert_unpack("L", "L;I", 1, 254, 253, 252, 251)
self.assert_unpack("L", "L;R", 1, 128, 64, 192, 32)
self.assert_unpack("L", "L;16", 2, 2, 4, 6, 8)
self.assert_unpack("L", "L;16B", 2, 1, 3, 5, 7)
self.assert_unpack("L", "L;16", b'\x00\xc6\x00\xaf', 198, 175)
self.assert_unpack("L", "L;16B", b'\xc6\x00\xaf\x00', 198, 175)
def test_LA(self):
self.assert_unpack("LA", "LA", 2, (1, 2), (3, 4), (5, 6))
self.assert_unpack("LA", "LA;L", 2, (1, 4), (2, 5), (3, 6))
def test_P(self):
self.assert_unpack("P", "P;1", b'\xe4', 1, 1, 1, 0, 0, 1, 0, 0)
self.assert_unpack("P", "P;2", b'\xe4', 3, 2, 1, 0)
# erroneous?
# self.assert_unpack("P", "P;2L", b'\xe4', 1, 1, 1, 0)
self.assert_unpack("P", "P;4", b'\x02\xef', 0, 2, 14, 15)
# erroneous?
# self.assert_unpack("P", "P;4L", b'\x02\xef', 2, 10, 10, 0)
self.assert_unpack("P", "P", 1, 1, 2, 3, 4)
self.assert_unpack("P", "P;R", 1, 128, 64, 192, 32)
def test_PA(self):
self.assert_unpack("PA", "PA", 2, (1, 2), (3, 4), (5, 6))
self.assert_unpack("PA", "PA;L", 2, (1, 4), (2, 5), (3, 6))
def test_RGB(self):
self.assert_unpack("RGB", "RGB", 3, (1, 2, 3), (4, 5, 6), (7, 8, 9))
self.assert_unpack("RGB", "RGB;L", 3, (1, 4, 7), (2, 5, 8), (3, 6, 9))
self.assert_unpack("RGB", "RGB;R", 3, (128, 64, 192), (32, 160, 96))
self.assert_unpack("RGB", "RGB;16L", 6, (2, 4, 6), (8, 10, 12))
self.assert_unpack("RGB", "RGB;16B", 6, (1, 3, 5), (7, 9, 11))
self.assert_unpack("RGB", "BGR", 3, (3, 2, 1), (6, 5, 4), (9, 8, 7))
self.assert_unpack("RGB", "RGB;15", 2, (8, 131, 0), (24, 0, 8))
self.assert_unpack("RGB", "BGR;15", 2, (0, 131, 8), (8, 0, 24))
self.assert_unpack("RGB", "RGB;16", 2, (8, 64, 0), (24, 129, 0))
self.assert_unpack("RGB", "BGR;16", 2, (0, 64, 8), (0, 129, 24))
self.assert_unpack("RGB", "RGB;4B", 2, (17, 0, 34), (51, 0, 68))
self.assert_unpack("RGB", "RGBX", 4, (1, 2, 3), (5, 6, 7), (9, 10, 11))
self.assert_unpack("RGB", "RGBX;L", 4, (1, 4, 7), (2, 5, 8), (3, 6, 9))
self.assert_unpack("RGB", "BGRX", 4, (3, 2, 1), (7, 6, 5), (11, 10, 9))
self.assert_unpack(
"RGB", "XRGB", 4, (2, 3, 4), (6, 7, 8), (10, 11, 12))
self.assert_unpack(
"RGB", "XBGR", 4, (4, 3, 2), (8, 7, 6), (12, 11, 10))
self.assert_unpack(
"RGB", "YCC;P",
b'D]\x9c\x82\x1a\x91\xfaOC\xe7J\x12', # random data
(127, 102, 0), (192, 227, 0), (213, 255, 170), (98, 255, 133))
self.assert_unpack("RGB", "R", 1, (1, 0, 0), (2, 0, 0), (3, 0, 0))
self.assert_unpack("RGB", "G", 1, (0, 1, 0), (0, 2, 0), (0, 3, 0))
self.assert_unpack("RGB", "B", 1, (0, 0, 1), (0, 0, 2), (0, 0, 3))
def test_RGBA(self):
self.assert_unpack(
"RGBA", "LA", 2, (1, 1, 1, 2), (3, 3, 3, 4), (5, 5, 5, 6))
self.assert_unpack(
"RGBA", "LA;16B", 4, (1, 1, 1, 3), (5, 5, 5, 7), (9, 9, 9, 11))
self.assert_unpack(
"RGBA", "RGBA", 4, (1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12))
self.assert_unpack(
"RGBA", "RGBAX", 5, (1, 2, 3, 4), (6, 7, 8, 9), (11, 12, 13, 14))
self.assert_unpack(
"RGBA", "RGBAXX", 6, (1, 2, 3, 4), (7, 8, 9, 10), (13, 14, 15, 16))
self.assert_unpack(
"RGBA", "RGBa", 4,
(63, 127, 191, 4), (159, 191, 223, 8), (191, 212, 233, 12))
self.assert_unpack(
"RGBA", "RGBa",
b'\x01\x02\x03\x00\x10\x20\x30\x7f\x10\x20\x30\xff',
(0, 0, 0, 0), (32, 64, 96, 127), (16, 32, 48, 255))
self.assert_unpack(
"RGBA", "RGBaX",
b'\x01\x02\x03\x00-\x10\x20\x30\x7f-\x10\x20\x30\xff-',
(0, 0, 0, 0), (32, 64, 96, 127), (16, 32, 48, 255))
self.assert_unpack(
"RGBA", "RGBaXX",
b'\x01\x02\x03\x00==\x10\x20\x30\x7f!!\x10\x20\x30\xff??',
(0, 0, 0, 0), (32, 64, 96, 127), (16, 32, 48, 255))
self.assert_unpack(
"RGBA", "RGBa;16L", 8,
(63, 127, 191, 8), (159, 191, 223, 16), (191, 212, 233, 24))
self.assert_unpack(
"RGBA", "RGBa;16L",
b'\x88\x01\x88\x02\x88\x03\x88\x00'
b'\x88\x10\x88\x20\x88\x30\x88\xff',
(0, 0, 0, 0), (16, 32, 48, 255))
self.assert_unpack(
"RGBA", "RGBa;16B", 8,
(36, 109, 182, 7), (153, 187, 221, 15), (188, 210, 232, 23))
self.assert_unpack(
"RGBA", "RGBa;16B",
b'\x01\x88\x02\x88\x03\x88\x00\x88'
b'\x10\x88\x20\x88\x30\x88\xff\x88',
(0, 0, 0, 0), (16, 32, 48, 255))
self.assert_unpack(
"RGBA", "BGRa", 4,
(191, 127, 63, 4), (223, 191, 159, 8), (233, 212, 191, 12))
self.assert_unpack(
"RGBA", "BGRa",
b'\x01\x02\x03\x00\x10\x20\x30\xff',
(0, 0, 0, 0), (48, 32, 16, 255))
self.assert_unpack(
"RGBA", "RGBA;I", 4,
(254, 253, 252, 4), (250, 249, 248, 8), (246, 245, 244, 12))
self.assert_unpack(
"RGBA", "RGBA;L", 4, (1, 4, 7, 10), (2, 5, 8, 11), (3, 6, 9, 12))
self.assert_unpack("RGBA", "RGBA;15", 2, (8, 131, 0, 0), (24, 0, 8, 0))
self.assert_unpack("RGBA", "BGRA;15", 2, (0, 131, 8, 0), (8, 0, 24, 0))
self.assert_unpack(
"RGBA", "RGBA;4B", 2, (17, 0, 34, 0), (51, 0, 68, 0))
self.assert_unpack(
"RGBA", "RGBA;16L", 8, (2, 4, 6, 8), (10, 12, 14, 16))
self.assert_unpack(
"RGBA", "RGBA;16B", 8, (1, 3, 5, 7), (9, 11, 13, 15))
self.assert_unpack(
"RGBA", "BGRA", 4, (3, 2, 1, 4), (7, 6, 5, 8), (11, 10, 9, 12))
self.assert_unpack(
"RGBA", "ARGB", 4, (2, 3, 4, 1), (6, 7, 8, 5), (10, 11, 12, 9))
self.assert_unpack(
"RGBA", "ABGR", 4, (4, 3, 2, 1), (8, 7, 6, 5), (12, 11, 10, 9))
self.assert_unpack(
"RGBA", "YCCA;P",
b']bE\x04\xdd\xbej\xed57T\xce\xac\xce:\x11', # random data
(0, 161, 0, 4), (255, 255, 255, 237),
(27, 158, 0, 206), (0, 118, 0, 17))
self.assert_unpack(
"RGBA", "R", 1, (1, 0, 0, 0), (2, 0, 0, 0), (3, 0, 0, 0))
self.assert_unpack(
"RGBA", "G", 1, (0, 1, 0, 0), (0, 2, 0, 0), (0, 3, 0, 0))
self.assert_unpack(
"RGBA", "B", 1, (0, 0, 1, 0), (0, 0, 2, 0), (0, 0, 3, 0))
self.assert_unpack(
"RGBA", "A", 1, (0, 0, 0, 1), (0, 0, 0, 2), (0, 0, 0, 3))
def test_RGBa(self):
self.assert_unpack(
"RGBa", "RGBa", 4, (1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12))
self.assert_unpack(
"RGBa", "BGRa", 4, (3, 2, 1, 4), (7, 6, 5, 8), (11, 10, 9, 12))
self.assert_unpack(
"RGBa", "aRGB", 4, (2, 3, 4, 1), (6, 7, 8, 5), (10, 11, 12, 9))
self.assert_unpack(
"RGBa", "aBGR", 4, (4, 3, 2, 1), (8, 7, 6, 5), (12, 11, 10, 9))
def test_RGBX(self):
self.assert_unpack("RGBX", "RGB", 3,
(1, 2, 3, X), (4, 5, 6, X), (7, 8, 9, X))
self.assert_unpack("RGBX", "RGB;L", 3,
(1, 4, 7, X), (2, 5, 8, X), (3, 6, 9, X))
self.assert_unpack("RGBX", "RGB;16B", 6, (1, 3, 5, X), (7, 9, 11, X))
self.assert_unpack("RGBX", "BGR", 3,
(3, 2, 1, X), (6, 5, 4, X), (9, 8, 7, X))
self.assert_unpack("RGBX", "RGB;15", 2, (8, 131, 0, X), (24, 0, 8, X))
self.assert_unpack("RGBX", "BGR;15", 2, (0, 131, 8, X), (8, 0, 24, X))
self.assert_unpack("RGBX", "RGB;4B", 2, (17, 0, 34, X), (51, 0, 68, X))
self.assert_unpack(
"RGBX", "RGBX", 4, (1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12))
self.assert_unpack(
"RGBX", "RGBXX", 5, (1, 2, 3, 4), (6, 7, 8, 9), (11, 12, 13, 14))
self.assert_unpack(
"RGBX", "RGBXXX", 6, (1, 2, 3, 4), (7, 8, 9, 10), (13, 14, 15, 16))
self.assert_unpack(
"RGBX", "RGBX;L", 4, (1, 4, 7, 10), (2, 5, 8, 11), (3, 6, 9, 12))
self.assert_unpack("RGBX", "RGBX;16L", 8,
(2, 4, 6, 8), (10, 12, 14, 16))
self.assert_unpack("RGBX", "RGBX;16B", 8,
(1, 3, 5, 7), (9, 11, 13, 15))
self.assert_unpack("RGBX", "BGRX", 4,
(3, 2, 1, X), (7, 6, 5, X), (11, 10, 9, X))
self.assert_unpack("RGBX", "XRGB", 4,
(2, 3, 4, X), (6, 7, 8, X), (10, 11, 12, X))
self.assert_unpack("RGBX", "XBGR", 4,
(4, 3, 2, X), (8, 7, 6, X), (12, 11, 10, X))
self.assert_unpack(
"RGBX", "YCC;P",
b'D]\x9c\x82\x1a\x91\xfaOC\xe7J\x12', # random data
(127, 102, 0, X), (192, 227, 0, X),
(213, 255, 170, X), (98, 255, 133, X))
self.assert_unpack("RGBX", "R", 1,
(1, 0, 0, 0), (2, 0, 0, 0), (3, 0, 0, 0))
self.assert_unpack("RGBX", "G", 1,
(0, 1, 0, 0), (0, 2, 0, 0), (0, 3, 0, 0))
self.assert_unpack("RGBX", "B", 1,
(0, 0, 1, 0), (0, 0, 2, 0), (0, 0, 3, 0))
self.assert_unpack("RGBX", "X", 1,
(0, 0, 0, 1), (0, 0, 0, 2), (0, 0, 0, 3))
def test_CMYK(self):
self.assert_unpack(
"CMYK", "CMYK", 4, (1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12))
self.assert_unpack(
"CMYK", "CMYKX", 5, (1, 2, 3, 4), (6, 7, 8, 9), (11, 12, 13, 14))
self.assert_unpack(
"CMYK", "CMYKXX", 6, (1, 2, 3, 4), (7, 8, 9, 10), (13, 14, 15, 16))
self.assert_unpack(
"CMYK", "CMYK;I", 4,
(254, 253, 252, 251), (250, 249, 248, 247), (246, 245, 244, 243))
self.assert_unpack(
"CMYK", "CMYK;L", 4, (1, 4, 7, 10), (2, 5, 8, 11), (3, 6, 9, 12))
self.assert_unpack("CMYK", "C", 1,
(1, 0, 0, 0), (2, 0, 0, 0), (3, 0, 0, 0))
self.assert_unpack("CMYK", "M", 1,
(0, 1, 0, 0), (0, 2, 0, 0), (0, 3, 0, 0))
self.assert_unpack("CMYK", "Y", 1,
(0, 0, 1, 0), (0, 0, 2, 0), (0, 0, 3, 0))
self.assert_unpack("CMYK", "K", 1,
(0, 0, 0, 1), (0, 0, 0, 2), (0, 0, 0, 3))
self.assert_unpack(
"CMYK", "C;I", 1, (254, 0, 0, 0), (253, 0, 0, 0), (252, 0, 0, 0))
self.assert_unpack(
"CMYK", "M;I", 1, (0, 254, 0, 0), (0, 253, 0, 0), (0, 252, 0, 0))
self.assert_unpack(
"CMYK", "Y;I", 1, (0, 0, 254, 0), (0, 0, 253, 0), (0, 0, 252, 0))
self.assert_unpack(
"CMYK", "K;I", 1, (0, 0, 0, 254), (0, 0, 0, 253), (0, 0, 0, 252))
def test_YCbCr(self):
self.assert_unpack(
"YCbCr", "YCbCr", 3, (1, 2, 3), (4, 5, 6), (7, 8, 9))
self.assert_unpack(
"YCbCr", "YCbCr;L", 3, (1, 4, 7), (2, 5, 8), (3, 6, 9))
self.assert_unpack(
"YCbCr", "YCbCrK", 4, (1, 2, 3), (5, 6, 7), (9, 10, 11))
self.assert_unpack(
"YCbCr", "YCbCrX", 4, (1, 2, 3), (5, 6, 7), (9, 10, 11))
def test_LAB(self):
self.assert_unpack(
"LAB", "LAB", 3, (1, 130, 131), (4, 133, 134), (7, 136, 137))
self.assert_unpack("LAB", "L", 1, (1, 0, 0), (2, 0, 0), (3, 0, 0))
self.assert_unpack("LAB", "A", 1, (0, 1, 0), (0, 2, 0), (0, 3, 0))
self.assert_unpack("LAB", "B", 1, (0, 0, 1), (0, 0, 2), (0, 0, 3))
def test_HSV(self):
self.assert_unpack("HSV", "HSV", 3, (1, 2, 3), (4, 5, 6), (7, 8, 9))
self.assert_unpack("HSV", "H", 1, (1, 0, 0), (2, 0, 0), (3, 0, 0))
self.assert_unpack("HSV", "S", 1, (0, 1, 0), (0, 2, 0), (0, 3, 0))
self.assert_unpack("HSV", "V", 1, (0, 0, 1), (0, 0, 2), (0, 0, 3))
def test_I(self):
self.assert_unpack("I", "I;8", 1, 0x01, 0x02, 0x03, 0x04)
self.assert_unpack("I", "I;8S", b'\x01\x83', 1, -125)
self.assert_unpack("I", "I;16", 2, 0x0201, 0x0403)
self.assert_unpack("I", "I;16S", b'\x83\x01\x01\x83', 0x0183, -31999)
self.assert_unpack("I", "I;16B", 2, 0x0102, 0x0304)
self.assert_unpack("I", "I;16BS", b'\x83\x01\x01\x83', -31999, 0x0183)
self.assert_unpack("I", "I;32", 4, 0x04030201, 0x08070605)
self.assert_unpack(
"I", "I;32S",
b'\x83\x00\x00\x01\x01\x00\x00\x83', 0x01000083, -2097151999)
self.assert_unpack("I", "I;32B", 4, 0x01020304, 0x05060708)
self.assert_unpack(
"I", "I;32BS",
b'\x83\x00\x00\x01\x01\x00\x00\x83', -2097151999, 0x01000083)
if sys.byteorder == 'little':
self.assert_unpack("I", "I", 4, 0x04030201, 0x08070605)
self.assert_unpack("I", "I;16N", 2, 0x0201, 0x0403)
self.assert_unpack("I", "I;16NS",
b'\x83\x01\x01\x83', 0x0183, -31999)
self.assert_unpack("I", "I;32N", 4, 0x04030201, 0x08070605)
self.assert_unpack(
"I", "I;32NS",
b'\x83\x00\x00\x01\x01\x00\x00\x83', 0x01000083, -2097151999)
else:
self.assert_unpack("I", "I", 4, 0x01020304, 0x05060708)
self.assert_unpack("I", "I;16N", 2, 0x0102, 0x0304)
self.assert_unpack("I", "I;16NS",
b'\x83\x01\x01\x83', -31999, 0x0183)
self.assert_unpack("I", "I;32N", 4, 0x01020304, 0x05060708)
self.assert_unpack(
"I", "I;32NS",
b'\x83\x00\x00\x01\x01\x00\x00\x83', -2097151999, 0x01000083)
def test_F_int(self):
self.assert_unpack("F", "F;8", 1, 0x01, 0x02, 0x03, 0x04)
self.assert_unpack("F", "F;8S", b'\x01\x83', 1, -125)
self.assert_unpack("F", "F;16", 2, 0x0201, 0x0403)
self.assert_unpack("F", "F;16S", b'\x83\x01\x01\x83', 0x0183, -31999)
self.assert_unpack("F", "F;16B", 2, 0x0102, 0x0304)
self.assert_unpack("F", "F;16BS", b'\x83\x01\x01\x83', -31999, 0x0183)
self.assert_unpack("F", "F;32", 4, 67305984, 134678016)
self.assert_unpack(
"F", "F;32S",
b'\x83\x00\x00\x01\x01\x00\x00\x83', 16777348, -2097152000)
self.assert_unpack("F", "F;32B", 4, 0x01020304, 0x05060708)
self.assert_unpack(
"F", "F;32BS",
b'\x83\x00\x00\x01\x01\x00\x00\x83', -2097152000, 16777348)
if sys.byteorder == 'little':
self.assert_unpack("F", "F;16N", 2, 0x0201, 0x0403)
self.assert_unpack(
"F", "F;16NS",
b'\x83\x01\x01\x83', 0x0183, -31999)
self.assert_unpack("F", "F;32N", 4, 67305984, 134678016)
self.assert_unpack(
"F", "F;32NS",
b'\x83\x00\x00\x01\x01\x00\x00\x83', 16777348, -2097152000)
else:
self.assert_unpack("F", "F;16N", 2, 0x0102, 0x0304)
self.assert_unpack(
"F", "F;16NS",
b'\x83\x01\x01\x83', -31999, 0x0183)
self.assert_unpack("F", "F;32N", 4, 0x01020304, 0x05060708)
self.assert_unpack(
"F", "F;32NS",
b'\x83\x00\x00\x01\x01\x00\x00\x83',
-2097152000, 16777348)
def test_F_float(self):
self.assert_unpack(
"F", "F;32F", 4,
1.539989614439558e-36, 4.063216068939723e-34)
self.assert_unpack(
"F", "F;32BF", 4,
2.387939260590663e-38, 6.301941157072183e-36)
self.assert_unpack(
"F", "F;64F",
b'333333\xc3?\x00\x00\x00\x00\x00J\x93\xc0', # by struct.pack
0.15000000596046448, -1234.5)
self.assert_unpack(
"F", "F;64BF",
b'?\xc3333333\xc0\x93J\x00\x00\x00\x00\x00', # by struct.pack
0.15000000596046448, -1234.5)
if sys.byteorder == 'little':
self.assert_unpack(
"F", "F", 4,
1.539989614439558e-36, 4.063216068939723e-34)
self.assert_unpack(
"F", "F;32NF", 4,
1.539989614439558e-36, 4.063216068939723e-34)
self.assert_unpack(
"F", "F;64NF",
b'333333\xc3?\x00\x00\x00\x00\x00J\x93\xc0',
0.15000000596046448, -1234.5)
else:
self.assert_unpack(
"F", "F", 4,
2.387939260590663e-38, 6.301941157072183e-36)
self.assert_unpack(
"F", "F;32NF", 4,
2.387939260590663e-38, 6.301941157072183e-36)
self.assert_unpack(
"F", "F;64NF",
b'?\xc3333333\xc0\x93J\x00\x00\x00\x00\x00',
0.15000000596046448, -1234.5)
def test_I16(self):
self.assert_unpack("I;16", "I;16", 2, 0x0201, 0x0403, 0x0605)
self.assert_unpack("I;16B", "I;16B", 2, 0x0102, 0x0304, 0x0506)
self.assert_unpack("I;16L", "I;16L", 2, 0x0201, 0x0403, 0x0605)
self.assert_unpack("I;16", "I;12", 2, 0x0010, 0x0203, 0x0040)
if sys.byteorder == 'little':
self.assert_unpack("I;16", "I;16N", 2, 0x0201, 0x0403, 0x0605)
self.assert_unpack("I;16B", "I;16N", 2, 0x0201, 0x0403, 0x0605)
self.assert_unpack("I;16L", "I;16N", 2, 0x0201, 0x0403, 0x0605)
else:
self.assert_unpack("I;16", "I;16N", 2, 0x0102, 0x0304, 0x0506)
self.assert_unpack("I;16B", "I;16N", 2, 0x0102, 0x0304, 0x0506)
self.assert_unpack("I;16L", "I;16N", 2, 0x0102, 0x0304, 0x0506)
def test_value_error(self):
self.assertRaises(ValueError, self.assert_unpack, "L", "L", 0, 0)
self.assertRaises(ValueError, self.assert_unpack, "RGB", "RGB", 2, 0)
self.assertRaises(ValueError, self.assert_unpack, "CMYK", "CMYK", 2, 0)
|
1623164
|
from will.utils import warn
from will.backends.storage.redis_backend import RedisStorage
warn(
"""Deprecation - will.storage.redis_storage has been moved to will.backends.storage.redis_backend,
and will be removed in version 2.2. Please update your paths accordingly!"""
)
|
1623205
|
import unittest
from katas.beta.find_the_middle_element import gimme
class GimmeTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(gimme([2, 3, 1]), 0)
def test_equals_2(self):
self.assertEqual(gimme([5, 10, 14]), 1)
|
1623216
|
import megengine.module as M
import megengine.functional as F
class FlowHead(M.Module):
def __init__(self, input_dim=128, hidden_dim=256):
super(FlowHead, self).__init__()
self.conv1 = M.Conv2d(input_dim, hidden_dim, 3, padding=1)
self.conv2 = M.Conv2d(hidden_dim, 2, 3, padding=1)
self.relu = M.ReLU()
def forward(self, x):
return self.conv2(self.relu(self.conv1(x)))
class SepConvGRU(M.Module):
def __init__(self, hidden_dim=128, input_dim=192 + 128):
super(SepConvGRU, self).__init__()
self.convz1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convr1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convq1 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (1, 5), padding=(0, 2)
)
self.convz2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convr2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
self.convq2 = M.Conv2d(
hidden_dim + input_dim, hidden_dim, (5, 1), padding=(2, 0)
)
    def forward(self, h, x):
        # two sequential ConvGRU updates with separable (1x5 then 5x1) kernels
        # horizontal
        hx = F.concat([h, x], axis=1)
        z = F.sigmoid(self.convz1(hx))  # update gate
        r = F.sigmoid(self.convr1(hx))  # reset gate
        q = F.tanh(self.convq1(F.concat([r * h, x], axis=1)))  # candidate state
        h = (1 - z) * h + z * q
        # vertical
        hx = F.concat([h, x], axis=1)
        z = F.sigmoid(self.convz2(hx))
        r = F.sigmoid(self.convr2(hx))
        q = F.tanh(self.convq2(F.concat([r * h, x], axis=1)))
        h = (1 - z) * h + z * q
        return h
class BasicMotionEncoder(M.Module):
def __init__(self, cor_planes):
super(BasicMotionEncoder, self).__init__()
self.convc1 = M.Conv2d(cor_planes, 256, 1, padding=0)
self.convc2 = M.Conv2d(256, 192, 3, padding=1)
self.convf1 = M.Conv2d(2, 128, 7, padding=3)
self.convf2 = M.Conv2d(128, 64, 3, padding=1)
self.conv = M.Conv2d(64 + 192, 128 - 2, 3, padding=1)
def forward(self, flow, corr):
cor = F.relu(self.convc1(corr))
cor = F.relu(self.convc2(cor))
flo = F.relu(self.convf1(flow))
flo = F.relu(self.convf2(flo))
cor_flo = F.concat([cor, flo], axis=1)
out = F.relu(self.conv(cor_flo))
return F.concat([out, flow], axis=1)
class BasicUpdateBlock(M.Module):
def __init__(self, hidden_dim, cor_planes, mask_size=8):
super(BasicUpdateBlock, self).__init__()
self.encoder = BasicMotionEncoder(cor_planes)
self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128 + hidden_dim)
self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
self.mask = M.Sequential(
M.Conv2d(128, 256, 3, padding=1),
M.ReLU(),
M.Conv2d(256, mask_size**2 * 9, 1, padding=0),
)
def forward(self, net, inp, corr, flow, upsample=True):
motion_features = self.encoder(flow, corr)
inp = F.concat([inp, motion_features], axis=1)
net = self.gru(net, inp)
delta_flow = self.flow_head(net)
        # scale mask to balance gradients
mask = 0.25 * self.mask(net)
return net, mask, delta_flow
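# A minimal shape-check sketch (illustrative, not part of the original file):
# run random tensors through BasicUpdateBlock and confirm the mask carries
# mask_size**2 * 9 channels as required for RAFT-style convex upsampling.
# hidden_dim=128 and cor_planes=81 below are assumed example values.
if __name__ == "__main__":
    import numpy as np
    import megengine as mge
    block = BasicUpdateBlock(hidden_dim=128, cor_planes=81)
    rand = lambda c: mge.tensor(np.random.randn(1, c, 32, 32).astype("float32"))
    net, inp, corr, flow = rand(128), rand(128), rand(81), rand(2)
    net, mask, delta_flow = block(net, inp, corr, flow)
    # expected: (1, 128, 32, 32) (1, 576, 32, 32) (1, 2, 32, 32)
    print(net.shape, mask.shape, delta_flow.shape)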
|
1623220
|
from __future__ import absolute_import
import inspect
import types
import warnings
import weakref
from functools import partial
from logging import getLogger
from eventlet.event import Event
from nameko.exceptions import IncorrectSignature
_log = getLogger(__name__)
ENTRYPOINT_EXTENSIONS_ATTR = 'nameko_entrypoints'
class Extension(object):
""" Note that Extension.__init__ is called during :meth:`bind` as
well as at instantiation time, so avoid side-effects in this method.
Use :meth:`setup` instead.
Furthermore, :meth:`bind` and :func:`iter_extensions` use introspection
to find any subextensions that an extension may declare. Any descriptors
on the extension should expect to be called during introspection, which
happens between `ServiceContainer.__init__` and `ServiceContainer.setup`.
:attr:`Extension.container` gives access to the
:class:`~nameko.containers.ServiceContainer` instance to
which the Extension is bound, otherwise `None`.
"""
__params = None
container = None
def __new__(cls, *args, **kwargs):
inst = super(Extension, cls).__new__(cls)
inst.__params = (args, kwargs)
return inst
def setup(self):
""" Called on bound Extensions before the container starts.
Extensions should do any required initialisation here.
"""
def start(self):
""" Called on bound Extensions when the container has successfully
started.
This is only called after all other Extensions have successfully
returned from :meth:`Extension.setup`. If the Extension reacts
to external events, it should now start acting upon them.
"""
def stop(self):
""" Called when the service container begins to shut down.
Extensions should do any graceful shutdown here.
"""
def kill(self):
""" Called to stop this extension without grace.
Extensions should urgently shut down here. This means
stopping as soon as possible by omitting cleanup.
This may be distinct from ``stop()`` for certain dependencies.
For example, :class:`~messaging.QueueConsumer` tracks messages being
processed and pending message acks. Its ``kill`` implementation
discards these and disconnects from rabbit as soon as possible.
Extensions should not raise during kill, since the container
is already dying. Instead they should log what is appropriate and
swallow the exception to allow the container kill to continue.
"""
def bind(self, container):
""" Get an instance of this Extension to bind to `container`.
"""
def clone(prototype):
if prototype.is_bound():
raise RuntimeError('Cannot `bind` a bound extension.')
cls = type(prototype)
args, kwargs = prototype.__params
instance = cls(*args, **kwargs)
# instance.container must be a weakref to avoid a strong reference
# from value to key in the `shared_extensions` weakkey dict
# see test_extension_sharing.py: test_weakref
instance.container = weakref.proxy(container)
return instance
instance = clone(self)
# recurse over sub-extensions
for name, ext in inspect.getmembers(self, is_extension):
setattr(instance, name, ext.bind(container))
return instance
def is_bound(self):
return self.container is not None
def __repr__(self):
if not self.is_bound():
return '<{} [unbound] at 0x{:x}>'.format(
type(self).__name__, id(self))
return '<{} at 0x{:x}>'.format(
type(self).__name__, id(self))
class SharedExtension(Extension):
@property
def sharing_key(self):
return type(self)
def bind(self, container):
""" Bind implementation that supports sharing.
"""
# if there's already a matching bound instance, return that
shared = container.shared_extensions.get(self.sharing_key)
if shared:
return shared
instance = super(SharedExtension, self).bind(container)
# save the new instance
container.shared_extensions[self.sharing_key] = instance
return instance
class DependencyProvider(Extension):
attr_name = None
def bind(self, container, attr_name):
""" Get an instance of this Dependency to bind to `container` with
`attr_name`.
"""
instance = super(DependencyProvider, self).bind(container)
instance.attr_name = attr_name
self.attr_name = attr_name
return instance
def get_dependency(self, worker_ctx):
""" Called before worker execution. A DependencyProvider should return
an object to be injected into the worker instance by the container.
"""
def worker_result(self, worker_ctx, result=None, exc_info=None):
""" Called with the result of a service worker execution.
Dependencies that need to process the result should do it here.
This method is called for all `Dependency` instances on completion
of any worker.
Example: a database session dependency may flush the transaction
:Parameters:
worker_ctx : WorkerContext
See ``nameko.containers.ServiceContainer.spawn_worker``
"""
def worker_setup(self, worker_ctx):
""" Called before a service worker executes a task.
Dependencies should do any pre-processing here, raising exceptions
in the event of failure.
Example: ...
:Parameters:
worker_ctx : WorkerContext
See ``nameko.containers.ServiceContainer.spawn_worker``
"""
def worker_teardown(self, worker_ctx):
""" Called after a service worker has executed a task.
Dependencies should do any post-processing here, raising
exceptions in the event of failure.
Example: a database session dependency may commit the session
:Parameters:
worker_ctx : WorkerContext
See ``nameko.containers.ServiceContainer.spawn_worker``
"""
def __repr__(self):
if not self.is_bound():
return '<{} [unbound] at 0x{:x}>'.format(
type(self).__name__, id(self))
service_name = self.container.service_name
return '<{} [{}.{}] at 0x{:x}>'.format(
type(self).__name__, service_name, self.attr_name, id(self))
class ProviderCollector(object):
def __init__(self, *args, **kwargs):
self._providers = set()
self._providers_registered = False
self._last_provider_unregistered = Event()
super(ProviderCollector, self).__init__(*args, **kwargs)
def register_provider(self, provider):
self._providers_registered = True
_log.debug('registering provider %s for %s', provider, self)
self._providers.add(provider)
def unregister_provider(self, provider):
providers = self._providers
if provider not in self._providers:
return
_log.debug('unregistering provider %s for %s', provider, self)
providers.remove(provider)
if len(providers) == 0:
_log.debug('last provider unregistered for %s', self)
self._last_provider_unregistered.send()
def wait_for_providers(self):
""" Wait for any providers registered with the collector to have
unregistered.
Returns immediately if no providers were ever registered.
"""
if self._providers_registered:
_log.debug('waiting for providers to unregister %s', self)
self._last_provider_unregistered.wait()
_log.debug('all providers unregistered %s', self)
def stop(self):
""" Default `:meth:Extension.stop()` implementation for
subclasses using `ProviderCollector` as a mixin.
"""
self.wait_for_providers()
def register_entrypoint(fn, entrypoint):
descriptors = getattr(fn, ENTRYPOINT_EXTENSIONS_ATTR, None)
if descriptors is None:
descriptors = set()
setattr(fn, ENTRYPOINT_EXTENSIONS_ATTR, descriptors)
descriptors.add(entrypoint)
class Entrypoint(Extension):
method_name = None
def __init__(
self, expected_exceptions=(), sensitive_arguments=(), **kwargs
):
"""
:Parameters:
expected_exceptions : exception class or tuple of exception classes
Specify exceptions that may be caused by the caller (e.g. by
providing bad arguments). Saved on the entrypoint instance as
``entrypoint.expected_exceptions`` for later inspection by
other extensions, for example a monitoring system.
sensitive_arguments : string or tuple of strings
Mark an argument or part of an argument as sensitive. Saved on
the entrypoint instance as ``entrypoint.sensitive_arguments``
for later inspection by other extensions, for example a
logging system.
:seealso: :func:`nameko.utils.get_redacted_args`
"""
# backwards compat
sensitive_variables = kwargs.pop('sensitive_variables', ())
if sensitive_variables:
sensitive_arguments = sensitive_variables
warnings.warn(
"The `sensitive_variables` argument has been renamed to "
"`sensitive_arguments`. This warning will be removed in "
"version 2.9.0.", DeprecationWarning)
self.expected_exceptions = expected_exceptions
self.sensitive_arguments = sensitive_arguments
super(Entrypoint, self).__init__(**kwargs)
def bind(self, container, method_name):
""" Get an instance of this Entrypoint to bind to `container` with
`method_name`.
"""
instance = super(Entrypoint, self).bind(container)
instance.method_name = method_name
return instance
def check_signature(self, args, kwargs):
service_cls = self.container.service_cls
fn = getattr(service_cls, self.method_name)
try:
service_instance = None # fn is unbound
inspect.getcallargs(fn, service_instance, *args, **kwargs)
except TypeError as exc:
raise IncorrectSignature(str(exc))
@classmethod
def decorator(cls, *args, **kwargs):
def registering_decorator(fn, args, kwargs):
instance = cls(*args, **kwargs)
register_entrypoint(fn, instance)
return fn
if len(args) == 1 and isinstance(args[0], types.FunctionType):
# usage without arguments to the decorator:
# @foobar
# def spam():
# pass
return registering_decorator(args[0], args=(), kwargs={})
else:
# usage with arguments to the decorator:
# @foobar('shrub', ...)
# def spam():
# pass
return partial(registering_decorator, args=args, kwargs=kwargs)
def __repr__(self):
if not self.is_bound():
return '<{} [unbound] at 0x{:x}>'.format(
type(self).__name__, id(self))
service_name = self.container.service_name
return '<{} [{}.{}] at 0x{:x}>'.format(
type(self).__name__, service_name, self.method_name, id(self))
def is_extension(obj):
return isinstance(obj, Extension)
def is_dependency(obj):
return isinstance(obj, DependencyProvider)
def is_entrypoint(obj):
return isinstance(obj, Entrypoint)
def iter_extensions(extension):
""" Depth-first iterator over sub-extensions on `extension`.
"""
for _, ext in inspect.getmembers(extension, is_extension):
for item in iter_extensions(ext):
yield item
yield ext
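# A minimal usage sketch (hypothetical entrypoint, not part of nameko itself):
# subclass Entrypoint and expose it via the `decorator` classmethod, following
# the lifecycle documented on Extension (bind -> setup -> start -> stop).
class OnceEntrypoint(Entrypoint):
    def start(self):
        # fire the decorated service method a single time once the container
        # has started; spawn_worker is provided by the bound ServiceContainer
        self.container.spawn_worker(self, (), {})
once = OnceEntrypoint.decorator
# usage on a service class:
# class Service:
#     name = "demo"
#     @once
#     def go(self):
#         return "ran once"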
|
1623337
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
import os
file_path = os.path.dirname(__file__)
sys.path.append(os.path.join(file_path, '../../super_module'))
sys.path.append(os.path.join(file_path, '../'))
sys.path.append(file_path)
import super_class
import mobilenetv2_super
import mobilenetv2_dense
class MobileNetV2(mobilenetv2_super.MobileNetV2, super_class.DeepSP_v2Model):
    def __init__(self, ranks, alpha=1.0, num_classes=10, conv=super_class.Conv2dsp_v2, linear=super_class.Linearsp_v2):
        super(MobileNetV2, self).__init__(alpha, num_classes, ranks, conv=conv, linear=linear)
def test():
net = mobilenetv2_dense.MobileNetV2()
x = torch.randn(2, 3, 32, 32)
y = net(x)
print(y.size())
print(net.get_ranks())
    net1 = MobileNetV2(net.get_ranks(), alpha=1.0, num_classes=10)
y = net1(x)
print(y.size())
if __name__=="__main__":
test()
|
1623344
|
from nitorch import spatial, io
from nitorch.core import py, dtypes
import torch
import os
def convert(inp, meta=None, dtype=None, casting='unsafe', format=None, output=None):
"""Convert a volume.
Parameters
----------
inp : str
A path to a volume file.
meta : sequence of (key, value)
List of metadata fields to set.
dtype : str or dtype, optional
Output data type
casting : {'unsafe', 'rescale', 'rescale_zero'}, default='unsafe'
Casting method
    format : {'nii', 'nii.gz', 'mgh', 'mgz'}, optional
        Output format
    output : str, optional
        Output filename; defaults to '{dir}{sep}{base}{ext}'
    """
meta = dict(meta or {})
if dtype:
meta['dtype'] = dtype
fname = inp
f = io.volumes.map(fname)
d = f.data(numpy=True)
dir, base, ext = py.fileparts(fname)
if format:
ext = format
if ext == 'nifti':
ext = 'nii'
if ext[0] != '.':
ext = '.' + ext
output = output or '{dir}{sep}{base}{ext}'
output = output.format(dir=dir or '.', sep=os.sep, base=base, ext=ext)
odtype = meta.get('dtype', None) or f.dtype
if ext in ('.mgh', '.mgz'):
from nibabel.freesurfer.mghformat import _dtdefs
odtype = dtypes.dtype(odtype)
mgh_dtypes = [dtypes.dtype(dt[2]) for dt in _dtdefs]
for mgh_dtype in mgh_dtypes:
if odtype <= mgh_dtype:
odtype = mgh_dtype
break
odtype = odtype.numpy
meta['dtype'] = odtype
io.save(d, output, like=f, casting=casting, **meta)
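if __name__ == '__main__':
    # a minimal usage sketch with a hypothetical input path: rescale-cast a
    # NIfTI volume to int16 and write it next to the input as a .mgz file
    convert('sub-01_T1w.nii.gz', dtype='int16', casting='rescale', format='mgz')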
|
1623357
|
import pygame as pg
class CoinDebris(object):
"""
Coin that appears when you hit the question block.
"""
def __init__(self, x_pos, y_pos):
self.rect = pg.Rect(x_pos, y_pos, 16, 28)
self.y_vel = -2
self.y_offset = 0
self.moving_up = True
self.current_image = 0
self.image_tick = 0
self.images = [
pg.image.load('images/coin_an0.png').convert_alpha(),
pg.image.load('images/coin_an1.png').convert_alpha(),
pg.image.load('images/coin_an2.png').convert_alpha(),
pg.image.load('images/coin_an3.png').convert_alpha()
]
def update(self, core):
self.image_tick += 1
if self.image_tick % 15 == 0:
self.current_image += 1
if self.current_image == 4:
self.current_image = 0
self.image_tick = 0
if self.moving_up:
self.y_offset += self.y_vel
self.rect.y += self.y_vel
if self.y_offset < -50:
self.moving_up = False
self.y_vel = -self.y_vel
else:
self.y_offset += self.y_vel
self.rect.y += self.y_vel
if self.y_offset == 0:
core.get_map().debris.remove(self)
def render(self, core):
core.screen.blit(self.images[self.current_image], core.get_map().get_camera().apply(self))
|
1623370
|
from mock import MagicMock
import pyaem
from pyaem import bagofrequests as bag
import unittest
from .util import HandlersMatcher
class TestPackageManager(unittest.TestCase):
def setUp(self):
self.package_manager = pyaem.packagemanager.PackageManager('http://localhost:4502', debug=True)
bag.request = MagicMock()
bag.download_file = MagicMock()
bag.upload_file = MagicMock()
def test_init(self):
self.assertEqual(self.package_manager.url, 'http://localhost:4502')
self.assertEqual(self.package_manager.kwargs['debug'], True)
self.assertTrue(401 in self.package_manager.handlers)
self.assertTrue(405 in self.package_manager.handlers)
def test_update_package(self):
_self = self
class UpdatePackageHandlerMatcher(HandlersMatcher):
def __eq__(self, handlers):
result = handlers[200](None)
                _self.assertEqual(result.is_success(), True)
                _self.assertEqual(result.message, 'Package updated')
                _self.assertEqual(result.response, None)
return super(UpdatePackageHandlerMatcher, self).__eq__(handlers)
self.package_manager.update_package('mygroup', 'mypackage', '1.2.3', foo='bar')
bag.request.assert_called_once_with(
'get',
'http://localhost:4502/crx/packmgr/update.jsp',
{'packageName': 'mypackage',
'groupName': 'mygroup',
'version': '1.2.3',
'_charset_': 'utf-8',
'path': '/etc/packages/mygroup/mypackage-1.2.3.zip',
'foo': 'bar'},
UpdatePackageHandlerMatcher([200, 401, 405]),
debug=True)
def test_download_package(self):
_self = self
class DownloadPackageHandlerMatcher(HandlersMatcher):
def __eq__(self, handlers):
result = handlers[200](None, file='/tmp/somepath/mypackage-1.2.3.zip')
                _self.assertEqual(result.is_success(), True)
                _self.assertEqual(result.message, '/tmp/somepath/mypackage-1.2.3.zip downloaded')
                _self.assertEqual(result.response, None)
return super(DownloadPackageHandlerMatcher, self).__eq__(handlers)
self.package_manager.download_package('mygroup', 'mypackage', '1.2.3', '/tmp/somepath', foo='bar')
bag.download_file.assert_called_once_with(
'http://localhost:4502/etc/packages/mygroup/mypackage-1.2.3.zip',
{'foo': 'bar'},
DownloadPackageHandlerMatcher([200, 401, 405]),
file='/tmp/somepath/mypackage-1.2.3.zip',
debug=True)
if __name__ == '__main__':
unittest.main()
|
1623417
|
import pytest
import pglet
from pglet import Link, Text
'''
def test_button_primary_must_be_bool():
with pytest.raises(Exception):
Button(id="button1", text="My button", primary="1")
'''
def test_link_add():
l = Link(value="search", url="http://google.com", align="left", new_window=True)
assert isinstance(l, pglet.Control)
assert isinstance(l, pglet.Link)
assert l.get_cmd_str() == ('link align="left" newwindow="true" url="http://google.com" value="search"'), "Test failed"
def test_link_with_controls():
l = Link(value='Visit google', url='https://google.com', pre=True, align='right', width='100', size='large1',
title='Link title', controls=[
Text(value='LinkText1'),
Text(value='LinkText2')
])
assert isinstance(l, pglet.Control)
assert isinstance(l, pglet.Link)
assert l.get_cmd_str() == ('link align="right" pre="true" size="large1" title="Link title" '
'url="https://google.com" value="Visit google" width="100"\n'
' text value="LinkText1"\n'
' text value="LinkText2"'), "Test failed"
|
1623425
|
r"""
Solve Ginzburg-Landau equation on (-50, 50)x(-50, 50) with periodic bcs
u_t = div(grad(u)) + u - (1+1.5i)*u*|u|**2 (1)
Use Fourier basis V and find u in VxV such that
(v, u_t) = (v, div(grad(u))+ u) - (v, (1+1.5i)*u*|u|**2) for all v in VxV
"""
from sympy import symbols, exp
import matplotlib.pyplot as plt
from mpi4py_fft import generate_xdmf, fftw
from shenfun import inner, div, grad, TestFunction, TrialFunction, \
TensorProductSpace, Array, Function, ETDRK4, HDF5File, FunctionSpace, comm
# Use sympy to set up initial condition
x, y = symbols("x,y", real=True)
#ue = (1j*x + y)*exp(-0.03*(x**2+y**2))
ue = (x + y)*exp(-0.03*(x**2+y**2))
# Size of discretization
N = (129, 129)
K0 = FunctionSpace(N[0], 'F', dtype='D', domain=(-50, 50))
K1 = FunctionSpace(N[1], 'F', dtype='D', domain=(-50, 50))
T = TensorProductSpace(comm, (K0, K1), **{'planner_effort': 'FFTW_MEASURE'})
u = TrialFunction(T)
v = TestFunction(T)
# Try to import wisdom. Note that wisdom must be imported after creating the Bases (that initializes the wisdom somehow?)
try:
fftw.import_wisdom('GL.wisdom')
print('Importing wisdom')
except:
print('No wisdom imported')
X = T.local_mesh(True)
U = Array(T, buffer=ue)
U_hat = Function(T)
padding_factor = 1.5
#initialize
U_hat = T.forward(U, U_hat)
def LinearRHS(self, u, **par):
return div(grad(u)) + u
def NonlinearRHS(self, u, u_hat, rhs, **par):
    global Up
rhs.fill(0)
Up = u_hat.backward(padding_factor=padding_factor)
Up[:] = -(1+1.5j)*Up*abs(Up)**2
rhs = Up.forward(rhs)
return rhs
plt.figure()
image = plt.contourf(X[0], X[1], U.real, 100)
plt.draw()
plt.pause(1e-6)
count = 0
def update(self, u, u_hat, t, tstep, plot_tstep, write_tstep, file, **params):
global count
if tstep % plot_tstep == 0 and plot_tstep > 0:
u = u_hat.backward(u)
image.ax.clear()
image.ax.contourf(X[0], X[1], u.real, 100)
plt.pause(1e-6)
count += 1
#plt.savefig('Ginzburg_Landau_{}_{}.png'.format(N[0], count))
if tstep % write_tstep[0] == 0:
u = u_hat.backward(u)
file.write(tstep, write_tstep[1])
if __name__ == '__main__':
file0 = HDF5File("Ginzburg_Landau_{}.h5".format(N[0]), mode='w')
par = {'plot_tstep': 100,
'write_tstep': (50, {'u': [U.real]}),
'file': file0}
t = 0.0
dt = 0.01
end_time = 100
integrator = ETDRK4(T, L=LinearRHS, N=NonlinearRHS, update=update, **par)
integrator.setup(dt)
U_hat = integrator.solve(U, U_hat, dt, (0, end_time))
if comm.Get_rank() == 0:
generate_xdmf("Ginzburg_Landau_{}.h5".format(N[0]))
fftw.export_wisdom('GL.wisdom')
|
1623497
|
import pytest
import numpy as np
import time
from ding.utils.time_helper import build_time_helper, WatchDog, TimeWrapperTime, EasyTimer
@pytest.mark.unittest
class TestTimeHelper:
def test_naive(self):
class NaiveObject(object):
pass
cfg = NaiveObject()
setattr(cfg, 'common', NaiveObject())
setattr(cfg.common, 'time_wrapper_type', 'time')
with pytest.raises(RuntimeError):
time_handle = build_time_helper()
with pytest.raises(KeyError):
build_time_helper(cfg=None, wrapper_type="not_implement")
time_handle = build_time_helper(cfg)
time_handle = build_time_helper(wrapper_type='cuda')
# wrapper_type='cuda' but cuda is not available
assert issubclass(time_handle, TimeWrapperTime)
time_handle = build_time_helper(wrapper_type='time')
@time_handle.wrapper
def func1(x):
return x + 1
def func2(x):
return x + 1
# usage 1
ret, t = func1(3)
assert np.isscalar(t)
assert func1(4)[0] == func2(4)
# usage 2
time_handle.start_time()
_ = func2(3)
t = time_handle.end_time()
assert np.isscalar(t)
#test time_lag and restart
time_handle.start_time()
time.sleep(0.5)
time_handle.start_time()
time.sleep(1)
t = time_handle.end_time()
assert np.isscalar(t)
# time_lag is bigger than 1e-3
# assert abs(t-1) < 1e-3
assert abs(t - 1) < 1e-2
timer = EasyTimer()
with timer:
tmp = np.random.random(size=(4, 100))
tmp = tmp ** 2
value = timer.value
assert isinstance(value, float)
@pytest.mark.unittest
class TestWatchDog:
def test_naive(self):
watchdog = WatchDog(3)
watchdog.start()
time.sleep(2)
with pytest.raises(TimeoutError):
time.sleep(2)
watchdog.stop()
|
1623517
|
from cx_Oracle import CLOB
from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.contrib.gis.geos import GeometryCollection, Polygon
class OracleSpatialAdapter(WKTAdapter):
input_size = CLOB
def __init__(self, geom):
"""
Oracle requires that polygon rings are in proper orientation. This
affects spatial operations and an invalid orientation may cause
failures. Correct orientations are:
* Outer ring - counter clockwise
* Inner ring(s) - clockwise
"""
if isinstance(geom, Polygon):
if self._polygon_must_be_fixed(geom):
geom = self._fix_polygon(geom)
elif isinstance(geom, GeometryCollection):
if any(isinstance(g, Polygon) and self._polygon_must_be_fixed(g) for g in geom):
geom = self._fix_geometry_collection(geom)
self.wkt = geom.wkt
self.srid = geom.srid
@staticmethod
def _polygon_must_be_fixed(poly):
return (
not poly.empty and
(
not poly.exterior_ring.is_counterclockwise or
any(x.is_counterclockwise for x in poly)
)
)
@classmethod
def _fix_polygon(cls, poly, clone=True):
"""Fix single polygon orientation as described in __init__()."""
if clone:
poly = poly.clone()
if not poly.exterior_ring.is_counterclockwise:
poly.exterior_ring = list(reversed(poly.exterior_ring))
for i in range(1, len(poly)):
if poly[i].is_counterclockwise:
poly[i] = list(reversed(poly[i]))
return poly
@classmethod
def _fix_geometry_collection(cls, coll):
"""
Fix polygon orientations in geometry collections as described in
__init__().
"""
coll = coll.clone()
for i, geom in enumerate(coll):
if isinstance(geom, Polygon):
coll[i] = cls._fix_polygon(geom, clone=False)
return coll
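# A minimal sketch of the orientation rule above (kept commented so the module
# has no import-time side effects; the ring is deliberately wound clockwise):
# cw_square = Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
# fixed = OracleSpatialAdapter._fix_polygon(cw_square)
# assert fixed.exterior_ring.is_counterclockwise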
|
1623529
|
import customers as c
import util as u
matrix = [[]]
silhouettesList = []
centroids = []
def __init__(mat, cents):
global matrix
matrix = mat
global centroids
centroids = cents
def averageSilhouettes(clusters, matrix, centroids):
__init__(matrix, centroids)
silhouettes = 0.0
for i in range(0, len(clusters)):
center = centroids[i]
neighbor = neighboringCentroid(clusters[i], i)
s = 0.0
for j in range(0, len(clusters[i])):
point = customerPoint(clusters[i][j])
s += silhouette(point, center, neighbor)
clustSil = s/len(clusters[i])
silhouettesList.append(clustSil)
silhouettes += s
return silhouettes/len(c.customers)
def silhouette(point, centroid, neighbor):
a = u.dist(point, centroid)
b = u.dist(point, neighbor)
sil = (b-a)/max(a,b)
return sil
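# worked example (illustrative numbers): for a point with a = 2.0 (distance to
# its own centroid) and b = 6.0 (distance to the neighboring centroid),
# sil = (6.0 - 2.0) / max(2.0, 6.0) ~= 0.67, i.e. the point sits well inside
# its cluster; values near -1 would flag a likely misassignment.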
def neighboringCentroid(cluster, index):
    # track the nearest other centroid; start from an infinite distance so the
    # first candidate always becomes the initial neighbor
    amin = float('inf')
    neighborIndex = -1
for i in range(0,len(centroids)):
if i == index:
continue
dist = u.dist(centroids[i], centroids[index])
if dist < amin:
amin = dist
neighborIndex = i
neighbor = centroids[neighborIndex]
return neighbor
def customerPoint(customer):
return matrix[c.customersMap[customer.name]]
|
1623556
|
import taco.common.exceptions
class AutoScalerWrapperException(taco.common.exceptions.DataDictException):
pass
# --- Table ---
class TargetRegistrationException(AutoScalerWrapperException):
def __init__(self, service_namespace, resource_id, exc):
super().__init__('Failed to register auto scaler',
data_dict={
'resource_id': resource_id,
'service_namespace': service_namespace
}, exc=exc)
class PutPolicyException(AutoScalerWrapperException):
def __init__(self, service_namespace, resource_id, exc):
        super().__init__('Failed to put auto scaling policy',
data_dict={
'resource_id': resource_id,
'service_namespace': service_namespace
}, exc=exc)
|
1623614
|
from django.core.exceptions import ValidationError
from django.test import TestCase
from kolibri.core.device.models import DeviceSettings
from kolibri.core.utils.cache import process_cache as cache
class DeviceSettingsTestCase(TestCase):
def test_singleton(self):
cache.clear()
DeviceSettings.objects.create()
with self.assertRaises(ValidationError):
DeviceSettings.objects.create()
def test_get_setting(self):
cache.clear()
ds = DeviceSettings.objects.create()
ds2 = DeviceSettings.objects.get()
self.assertEqual(ds, ds2)
def test_delete_setting(self):
cache.clear()
ds = DeviceSettings.objects.create()
ds.delete()
with self.assertRaises(DeviceSettings.DoesNotExist):
DeviceSettings.objects.get()
def test_delete_setting_queryset(self):
cache.clear()
DeviceSettings.objects.create()
DeviceSettings.objects.all().delete()
with self.assertRaises(DeviceSettings.DoesNotExist):
DeviceSettings.objects.get()
def test_delete_setting_manager(self):
cache.clear()
DeviceSettings.objects.create()
DeviceSettings.objects.delete()
with self.assertRaises(DeviceSettings.DoesNotExist):
DeviceSettings.objects.get()
|
1623624
|
import os
class IMDB(object):
def __init__(self, benchmark_name, image_subset_name, dataset_path, cache_path=None):
"""
basic information about an image database
:param name: name of image database will be used for any output
:param dataset_path: dataset path store images and image lists
:param cache_path: store cache and proposal data
"""
self.benchmark_name = benchmark_name
self.image_subset_name = image_subset_name
self.name = benchmark_name + '_' + image_subset_name
self.dataset_path = dataset_path
if cache_path:
self._cache_path = cache_path
else:
self._cache_path = dataset_path
# abstract attributes
self.num_images = 0
@property
def cache_path(self):
"""
make a directory to store all caches
:return: cache path
"""
cache_path = os.path.join(self._cache_path,'{}_{}_cache'.format(self.benchmark_name, self.image_subset_name))
if not os.path.exists(cache_path):
os.mkdir(cache_path)
return cache_path
|
1623636
|
import rematbal.matbal as mb
import numpy as np
from scipy.optimize import fsolve
import pandas as pd
def mbal_inner_calc(dict, P, Pres_calc, We, aquifer_pres, step):
Np, Wp, Gp, N, Wei, pvt_oil_pressure, pvt_oil_Bo, pvt_oil_Bg, pvt_oil_Rs, Rsb, \
Bti, Bgi, Pi, m, Boi, cw, Swi, cf, Rsi, Bw, Winj, Bwinj, Ginj, J, \
ts, VEH_aq_type, td_array, VEH_dp_array, r, rr, aq_type = mbal_setup(dict)
Bo = np.interp(P, pvt_oil_pressure, pvt_oil_Bo)
Bg = np.interp(P, pvt_oil_pressure, pvt_oil_Bg)
Bginj = Bg
Rs = np.interp(P, pvt_oil_pressure, pvt_oil_Rs)
Bt = mb.formation_total_volume_factor(Bo, Bg, Rsb, Rs)
Eo = mb.dissolved_oil_and_gas_expansion(Bt, Bti)
Eg = mb.gas_cap_expansion(Bti, Bg, Bgi)
dP = Pi - P
Efw = mb.pore_volume_reduction_connate_water_expansion(m, Boi, cw, Swi, cf, dP)
Npx = Np[step]
Wpx = Wp[step]
Gpx = Gp[step]
Winjx = Winj[step]
Ginjx = Ginj[step]
F, produced_oil_and_gas, produced_water, injected_gas, injected_water = mb.production_injection_balance(Npx, Bt, Rs,
Rsi, Bg,
Wpx,
Bw, Winjx,
Bwinj, Ginjx,
Bginj, Gpx)
    if aq_type == 'VEH':
        VEH_avg_pressure = None  # running state for the VEH model (assumed initial value)
        Wex, VEH_avg_pressure, VEH_dp_array = mb.VEH_aquifer_influx(VEH_aq_type, step, ts, td_array, VEH_dp_array,
                                                                    r, rr, P, Pi, VEH_avg_pressure)
if aq_type == 'Fetkovich':
Wex, aq_pres = aquifer_influx(step, P, Wei, We, ts, Pres_calc, Pi, J, aquifer_pres)
aquifer_pres[step] = aq_pres
We[step] = Wex
return F, Eo, m, Eg, Efw, We, aquifer_pres, Bw, Bti, N
def obj_function2(P, *data):
dict = data[0]
Pres_calc = data[1]
We = data[2]
aq_pres = data[3]
step = data[4]
F, Eo, m, Eg, Efw, We, aq_pres, Bw, Bti, N = mbal_inner_calc(dict, P, Pres_calc, We, aq_pres, step)
Wex = We[step]
Ncalc = mb.oil_in_place(F, Eo, m, Eg, Efw, Wex, Bw, Bti)
of = (N - Ncalc)
return of
def pressure_calculation(data, Pres_calc):
step = data[4]
x0 = Pres_calc[step - 1] - 10.0
    res = fsolve(obj_function2, x0, args=data)
return res
def aquifer_influx(step, P, Wei, We, ts, Pres_calc, Pi, J, aquifer_pres):
    """Fetkovich aquifer influx: incremental water influx over the timestep,
    driven by the difference between aquifer pressure and the average
    reservoir pressure, with productivity index J and aquifer constant Wei."""
    We_prev = We[step - 1]
    ts_prev = ts[step - 1]
    tsx = ts[step]
    # average reservoir pressure over the timestep
    avg_pres = (Pres_calc[step - 1] + P) / 2
    aq_pres = aquifer_pressure(step, Wei, We, aquifer_pres, Pi)
    # print(step, aq_pres)
    # incremental influx for this timestep (Fetkovich)
    Wex = (Wei / Pi) * (aq_pres - avg_pres) * (1 - np.exp(-J * Pi * (tsx - ts_prev) / Wei))
    # cumulative influx
    Wex = We_prev + Wex
    return Wex, aq_pres
def aquifer_pressure(step, Wei, We, aquifer_pres, Pi):
We_prev = We[step - 1]
if step == 1:
aq_pres = Pi
else:
aq_pres = Pi * (1 - We_prev / (Wei))
return aq_pres
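# worked example (illustrative numbers): with Wei = 1e9 (maximum encroachable
# water), Pi = 4000 psi and cumulative influx We_prev = 1e8, the depleted
# aquifer pressure is aq_pres = 4000 * (1 - 1e8 / 1e9) = 3600 psi.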
def mbal_setup(dict):
df_prod = dict['df_prod']
dict_tank = dict['dict_tank']
dict_pvtmaster = dict['dict_pvtmaster']
df_pvt_gas = dict['df_pvt_gas']
df_pvt_oil = dict['df_pvt_oil']
dates = df_prod['datestamp']
ts = pd.to_numeric(dates - dates.min()) / 864e11
Np = df_prod['np']
Gp = df_prod['gp']
Wp = df_prod['wp']
N = float(dict_tank['initial_inplace'])
Swi = float(dict_tank['swi'])
cw = float(dict_tank['cw'])
cf = float(dict_tank['cf'])
m = float(dict_tank['initial_gascap'])
Winj = df_prod['wi']
Winj = Winj.fillna(0)
Ginj = df_prod['gi']
Ginj = Ginj.fillna(0)
#####General PVT
Rsi = dict_pvtmaster['gor'] # scf/stb
try:
aq_type = dict_tank['aq_type']
except:
aq_type = "Fetkovich"
VEH_aq_type = ""
r = ""
rr = ""
td_array = ""
VEH_dp_array = ""
if aq_type == 'Fetkovich':
Wei = float(dict_tank['wei'])
J = float(dict_tank['J'])
if aq_type == 'VEH':
VEH_dp_array = [None] * len(Np)
VEH_aq_type = dict_tank['VEH_aq_type']
r = dict_tank['r']
rr = dict_tank['rr']
td_array = mb.VEH_td(VEH_aq_type, dict_tank['k'], ts, dict_tank['poro'], dict_tank['visc'], dict_tank['ct'], rr,
dict_tank['La'])
else:
Wei = float(dict_tank['wei'])
J = float(dict_tank['J'])
Pi = float(dict_tank['initial_pressure'])
Boi = dict_tank['Boi']
Bgi = dict_tank['Bgi']
Rsb = dict_pvtmaster['gor']
Bti = mb.formation_total_volume_factor(Boi, Bgi, Rsb, Rsi)
#####Water PVT
Bw = 1.0 # dict_tank['Bw']
Bwinj = 1.0
#####Oil PVT
pvt_oil_pressure = df_pvt_oil['pressure']
pvt_oil_Bo = df_pvt_oil['oil_fvf']
pvt_oil_Rs = df_pvt_oil['solution_gas']
#####Gas PVT
pvt_gas_pressure = df_pvt_gas['pressure']
pvt_gas_Bg = df_pvt_gas['gas_fvf']
pvt_gas_Bg = pvt_gas_Bg / 1000
arr = np.array(pvt_oil_pressure)
interpol = lambda P: np.interp(P, pvt_gas_pressure, pvt_gas_Bg)
pvt_oil_Bg = interpol(arr)
aquifer_pres = [None] * len(Np)
return Np, Wp, Gp, N, Wei, pvt_oil_pressure, pvt_oil_Bo, pvt_oil_Bg, pvt_oil_Rs, Rsb, \
Bti, Bgi, Pi, m, Boi, cw, Swi, cf, Rsi, Bw, Winj, Bwinj, Ginj, J, \
ts, VEH_aq_type, td_array, VEH_dp_array, r, rr, aq_type
def eval_mbal_input(dict):
Pres_calc = []
df_prod = dict['df_prod']
Np = df_prod['np']
We = [None] * len(Np)
aquifer_pres = [None] * len(Np)
dict_tank = dict['dict_tank']
Pi = float(dict_tank['initial_pressure'])
for x in range(len(Np)):
if x == 0:
aquifer_pres[x] = Pi
We[x] = 0.0
Pres_calc.append(Pi)
else:
data = (dict, Pres_calc, We, aquifer_pres, x)
Pres_calc.append(pressure_calculation(data, Pres_calc)[0])
dict['Pres_calc'] = Pres_calc
solution_set = drive_indices(dict)
return solution_set
def drive_indices(dict):
solution_set = pd.DataFrame()
DDI = []
SDI = []
WDI = []
CDI = []
Pres_calc = dict['Pres_calc']
Ncalc_array = []
df_prod = dict['df_prod']
Np = df_prod['np']
We = [None] * len(Np)
aquifer_pres = [None] * len(Np)
Wp = df_prod['wp']
dict_tank = dict['dict_tank']
Pi = float(dict_tank['initial_pressure'])
N = float(dict_tank['initial_inplace'])
Boi = dict_tank['Boi']
Bgi = dict_tank['Bgi']
dates = df_prod['datestamp']
ts = pd.to_numeric(dates - dates.min()) / 864e11
for x in range(len(Np)):
if x == 0:
We[x] = 0.0
aquifer_pres[x] = Pi
DDI.append(0)
SDI.append(0)
WDI.append(0)
CDI.append(0)
Ncalc_array.append(N)
else:
F, Eo, m, Eg, Efw, We, aquifer_pres, Bw, Bti, N = mbal_inner_calc(dict, Pres_calc[x], Pres_calc, We, aquifer_pres, x)
Ncalc = mb.oil_in_place(F, Eo, m, Eg, Efw, We[x], Bw, Bti)
DDI.append(Ncalc * Eo / F)
SDI.append(Ncalc * m * Eg * (Boi / Bgi) / F)
WDI.append((We[x] * Bw - Wp[x] * Bw) / F)
CDI.append(Ncalc * (1 + m) * Efw * Boi / F)
Ncalc_array.append(Ncalc)
solution_set['ts'] = ts
solution_set['pres_calc'] = Pres_calc
solution_set['ddi'] = DDI
solution_set['sdi'] = SDI
solution_set['wdi'] = WDI
solution_set['cdi'] = CDI
solution_set['we'] = We
solution_set['aquifer_pres'] = aquifer_pres
solution_set['oip'] = Ncalc_array
return solution_set
|
1623649
|
import ast
import csv
import io
import json
import logging
import os
from datetime import datetime
from typing import Dict, Any
import xmind
from dateutil.parser import parse as parse_datetime_str
from jinja2 import Template
from .checking import SUPPORTED_IDS
from .result import QueryStatus
from .sites import MaigretDatabase
from .utils import is_country_tag, CaseConverter, enrich_link_str
SUPPORTED_JSON_REPORT_FORMATS = [
"simple",
"ndjson",
]
"""
UTILS
"""
def filter_supposed_data(data):
# interesting fields
allowed_fields = ["fullname", "gender", "location", "age"]
filtered_supposed_data = {
CaseConverter.snake_to_title(k): v[0]
for k, v in data.items()
if k in allowed_fields
}
return filtered_supposed_data
def sort_report_by_data_points(results):
return dict(
sorted(
results.items(),
key=lambda x: len(
(x[1].get('status') and x[1]['status'].ids_data or {}).keys()
),
reverse=True,
)
)
"""
REPORTS SAVING
"""
def save_csv_report(filename: str, username: str, results: dict):
with open(filename, "w", newline="", encoding="utf-8") as f:
generate_csv_report(username, results, f)
def save_txt_report(filename: str, username: str, results: dict):
with open(filename, "w", encoding="utf-8") as f:
generate_txt_report(username, results, f)
def save_html_report(filename: str, context: dict):
template, _ = generate_report_template(is_pdf=False)
filled_template = template.render(**context)
with open(filename, "w") as f:
f.write(filled_template)
def save_pdf_report(filename: str, context: dict):
template, css = generate_report_template(is_pdf=True)
filled_template = template.render(**context)
# moved here to speed up the launch of Maigret
from xhtml2pdf import pisa
with open(filename, "w+b") as f:
pisa.pisaDocument(io.StringIO(filled_template), dest=f, default_css=css)
def save_json_report(filename: str, username: str, results: dict, report_type: str):
with open(filename, "w", encoding="utf-8") as f:
generate_json_report(username, results, f, report_type=report_type)
class MaigretGraph:
other_params = {'size': 10, 'group': 3}
site_params = {'size': 15, 'group': 2}
username_params = {'size': 20, 'group': 1}
def __init__(self, graph):
self.G = graph
def add_node(self, key, value):
node_name = f'{key}: {value}'
params = self.other_params
if key in SUPPORTED_IDS:
params = self.username_params
elif value.startswith('http'):
params = self.site_params
self.G.add_node(node_name, title=node_name, **params)
if value != value.lower():
normalized_node_name = self.add_node(key, value.lower())
self.link(node_name, normalized_node_name)
return node_name
def link(self, node1_name, node2_name):
self.G.add_edge(node1_name, node2_name, weight=2)
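# A minimal usage sketch (kept commented; networkx is imported lazily below):
# import networkx as nx
# graph = MaigretGraph(nx.Graph())
# user_node = graph.add_node('username', 'Alice')  # also adds/links 'alice'
# site_node = graph.add_node('site', 'https://example.com/alice')
# graph.link(user_node, site_node)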
def save_graph_report(filename: str, username_results: list, db: MaigretDatabase):
# moved here to speed up the launch of Maigret
import networkx as nx
G = nx.Graph()
graph = MaigretGraph(G)
for username, id_type, results in username_results:
username_node_name = graph.add_node(id_type, username)
for website_name in results:
dictionary = results[website_name]
# TODO: fix no site data issue
if not dictionary:
continue
if dictionary.get("is_similar"):
continue
status = dictionary.get("status")
if not status: # FIXME: currently in case of timeout
continue
if dictionary["status"].status != QueryStatus.CLAIMED:
continue
site_fallback_name = dictionary.get(
'url_user', f'{website_name}: {username.lower()}'
)
# site_node_name = dictionary.get('url_user', f'{website_name}: {username.lower()}')
site_node_name = graph.add_node('site', site_fallback_name)
graph.link(username_node_name, site_node_name)
def process_ids(parent_node, ids):
for k, v in ids.items():
if k.endswith('_count') or k.startswith('is_') or k.endswith('_at'):
continue
            if k == 'image':
continue
v_data = v
if v.startswith('['):
try:
v_data = ast.literal_eval(v)
except Exception as e:
logging.error(e)
# value is a list
if isinstance(v_data, list):
list_node_name = graph.add_node(k, site_fallback_name)
for vv in v_data:
data_node_name = graph.add_node(vv, site_fallback_name)
graph.link(list_node_name, data_node_name)
add_ids = {
a: b for b, a in db.extract_ids_from_url(vv).items()
}
if add_ids:
process_ids(data_node_name, add_ids)
else:
# value is just a string
# ids_data_name = f'{k}: {v}'
# if ids_data_name == parent_node:
# continue
ids_data_name = graph.add_node(k, v)
# G.add_node(ids_data_name, size=10, title=ids_data_name, group=3)
graph.link(parent_node, ids_data_name)
# check for username
if 'username' in k or k in SUPPORTED_IDS:
new_username_node_name = graph.add_node('username', v)
graph.link(ids_data_name, new_username_node_name)
add_ids = {k: v for v, k in db.extract_ids_from_url(v).items()}
if add_ids:
process_ids(ids_data_name, add_ids)
if status.ids_data:
process_ids(site_node_name, status.ids_data)
nodes_to_remove = []
for node in G.nodes:
if len(str(node)) > 100:
nodes_to_remove.append(node)
[G.remove_node(node) for node in nodes_to_remove]
# moved here to speed up the launch of Maigret
from pyvis.network import Network
nt = Network(notebook=True, height="750px", width="100%")
nt.from_nx(G)
nt.show(filename)
def get_plaintext_report(context: dict) -> str:
output = (context['brief'] + " ").replace('. ', '.\n')
interests = list(map(lambda x: x[0], context.get('interests_tuple_list', [])))
countries = list(map(lambda x: x[0], context.get('countries_tuple_list', [])))
if countries:
output += f'Countries: {", ".join(countries)}\n'
if interests:
output += f'Interests (tags): {", ".join(interests)}\n'
return output.strip()
"""
REPORTS GENERATING
"""
def generate_report_template(is_pdf: bool):
"""
HTML/PDF template generation
"""
    def get_resource_content(filename):
        # read bundled resource files, closing the handle promptly
        with open(os.path.join(maigret_path, "resources", filename)) as f:
            return f.read()
maigret_path = os.path.dirname(os.path.realpath(__file__))
if is_pdf:
template_content = get_resource_content("simple_report_pdf.tpl")
css_content = get_resource_content("simple_report_pdf.css")
else:
template_content = get_resource_content("simple_report.tpl")
css_content = None
template = Template(template_content)
template.globals["title"] = CaseConverter.snake_to_title # type: ignore
template.globals["detect_link"] = enrich_link_str # type: ignore
return template, css_content
def generate_report_context(username_results: list):
brief_text = []
usernames = {}
extended_info_count = 0
tags: Dict[str, int] = {}
supposed_data: Dict[str, Any] = {}
first_seen = None
# moved here to speed up the launch of Maigret
import pycountry
for username, id_type, results in username_results:
found_accounts = 0
new_ids = []
usernames[username] = {"type": id_type}
for website_name in results:
dictionary = results[website_name]
# TODO: fix no site data issue
if not dictionary:
continue
if dictionary.get("is_similar"):
continue
status = dictionary.get("status")
if not status: # FIXME: currently in case of timeout
continue
if status.ids_data:
dictionary["ids_data"] = status.ids_data
extended_info_count += 1
# detect first seen
created_at = status.ids_data.get("created_at")
if created_at:
if first_seen is None:
first_seen = created_at
else:
try:
known_time = parse_datetime_str(first_seen)
new_time = parse_datetime_str(created_at)
if new_time < known_time:
first_seen = created_at
except Exception as e:
logging.debug(
"Problems with converting datetime %s/%s: %s",
first_seen,
created_at,
str(e),
)
for k, v in status.ids_data.items():
# suppose target data
field = "fullname" if k == "name" else k
if field not in supposed_data:
supposed_data[field] = []
supposed_data[field].append(v)
# suppose country
if k in ["country", "locale"]:
try:
if is_country_tag(k):
tag = pycountry.countries.get(alpha_2=v).alpha_2.lower()
else:
tag = pycountry.countries.search_fuzzy(v)[
0
].alpha_2.lower()
# TODO: move countries to another struct
tags[tag] = tags.get(tag, 0) + 1
except Exception as e:
logging.debug(
"Pycountry exception: %s", str(e), exc_info=True
)
new_usernames = dictionary.get("ids_usernames")
if new_usernames:
for u, utype in new_usernames.items():
if u not in usernames:
new_ids.append((u, utype))
usernames[u] = {"type": utype}
if status.status == QueryStatus.CLAIMED:
found_accounts += 1
dictionary["found"] = True
else:
continue
# ignore non-exact search results
if status.tags:
for t in status.tags:
tags[t] = tags.get(t, 0) + 1
brief_text.append(
f"Search by {id_type} {username} returned {found_accounts} accounts."
)
if new_ids:
ids_list = []
for u, t in new_ids:
ids_list.append(f"{u} ({t})" if t != "username" else u)
brief_text.append("Found target's other IDs: " + ", ".join(ids_list) + ".")
brief_text.append(f"Extended info extracted from {extended_info_count} accounts.")
brief = " ".join(brief_text).strip()
tuple_sort = lambda d: sorted(d, key=lambda x: x[1], reverse=True)
if "global" in tags:
# remove tag 'global' useless for country detection
del tags["global"]
first_username = username_results[0][0]
countries_lists = list(filter(lambda x: is_country_tag(x[0]), tags.items()))
interests_list = list(filter(lambda x: not is_country_tag(x[0]), tags.items()))
filtered_supposed_data = filter_supposed_data(supposed_data)
return {
"username": first_username,
# TODO: return brief list
"brief": brief,
"results": username_results,
"first_seen": first_seen,
"interests_tuple_list": tuple_sort(interests_list),
"countries_tuple_list": tuple_sort(countries_lists),
"supposed_data": filtered_supposed_data,
"generated_at": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
}
def generate_csv_report(username: str, results: dict, csvfile):
writer = csv.writer(csvfile)
writer.writerow(
["username", "name", "url_main", "url_user", "exists", "http_status"]
)
for site in results:
# TODO: fix the reason
status = 'Unknown'
if "status" in results[site]:
status = str(results[site]["status"].status)
writer.writerow(
[
username,
site,
results[site].get("url_main", ""),
results[site].get("url_user", ""),
status,
results[site].get("http_status", 0),
]
)
def generate_txt_report(username: str, results: dict, file):
exists_counter = 0
for website_name in results:
dictionary = results[website_name]
# TODO: fix no site data issue
if not dictionary:
continue
if (
dictionary.get("status")
and dictionary["status"].status == QueryStatus.CLAIMED
):
exists_counter += 1
file.write(dictionary["url_user"] + "\n")
file.write(f"Total Websites Username Detected On : {exists_counter}")
def generate_json_report(username: str, results: dict, file, report_type):
is_report_per_line = report_type.startswith("ndjson")
all_json = {}
for sitename in results:
site_result = results[sitename]
# TODO: fix no site data issue
if not site_result or not site_result.get("status"):
continue
if site_result["status"].status != QueryStatus.CLAIMED:
continue
data = dict(site_result)
data["status"] = data["status"].json()
data["site"] = data["site"].json
for field in ["future", "checker"]:
if field in data:
del data[field]
if is_report_per_line:
data["sitename"] = sitename
file.write(json.dumps(data) + "\n")
else:
all_json[sitename] = data
if not is_report_per_line:
file.write(json.dumps(all_json))
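# Hypothetical usage sketch (filenames illustrative): report_type switches
# between a single JSON document and newline-delimited JSON.
#
#     with open("report.json", "w") as f:
#         generate_json_report("alice", results, f, report_type="json")
#     with open("report.ndjson", "w") as f:
#         generate_json_report("alice", results, f, report_type="ndjson")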
"""
XMIND 8 Functions
"""
def save_xmind_report(filename, username, results):
if os.path.exists(filename):
os.remove(filename)
workbook = xmind.load(filename)
sheet = workbook.getPrimarySheet()
design_xmind_sheet(sheet, username, results)
xmind.save(workbook, path=filename)
def add_xmind_subtopic(userlink, k, v, supposed_data):
currentsublabel = userlink.addSubTopic()
field = "fullname" if k == "name" else k
if field not in supposed_data:
supposed_data[field] = []
supposed_data[field].append(v)
currentsublabel.setTitle("%s: %s" % (k, v))
def design_xmind_sheet(sheet, username, results):
alltags = {}
supposed_data = {}
sheet.setTitle("%s Analysis" % (username))
root_topic1 = sheet.getRootTopic()
root_topic1.setTitle("%s" % (username))
undefinedsection = root_topic1.addSubTopic()
undefinedsection.setTitle("Undefined")
alltags["undefined"] = undefinedsection
for website_name in results:
dictionary = results[website_name]
if not dictionary:
continue
result_status = dictionary.get("status")
# TODO: fix the reason
if not result_status or result_status.status != QueryStatus.CLAIMED:
continue
stripped_tags = list(map(lambda x: x.strip(), result_status.tags))
normalized_tags = list(
filter(lambda x: x and not is_country_tag(x), stripped_tags)
)
category = None
for tag in normalized_tags:
if tag in alltags.keys():
continue
tagsection = root_topic1.addSubTopic()
tagsection.setTitle(tag)
alltags[tag] = tagsection
category = tag
section = alltags[category] if category else undefinedsection
userlink = section.addSubTopic()
userlink.addLabel(result_status.site_url_user)
ids_data = result_status.ids_data or {}
for k, v in ids_data.items():
# suppose target data
if isinstance(v, list):
for currentval in v:
add_xmind_subtopic(userlink, k, currentval, supposed_data)
else:
add_xmind_subtopic(userlink, k, v, supposed_data)
# add supposed data
filtered_supposed_data = filter_supposed_data(supposed_data)
if len(filtered_supposed_data) > 0:
undefinedsection = root_topic1.addSubTopic()
undefinedsection.setTitle("SUPPOSED DATA")
for k, v in filtered_supposed_data.items():
currentsublabel = undefinedsection.addSubTopic()
currentsublabel.setTitle("%s: %s" % (k, v))
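# Hypothetical usage sketch: save_xmind_report("alice.xmind", "alice", results)
# rebuilds the mind map from scratch, removing any existing file of that name.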
|
1623684
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("dump")
process.load("L1TriggerConfig.RPCTriggerConfig.L1RPCHsbConfig_cff")
process.l1RPCHsbConfig.hsb0Mask=cms.vint32(0, 1, 2, 3, 0, 1, 2, 3)
process.l1RPCHsbConfig.hsb1Mask=cms.vint32(1, 2, 3, 0, 1, 2, 3, 0)
#useGlobalTag = 'IDEAL_31X'
#process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
#process.GlobalTag.globaltag = useGlobalTag + '::All'
process.load('FWCore.MessageService.MessageLogger_cfi')
process.MessageLogger.debugModules = ['*']
process.MessageLogger.cout = cms.untracked.PSet(
threshold=cms.untracked.string('DEBUG'),
#threshold = cms.untracked.string('INFO'),
#threshold = cms.untracked.string('ERROR'),
DEBUG=cms.untracked.PSet(
limit=cms.untracked.int32(-1)
),
INFO=cms.untracked.PSet(
limit=cms.untracked.int32(-1)
),
WARNING=cms.untracked.PSet(
limit=cms.untracked.int32(-1)
),
ERROR=cms.untracked.PSet(
limit=cms.untracked.int32(-1)
),
default = cms.untracked.PSet(
limit=cms.untracked.int32(-1)
)
)
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.write = cms.EDAnalyzer("DumpL1RPCHsbConfig")
process.p1 = cms.Path(process.write)
|
1623715
|
from scipy import interpolate
from pylab import *
from skimage import color
Rg, Gg, Bg = (207., 40., 57.)
texture_input = 'texture1.jpg'
def get_boundary_points(x, y):
tck, u = interpolate.splprep([x, y], s=0, per=1)
unew = np.linspace(u.min(), u.max(), 1000)
xnew, ynew = interpolate.splev(unew, tck, der=0)
tup = c_[xnew.astype(int), ynew.astype(int)].tolist()
coord = list(set(tuple(map(tuple, tup))))
coord = np.array([list(elem) for elem in coord])
return np.array(coord[:, 0], dtype=np.int32), np.array(coord[:, 1], dtype=np.int32)
def get_interior_points(x, y):
nailx = []
naily = []
def ext(a, b, i):
a, b = round(a), round(b)
nailx.extend(arange(a, b, 1).tolist())
naily.extend((ones(b - a) * i).tolist())
x, y = np.array(x), np.array(y)
xmin, xmax = amin(x), amax(x)
xrang = np.arange(xmin, xmax + 1, 1)
for i in xrang:
ylist = y[where(x == i)]
ext(amin(ylist), amax(ylist), i)
return np.array(nailx, dtype=np.int32), np.array(naily, dtype=np.int32)
im = imread('nail_inp.jpg')
text = imread(texture_input)
def apply_nail_polish(x, y, r=Rg, g=Gg, b=Bg):
val = color.rgb2lab((im[x, y] / 255.).reshape(len(x), 1, 3)).reshape(len(x), 3)
L, A, B = mean(val[:, 0]), mean(val[:, 1]), mean(val[:, 2])
L1, A1, B1 = color.rgb2lab(np.array((r / 255., g / 255., b / 255.)).reshape(1, 1, 3)).reshape(3, )
ll, aa, bb = L1 - L, A1 - A, B1 - B
val[:, 0] = np.clip(val[:, 0] + ll, 0, 100)
val[:, 1] = np.clip(val[:, 1] + aa, -127, 128)
val[:, 2] = np.clip(val[:, 2] + bb, -127, 128)
im[x, y] = color.lab2rgb(val.reshape(len(x), 1, 3)).reshape(len(x), 3) * 255
def apply_texture(x, y):
xmin, ymin = amin(x), amin(y)
X = (x - xmin).astype(int)
Y = (y - ymin).astype(int)
val1 = color.rgb2lab((text[X, Y] / 255.).reshape(len(X), 1, 3)).reshape(len(X), 3)
val2 = color.rgb2lab((im[x, y] / 255.).reshape(len(x), 1, 3)).reshape(len(x), 3)
L, A, B = mean(val2[:, 0]), mean(val2[:, 1]), mean(val2[:, 2])
val2[:, 0] = np.clip(val2[:, 0] - L + val1[:, 0], 0, 100)
val2[:, 1] = np.clip(val2[:, 1] - A + val1[:, 1], -127, 128)
val2[:, 2] = np.clip(val2[:, 2] - B + val1[:, 2], -127, 128)
im[x, y] = color.lab2rgb(val2.reshape(len(x), 1, 3)).reshape(len(x), 3) * 255
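# Note on the two colour-transfer functions above (inferred from the code, not
# author prose): both work in CIELAB space. apply_nail_polish shifts every
# pixel in the region by the difference between the target colour and the
# region's mean L/a/b, so the nail's shading survives the recolouring.
# apply_texture takes the texture's L/a/b values and adds back each pixel's
# deviation from the nail's mean, so the nail's own shading modulates the
# pasted texture.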
points = np.loadtxt('nailpoint')
# each nail is outlined by 12 landmark points; the final slice takes the rest
for sl in (slice(0, 12), slice(12, 24), slice(24, 36), slice(36, None)):
    x, y = points[sl, 0], points[sl, 1]
    x, y = get_boundary_points(x, y)
    x, y = get_interior_points(x, y)
    # apply_nail_polish(x, y)
    apply_texture(x, y)
figure()
imshow(im)
imsave('output_texture.jpg', im)
show()
|
1623820
|
import logging
import re
from django.apps import apps
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.contrib.admin import AdminSite
from django.contrib.admin.models import LogEntry
from django.contrib.admin.views.main import ChangeList
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError, FieldDoesNotExist
from django.db import connection
from django.db.models.functions import Lower
from django.db.utils import OperationalError
from django.forms import modelform_factory
from django.utils.html import mark_safe, format_html
from django.views.decorators.cache import never_cache
from import_export.admin import ExportMixin
from social_django.models import Association, Nonce, UserSocialAuth
from taggit.models import Tag
from taggit.apps import TaggitAppConfig
from collaborative.export import collaborative_modelresource_factory
from collaborative.filters import TagListFilter
from django_models_from_csv.admin import AdminAutoRegistration
from django_models_from_csv.forms import create_taggable_form
from django_models_from_csv.models import DynamicModel, CredentialStore
logger = logging.getLogger(__name__)
class NewUserAdmin(UserAdmin):
list_display = ("username", "email", "first_name", "last_name")
add_form_template = 'admin/auth/user/add_form.html'
def add_view(self, request, *args, **kwargs):
if request.method != "POST":
return super().add_view(request, *args, **kwargs)
password1 = request.POST.get("password1")
password2 = request.POST.get("password2")
if not password1 and not password2:
newpass = User.objects.make_random_password(length=32)
request.POST._mutable = True
request.POST["password1"] = <PASSWORD>
request.POST["password2"] = <PASSWORD>
request.POST._mutable = False
return super().add_view(request, *args, **kwargs)
def widget_for_object_field(obj, field_name):
FieldForm = modelform_factory(
obj.source_dynmodel().get_model(),
fields=(field_name,)
)
widget = FieldForm().fields[field_name].widget
return widget
def make_getter(rel_name, attr_name, getter_name, field=None):
"""
Build a reverse lookup getter, to be attached to the custom
dynamic lookup admin class.
"""
def getter(self):
if not hasattr(self, rel_name):
return None
rel = getattr(self, rel_name).first()
if not rel:
return None
fieldname = "%s__%s" % (rel_name, attr_name)
content_type_id = ContentType.objects.get_for_model(self).id
# handle tagging separately
if attr_name == "tags":
all_tags = rel.tags.all()
tags_html = []
for t in all_tags:
name = t.name
html = (
"<span class='tag-bubble'>"
"<span class='remtag'>x</span>"
"%s</span>"
) % (name)
tags_html.append(html)
return mark_safe(format_html(
"".join(tags_html)
))
# try to lookup choices for field
choices = getattr(
rel, "%s_CHOICES" % attr_name.upper(), []
)
value = getattr(rel, attr_name)
for pk, txt in choices:
if pk == value:
widget = widget_for_object_field(rel, attr_name)
html = widget.render(fieldname, value)
return mark_safe(format_html(
"<span content_type_id='{}' class='inline-editable'>{}</span>",
content_type_id,
html,
))
# no choice found, return field value
widget = widget_for_object_field(rel, attr_name)
html = widget.render(fieldname, value)
return mark_safe(format_html(
"<span content_type_id='{}' class='inline-editable'>{}</span>",
content_type_id,
html,
))
# the header in django admin is named after the function name. if
# this line is removed, the header will be "GETTER" for all derived
# reverse lookup columns
getter.__name__ = getter_name
return getter
class ReimportMixin(ExportMixin):
"""
Mixin for displaying re-import button on admin list view, alongside the
export button (from import_export module).
"""
change_list_template = 'django_models_from_csv/change_list_dynmodel.html'
class CaseInsensitiveChangeList(ChangeList):
"""
Provides case-insensitive ordering for admin list view.
"""
def get_ordering(self, request, queryset):
ordering = super().get_ordering(request, queryset)
for i in range(len(ordering)):
desc = False
fieldname = ordering[i]
if fieldname.startswith("-"):
fieldname = fieldname[1:]
desc = True
try:
field = queryset.model()._meta.get_field(
"id" if fieldname == "pk" else fieldname
)
except FieldDoesNotExist:
continue
f_type = field.db_type(connection)
if f_type != "text":
continue
if desc:
ordering[i] = Lower(fieldname).desc()
else:
ordering[i] = Lower(fieldname)
return ordering
class ReverseFKAdmin(admin.ModelAdmin):
def __init__(self, *args, **kwargs):
"""
Build relations lookup methods, like metadata__status, but
for the reverse foreignkey direction.
"""
super().__init__(*args, **kwargs)
Model, site = args
if "DynamicModel" == Model._meta.object_name:
return
# setup reverse related attr getters so we can do things like
# metadata__status in the reverse direction
for rel in Model._meta.related_objects:
rel_name = rel.get_accessor_name() # "metadata", etc, related_name
rel_model = rel.related_model
if not rel_model:
logger.warning("No related model found!")
continue
for rel_field in rel_model._meta.get_fields():
# build a getter for this relation attribute
attr_name = rel_field.name
                # remove auto fields and other fields of that nature. we
                # only want the directly accessible fields of this model
if attr_name != "tags":
if rel_field.is_relation: continue
if not hasattr(rel_field, "auto_created"): continue
if rel_field.auto_created: continue
getter_name = "%s_%s" % (rel_name, attr_name)
short_desc = re.sub(r"[\-_]+", " ", attr_name).replace(
"assignee", "assigned to"
)
getter = make_getter(
rel_name, attr_name, getter_name, field=rel_field
)
setattr(self, getter_name, getter)
getattr(self, getter_name).short_description = short_desc
getattr(
self, getter_name
).admin_order_field = "%s__%s" % (rel_name, attr_name)
def get_view_label(self, obj):
return "View"
get_view_label.short_description = 'Records'
def get_changelist(self, request, **kwargs):
# This controls how the admin list view works. Override the
# ChangeList to modify ordering, template, etc
return CaseInsensitiveChangeList
class DynamicModelAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return DynamicModel.objects.exclude(name__icontains="metadata")
def get_full_deletion_set(self, queryset, only_meta=False):
"""
This is called when a user selects some dynamic models to be
deleted. Since the admin queryset only displays the main models,
not the metadata models, each item in the queryset can be
assumed to be a primary data source model. Here, we want to
also add the corresponding meta models.
"""
pks = []
for model in queryset:
name = model.name
meta = "%smetadata" % (name)
contact_meta = "%scontactmetadata" % (name)
names = (meta, contact_meta)
if not only_meta:
names = (name, meta, contact_meta)
for dynmodel in DynamicModel.objects.filter(name__in=names):
pks.append(dynmodel.pk)
# order this by descending id, since the original model gets
# created first, and we need to delete the reverse fk attached
# models first to avoid a cascade
return DynamicModel.objects.filter(
pk__in=pks
).order_by("-id")
def get_deleted_objects(self, queryset, request):
extended_queryset = self.get_full_deletion_set(queryset)
return super().get_deleted_objects(extended_queryset, request)
def delete_queryset(self, request, queryset):
# for model in queryset:
# for model in self.get_full_deletion_set(queryset):
for model in queryset:
Model = model.get_model()
model_qs = DynamicModel.objects.filter(pk=model.pk)
# wipe all relations, by truncating table
for related in self.get_full_deletion_set(model_qs, only_meta=True):
RelatedModel = related.get_model()
for obj in RelatedModel.objects.all():
obj.delete()
model.delete()
            # NOTE: we have to delete these *after* we wipe the original.
            # otherwise django throws all kinds of errors or will gracefully
            # succeed but throw errors later during normal admin operation
for metamodel in self.get_full_deletion_set(model_qs, only_meta=True):
metamodel.delete()
class AdminMetaAutoRegistration(AdminAutoRegistration):
def should_register_admin(self, Model):
# metadata models get admin created along with the base model
name = Model._meta.object_name
if name.endswith("metadata"):
return False
return super().should_register_admin(Model)
def create_dynmodel_admin(self, Model):
name = Model._meta.object_name
inheritance = (DynamicModelAdmin,)
return type("%sAdmin" % name, inheritance, {})
def create_admin(self, Model):
name = Model._meta.object_name
if "metadata" in name:
return
if name == "DynamicModel":
return self.create_dynmodel_admin(Model)
meta = []
# find the Metadata model corresponding to the
# csv-backed model we're creating admin for.
# this will end up as an inline admin
for MetaModel in apps.get_models():
meta_name = MetaModel._meta.object_name
            # all our additional related models are in this pattern:
# [model-name][contact|]metadata
if not meta_name.startswith(name) or \
not meta_name.endswith("metadata"):
continue
dynmodel_meta = MetaModel.source_dynmodel(MetaModel)
# for contact log, always show a blank one for easy access
extra = 0
if meta_name.endswith("contactmetadata"):
extra = 1
meta_attrs = {
"model": MetaModel,
"extra": extra,
}
if not meta_name.endswith("contactmetadata"):
fields_meta = self.get_fields(MetaModel, dynmodel=dynmodel_meta)
try:
form_meta = create_taggable_form(MetaModel, fields=fields_meta)
meta_attrs["form"] = form_meta
# no tags on this model
except FieldError:
pass
MetaModelInline = type(
"%sInlineAdmin" % meta_name,
(admin.StackedInline,), meta_attrs)
meta.append(MetaModelInline)
# get searchable and filterable (from column attributes)
# should we order by something? number of results?
try:
model_desc = DynamicModel.objects.get(name=name)
except OperationalError:
return None
except DynamicModel.DoesNotExist:
logger.warning("Model with name: %s doesn't exist. Skipping" % name)
# return super().create_admin(Model)
return None
cols = list(reversed(model_desc.columns))
searchable = [c.get("name") for c in cols if c.get("searchable")]
filterable = [c.get("name") for c in cols if c.get("filterable")]
# Build our CSV-backed admin, attaching inline meta model
dynmodel = Model.source_dynmodel(Model)
fields = self.get_fields(Model, dynmodel=dynmodel)
associated_fields = ["get_view_label"]
if name != "DynamicModel":
test_item = Model.objects.first()
if test_item and hasattr(test_item, "metadata"):
associated_fields.append("metadata_status")
filterable.append("metadata__status")
test_metadata = test_item.metadata.first()
if hasattr(test_metadata, "assigned_to"):
associated_fields.append("metadata_assigned_to")
filterable.append("metadata__assigned_to")
elif hasattr(test_metadata, "assignee"):
associated_fields.append("metadata_assignee")
filterable.append("metadata__assignee")
if test_metadata and hasattr(test_metadata, "tags"):
associated_fields.append("metadata_tags")
filterable.append(TagListFilter)
list_display = associated_fields + fields[:5]
exporter = collaborative_modelresource_factory(
model=Model,
)
# Note that ExportMixin needs to be declared before ReverseFKAdmin
inheritance = (ReimportMixin, ReverseFKAdmin,)
return type("%sAdmin" % name, inheritance, {
"inlines": meta,
"readonly_fields": fields,
"list_display": list_display,
"search_fields": searchable,
"list_filter": filterable,
"resource_class": exporter,
})
# Hide "taggit" name
TaggitAppConfig.verbose_name = "Tagging"
# Remove tagged item inline
class TagAdmin(admin.ModelAdmin):
list_display = ["name", "slug"]
ordering = ["name", "slug"]
search_fields = ["name"]
prepopulated_fields = {"slug": ["name"]}
class Meta:
verbose_name = "Tags"
verbose_name_plural = "Tags"
app_label = "Tags"
@never_cache
def login(*args, **kwargs):
"""
Override login view to hide Google Sign In button if no
OAuth credentials added.
"""
extra_context = kwargs.get("extra_context", {})
have_oauth_creds = CredentialStore.objects.filter(
name="google_oauth_credentials"
).count()
extra_context["google_oauth_credentials"] = have_oauth_creds > 0
if "first_login" in extra_context:
extra_context["first_login"] = False
kwargs["extra_context"] = extra_context
return AdminSite().login(*args, **kwargs)
admin.site.login = login
admin.site.site_header = "Collaborate"
admin.site.index_title = "Welcome"
admin.site.site_title = "Collaborate"
# Remove the "view site" link from the admin header
admin.site.site_url = None
# unregister django social auth from admin
admin.site.unregister(Association)
admin.site.unregister(UserSocialAuth)
admin.site.unregister(Nonce)
admin.site.unregister(User)
admin.site.unregister(Tag)
admin.site.register(Tag, TagAdmin)
admin.site.register(LogEntry)
admin.site.register(User, NewUserAdmin)
def register_dynamic_admins(*args, **kwargs):
AdminMetaAutoRegistration(include="django_models_from_csv.models").register()
# Register the ones that exist ...
register_dynamic_admins()
# ... and register new ones that get created. Otherwise, we'd
# have to actually restart the Django process post-model create
if register_dynamic_admins not in DynamicModel._POST_SAVE_SIGNALS:
DynamicModel._POST_SAVE_SIGNALS.append(register_dynamic_admins)
|
1623829
|
from turbogears.database import PackageHub
# import some basic SQLObject classes for declaring the data model
# (see http://www.sqlobject.org/SQLObject.html#declaring-the-class)
from sqlobject import SQLObject, SQLObjectNotFound, RelatedJoin
# import some datatypes for table columns from SQLObject
# (see http://www.sqlobject.org/SQLObject.html#column-types for more)
from sqlobject import StringCol, UnicodeCol, IntCol, DateTimeCol
__connection__ = hub = PackageHub('tgpisa')
# your data model
# class YourDataClass(SQLObject):
# pass
|
1623830
|
from typing import Iterator
def func(x: int):
"""Function."""
def gen() -> Iterator[int]:
"""Generator."""
yield 1
class C:
"""Class."""
|
1623862
|
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
import os
import suds
from suds.client import Client
from suds.sax.element import Element
import urllib
import urlparse
import base64
from datetime import date
try:
from pysimplesoap.client import SoapClient
except ImportError:
# Just make anything since it's not being used
class SoapClient(object):
pass
from shipping import Address
SERVICES = [
('03', 'UPS Ground'),
('11', 'UPS Standard'),
('01', 'UPS Next Day'),
('14', 'UPS Next Day AM'),
('13', 'UPS Next Day Air Saver'),
('02', 'UPS 2nd Day'),
('59', 'UPS 2nd Day AM'),
('12', 'UPS 3-day Select'),
('65', 'UPS Saver'),
('07', 'UPS Worldwide Express'),
('08', 'UPS Worldwide Expedited'),
('54', 'UPS Worldwide Express Plus'),
('96', 'UPS Worldwide Express Freight'),
]
PACKAGES = [
('02', 'Custom Packaging'),
('01', 'UPS Letter'),
('03', 'Tube'),
('04', 'PAK'),
('21', 'UPS Express Box'),
('2a', 'Small Express Box'),
('2b', 'Medium Express Box'),
('2c', 'Large Express Box'),
]
LABEL_TYPE = [
('GIF', 'GIF Format'),
('ZPL','Zebra Label Printer Format')
]
class UPSError(Exception):
def __init__(self, fault, document):
self.fault = fault
self.document = document
code = self.document.childAtPath('/detail/Errors/ErrorDetail/PrimaryErrorCode/Code').getText()
text = self.document.childAtPath('/detail/Errors/ErrorDetail/PrimaryErrorCode/Description').getText()
error_text = 'UPS Error %s: %s' % (code, text)
super(UPSError, self).__init__(error_text)
def recurseElement(element, search, replace):
if search == element.qname():
element.rename(replace)
    if not element.isempty():
        for x in element.getChildren():
            recurseElement(x, search, replace)
    return
from suds.plugin import MessagePlugin
class FixRequestNamespacePlug(MessagePlugin):
    # marshalled seems to actually replace properly here, whereas sending does not seem to actually replace properly (bug?)
def marshalled(self, context):
element = context.envelope.getChild('Body')
#context.envelope = context.envelope.replace('ns1:Request>', 'ns0:Request>').replace('ns2:Request>', 'ns1:Request>')
recurseElement(element,'ns1:Request','ns0:Request')
recurseElement(element,'ns2:Request','ns1:Request')
return context
class UPS(object):
def __init__(self, credentials, debug=True):
this_dir = os.path.dirname(os.path.realpath(__file__))
self.wsdl_dir = os.path.join(this_dir, 'wsdl', 'ups')
self.credentials = credentials
self.debug = debug
def _add_security_header(self, client):
security_ns = ('security', 'http://www.ups.com/XMLSchema/XOLTWS/UPSS/v1.0')
security = Element('UPSSecurity', ns=security_ns)
username_token = Element('UsernameToken', ns=security_ns)
username = Element('Username', ns=security_ns).setText(self.credentials['username'])
password = Element('Password', ns=security_ns).setText(self.credentials['password'])
username_token.append(username)
username_token.append(password)
service_token = Element('ServiceAccessToken', ns=security_ns)
license = Element('AccessLicenseNumber', ns=security_ns).setText(self.credentials['access_license'])
service_token.append(license)
security.append(username_token)
security.append(service_token)
client.set_options(soapheaders=security)
def _normalized_country_code(self, country):
country_lookup = {
'usa': 'US',
'united states': 'US',
'canada': 'CA',
}
return country_lookup.get(country.lower(), country)
def wsdlURL(self, wsdl_name):
wsdl_file_path = os.path.join(self.wsdl_dir, wsdl_name)
# Get the os specific url to deal with windows drive letter
wsdl_file_url = urllib.pathname2url(wsdl_file_path)
wsdl_url = urlparse.urljoin('file://', wsdl_file_url)
return wsdl_url
def _get_client(self, wsdl):
wsdl_url = self.wsdlURL(wsdl)
# Setting prefixes=False does not help
return Client(wsdl_url, plugins=[FixRequestNamespacePlug()])
#Loading with ship.wsdl gives this:
#ns0 = "http://www.ups.com/XMLSchema/XOLTWS/Common/v1.0"
#ns1 = "http://www.ups.com/XMLSchema/XOLTWS/Error/v1.1"
#ns2 = "http://www.ups.com/XMLSchema/XOLTWS/IF/v1.0"
#ns3 = "http://www.ups.com/XMLSchema/XOLTWS/Ship/v1.0"
def soapClient(self, wsdl):
wsdl_url = self.wsdlURL(wsdl)
return SoapClient(wsdl=wsdl_url, trace=True)
def _create_shipment(self, client, packages, shipper_address, recipient_address, box_shape, namespace='ns3', create_reference_number=True, can_add_delivery_confirmation=True):
shipment = client.factory.create('{0}:ShipmentType'.format(namespace))
shipper_country = self._normalized_country_code(shipper_address.country)
for i, p in enumerate(packages):
package = client.factory.create('{0}:PackageType'.format(namespace))
if hasattr(package, 'Packaging'):
package.Packaging.Code = box_shape
elif hasattr(package, 'PackagingType'):
package.PackagingType.Code = box_shape
if box_shape == PACKAGES[0][0]:
package.Dimensions.UnitOfMeasurement.Code = 'IN'
if (p.length == 0) or (p.width == 0) or (p.height == 0):
                    raise ValueError("Packaging dimensions are required if the packaging type is custom")
package.Dimensions.Length = p.length
package.Dimensions.Width = p.width
package.Dimensions.Height = p.height
package.PackageWeight.UnitOfMeasurement.Code = 'LBS'
package.PackageWeight.Weight = p.weight
if can_add_delivery_confirmation and p.require_signature:
package.PackageServiceOptions.DeliveryConfirmation.DCISType = str(p.require_signature)
if p.value:
package.PackageServiceOptions.DeclaredValue.CurrencyCode = 'CAD' if (shipper_country=='CA') else 'USD'
package.PackageServiceOptions.DeclaredValue.MonetaryValue = p.value
if create_reference_number and p.reference:
try:
reference_number = client.factory.create('{0}:ReferenceNumberType'.format(namespace))
reference_number.Value = p.reference
package.ReferenceNumber.append(reference_number)
except suds.TypeNotFound as e:
pass
shipment.Package.append(package)
# Fill in Shipper information
shipfrom_name = shipper_address.name[:35]
shipfrom_company = shipper_address.company_name[:35]
shipment.Shipper.Name = shipfrom_company or shipfrom_name
shipment.Shipper.Address.AddressLine = [ shipper_address.address1, shipper_address.address2 ]
shipment.Shipper.Address.City = shipper_address.city[:30]
shipment.Shipper.Address.PostalCode = shipper_address.zip
shipment.Shipper.Address.CountryCode = shipper_country
shipment.Shipper.ShipperNumber = self.credentials['shipper_number']
# Fill in ShipFrom information
shipfrom_name = shipper_address.name[:35]
shipfrom_company = shipper_address.company_name[:35]
shipment.ShipFrom.Name = shipfrom_company or shipfrom_name
shipment.ShipFrom.Address.AddressLine = [ shipper_address.address1, shipper_address.address2 ]
shipment.ShipFrom.Address.City = shipper_address.city[:30]
shipment.ShipFrom.Address.PostalCode = shipper_address.zip
shipment.ShipFrom.Address.CountryCode = shipper_country
# Fill in ShipTo information
shipto_name = recipient_address.name[:35]
shipto_company = recipient_address.company_name[:35]
shipment.ShipTo.Name = shipto_company or shipto_name
shipment.ShipTo.Address.AddressLine = [ recipient_address.address1, recipient_address.address2 ]
shipment.ShipTo.Address.City = recipient_address.city[:30]
shipment.ShipTo.Address.PostalCode = recipient_address.zip
recipient_country = self._normalized_country_code(recipient_address.country)
shipment.ShipTo.Address.CountryCode = recipient_country
        # Only add states if we're shipping to/from US, CA, or Ireland
if shipper_country in ( 'US', 'CA', 'IE' ):
shipment.Shipper.Address.StateProvinceCode = shipper_address.state
shipment.ShipFrom.Address.StateProvinceCode = shipper_address.state
if recipient_country in ( 'US', 'CA', 'IE' ):
shipment.ShipTo.Address.StateProvinceCode = recipient_address.state
if recipient_address.is_residence:
shipment.ShipTo.Address.ResidentialAddressIndicator = ''
return shipment
def rate(self, packages, packaging_type, shipper, recipient):
client = self._get_client('RateWS.wsdl')
self._add_security_header(client)
if not self.debug:
client.set_options(location='https://onlinetools.ups.com/webservices/Rate')
request = client.factory.create('ns0:RequestType')
request.RequestOption = 'Shop'
classification = client.factory.create('ns2:CodeDescriptionType')
classification.Code = '00' # Get rates for the shipper account
shipment = self._create_shipment(client, packages, shipper, recipient, packaging_type, namespace='ns2', create_reference_number=False)
shipment.ShipmentRatingOptions.NegotiatedRatesIndicator = ''
try:
logger.debug(shipment)
self.reply = client.service.ProcessRate(request, CustomerClassification=classification, Shipment=shipment)
logger.debug(self.reply)
service_lookup = dict(SERVICES)
info = list()
for r in self.reply.RatedShipment:
unknown_service = 'Unknown Service: {0}'.format(r.Service.Code)
try:
cost = r.NegotiatedRateCharges.TotalCharge.MonetaryValue
except AttributeError:
cost = r.TotalCharges.MonetaryValue
info.append({
'service': service_lookup.get(r.Service.Code, unknown_service),
'package': '',
'delivery_day': '',
'cost': cost
})
response = { 'status': self.reply.Response.ResponseStatus.Description, 'info': info }
return response
except suds.WebFault as e:
raise UPSError(e.fault, e.document)
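    # Hypothetical usage sketch (credential keys assumed from __init__ and
    # _add_security_header above; packages/addresses come from the caller):
    #
    #     ups = UPS({'username': '...', 'password': '...',
    #                'access_license': '...', 'shipper_number': '...'})
    #     quotes = ups.rate(packages, PACKAGES[0][0], shipper_addr, recipient_addr)
    #     for q in quotes['info']:
    #         print q['service'], q['cost']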
def validate(self, recipient):
client = self._get_client('XAV.wsdl')
#client = self.soapClient('XAV.wsdl')
#wsdl_url = self.wsdlURL('XAV.wsdl')
#client = SoapClient(wsdl = wsdl_url, trace=True)
#return client
self._add_security_header(client)
if not self.debug:
client.set_options(location='https://onlinetools.ups.com/webservices/XAV')
request = client.factory.create('ns0:RequestType')
request.RequestOption = 3 # Address Validation w/ Classification
address = client.factory.create('ns2:AddressKeyFormatType')
address.ConsigneeName = recipient.name
address.AddressLine = [ recipient.address1, recipient.address2 ]
address.PoliticalDivision2 = recipient.city
address.PoliticalDivision1 = recipient.state
address.PostcodePrimaryLow = recipient.zip
address.CountryCode = self._normalized_country_code(recipient.country)
try:
reply = client.service.ProcessXAV(request, AddressKeyFormat=address)
result = {}
result['candidates'] = list()
if hasattr(reply, 'Candidate'):
for c in reply.Candidate:
name = c.AddressKeyFormat.ConsigneeName if hasattr(c.AddressKeyFormat, 'ConsigneeName') else ''
a = Address(
name,
c.AddressKeyFormat.AddressLine[0],
c.AddressKeyFormat.PoliticalDivision2,
c.AddressKeyFormat.PoliticalDivision1,
c.AddressKeyFormat.PostcodePrimaryLow,
c.AddressKeyFormat.CountryCode)
if len(c.AddressKeyFormat.AddressLine) > 1:
a.address2 = c.AddressKeyFormat.AddressLine[1]
if a not in result['candidates']:
result['candidates'].append(a)
if hasattr(reply, 'AddressClassification'):
# Need some better names maybe
result['class_code'] = reply.AddressClassification.Code
result['class_description'] = reply.AddressClassification.Description
result['valid'] = hasattr(reply, 'ValidAddressIndicator')
result['ambiguous'] = hasattr(reply, 'AmbiguousAddressIndicator')
return result
except suds.WebFault as e:
raise UPSError(e.fault, e.document)
def label(self, packages, shipper_address, recipient_address, service, box_shape, validate_address, email_notifications=list(), create_commercial_invoice=False, customs_info=[], label_type=LABEL_TYPE[0][0]):
client = self._get_client('Ship.wsdl')
self._add_security_header(client)
if not self.debug:
client.set_options(location='https://onlinetools.ups.com/webservices/Ship')
request = client.factory.create('ns0:RequestType')
request.RequestOption = 'validate' if validate_address else 'nonvalidate'
create_reference_number = recipient_address.country in ( 'US', 'CA', 'PR' ) and shipper_address.country == recipient_address.country
delivery_confirmation = create_reference_number
shipment = self._create_shipment(client, packages, shipper_address, recipient_address, box_shape, create_reference_number=create_reference_number, can_add_delivery_confirmation=delivery_confirmation)
#apparently setting this to '' does not include it in SUDS output, so a space seems to do the trick
shipment.ShipmentRatingOptions.NegotiatedRatesIndicator = ' '
if not create_reference_number:
reference_number = client.factory.create('ns3:ReferenceNumberType')
reference_number.Value = packages[0].reference
shipment.ReferenceNumber.append(reference_number)
# Kinda bad hack for supporting delivery confirmation at the shipment level (as opposed
# to the package level)
package = packages[0]
if not delivery_confirmation and package.require_signature:
# delivery confirmation must be at least 2 (signature required) if we're going international
package.require_signature = package.require_signature if package.require_signature > 1 else 2
shipment.ShipmentServiceOptions.DeliveryConfirmation.DCISType = unicode(package.require_signature)
charge = client.factory.create('ns3:ShipmentChargeType')
charge2 = client.factory.create('ns3:ShipmentChargeType')
charge.Type = '01'
charge2.Type = '02'
charge.BillShipper.AccountNumber = self.credentials['shipper_number']
charge2.BillShipper.AccountNumber = self.credentials['shipper_number']
#Bill duties to shipper if this is an international shipment
if shipment.Shipper.Address.CountryCode != shipment.ShipTo.Address.CountryCode:
shipment.PaymentInformation.ShipmentCharge = [charge,charge2]
else:
shipment.PaymentInformation.ShipmentCharge = charge
shipment.Description = 'Shipment from %s to %s' % (shipper_address.name, recipient_address.name)
shipment.Description = shipment.Description[:50]
shipment.Service.Code = service
shipment.Shipper.AttentionName = shipper_address.name[:35] or shipper_address.company_name[:35]
shipment.Shipper.Phone.Number = shipper_address.phone
shipment.Shipper.EMailAddress = shipper_address.email
shipment.ShipTo.AttentionName = recipient_address.name[:35] or recipient_address.company_name[:35] or ''
shipment.ShipTo.Phone.Number = recipient_address.phone
shipment.ShipTo.EMailAddress = recipient_address.email
# Set the value of the shipment by adding up the value of the individual packages. If the packages don't
# have a value, set it to $100. UPS doesn't charge for insurance up to $100, so this gives maximum benefit
# without costing more.
if shipment.Shipper.Address.CountryCode == 'US' and shipment.ShipTo.Address.CountryCode in ( 'PR', 'CA' ):
shipment.InvoiceLineTotal.CurrencyCode = 'USD'
shipment.InvoiceLineTotal.MonetaryValue = sum([ p.value or 0 for p in packages]) or 100
for i, p in enumerate(shipment.Package):
p.Description = 'Package %d' % i
if email_notifications:
notification = client.factory.create('ns3:NotificationType')
notification.NotificationCode = 6 # Ship Notification
notification.EMail.EMailAddress = email_notifications
shipment.ShipmentServiceOptions.Notification.append(notification)
if create_commercial_invoice:
shipment.ShipmentServiceOptions.InternationalForms.FormType = '01'
shipment.ShipmentServiceOptions.InternationalForms.InvoiceNumber = packages[0].reference
shipment.ShipmentServiceOptions.InternationalForms.InvoiceDate = date.today().strftime('%Y%m%d')
shipment.ShipmentServiceOptions.InternationalForms.ReasonForExport = 'SALE'
shipment.ShipmentServiceOptions.InternationalForms.CurrencyCode = 'USD'
shipto_name = recipient_address.name[:35]
shipto_company = recipient_address.company_name[:35]
shipment.ShipmentServiceOptions.InternationalForms.Contacts.SoldTo.Name = shipto_company or shipto_name
shipment.ShipmentServiceOptions.InternationalForms.Contacts.SoldTo.AttentionName = shipto_name or shipto_company
shipment.ShipmentServiceOptions.InternationalForms.Contacts.SoldTo.Address.AddressLine = [ recipient_address.address1, recipient_address.address2 ]
shipment.ShipmentServiceOptions.InternationalForms.Contacts.SoldTo.Address.City = recipient_address.city
shipment.ShipmentServiceOptions.InternationalForms.Contacts.SoldTo.Address.PostalCode = recipient_address.zip
recipient_country = self._normalized_country_code(recipient_address.country)
shipment.ShipmentServiceOptions.InternationalForms.Contacts.SoldTo.Address.CountryCode = recipient_country
shipment.ShipmentServiceOptions.InternationalForms.Contacts.SoldTo.Phone.Number = recipient_address.phone
# Only add states if we're shipping to/from US, CA, or Ireland
if recipient_country in ( 'US', 'CA', 'IE' ):
shipment.ShipmentServiceOptions.InternationalForms.Contacts.SoldTo.Address.StateProvinceCode = recipient_address.state
for p in customs_info:
product = client.factory.create('ns2:ProductType')
product.Unit.UnitOfMeasurement.Code = 'PCS'
product.Unit.Value = p.value
product.Unit.Number = p.quantity
product.Description = p.description[:35]
product.OriginCountryCode = self._normalized_country_code(p.country)
                # check for optional commodity code
                try:
                    product.CommodityCode = p.commoditycode
                except AttributeError:
                    pass
shipment.ShipmentServiceOptions.InternationalForms.Product.append(product)
label = client.factory.create('ns3:LabelSpecificationType')
label.LabelImageFormat.Code = label_type
if label_type == LABEL_TYPE[1][0]:
label.LabelStockSize.Height = '6'
label.LabelStockSize.Width = '4'
label.HTTPUserAgent = 'Mozilla/4.5'
try:
self.reply = client.service.ProcessShipment(request, shipment, label)
results = self.reply.ShipmentResults
logger.debug(results)
response = {
'status': self.reply.Response.ResponseStatus.Description,
'shipments': list(),
'international_document': {
'description': None,
'pdf': None
}
}
try:
cost = results.NegotiatedRateCharges.TotalCharge.MonetaryValue
except AttributeError:
cost = results.ShipmentCharges.TotalCharges.MonetaryValue
for p in results.PackageResults:
response['shipments'].append({
'tracking_number': p.TrackingNumber,
'cost': cost,
'label': base64.b64decode(p.ShippingLabel.GraphicImage),
})
try:
response['international_document']['description'] = results.Form.Description
response['international_document']['pdf'] = base64.b64decode(results.Form.Image.GraphicImage)
except AttributeError as e:
pass
return response
except suds.WebFault as e:
            logger.debug(client.last_sent())
raise UPSError(e.fault, e.document)
|
1623872
|
from typing import Optional, Union, Callable
import numpy as np
from caput import memh5
from cora.util.cosmology import Cosmology
from cora.util import units, cubicspline as cs
from draco.core import containers
from ..util.nputil import FloatArrayLike
class InterpolatedFunction(memh5.BasicCont):
"""A container for interpolated 1D functions.
This is intended to allow saving to disk of functions which are expensive to
generate.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        # We also want to make sure this happens when an object is created
        # directly and not just via `from_file`
self._finish_setup()
def _finish_setup(self):
# Create the function cache dict
self._function_cache = {}
def get_function(self, name: str) -> Callable[[FloatArrayLike], FloatArrayLike]:
"""Get the named function.
Parameters
----------
name
The name of the function to return.
Returns
-------
function
"""
# Return immediately from cache if available
if name not in self._function_cache:
# Check if the underlying data is actually present
if name not in self:
raise ValueError(f"Function {name} unknown.")
dset = self[name]
if len(dset.attrs["axis"]) != 1:
raise RuntimeError("Can only return a single value.")
# Get the abscissa
axis = dset.attrs["axis"][0]
x = self.index_map[axis]
# Get the ordinate
f = dset[:]
interpolation_type = dset.attrs["type"]
data = np.dstack([x, f])[0]
if interpolation_type == "linear":
self._function_cache[name] = cs.Interpolater(data)
elif interpolation_type == "log":
self._function_cache[name] = cs.LogInterpolater(data)
elif interpolation_type == "sinh":
x_t = dset.attrs["x_t"]
f_t = dset.attrs["f_t"]
self._function_cache[name] = cs.SinhInterpolater(data, x_t, f_t)
else: # Unrecognized interpolation type
raise RuntimeError(
f"Unrecognized interpolation type {interpolation_type}"
)
return self._function_cache[name]
def add_function(
self, name: str, x: np.ndarray, f: np.ndarray, type: str = "linear", **kwargs
):
"""Add a function to the container.
Parameters
----------
name
The name of the function to add. This is used to retrieve it later using
`get_function`.
x
The abscissa.
f
The ordinate.
type
            The type of the interpolation. Valid options are "linear", "log" or
            "sinh". If the latter, kwargs accepts additional keys for the `x_t`
            and `f_t` parameters. See `sinh_interpolate` for details. By default
            "linear" interpolation is used.
"""
if name in self:
raise ValueError(f"Function {name} already exists.")
xname = f"x_{name}"
self.create_index_map(xname, x)
dset = self.create_dataset(name, data=f)
dset.attrs["axis"] = [xname]
dset.attrs["type"] = type
# Copy over any kwargs containing extra info for the interpolation
for key, val in kwargs.items():
dset.attrs[key] = val
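    # Hypothetical usage sketch (assumes an empty container can be constructed
    # directly; names are illustrative):
    #
    #     cont = InterpolatedFunction()
    #     cont.add_function("growth", x=z_samples, f=d_samples, type="linear")
    #     growth = cont.get_function("growth")
    #     growth(0.5)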
class CosmologyContainer(containers.ContainerBase):
"""A baseclass for a container that is referenced to a background Cosmology.
Parameters
----------
cosmology
An explicit cosmology instance or dict representation. If not set, the cosmology
*must* get set via `attrs_from`.
"""
def __init__(self, cosmology: Union[Cosmology, dict, None] = None, *args, **kwargs):
super().__init__(*args, **kwargs)
cosmo_dict = self._resolve_args(cosmology, **kwargs)
self.attrs["cosmology"] = cosmo_dict
@staticmethod
def _resolve_args(
cosmology: Union[Cosmology, dict, None] = None,
attrs_from: Optional[containers.ContainerBase] = None,
**kwargs,
):
"""Try and extract a Cosmology dict representation from the parameters.
Useful as subclasses sometimes need access *before* the full class is setup.
"""
# Insert the Cosmological parameters
if cosmology is None:
if attrs_from is not None and "cosmology" in attrs_from.attrs:
cosmology = attrs_from.attrs["cosmology"]
else:
raise ValueError("A cosmology must be supplied.")
elif not isinstance(cosmology, (Cosmology, dict)):
raise TypeError("cosmology argument must be a Cosmology instance.")
if isinstance(cosmology, Cosmology):
cosmology = cosmology.to_dict()
return cosmology
_cosmology_instance = None
@property
def cosmology(self):
"""The background cosmology."""
if self._cosmology_instance is None:
self._cosmology_instance = Cosmology(**self.attrs["cosmology"])
return self._cosmology_instance
class FZXContainer(CosmologyContainer):
"""Container with a comoving radial axis.
This can be specified either directly with a grid in comoving distance, or in
redshift, or 21 cm line frequency. One of these will be considered as defining
the primary axis, and implicitly defines the others. `freq` is the highest
priority, followed by `redshift` and finally the comoving distance `chi`.
Parameters
----------
freq
The radial axis given as 21cm line frequencies.
redshift
The radial axis given as a redshift.
chi
The radial axis given as a comoving distance in Mpc/h.
"""
# Chi is the only required axis, and must be used for any datasets
_axes = ("chi",)
def __init__(
self,
freq: Optional[np.ndarray] = None,
redshift: Optional[np.ndarray] = None,
*args,
**kwargs,
):
# Insert the Cosmological parameters
cosmology = Cosmology(**CosmologyContainer._resolve_args(**kwargs))
# If none of the high priority radial axes are set directly, see if any exist
# in an axes_from object
if freq is None and redshift is None and "axes_from" in kwargs:
if "freq" in kwargs["axes_from"].index_map:
freq = kwargs["axes_from"].index_map["freq"]
elif "redshift" in kwargs["axes_from"].index_map:
redshift = kwargs["axes_from"].index_map["redshift"]
# Go through the high priority axes, and if present generate the lower priority
# ones
if freq is not None:
redshift = units.nu21 / freq - 1.0
if redshift is not None:
kwargs["chi"] = cosmology.comoving_distance(redshift)
super().__init__(*args, **kwargs)
# Create the additional radial axes (if present) and determine the primary
# radial axis.
# NOTE: this must be done *after* the call to `super().__init__(...)` such that
# the container internals are defined
radial_axis = "chi"
if redshift is not None:
self.create_index_map("redshift", redshift)
radial_axis = "redshift"
if freq is not None:
self.create_index_map("freq", freq)
radial_axis = "freq"
# Set the cosmology and radial axis attributes
self.attrs["primary_radial_axis"] = radial_axis
@property
def chi(self):
"""The comoving distance to each radial slice in Mpc / h."""
return self.index_map["chi"]
@property
def redshift(self):
"""The redshift for each radial slice."""
# TODO: derive this one
if "redshift" not in self.index_map:
raise RuntimeError("Container does not have a redshift axis.")
return self.index_map["redshift"]
@property
def freq(self):
"""The 21cm line frequency for each radial slice."""
# TODO: maybe derive this one
if "freq" not in self.index_map:
raise RuntimeError("Container does not have a 21cm frequency axis.")
return self.index_map["freq"]
class MatterPowerSpectrum(CosmologyContainer, InterpolatedFunction):
"""A container to hold a matter power spectrum.
This object can evaluate a power spectrum at specified wavenumbers (in h / Mpc
units) and redshifts.
Parameters
----------
k
Wavenumbers the power spectrum samples are at (in h / Mpc).
ps
Power spectrum samples.
ps_redshift
The redshift the samples are calculated at. Default is z=0.
"""
def __init__(
self,
k: FloatArrayLike,
ps: FloatArrayLike,
*args,
ps_redshift: float = 0.0,
**kwargs,
):
# Initialise the base classes (which sets the cosmology etc)
super().__init__(*args, **kwargs)
# This shouldn't be necessary, but due to a bug in `draco` where ContainerBase
# does not correctly call its superconstructor we need to do this explicitly
self._finish_setup()
# Add the interpolated function
self.add_function("powerspectrum", k, ps, type="log")
self.attrs["ps_redshift"] = ps_redshift
def powerspectrum(
self, k: FloatArrayLike, z: FloatArrayLike = 0.0
) -> FloatArrayLike:
"""Calculate the power spectrum at given wavenumber and redshift.
Parameters
----------
k
The wavenumber (in h / Mpc) to get the power spectrum at.
z : optional
The redshift to calculate the power spectrum at (default z=0).
Returns
-------
ps
The power spectrum.
"""
c = self.cosmology
Dratio = c.growth_factor(z) / c.growth_factor(self._ps_redshift)
return self.get_function("powerspectrum")(k) * Dratio ** 2
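    # A worked example of the scaling above (illustrative numbers, not from
    # the source): with ps_redshift = 0 and a cosmology where
    # D(z=1) / D(0) ≈ 0.6, powerspectrum(k, z=1) returns roughly
    # 0.36 * P(k, 0), i.e. the stored z=0 spectrum rescaled by the squared
    # growth-factor ratio.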
def powerspectrum_at_z(
self, z: FloatArrayLike
) -> Callable[[FloatArrayLike], FloatArrayLike]:
"""Return a function which gives the power spectrum at fixed redshift.
Parameters
----------
z
The redshift to fix the power spectrum at.
Returns
-------
psfunc
A function which calculates the power spectrum at given wavenumbers.
"""
def _ps(k):
return self.powerspectrum(k, z)
return _ps
@property
def _ps_redshift(self):
return self.attrs["ps_redshift"]
class CorrelationFunction(CosmologyContainer, InterpolatedFunction):
"""A container to store correlation functions."""
# TODO: at the moment this has no special functionality, but should eventually
# provide specific access to the correlation functions as well as redshift scaling
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# This whole constructor shouldn't be necessary, but due to a bug in `draco`
# where ContainerBase does not correctly call its superconstructor we need to do
# this explicitly
self._finish_setup()
class InitialLSS(FZXContainer, containers.HealpixContainer):
"""Container for holding initial LSS fields used for simulation.
These fields are all implicitly the linear fields at redshift z=0.
"""
_dataset_spec = {
"delta": {
"axes": ["chi", "pixel"],
"dtype": np.float64,
"initialise": True,
"distributed": True,
"distributed_axis": "chi",
},
"phi": {
"axes": ["chi", "pixel"],
"dtype": np.float64,
"initialise": True,
"distributed": True,
"distributed_axis": "chi",
},
}
@property
def delta(self):
"""The linear density field at the initial redshift."""
return self.datasets["delta"]
@property
def phi(self):
r"""The potential field at the initial redshift.
This is not the actual gravitational potential, but the Lagrangian potential
defined by:
.. math:: \nabla^2 \phi = - \delta
This is related to the linear gravitational potential :math:`\phi_G` by:
        .. math:: \phi = \frac{2}{3 \mathcal{H}^2} \phi_G
"""
return self.datasets["phi"]
class BiasedLSS(FZXContainer, containers.HealpixContainer):
"""A biased large scale structure field.
Parameters
----------
lightcone : bool, optional
Is the field on the lightcone. If not set, default to True.
fixed_redshift : float, optional
If not on the lightcone what is the fixed redshift.
*args, **kwargs
Passed through to the superclasses.
"""
_dataset_spec = {
"delta": {
"axes": ["chi", "pixel"],
"dtype": np.float64,
"initialise": True,
"distributed": True,
"distributed_axis": "chi",
}
}
def __init__(
self,
*args,
lightcone: Optional[bool] = None,
fixed_redshift: Optional[float] = None,
**kwargs,
):
super().__init__(*args, **kwargs)
# Set lightcone taking into account it might have been set already by an
# `attrs_from` argument to the super constructor
if lightcone is not None:
self.attrs["lightcone"] = lightcone
elif "lightcone" not in self.attrs:
self.attrs["lightcone"] = True
if fixed_redshift is not None:
self.attrs["fixed_redshift"] = fixed_redshift
@property
def lightcone(self) -> bool:
"""Is the field on the lightcone or at fixed redshift."""
return bool(self.attrs["lightcone"])
@property
def fixed_redshift(self) -> Optional[float]:
"""The fixed redshift of the field."""
if "fixed_redshift" in self.attrs:
return float(self.attrs["fixed_redshift"])
return None
@property
def delta(self) -> np.ndarray:
r"""The biased field.
As standard this is interpreted as a density contrast, i.e. the field is
.. math:: \delta = (\rho - \bar{\rho}) / \bar{\rho}
Returns
-------
delta
The biased field as a [redshift, pixel] array.
"""
return self.datasets["delta"]
|
1623910
|
from tardis.utilities.simulators.periodicvalue import PeriodicValue
from unittest import TestCase
import yaml
class TestPeriodicValue(TestCase):
def setUp(self) -> None:
self.simulator = PeriodicValue(period=3600, amplitude=0.5, offset=0.5, phase=0)
def test_get_value(self):
self.assertIsInstance(self.simulator.get_value(), float)
self.assertLessEqual(self.simulator.get_value(), 1.0)
def test_construction_by_yaml(self):
periodic_value = yaml.safe_load(
"""!PeriodicValue
period: 3600
amplitude: 0.5
offset: 0.5
phase: 0"""
)
self.assertIsInstance(periodic_value.get_value(), float)
|
1623930
|
import importlib
import time
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.spatial.distance import jensenshannon
from scipy.stats import gaussian_kde
from ..core.prior import PriorDict
from ..core.sampler.base_sampler import SamplerError
from ..core.utils import logger, reflect
from ..gw.source import PARAMETER_SETS
class ProposalCycle(object):
def __init__(self, proposal_list):
self.proposal_list = proposal_list
self.weights = [prop.weight for prop in self.proposal_list]
self.normalized_weights = [w / sum(self.weights) for w in self.weights]
self.weighted_proposal_list = [
np.random.choice(self.proposal_list, p=self.normalized_weights)
for _ in range(10 * int(1 / min(self.normalized_weights)))
]
self.nproposals = len(self.weighted_proposal_list)
self._position = 0
@property
def position(self):
return self._position
@position.setter
def position(self, position):
self._position = np.mod(position, self.nproposals)
def get_proposal(self):
prop = self.weighted_proposal_list[self._position]
self.position += 1
return prop
def __str__(self):
string = "ProposalCycle:\n"
for prop in self.proposal_list:
string += f" {prop}\n"
return string
class BaseProposal(metaclass=ABCMeta):
    _accepted = 0
    _rejected = 0
def __init__(self, priors, weight=1, subset=None):
self._str_attrs = ["acceptance_ratio", "n"]
self.parameters = priors.non_fixed_keys
self.weight = weight
self.subset = subset
# Restrict to a subset
if self.subset is not None:
self.parameters = [p for p in self.parameters if p in subset]
self._str_attrs.append("parameters")
self.ndim = len(self.parameters)
self.prior_boundary_dict = {key: priors[key].boundary for key in priors}
self.prior_minimum_dict = {key: np.max(priors[key].minimum) for key in priors}
self.prior_maximum_dict = {key: np.min(priors[key].maximum) for key in priors}
self.prior_width_dict = {key: np.max(priors[key].width) for key in priors}
@property
def accepted(self):
return self._accepted
@accepted.setter
def accepted(self, accepted):
self._accepted = accepted
@property
def rejected(self):
return self._rejected
@rejected.setter
def rejected(self, rejected):
self._rejected = rejected
@property
def acceptance_ratio(self):
if self.n == 0:
return np.nan
else:
return self.accepted / self.n
@property
def n(self):
return self.accepted + self.rejected
def __str__(self):
msg = [f"{type(self).__name__}("]
for attr in self._str_attrs:
val = getattr(self, attr, "N/A")
if isinstance(val, (float, int)):
val = f"{val:1.2g}"
msg.append(f"{attr}:{val},")
return "".join(msg) + ")"
def apply_boundaries(self, point):
for key in self.parameters:
boundary = self.prior_boundary_dict[key]
if boundary is None:
continue
elif boundary == "periodic":
point[key] = self.apply_periodic_boundary(key, point[key])
elif boundary == "reflective":
point[key] = self.apply_reflective_boundary(key, point[key])
else:
raise SamplerError(f"Boundary {boundary} not implemented")
return point
def apply_periodic_boundary(self, key, val):
minimum = self.prior_minimum_dict[key]
width = self.prior_width_dict[key]
return minimum + np.mod(val - minimum, width)
def apply_reflective_boundary(self, key, val):
minimum = self.prior_minimum_dict[key]
width = self.prior_width_dict[key]
val_normalised = (val - minimum) / width
val_normalised_reflected = reflect(np.array(val_normalised))
return minimum + width * val_normalised_reflected
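    # Worked example of the two boundary maps (illustrative numbers): for a
    # periodic parameter with minimum 0 and width 2*pi, a proposed value of
    # 7.0 wraps to mod(7.0, 2*pi) ≈ 0.717; for a reflective parameter on
    # [0, 1], a proposed value of 1.2 reflects back to 0.8.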
def __call__(self, chain):
sample, log_factor = self.propose(chain)
sample = self.apply_boundaries(sample)
return sample, log_factor
@abstractmethod
def propose(self, chain):
"""Propose a new point
This method must be overwritten by implemented proposals. The propose
method is called by __call__, then boundaries applied, before returning
the proposed point.
Parameters
----------
chain: bilby.core.sampler.bilby_mcmc.chain.Chain
The chain to use for the proposal
Returns
-------
proposal: bilby.core.sampler.bilby_mcmc.Sample
The proposed point
log_factor: float
The natural-log of the additional factor entering the acceptance
probability to ensure detailed balance. For symmetric proposals,
a value of 0 should be returned.
"""
pass
@staticmethod
def check_dependencies(warn=True):
"""Check the dependencies required to use the proposal
Parameters
----------
warn: bool
If true, print a warning
Returns
-------
check: bool
If true, dependencies exist
"""
return True
class FixedGaussianProposal(BaseProposal):
"""A proposal using a fixed non-correlated Gaussian distribution
Parameters
----------
priors: bilby.core.prior.PriorDict
The set of priors
weight: float
Weighting factor
subset: list
A list of keys for which to restrict the proposal to (other parameters
will be kept fixed)
sigma: float
The scaling factor for proposals
"""
def __init__(self, priors, weight=1, subset=None, sigma=0.01):
super(FixedGaussianProposal, self).__init__(priors, weight, subset)
self.sigmas = {}
for key in self.parameters:
if np.isinf(self.prior_width_dict[key]):
self.prior_width_dict[key] = 1
if isinstance(sigma, float):
self.sigmas[key] = sigma
elif isinstance(sigma, dict):
self.sigmas[key] = sigma[key]
else:
raise SamplerError("FixedGaussianProposal sigma not understood")
def propose(self, chain):
sample = chain.current_sample
for key in self.parameters:
sigma = self.prior_width_dict[key] * self.sigmas[key]
sample[key] += sigma * np.random.randn()
log_factor = 0
return sample, log_factor
class AdaptiveGaussianProposal(BaseProposal):
def __init__(
self,
priors,
weight=1,
subset=None,
sigma=1,
scale_init=1e0,
stop=1e5,
target_facc=0.234,
):
super(AdaptiveGaussianProposal, self).__init__(priors, weight, subset)
self.sigmas = {}
for key in self.parameters:
if np.isinf(self.prior_width_dict[key]):
self.prior_width_dict[key] = 1
if isinstance(sigma, (float, int)):
self.sigmas[key] = sigma
elif isinstance(sigma, dict):
self.sigmas[key] = sigma[key]
else:
raise SamplerError("AdaptiveGaussianProposal sigma not understood")
self.target_facc = target_facc
self.scale = scale_init
self.stop = stop
self._str_attrs.append("scale")
self._last_accepted = 0
def propose(self, chain):
sample = chain.current_sample
self.update_scale(chain)
if np.random.random() < 1e-3:
factor = 1e1
elif np.random.random() < 1e-4:
factor = 1e2
else:
factor = 1
for key in self.parameters:
sigma = factor * self.scale * self.prior_width_dict[key] * self.sigmas[key]
sample[key] += sigma * np.random.randn()
log_factor = 0
return sample, log_factor
def update_scale(self, chain):
"""
The adaptation of the scale follows (35)/(36) of https://arxiv.org/abs/1409.7215
"""
if 0 < self.n < self.stop:
s_gamma = (self.stop / self.n) ** 0.2 - 1
if self.accepted > self._last_accepted:
self.scale += s_gamma * (1 - self.target_facc) / 100
else:
self.scale -= s_gamma * self.target_facc / 100
self._last_accepted = self.accepted
self.scale = max(self.scale, 1 / self.stop)
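# A worked example of the adaptation above: with the defaults stop=1e5 and
# target_facc=0.234, at n=1e3 steps s_gamma = (1e5 / 1e3)**0.2 - 1 ~ 1.51, so
# an accepted step grows the scale by ~1.51 * (1 - 0.234) / 100 ~ 0.012 and a
# rejected step shrinks it by ~1.51 * 0.234 / 100 ~ 0.0035.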
class DifferentialEvolutionProposal(BaseProposal):
"""A proposal using Differential Evolution
Parameters
----------
priors: bilby.core.prior.PriorDict
The set of priors
weight: float
Weighting factor
    subset: list
        A list of keys to which to restrict the proposal (other parameters
        will be kept fixed)
mode_hopping_frac: float
The fraction of proposals which use 'mode hopping'
"""
def __init__(self, priors, weight=1, subset=None, mode_hopping_frac=0.5):
super(DifferentialEvolutionProposal, self).__init__(priors, weight, subset)
self.mode_hopping_frac = mode_hopping_frac
def propose(self, chain):
theta = chain.current_sample
theta1 = chain.random_sample
theta2 = chain.random_sample
if np.random.rand() > self.mode_hopping_frac:
gamma = 1
else:
# Base jump size
gamma = np.random.normal(0, 2.38 / np.sqrt(2 * self.ndim))
# Scale uniformly in log between 0.1 and 10 times
gamma *= np.exp(np.log(0.1) + np.log(100.0) * np.random.rand())
for key in self.parameters:
theta[key] += gamma * (theta2[key] - theta1[key])
log_factor = 0
return theta, log_factor
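# Note on the move above: gamma = 1 applies the full difference vector of two
# past samples, which can carry the chain directly between well-separated
# modes; otherwise gamma is drawn from a zero-mean normal with the classic
# 2.38 / sqrt(2 * ndim) width and then log-uniformly rescaled between 0.1x
# and 10x.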
class UniformProposal(BaseProposal):
"""A proposal using uniform draws from the prior support
Parameters
----------
priors: bilby.core.prior.PriorDict
The set of priors
weight: float
Weighting factor
    subset: list
        A list of keys to which to restrict the proposal (other parameters
        will be kept fixed)
"""
def __init__(self, priors, weight=1, subset=None):
super(UniformProposal, self).__init__(priors, weight, subset)
def propose(self, chain):
sample = chain.current_sample
for key in self.parameters:
sample[key] = np.random.uniform(
self.prior_minimum_dict[key], self.prior_maximum_dict[key]
)
log_factor = 0
return sample, log_factor
class PriorProposal(BaseProposal):
"""A proposal using draws from the prior distribution
Note: for priors which use interpolation, this proposal can be problematic
as the proposal gets pickled in multiprocessing. Either, use serial
processing (npool=1) or fall back to a UniformProposal.
Parameters
----------
priors: bilby.core.prior.PriorDict
The set of priors
weight: float
Weighting factor
    subset: list
        A list of keys to which to restrict the proposal (other parameters
        will be kept fixed)
"""
def __init__(self, priors, weight=1, subset=None):
super(PriorProposal, self).__init__(priors, weight, subset)
self.priors = PriorDict({key: priors[key] for key in self.parameters})
def propose(self, chain):
sample = chain.current_sample
lnp_theta = self.priors.ln_prob(sample.as_dict(self.parameters))
prior_sample = self.priors.sample()
for key in self.parameters:
sample[key] = prior_sample[key]
lnp_thetaprime = self.priors.ln_prob(sample.as_dict(self.parameters))
log_factor = lnp_theta - lnp_thetaprime
return sample, log_factor
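# The log_factor above is the Hastings correction for an asymmetric proposal:
# here q(theta'|theta) = p(theta'), so
# ln q(theta|theta') - ln q(theta'|theta) = ln p(theta) - ln p(theta').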
_density_estimate_doc = """ A proposal using draws from a {estimator} fit to the chain
Parameters
----------
priors: bilby.core.prior.PriorDict
The set of priors
weight: float
Weighting factor
subset: list
    A list of keys to which to restrict the proposal (other parameters
    will be kept fixed)
first_fit: int
    The number of steps to take before the first fit of the density estimate
fit_multiplier: int
The multiplier for the next fit
nsamples_for_density: int
    The number of samples to use when fitting the density estimate
fallback: bilby.core.sampler.bilby_mcmc.proposal.BaseProposal
A proposal to use before first training
scale_fits: int
A scaling factor for both the initial and subsequent updates
"""
class DensityEstimateProposal(BaseProposal):
def __init__(
self,
priors,
weight=1,
subset=None,
first_fit=1000,
fit_multiplier=10,
nsamples_for_density=1000,
fallback=AdaptiveGaussianProposal,
scale_fits=1,
):
super(DensityEstimateProposal, self).__init__(priors, weight, subset)
self.nsamples_for_density = nsamples_for_density
self.fallback = fallback(priors, weight, subset)
self.fit_multiplier = fit_multiplier * scale_fits
# Counters
self.steps_since_refit = 0
self.next_refit_time = first_fit * scale_fits
self.density = None
self.trained = False
self._str_attrs.append("trained")
density_name = None
__doc__ = _density_estimate_doc.format(estimator=density_name)
def _fit(self, dataset):
raise NotImplementedError
def _evaluate(self, point):
raise NotImplementedError
def _sample(self, nsamples=None):
raise NotImplementedError
def refit(self, chain):
current_density = self.density
start = time.time()
# Draw two (possibly overlapping) data sets for training and verification
dataset = []
verification_dataset = []
nsamples_for_density = min(chain.position, self.nsamples_for_density)
for _ in range(nsamples_for_density):
s = chain.random_sample
dataset.append([s[key] for key in self.parameters])
s = chain.random_sample
verification_dataset.append([s[key] for key in self.parameters])
# Fit the density
self.density = self._fit(np.array(dataset).T)
# Print a log message
took = time.time() - start
logger.info(
f"{self.density_name} construction at {self.steps_since_refit} finished"
f" for length {chain.position} chain, took {took:0.2f}s."
f" Current accept-ratio={self.acceptance_ratio:0.2f}"
)
# Reset counters for next training
self.steps_since_refit = 0
self.next_refit_time *= self.fit_multiplier
# Verify training hasn't overconstrained
new_draws = np.atleast_2d(self._sample(1000))
verification_dataset = np.array(verification_dataset)
fail_parameters = []
for ii, key in enumerate(self.parameters):
std_draws = np.std(new_draws[:, ii])
std_verification = np.std(verification_dataset[:, ii])
if std_draws < 0.1 * std_verification:
fail_parameters.append(key)
if len(fail_parameters) > 0:
logger.info(
f"{self.density_name} construction failed verification and is discarded"
)
self.density = current_density
else:
self.trained = True
def propose(self, chain):
self.steps_since_refit += 1
# Check if we refit
testA = self.steps_since_refit >= self.next_refit_time
if testA:
self.refit(chain)
# If KDE is yet to be fitted, use the fallback
if self.trained is False:
return self.fallback.propose(chain)
        # Grab the current sample and its probability under the fitted density
theta = chain.current_sample
ln_p_theta = self._evaluate(list(theta.as_dict(self.parameters).values()))
# Sample and update theta
new_sample = self._sample(1)
for key, val in zip(self.parameters, new_sample):
theta[key] = val
# Calculate the probability of the new sample and the KDE
ln_p_thetaprime = self._evaluate(list(theta.as_dict(self.parameters).values()))
# Calculate Q(theta|theta') / Q(theta'|theta)
log_factor = ln_p_theta - ln_p_thetaprime
return theta, log_factor
class KDEProposal(DensityEstimateProposal):
density_name = "Gaussian KDE"
__doc__ = _density_estimate_doc.format(estimator=density_name)
def _fit(self, dataset):
return gaussian_kde(dataset)
def _evaluate(self, point):
return self.density.logpdf(point)[0]
def _sample(self, nsamples=None):
return np.atleast_1d(np.squeeze(self.density.resample(nsamples)))
class GMMProposal(DensityEstimateProposal):
density_name = "Gaussian Mixture Model"
__doc__ = _density_estimate_doc.format(estimator=density_name)
def _fit(self, dataset):
from sklearn.mixture import GaussianMixture
density = GaussianMixture(n_components=10)
density.fit(dataset.T)
return density
def _evaluate(self, point):
return np.squeeze(self.density.score_samples(np.atleast_2d(point)))
def _sample(self, nsamples=None):
return np.squeeze(self.density.sample(n_samples=nsamples)[0])
def check_dependencies(warn=True):
if importlib.util.find_spec("sklearn") is None:
if warn:
logger.warning(
"Unable to utilise GMMProposal as sklearn is not installed"
)
return False
else:
return True
class NormalizingFlowProposal(DensityEstimateProposal):
density_name = "Normalizing Flow"
__doc__ = _density_estimate_doc.format(estimator=density_name) + (
"""
js_factor: float
The factor to use in determining the max-JS factor to terminate
training.
max_training_epochs: int
        The maximum number of training steps to take
"""
)
def __init__(
self,
priors,
weight=1,
subset=None,
first_fit=1000,
fit_multiplier=10,
max_training_epochs=1000,
scale_fits=1,
nsamples_for_density=1000,
js_factor=10,
fallback=AdaptiveGaussianProposal,
):
super(NormalizingFlowProposal, self).__init__(
priors=priors,
weight=weight,
subset=subset,
first_fit=first_fit,
fit_multiplier=fit_multiplier,
nsamples_for_density=nsamples_for_density,
fallback=fallback,
scale_fits=scale_fits,
)
self.setup_flow()
self.setup_optimizer()
self.max_training_epochs = max_training_epochs
self.js_factor = js_factor
def setup_flow(self):
if self.ndim < 3:
self.setup_basic_flow()
else:
self.setup_NVP_flow()
def setup_NVP_flow(self):
from .flows import NVPFlow
self.flow = NVPFlow(
features=self.ndim,
hidden_features=self.ndim * 2,
num_layers=2,
num_blocks_per_layer=2,
batch_norm_between_layers=True,
batch_norm_within_layers=True,
)
def setup_basic_flow(self):
from .flows import BasicFlow
self.flow = BasicFlow(features=self.ndim)
def setup_optimizer(self):
from torch import optim
self.optimizer = optim.Adam(self.flow.parameters())
def get_training_data(self, chain):
training_data = []
nsamples_for_density = min(chain.position, self.nsamples_for_density)
for _ in range(nsamples_for_density):
s = chain.random_sample
training_data.append([s[key] for key in self.parameters])
return training_data
def _calculate_js(self, validation_samples, training_samples_draw):
# Calculate the maximum JS between the validation and draw
max_js = 0
for i in range(self.ndim):
A = validation_samples[:, i]
B = training_samples_draw[:, i]
xmin = np.min([np.min(A), np.min(B)])
xmax = np.min([np.max(A), np.max(B)])
xval = np.linspace(xmin, xmax, 100)
Apdf = gaussian_kde(A)(xval)
Bpdf = gaussian_kde(B)(xval)
js = jensenshannon(Apdf, Bpdf)
max_js = max(max_js, js)
return np.power(max_js, 2)
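    # With the defaults js_factor=10 and nsamples_for_density=1000, training
    # below terminates once the largest per-dimension JS divergence between
    # the validation samples and draws from the flow falls under
    # 10 / 1000 = 0.01.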
def train(self, chain):
logger.info("Starting NF training")
import torch
start = time.time()
training_samples = np.array(self.get_training_data(chain))
validation_samples = np.array(self.get_training_data(chain))
training_tensor = torch.tensor(training_samples, dtype=torch.float32)
max_js_threshold = self.js_factor / self.nsamples_for_density
for epoch in range(1, self.max_training_epochs + 1):
self.optimizer.zero_grad()
loss = -self.flow.log_prob(inputs=training_tensor).mean()
loss.backward()
self.optimizer.step()
# Draw from the current flow
self.flow.eval()
training_samples_draw = (
self.flow.sample(self.nsamples_for_density).detach().numpy()
)
self.flow.train()
if np.mod(epoch, 10) == 0:
max_js_bits = self._calculate_js(
validation_samples, training_samples_draw
)
if max_js_bits < max_js_threshold:
logger.info(
f"Training complete after {epoch} steps, "
f"max_js_bits={max_js_bits:0.5f}<{max_js_threshold}"
)
break
took = time.time() - start
logger.info(
f"Flow training step ({self.steps_since_refit}) finished"
f" for length {chain.position} chain, took {took:0.2f}s."
f" Current accept-ratio={self.acceptance_ratio:0.2f}"
)
self.steps_since_refit = 0
self.next_refit_time *= self.fit_multiplier
self.trained = True
def propose(self, chain):
import torch
self.steps_since_refit += 1
theta = chain.current_sample
# Check if we retrain the NF
testA = self.steps_since_refit >= self.next_refit_time
if testA:
self.train(chain)
if self.trained is False:
return self.fallback.propose(chain)
self.flow.eval()
theta_prime_T = self.flow.sample(1)
logp_theta_prime = self.flow.log_prob(theta_prime_T).detach().numpy()[0]
theta_T = torch.tensor(
np.atleast_2d([theta[key] for key in self.parameters]), dtype=torch.float32
)
logp_theta = self.flow.log_prob(theta_T).detach().numpy()[0]
log_factor = logp_theta - logp_theta_prime
flow_sample_values = np.atleast_1d(np.squeeze(theta_prime_T.detach().numpy()))
for key, val in zip(self.parameters, flow_sample_values):
theta[key] = val
return theta, float(log_factor)
def check_dependencies(warn=True):
if importlib.util.find_spec("nflows") is None:
if warn:
logger.warning(
"Unable to utilise NormalizingFlowProposal as nflows is not installed"
)
return False
else:
return True
class FixedJumpProposal(BaseProposal):
def __init__(self, priors, jumps=1, subset=None, weight=1, scale=1e-4):
super(FixedJumpProposal, self).__init__(priors, weight, subset)
self.scale = scale
if isinstance(jumps, (int, float)):
self.jumps = {key: jumps for key in self.parameters}
elif isinstance(jumps, dict):
self.jumps = jumps
else:
raise SamplerError("jumps not understood")
def propose(self, chain):
sample = chain.current_sample
for key, jump in self.jumps.items():
sign = np.random.randint(2) * 2 - 1
sample[key] += sign * jump + self.epsilon * self.prior_width_dict[key]
log_factor = 0
return sample, log_factor
@property
def epsilon(self):
return self.scale * np.random.normal()
class BaseGravitationalWaveTransientProposal(BaseProposal):
def __init__(self, priors, weight=1):
super(BaseGravitationalWaveTransientProposal, self).__init__(
priors, weight=weight
)
if "phase" in priors:
self.phase_key = "phase"
elif "delta_phase" in priors:
self.phase_key = "delta_phase"
else:
self.phase_key = None
def get_cos_theta_jn(self, sample):
if "cos_theta_jn" in sample.parameter_keys:
cos_theta_jn = sample["cos_theta_jn"]
elif "theta_jn" in sample.parameter_keys:
cos_theta_jn = np.cos(sample["theta_jn"])
        else:
            raise SamplerError("Sample must contain either cos_theta_jn or theta_jn")
return cos_theta_jn
def get_phase(self, sample):
if "phase" in sample.parameter_keys:
return sample["phase"]
elif "delta_phase" in sample.parameter_keys:
cos_theta_jn = self.get_cos_theta_jn(sample)
delta_phase = sample["delta_phase"]
psi = sample["psi"]
phase = np.mod(delta_phase - np.sign(cos_theta_jn) * psi, 2 * np.pi)
        else:
            raise SamplerError("Sample must contain either phase or delta_phase")
return phase
def get_delta_phase(self, phase, sample):
cos_theta_jn = self.get_cos_theta_jn(sample)
psi = sample["psi"]
delta_phase = phase + np.sign(cos_theta_jn) * psi
return delta_phase
class CorrelatedPolarisationPhaseJump(BaseGravitationalWaveTransientProposal):
def __init__(self, priors, weight=1):
super(CorrelatedPolarisationPhaseJump, self).__init__(priors, weight=weight)
def propose(self, chain):
sample = chain.current_sample
phase = self.get_phase(sample)
alpha = sample["psi"] + phase
beta = sample["psi"] - phase
draw = np.random.random()
if draw < 0.5:
alpha = 3.0 * np.pi * np.random.random()
else:
beta = 3.0 * np.pi * np.random.random() - 2 * np.pi
# Update
sample["psi"] = (alpha + beta) * 0.5
phase = (alpha - beta) * 0.5
if self.phase_key == "delta_phase":
sample["delta_phase"] = self.get_delta_phase(phase, sample)
else:
sample["phase"] = phase
log_factor = 0
return sample, log_factor
class PhaseReversalProposal(BaseGravitationalWaveTransientProposal):
def __init__(self, priors, weight=1, fuzz=True, fuzz_sigma=1e-1):
super(PhaseReversalProposal, self).__init__(priors, weight)
self.fuzz = fuzz
self.fuzz_sigma = fuzz_sigma
if self.phase_key is None:
raise SamplerError(
f"{type(self).__name__} initialised without a phase prior"
)
def propose(self, chain):
sample = chain.current_sample
phase = sample[self.phase_key]
sample[self.phase_key] = np.mod(phase + np.pi + self.epsilon, 2 * np.pi)
log_factor = 0
return sample, log_factor
@property
def epsilon(self):
if self.fuzz:
return np.random.normal(0, self.fuzz_sigma)
else:
return 0
class PolarisationReversalProposal(PhaseReversalProposal):
def __init__(self, priors, weight=1, fuzz=True, fuzz_sigma=1e-3):
super(PolarisationReversalProposal, self).__init__(
priors, weight, fuzz, fuzz_sigma
)
self.fuzz = fuzz
def propose(self, chain):
sample = chain.current_sample
psi = sample["psi"]
sample["psi"] = np.mod(psi + np.pi / 2 + self.epsilon, np.pi)
log_factor = 0
return sample, log_factor
class PhasePolarisationReversalProposal(PhaseReversalProposal):
def __init__(self, priors, weight=1, fuzz=True, fuzz_sigma=1e-1):
super(PhasePolarisationReversalProposal, self).__init__(
priors, weight, fuzz, fuzz_sigma
)
self.fuzz = fuzz
def propose(self, chain):
sample = chain.current_sample
sample[self.phase_key] = np.mod(
sample[self.phase_key] + np.pi + self.epsilon, 2 * np.pi
)
sample["psi"] = np.mod(sample["psi"] + np.pi / 2 + self.epsilon, np.pi)
log_factor = 0
return sample, log_factor
class StretchProposal(BaseProposal):
"""The Goodman & Weare (2010) Stretch proposal for an MCMC chain
Implementation of the Stretch proposal using a sample drawn from the chain.
We assume the form of g(z) from Equation (9) of [1].
References
----------
[1] Goodman & Weare (2010)
https://ui.adsabs.harvard.edu/abs/2010CAMCS...5...65G/abstract
"""
def __init__(self, priors, weight=1, subset=None, scale=2):
super(StretchProposal, self).__init__(priors, weight, subset)
self.scale = scale
def propose(self, chain):
sample = chain.current_sample
# Draw a random sample
rand = chain.random_sample
return _stretch_move(sample, rand, self.scale, self.ndim, self.parameters)
def _stretch_move(sample, complement, scale, ndim, parameters):
# Draw z
u = np.random.rand()
z = (u * (scale - 1) + 1) ** 2 / scale
log_factor = (ndim - 1) * np.log(z)
for key in parameters:
sample[key] = complement[key] + (sample[key] - complement[key]) * z
return sample, log_factor
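# A worked example of the stretch move above: with scale a=2, a draw u=0.5
# gives z = (0.5 * (2 - 1) + 1)**2 / 2 = 1.125, so each parameter moves to
# complement + 1.125 * (sample - complement), and for ndim=4 the factor is
# log_factor = 3 * ln(1.125) ~ 0.35.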
class EnsembleProposal(BaseProposal):
""" Base EnsembleProposal class for ensemble-based swap proposals """
def __init__(self, priors, weight=1):
super(EnsembleProposal, self).__init__(priors, weight)
def __call__(self, chain, chain_complement):
sample, log_factor = self.propose(chain, chain_complement)
sample = self.apply_boundaries(sample)
return sample, log_factor
class EnsembleStretch(EnsembleProposal):
"""The Goodman & Weare (2010) Stretch proposal for an Ensemble
Implementation of the Stretch proposal using a sample drawn from complement.
We assume the form of g(z) from Equation (9) of [1].
References
----------
[1] Goodman & Weare (2010)
https://ui.adsabs.harvard.edu/abs/2010CAMCS...5...65G/abstract
"""
def __init__(self, priors, weight=1, scale=2):
super(EnsembleStretch, self).__init__(priors, weight)
self.scale = scale
def propose(self, chain, chain_complement):
sample = chain.current_sample
        complement = chain_complement[
            np.random.randint(len(chain_complement))
        ].current_sample
        return _stretch_move(
            sample, complement, self.scale, self.ndim, self.parameters
        )
def get_default_ensemble_proposal_cycle(priors):
return ProposalCycle([EnsembleStretch(priors)])
def get_proposal_cycle(string, priors, L1steps=1, warn=True):
big_weight = 10
small_weight = 5
tiny_weight = 0.1
if "gwA" in string:
# Parameters for learning proposals
learning_kwargs = dict(
first_fit=1000, nsamples_for_density=10000, fit_multiplier=2
)
plist = [
AdaptiveGaussianProposal(priors, weight=small_weight),
DifferentialEvolutionProposal(priors, weight=small_weight),
]
if GMMProposal.check_dependencies(warn=warn) is False:
raise SamplerError(
"the gwA proposal_cycle required the GMMProposal dependencies"
)
if priors.intrinsic:
intrinsic = PARAMETER_SETS["intrinsic"]
plist += [
AdaptiveGaussianProposal(priors, weight=big_weight, subset=intrinsic),
DifferentialEvolutionProposal(
priors, weight=big_weight, subset=intrinsic
),
KDEProposal(
priors, weight=big_weight, subset=intrinsic, **learning_kwargs
),
GMMProposal(
priors, weight=big_weight, subset=intrinsic, **learning_kwargs
),
]
if priors.extrinsic:
extrinsic = PARAMETER_SETS["extrinsic"]
plist += [
AdaptiveGaussianProposal(priors, weight=small_weight, subset=extrinsic),
DifferentialEvolutionProposal(
priors, weight=big_weight, subset=extrinsic
),
KDEProposal(
priors, weight=big_weight, subset=extrinsic, **learning_kwargs
),
GMMProposal(
priors, weight=big_weight, subset=extrinsic, **learning_kwargs
),
]
if priors.mass:
mass = PARAMETER_SETS["mass"]
plist += [
DifferentialEvolutionProposal(priors, weight=small_weight, subset=mass),
GMMProposal(
priors, weight=small_weight, subset=mass, **learning_kwargs
),
]
if priors.spin:
spin = PARAMETER_SETS["spin"]
plist += [
DifferentialEvolutionProposal(priors, weight=small_weight, subset=spin),
GMMProposal(
priors, weight=small_weight, subset=spin, **learning_kwargs
),
]
if priors.precession:
measured_spin = ["chi_1", "chi_2", "a_1", "a_2", "chi_1_in_plane"]
plist += [
AdaptiveGaussianProposal(
priors, weight=small_weight, subset=measured_spin
),
]
if priors.mass and priors.spin:
primary_spin_and_q = PARAMETER_SETS["primary_spin_and_q"]
plist += [
DifferentialEvolutionProposal(
priors, weight=small_weight, subset=primary_spin_and_q
),
]
if getattr(priors, "tidal", False):
tidal = PARAMETER_SETS["tidal"]
plist += [
DifferentialEvolutionProposal(
priors, weight=small_weight, subset=tidal
),
PriorProposal(priors, weight=small_weight, subset=tidal),
]
if priors.phase:
plist += [
PhaseReversalProposal(priors, weight=tiny_weight),
]
if priors.phase and "psi" in priors.non_fixed_keys:
plist += [
CorrelatedPolarisationPhaseJump(priors, weight=tiny_weight),
PhasePolarisationReversalProposal(priors, weight=tiny_weight),
]
for key in ["time_jitter", "psi", "phi_12", "tilt_2", "lambda_1", "lambda_2"]:
if key in priors.non_fixed_keys:
plist.append(PriorProposal(priors, subset=[key], weight=tiny_weight))
if "chi_1_in_plane" in priors and "chi_2_in_plane" in priors:
in_plane = ["chi_1_in_plane", "chi_2_in_plane", "phi_12"]
plist.append(UniformProposal(priors, subset=in_plane, weight=tiny_weight))
if any("recalib_" in key for key in priors):
calibration = [key for key in priors if "recalib_" in key]
plist.append(PriorProposal(priors, subset=calibration, weight=small_weight))
else:
plist = [
AdaptiveGaussianProposal(priors, weight=big_weight),
DifferentialEvolutionProposal(priors, weight=big_weight),
UniformProposal(priors, weight=tiny_weight),
KDEProposal(priors, weight=big_weight, scale_fits=L1steps),
]
if GMMProposal.check_dependencies(warn=warn):
plist.append(GMMProposal(priors, weight=big_weight, scale_fits=L1steps))
if NormalizingFlowProposal.check_dependencies(warn=warn):
plist.append(
NormalizingFlowProposal(priors, weight=big_weight, scale_fits=L1steps)
)
plist = remove_proposals_using_string(plist, string)
return ProposalCycle(plist)
def remove_proposals_using_string(plist, string):
mapping = dict(
DE=DifferentialEvolutionProposal,
AG=AdaptiveGaussianProposal,
ST=StretchProposal,
FG=FixedGaussianProposal,
NF=NormalizingFlowProposal,
KD=KDEProposal,
GM=GMMProposal,
PR=PriorProposal,
UN=UniformProposal,
)
for element in string.split("no")[1:]:
if element in mapping:
plist = [p for p in plist if isinstance(p, mapping[element]) is False]
return plist
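# A usage sketch of the string parsing above (hypothetical cycle string):
# "gwAnoGMnoNF" selects the gwA cycle and then strips the GMMProposal and
# NormalizingFlowProposal entries, since "gwAnoGMnoNF".split("no")[1:]
# yields ["GM", "NF"].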
|
1623936
|
from datetime import datetime
from aiogithub import objects
from aiogithub.objects.response import BaseResponseObject
from aiogithub.utils import return_key
class ReviewComment(BaseResponseObject):
_url = 'repos/{login}/{repo}/pulls/comments/{id}'
@staticmethod
def _get_key_mappings():
return {
'user': objects.User
}
@property
@return_key
def id(self) -> int:
pass
@property
@return_key
def diff_hunk(self) -> str:
pass
@property
@return_key
def path(self) -> str:
pass
@property
@return_key
def position(self) -> int:
pass
@property
@return_key
def original_position(self) -> int:
pass
@property
@return_key
def commit_id(self) -> str:
pass
@property
@return_key
def original_commit_id(self) -> str:
pass
@property
@return_key
def user(self) -> 'objects.User':
pass
@property
@return_key
def body(self) -> str:
pass
@property
@return_key
def created_at(self) -> datetime:
pass
@property
@return_key
def updated_at(self) -> datetime:
pass
@property
@return_key
def html_url(self) -> str:
pass
async def get_pull_request(self) -> 'objects.PullRequest':
return await self._get_related_object('pull_request_url',
objects.PullRequest)
|
1623959
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import CCA
from sklearn.metrics import confusion_matrix
import functools
def find_correlation_cca_method1(signal, reference_signals, n_components=2):
r"""
Perform canonical correlation analysis (CCA)
Reference: https://github.com/aaravindravi/Brain-computer-interfaces/blob/master/notebook_12_class_cca.ipynb
Args:
signal : ndarray, shape (channel,time)
Input signal in time domain
reference_signals : ndarray, shape (len(flick_freq),2*num_harmonics,time)
Required sinusoidal reference templates corresponding to the flicker frequency for SSVEP classification
n_components : int, default: 2
number of components to keep (for sklearn.cross_decomposition.CCA)
Returns:
result : array, size: len(flick_freq)
            Canonical correlation score for each reference signal
Dependencies:
CCA : sklearn.cross_decomposition.CCA
np : numpy package
"""
cca = CCA(n_components)
corr = np.zeros(n_components)
result = np.zeros(reference_signals.shape[0])
for freq_idx in range(0, reference_signals.shape[0]):
cca_x = signal.T
cca_y = np.squeeze(reference_signals[freq_idx, :, :]).T
cca.fit(cca_x, cca_y)
a, b = cca.transform(cca_x, cca_y)
for ind_val in range(0, n_components):
corr[ind_val] = np.corrcoef(a[:, ind_val], b[:, ind_val])[0, 1]
result[freq_idx] = np.max(corr)
return result
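# A minimal sketch of building the sinusoidal reference templates this
# function expects (sampling rate, duration, and flicker frequencies are
# illustrative; `eeg_trial` is an assumed (channel, time) array):
# >>> fs = 250
# >>> t = np.arange(0, 1, 1 / fs)
# >>> freqs, num_harmonics = [10, 12, 15], 2
# >>> reference_signals = np.array(
# ...     [[f(2 * np.pi * h * fq * t)
# ...       for h in range(1, num_harmonics + 1) for f in (np.sin, np.cos)]
# ...      for fq in freqs])  # shape (3, 2 * num_harmonics, fs)
# >>> result = find_correlation_cca_method1(eeg_trial, reference_signals)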
def calculate_cca(dat_x, dat_y, time_axis=-2):
r"""
Calculate the Canonical Correlation Analysis (CCA).
This method calculates the canonical correlation coefficient and
corresponding weights which maximize a correlation coefficient
between linear combinations of the two specified multivariable
signals.
Reference: https://github.com/venthur/wyrm/blob/master/wyrm/processing.py
Reference: http://en.wikipedia.org/wiki/Canonical_correlation
Args:
dat_x : continuous Data object
these data should have the same length on the time axis.
dat_y : continuous Data object
these data should have the same length on the time axis.
time_axis : int, optional
the index of the time axis in ``dat_x`` and ``dat_y``.
Returns:
rho : float
the canonical correlation coefficient.
w_x, w_y : 1d array
the weights for mapping from the specified multivariable signals
to canonical variables.
Raises:
AssertionError :
If:
                * ``dat_x`` or ``dat_y`` is not a 2d continuous Data object
                * the lengths of ``dat_x`` and ``dat_y`` differ on the
                  ``time_axis``
Dependencies:
functools : functools package
np : numpy package
"""
assert (len(dat_x.data.shape) == len(dat_y.data.shape) == 2 and
dat_x.data.shape[time_axis] == dat_y.data.shape[time_axis])
if time_axis == 0 or time_axis == -2:
x = dat_x.copy()
y = dat_y.copy()
else:
x = dat_x.T.copy()
y = dat_y.T.copy()
    # calculate covariances and their inverses
x -= x.mean(axis=0)
y -= y.mean(axis=0)
n = x.shape[0]
c_xx = np.dot(x.T, x) / n
c_yy = np.dot(y.T, y) / n
c_xy = np.dot(x.T, y) / n
c_yx = np.dot(y.T, x) / n
ic_xx = np.linalg.pinv(c_xx)
ic_yy = np.linalg.pinv(c_yy)
# calculate w_x
w, v = np.linalg.eig(functools.reduce(np.dot, [ic_xx, c_xy, ic_yy, c_yx]))
w_x = v[:, np.argmax(w)].real
w_x = w_x / np.sqrt(functools.reduce(np.dot, [w_x.T, c_xx, w_x]))
# calculate w_y
w, v = np.linalg.eig(functools.reduce(np.dot, [ic_yy, c_yx, ic_xx, c_xy]))
w_y = v[:, np.argmax(w)].real
w_y = w_y / np.sqrt(functools.reduce(np.dot, [w_y.T, c_yy, w_y]))
# calculate rho
rho = abs(functools.reduce(np.dot, [w_x.T, c_xy, w_y]))
return rho, w_x, w_y
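# A quick sanity sketch (plain 2d ndarrays also pass the assert above, since
# ndarray.data exposes the array shape): when y is a noiseless linear mixture
# of x's columns, the leading canonical correlation is ~1.
# >>> x = np.random.randn(1000, 3)
# >>> rho, w_x, w_y = calculate_cca(x, x @ np.random.randn(3, 2))
# >>> rho  # ~1.0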
def find_correlation_cca_method2(signal, reference_signals):
r"""
Perform canonical correlation analysis (CCA)
Args:
signal : ndarray, shape (channel,time)
Input signal in time domain
reference_signals : ndarray, shape (len(flick_freq),2*num_harmonics,time)
Required sinusoidal reference templates corresponding to the flicker frequency for SSVEP classification
Returns:
result : array, size: len(flick_freq)
            Canonical correlation score for each reference signal
Dependencies:
np : numpy package
calculate_cca : function
"""
result = np.zeros(reference_signals.shape[0])
for freq_idx in range(0, reference_signals.shape[0]):
dat_y = np.squeeze(reference_signals[freq_idx, :, :]).T
rho, w_x, w_y = calculate_cca(signal.T, dat_y)
result[freq_idx] = rho
return result
def perform_cca(signal, reference_frequencies, labels=None):
r"""
Perform canonical correlation analysis (CCA)
Args:
signal : ndarray, shape (trial,channel,time) or (trial,channel,segment,time)
Input signal in time domain
reference_frequencies : ndarray, shape (len(flick_freq),2*num_harmonics,time)
Required sinusoidal reference templates corresponding to the flicker frequency for SSVEP classification
        labels : ndarray, shape (trial,)
            True labels of `signal`. The class indices must match the order of `reference_frequencies`
Returns:
        predicted_class : ndarray, shape (trial,) or (trial*segment,)
            Predicted class per trial (or per segment) according to reference_frequencies
        accuracy : double
            If `labels` are given, `accuracy` denotes the classification accuracy
Dependencies:
confusion_matrix : sklearn.metrics.confusion_matrix
find_correlation_cca_method1 : function
find_correlation_cca_method2 : function
"""
assert (len(signal.shape) == 3 or len(signal.shape) == 4), "signal shape must be 3 or 4 dimension"
actual_class = []
predicted_class = []
accuracy = None
for trial in range(0, signal.shape[0]):
if len(signal.shape) == 3:
if labels is not None:
actual_class.append(labels[trial])
tmp_signal = signal[trial, :, :]
result = find_correlation_cca_method2(tmp_signal, reference_frequencies)
predicted_class.append(np.argmax(result))
if len(signal.shape) == 4:
for segment in range(0, signal.shape[2]):
if labels is not None:
actual_class.append(labels[trial])
tmp_signal = signal[trial, :, segment, :]
result = find_correlation_cca_method2(tmp_signal, reference_frequencies)
predicted_class.append(np.argmax(result))
actual_class = np.array(actual_class)
predicted_class = np.array(predicted_class)
if labels is not None:
# creating a confusion matrix of true versus predicted classification labels
c_mat = confusion_matrix(actual_class, predicted_class)
# computing the accuracy from the confusion matrix
accuracy = np.divide(np.trace(c_mat), np.sum(np.sum(c_mat)))
return predicted_class, accuracy
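# A minimal end-to-end sketch (shapes are illustrative): for `signal` with
# shape (trial, channel, time), e.g. (20, 8, 250), and reference templates
# built as in the sketch above,
# >>> predicted, acc = perform_cca(signal, reference_signals, labels=labels)
# gives one predicted class index per trial and, since labels are supplied,
# the accuracy computed from the confusion matrix.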
|
1623981
|
import argparse
import tensorflow as tf
import json
import numpy as np
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
import sys
from scipy import stats
import time
import h5py
import matplotlib.pyplot as plt
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.dirname(BASE_DIR))
sys.path.append(os.path.join(BASE_DIR, '../../'))
sys.path.append(os.path.join(BASE_DIR, '../../utils'))
sys.path.append(os.path.join(BASE_DIR, '../../models'))
import provider
from utils.test_utils import *
from models import model
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=str, default="3", help='GPU to use [default: 3]')
parser.add_argument('--verbose', action='store_true', help='if specified, output color-coded segmentation obj files')
parser.add_argument('--input_list', type=str, default='/media/hdd2/data/pointnet/stanfordindoor/test_hdf5_file_list5.txt', help='Validation data list')
parser.add_argument('--restore_dir', type=str, default='checkpoint/stanford_ins_seg51', help='Directory that stores all training logs and trained models')
FLAGS = parser.parse_args()
# DEFAULT SETTINGS
os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu
PRETRAINED_MODEL_PATH = os.path.join(FLAGS.restore_dir,'trained_models/')
RESTORE_DIR = FLAGS.restore_dir
gpu_to_use = 0
OUTPUT_DIR = os.path.join(FLAGS.restore_dir, 'test_results')
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
os.makedirs(os.path.join(OUTPUT_DIR, 'predicted_masks'))
GT_DIR = os.path.join(FLAGS.restore_dir, 'test_gt')
if not os.path.exists(GT_DIR):
os.makedirs(GT_DIR)
os.makedirs(os.path.join(GT_DIR, 'predicted_masks'))
output_verbose = FLAGS.verbose # If true, output all color-coded segmentation obj files
label_bin = np.loadtxt(os.path.join(RESTORE_DIR, 'pergroup_thres.txt'))
min_num_pts_in_group = np.loadtxt(os.path.join(RESTORE_DIR, 'mingroupsize.txt'))
# MAIN SCRIPT
POINT_NUM = 4096 # the max number of points in the all testing data shapes
BATCH_SIZE = 1
NUM_GROUPS = 50
NUM_CATEGORY = 13
TESTING_FILE_LISTFILE = FLAGS.input_list
test_file_list = provider.getDataFiles(TESTING_FILE_LISTFILE)
len_pts_files = len(test_file_list)
def predict():
is_training = False
with tf.device('/gpu:' + str(gpu_to_use)):
is_training_ph = tf.placeholder(tf.bool, shape=())
pointclouds_ph, ptsseglabel_ph, ptsgroup_label_ph, _, _, _ = \
model.placeholder_inputs(BATCH_SIZE, POINT_NUM, NUM_GROUPS, NUM_CATEGORY)
net_output = model.get_model(pointclouds_ph, is_training_ph, group_cate_num=NUM_CATEGORY)
        group_mat_label = tf.matmul(ptsgroup_label_ph, tf.transpose(ptsgroup_label_ph, perm=[0, 2, 1]))  # BxNxN: entry (i, j) is 1 if points i and j share a group
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
with tf.Session(config=config) as sess:
ckptstate = tf.train.get_checkpoint_state(PRETRAINED_MODEL_PATH)
if ckptstate is not None:
LOAD_MODEL_FILE = os.path.join(PRETRAINED_MODEL_PATH,os.path.basename(ckptstate.model_checkpoint_path))
saver.restore(sess, LOAD_MODEL_FILE)
print("Model loaded in file: %s" % LOAD_MODEL_FILE)
else:
print("Fail to load modelfile: %s" % PRETRAINED_MODEL_PATH)
for shape_idx in range(len_pts_files):
cur_train_filename = test_file_list[shape_idx]
if not os.path.exists(cur_train_filename):
continue
cur_data, cur_group, _, cur_seg = provider.loadDataFile_with_groupseglabel_stanfordindoor(cur_train_filename)
seg_output = np.zeros_like(cur_seg)
segrefine_output = np.zeros_like(cur_seg)
group_output = np.zeros_like(cur_group)
            conf_output = np.zeros_like(cur_group).astype(float)
pts_label_one_hot, pts_label_mask = model.convert_seg_to_one_hot(cur_seg)
pts_group_label, _ = model.convert_groupandcate_to_one_hot(cur_group)
num_data = cur_data.shape[0]
gap = 5e-3
volume_num = int(1. / gap)+1
volume = -1* np.ones([volume_num,volume_num,volume_num]).astype(np.int32)
volume_seg = -1* np.ones([volume_num,volume_num,volume_num, NUM_CATEGORY]).astype(np.int32)
intersections = np.zeros(NUM_CATEGORY)
unions = np.zeros(NUM_CATEGORY)
print('[%d / %d] Block Number: %d' % (shape_idx, len_pts_files, num_data))
print('Loading train file %s' % (cur_train_filename))
flag = True
for j in range(num_data):
pts = cur_data[j,...]
feed_dict = {
pointclouds_ph: np.expand_dims(pts,0),
ptsseglabel_ph: np.expand_dims(pts_label_one_hot[j,...],0),
ptsgroup_label_ph: np.expand_dims(pts_group_label[j,...],0),
is_training_ph: is_training,
}
pts_corr_val0, pred_confidence_val0, ptsclassification_val0, pts_corr_label_val0 = \
sess.run([net_output['simmat'],
net_output['conf'],
net_output['semseg'],
group_mat_label],
feed_dict=feed_dict)
seg = cur_seg[j,...]
ins = cur_group[j,...]
pts_corr_val = np.squeeze(pts_corr_val0[0]) #NxG
pred_confidence_val = np.squeeze(pred_confidence_val0[0])
ptsclassification_val = np.argmax(np.squeeze(ptsclassification_val0[0]),axis=1)
seg = np.squeeze(seg)
# print label_bin
try:
groupids_block, refineseg, group_seg = GroupMerging_old(pts_corr_val, pred_confidence_val, ptsclassification_val, label_bin) # yolo_to_groupt(pts_corr_val, pts_corr_label_val0[0], seg,t=5)
groupids = BlockMerging(volume, volume_seg, pts[:,6:], groupids_block.astype(np.int32), group_seg, gap)
seg_output[j,:] = ptsclassification_val
segrefine_output[j,:] = refineseg
group_output[j,:] = groupids
conf_output[j,:] = pred_confidence_val
###### Generate Results for Evaluation
basefilename = os.path.basename(cur_train_filename).split('.')[-2]
scene_fn = os.path.join(OUTPUT_DIR, '%s.txt' % basefilename)
f_scene = open(scene_fn, 'w')
scene_gt_fn = os.path.join(GT_DIR, '%s.txt' % basefilename)
group_pred = group_output.reshape(-1)
seg_pred = seg_output.reshape(-1)
conf = conf_output.reshape(-1)
pts = cur_data.reshape([-1, 9])
# filtering
x = (pts[:, 6] / gap).astype(np.int32)
y = (pts[:, 7] / gap).astype(np.int32)
z = (pts[:, 8] / gap).astype(np.int32)
for i in range(group_pred.shape[0]):
if volume[x[i], y[i], z[i]] != -1:
group_pred[i] = volume[x[i], y[i], z[i]]
un = np.unique(group_pred)
pts_in_pred = [[] for itmp in range(NUM_CATEGORY)]
group_pred_final = -1 * np.ones_like(group_pred)
grouppred_cnt = 0
for ig, g in enumerate(un): #each object in prediction
if g == -1:
continue
obj_fn = "predicted_masks/%s_%d.txt" % (basefilename, ig)
tmp = (group_pred == g)
sem_seg_g = int(stats.mode(seg_pred[tmp])[0])
if np.sum(tmp) > 0.25 * min_num_pts_in_group[sem_seg_g]:
pts_in_pred[sem_seg_g] += [tmp]
group_pred_final[tmp] = grouppred_cnt
conf_obj = np.mean(conf[tmp])
grouppred_cnt += 1
f_scene.write("%s %d %f\n" % (obj_fn, sem_seg_g, conf_obj))
                            np.savetxt(os.path.join(OUTPUT_DIR, obj_fn), tmp.astype(int), fmt='%d')
seg_gt = cur_seg.reshape(-1)
group_gt = cur_group.reshape(-1)
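                # Pack the ground truth into a single integer id per point:
                # semantic label in the thousands digit, instance id below
                # (assumes fewer than 1000 instances per semantic class).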
groupid_gt = seg_gt * 1000 + group_gt
np.savetxt(scene_gt_fn, groupid_gt.astype(np.int64), fmt='%d')
f_scene.close()
                if output_verbose:
                    output_color_point_cloud(pts[:, 6:], seg_pred.astype(np.int32),
                                             os.path.join(OUTPUT_DIR, '%s_segpred.obj' % basefilename))
                    output_color_point_cloud(pts[:, 6:], group_pred_final.astype(np.int32),
                                             os.path.join(OUTPUT_DIR, '%s_grouppred.obj' % basefilename))
with tf.Graph().as_default():
predict()
|